/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <asm/uaccess.h>

#include "util.h"

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

void shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_tot = 0;
	ipc_init_ids(&shm_ids(ns));
}
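/*
 * Illustrative sketch (not part of this file): the three limits initialised
 * in shm_init_ns() above are the per-namespace values behind the
 * kernel.shmmax, kernel.shmall and kernel.shmmni sysctls.  From userspace
 * they can simply be read out of procfs, e.g.:
 *
 *	FILE *f = fopen("/proc/sys/kernel/shmmax", "r");
 *	unsigned long long max;
 *	if (f && fscanf(f, "%llu", &max) == 1)
 *		printf("SHMMAX = %llu bytes\n", max);
 */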
/*
 * Called with shm_ids.rw_mutex (writer) and the shp structure locked.
 * Only shm_ids.rw_mutex remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;
	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		shp->shm_perm.key = IPC_PRIVATE;
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
}
#endif

void __init shm_init(void)
{
	shm_init_ns(&init_ipc_ns);
	ipc_init_proc_interface("sysvipc/shm",
				" key shmid perms size cpid lpid nattch uid gid cuid cgid atime dtime ctime\n",
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rw_mutex
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_lock_check(struct ipc_namespace *ns,
						int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return (struct shmid_kernel *)ipcp;

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_atim = get_seconds();
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_nattch++;
	shm_unlock(shp);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rw_mutex (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shp->shm_file))
		shmem_lock(shp->shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(shp->shm_file->f_path.dentry->d_inode->i_size,
						shp->mlock_user);
	fput(shp->shm_file);
	security_shm_free(shp);
	ipc_rcu_putref(shp);
}
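/*
 * Illustrative sketch (not part of this file): the SHM_DEST handling in
 * do_shm_rmid()/shm_destroy() above is what makes the common userspace
 * "remove right after creation" idiom safe.  A segment removed while still
 * attached is only marked for destruction (and its key is reset), and it is
 * actually freed when the last attach goes away:
 *
 *	void *p = shmat(id, NULL, 0);
 *	shmctl(id, IPC_RMID, NULL);	// segment marked SHM_DEST
 *	// ... use p ...
 *	shmdt(p);			// last detach triggers shm_destroy()
 */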
/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file * file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rw_mutex);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);
	BUG_ON(IS_ERR(shp));
	shp->shm_lprid = task_tgid_vnr(current);
	shp->shm_dtim = get_seconds();
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);
}

static int shm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vma, vmf);
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;
	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif

static int shm_mmap(struct file * file, struct vm_area_struct * vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	ret = sfd->file->f_op->mmap(sfd->file, vma);
	if (ret != 0)
		return ret;
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	BUG_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	shm_open(vma);

	return ret;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, datasync);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);
	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
#ifndef CONFIG_MMU
	.get_unmapped_area	= shm_get_unmapped_area,
#endif
};

static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
};

int is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}
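/*
 * Illustrative sketch (not part of this file): segments created with
 * SHM_HUGETLB end up with shm_file_operations_huge, which is what
 * is_file_shm_hugepages() above detects.  A userspace request might look
 * like the following (this assumes huge pages have been reserved, e.g. via
 * /proc/sys/vm/nr_hugepages, and that the caller is permitted to use them):
 *
 *	int id = shmget(IPC_PRIVATE, 2 * 1024 * 1024,
 *			SHM_HUGETLB | IPC_CREAT | 0600);
 *	if (id < 0)
 *		perror("shmget(SHM_HUGETLB)");
 */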
static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rw_mutex held as a writer.
 */

static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	int numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file * file;
	char name[13];
	int id;
	int acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = ipc_rcu_alloc(sizeof(*shp));
	if (!shp)
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(shp);
	if (error) {
		ipc_rcu_putref(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, size, acctflag,
					&shp->mlock_user, HUGETLB_SHMFS_INODE);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	id = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (id < 0) {
		error = id;
		goto no_id;
	}

	shp->shm_cprid = task_tgid_vnr(current);
	shp->shm_lprid = 0;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = get_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file->f_dentry->d_inode->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;
	shm_unlock(shp);
	return error;

no_id:
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	security_shm_free(shp);
	ipc_rcu_putref(shp);
	return error;
}

/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_security(struct kern_ipc_perm *ipcp, int shmflg)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	return security_shm_associate(shp, shmflg);
}
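/*
 * Illustrative sketch (not part of this file): the creation path through
 * newseg() above corresponds to a plain userspace shmget() call.  Key, size
 * and mode below are arbitrary example values:
 *
 *	key_t key = ftok("/tmp/example", 'A');
 *	int id = shmget(key, 4096, IPC_CREAT | IPC_EXCL | 0600);
 *	if (id < 0)
 *		perror("shmget");
 */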
/*
 * Called with shm_ids.rw_mutex and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	struct ipc_namespace *ns;
	struct ipc_ops shm_ops;
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_ops.getnew = newseg;
	shm_ops.associate = shm_security;
	shm_ops.more_checks = shm_more_checks;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
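/*
 * Illustrative sketch (not part of this file): the copy helpers above back
 * shmctl(IPC_STAT), which userspace typically uses to read back segment
 * metadata such as the size and attach count:
 *
 *	struct shmid_ds ds;
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		printf("size=%zu nattch=%lu\n",
 *		       (size_t)ds.shm_segsz, (unsigned long)ds.shm_nattch);
 */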
/*
 * Called with shm_ids.rw_mutex held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;
		struct inode *inode;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		inode = shp->shm_file->f_path.dentry->d_inode;

		if (is_file_hugepages(shp->shm_file)) {
			struct address_space *mapping = inode->i_mapping;
			struct hstate *h = hstate_file(shp->shm_file);
			*rss += pages_per_huge_page(h) * mapping->nrpages;
		} else {
#ifdef CONFIG_SHMEM
			struct shmem_inode_info *info = SHMEM_I(inode);
			spin_lock(&info->lock);
			*rss += inode->i_mapping->nrpages;
			*swp += info->swapped;
			spin_unlock(&info->lock);
#else
			*rss += inode->i_mapping->nrpages;
#endif
		}

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rw_mutex
 * to be held in write mode.
 * NOTE: no locks must be held, the rw_mutex is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid_ds __user *buf, int version)
{
	struct kern_ipc_perm *ipcp;
	struct shmid64_ds shmid64;
	struct shmid_kernel *shp;
	int err;

	if (cmd == IPC_SET) {
		if (copy_shmid_from_user(&shmid64, buf, version))
			return -EFAULT;
	}

	ipcp = ipcctl_pre_down(&shm_ids(ns), shmid, cmd, &shmid64.shm_perm, 0);
	if (IS_ERR(ipcp))
		return PTR_ERR(ipcp);

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(shp, cmd);
	if (err)
		goto out_unlock;
	switch (cmd) {
	case IPC_RMID:
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_update_perm(&shmid64.shm_perm, ipcp);
		shp->shm_ctim = get_seconds();
		break;
	default:
		err = -EINVAL;
	}
out_unlock:
	shm_unlock(shp);
out_up:
	up_write(&shm_ids(ns).rw_mutex);
	return err;
}
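/*
 * Illustrative sketch (not part of this file): shmctl_down() above is the
 * backend for the two "write" commands, IPC_SET and IPC_RMID.  For example,
 * tightening the mode bits of an existing segment from userspace:
 *
 *	struct shmid_ds ds;
 *	shmctl(id, IPC_STAT, &ds);	// fetch the current ipc_perm
 *	ds.shm_perm.mode = 0600;
 *	shmctl(id, IPC_SET, &ds);	// owner/creator or CAP_SYS_ADMIN only
 */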
SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	struct shmid_kernel *shp;
	int err, version;
	struct ipc_namespace *ns;

	if (cmd < 0 || shmid < 0) {
		err = -EINVAL;
		goto out;
	}

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) { /* replace with proc interface ? */
	case IPC_INFO:
	{
		struct shminfo64 shminfo;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shminfo, 0, sizeof(shminfo));
		shminfo.shmmni = shminfo.shmseg = ns->shm_ctlmni;
		shminfo.shmmax = ns->shm_ctlmax;
		shminfo.shmall = ns->shm_ctlall;

		shminfo.shmmin = SHMMIN;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			return -EFAULT;

		down_read(&shm_ids(ns).rw_mutex);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);

		if (err < 0)
			err = 0;
		goto out;
	}
	case SHM_INFO:
	{
		struct shm_info shm_info;

		err = security_shm_shmctl(NULL, cmd);
		if (err)
			return err;

		memset(&shm_info, 0, sizeof(shm_info));
		down_read(&shm_ids(ns).rw_mutex);
		shm_info.used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info.shm_rss, &shm_info.shm_swp);
		shm_info.shm_tot = ns->shm_tot;
		shm_info.swap_attempts = 0;
		shm_info.swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rw_mutex);
		if (copy_to_user(buf, &shm_info, sizeof(shm_info))) {
			err = -EFAULT;
			goto out;
		}

		err = err < 0 ? 0 : err;
		goto out;
	}
	case SHM_STAT:
	case IPC_STAT:
	{
		struct shmid64_ds tbuf;
		int result;

		if (cmd == SHM_STAT) {
			shp = shm_lock(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = shp->shm_perm.id;
		} else {
			shp = shm_lock_check(ns, shmid);
			if (IS_ERR(shp)) {
				err = PTR_ERR(shp);
				goto out;
			}
			result = 0;
		}
		err = -EACCES;
		if (ipcperms(&shp->shm_perm, S_IRUGO))
			goto out_unlock;
		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;
		memset(&tbuf, 0, sizeof(tbuf));
		kernel_to_ipc64_perm(&shp->shm_perm, &tbuf.shm_perm);
		tbuf.shm_segsz	= shp->shm_segsz;
		tbuf.shm_atime	= shp->shm_atim;
		tbuf.shm_dtime	= shp->shm_dtim;
		tbuf.shm_ctime	= shp->shm_ctim;
		tbuf.shm_cpid	= shp->shm_cprid;
		tbuf.shm_lpid	= shp->shm_lprid;
		tbuf.shm_nattch	= shp->shm_nattch;
		shm_unlock(shp);
		if (copy_shmid_to_user(buf, &tbuf, version))
			err = -EFAULT;
		else
			err = result;
		goto out;
	}
	case SHM_LOCK:
	case SHM_UNLOCK:
	{
		struct file *uninitialized_var(shm_file);

		lru_add_drain_all();  /* drain pagevecs to lru lists */

		shp = shm_lock_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out;
		}

		audit_ipc_obj(&(shp->shm_perm));

		if (!capable(CAP_IPC_LOCK)) {
			uid_t euid = current_euid();
			err = -EPERM;
			if (euid != shp->shm_perm.uid &&
			    euid != shp->shm_perm.cuid)
				goto out_unlock;
			if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK))
				goto out_unlock;
		}

		err = security_shm_shmctl(shp, cmd);
		if (err)
			goto out_unlock;

		if (cmd == SHM_LOCK) {
			struct user_struct *user = current_user();
			if (!is_file_hugepages(shp->shm_file)) {
				err = shmem_lock(shp->shm_file, 1, user);
				if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
					shp->shm_perm.mode |= SHM_LOCKED;
					shp->mlock_user = user;
				}
			}
		} else if (!is_file_hugepages(shp->shm_file)) {
			shmem_lock(shp->shm_file, 0, shp->mlock_user);
			shp->shm_perm.mode &= ~SHM_LOCKED;
			shp->mlock_user = NULL;
		}
		shm_unlock(shp);
		goto out;
	}
	case IPC_RMID:
	case IPC_SET:
		err = shmctl_down(ns, shmid, cmd, buf, version);
		return err;
	default:
		return -EINVAL;
	}

out_unlock:
	shm_unlock(shp);
out:
	return err;
}
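/*
 * Illustrative sketch (not part of this file): the SHM_LOCK path above pins
 * a tmpfs-backed segment in memory.  Without CAP_IPC_LOCK the caller must
 * own the segment and have a non-zero RLIMIT_MEMLOCK:
 *
 *	if (shmctl(id, SHM_LOCK, NULL) != 0)
 *		perror("SHM_LOCK");	// e.g. EPERM when the checks above fail
 */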
/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg, ulong *raddr)
{
	struct shmid_kernel *shp;
	unsigned long addr;
	unsigned long size;
	struct file * file;
	int err;
	unsigned long flags;
	unsigned long prot;
	int acc_mode;
	unsigned long user_addr;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;

	err = -EINVAL;
	if (shmid < 0)
		goto out;
	else if ((addr = (ulong)shmaddr)) {
		if (addr & (SHMLBA-1)) {
			if (shmflg & SHM_RND)
				addr &= ~(SHMLBA-1);	   /* round down */
			else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}
		flags = MAP_SHARED | MAP_FIXED;
	} else {
		if ((shmflg & SHM_REMAP))
			goto out;

		flags = MAP_SHARED;
	}

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	shp = shm_lock_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out;
	}

	err = -EACCES;
	if (ipcperms(&shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(shp, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(path.dentry->d_inode);
	shm_unlock(shp);

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd)
		goto out_put_dentry;

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	if (!file)
		goto out_free;

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	sfd->file = shp->shm_file;
	sfd->vm_ops = NULL;

	down_write(&current->mm->mmap_sem);
	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
		/*
		 * If shm segment goes below stack, make sure there is some
		 * space left for the stack to grow (at least 4 pages).
		 */
		if (addr < current->mm->start_stack &&
		    addr > current->mm->start_stack - size - PAGE_SIZE * 5)
			goto invalid;
	}

	user_addr = do_mmap(file, addr, size, prot, flags, 0);
	*raddr = user_addr;
	err = 0;
	if (IS_ERR_VALUE(user_addr))
		err = (long)user_addr;
invalid:
	up_write(&current->mm->mmap_sem);

	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rw_mutex);
	shp = shm_lock(ns, shmid);
	BUG_ON(IS_ERR(shp));
	shp->shm_nattch--;
	if (shp->shm_nattch == 0 &&
	    shp->shm_perm.mode & SHM_DEST)
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rw_mutex);

out:
	return err;

out_unlock:
	shm_unlock(shp);
	goto out;

out_free:
	kfree(sfd);
out_put_dentry:
	path_put(&path);
	goto out_nattch;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
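/*
 * Illustrative sketch (not part of this file): do_shmat()/sys_shmat() above
 * are reached from a plain attach/detach sequence in userspace:
 *
 *	void *p = shmat(id, NULL, 0);		// kernel picks the address
 *	if (p == (void *)-1)
 *		perror("shmat");
 *	memcpy(p, "hello", 6);
 *	shmdt(p);				// handled by sys_shmdt() below
 */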
/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	down_write(&mm->mmap_sem);

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {


			size = vma->vm_file->f_path.dentry->d_inode->i_size;
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff)

			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		vma = next;
	}

#else /* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given */
	retval = -EINVAL;
	if (vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct shmid_kernel *shp = it;

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	return seq_printf(s,
			  "%10d %10d %4o " SIZE_SPEC " %5u %5u "
			  "%5lu %5u %5u %5u %5u %10lu %10lu %10lu\n",
			  shp->shm_perm.key,
			  shp->shm_perm.id,
			  shp->shm_perm.mode,
			  shp->shm_segsz,
			  shp->shm_cprid,
			  shp->shm_lprid,
			  shp->shm_nattch,
			  shp->shm_perm.uid,
			  shp->shm_perm.gid,
			  shp->shm_perm.cuid,
			  shp->shm_perm.cgid,
			  shp->shm_atim,
			  shp->shm_dtim,
			  shp->shm_ctim);
}
#endif
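/*
 * Illustrative sketch (not part of this file): sysvipc_shm_proc_show() above
 * emits one line per segment in /proc/sysvipc/shm, with the columns named in
 * the header registered from shm_init().  Tools like ipcs(1) read it; a
 * minimal userspace dump is simply:
 *
 *	char line[256];
 *	FILE *f = fopen("/proc/sysvipc/shm", "r");
 *	while (f && fgets(line, sizeof(line), f))
 *		fputs(line, stdout);
 */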