// SPDX-License-Identifier: GPL-2.0
/*
 * linux/ipc/shm.c
 * Copyright (C) 1992, 1993 Krishna Balasubramanian
 *	 Many improvements/fixes by Bruno Haible.
 * Replaced `struct shm_desc' by `struct vm_area_struct', July 1994.
 * Fixed the shm swap deallocation (shm_unuse()), August 1998 Andrea Arcangeli.
 *
 * /proc/sysvipc/shm support (c) 1999 Dragos Acostachioaie <dragos@iname.com>
 * BIGMEM support, Andrea Arcangeli <andrea@suse.de>
 * SMP thread shm, Jean-Luc Boyard <jean-luc.boyard@siemens.fr>
 * HIGHMEM support, Ingo Molnar <mingo@redhat.com>
 * Make shmmax, shmall, shmmni sysctl'able, Christoph Rohland <cr@sap.com>
 * Shared /dev/zero support, Kanoj Sarcar <kanoj@sgi.com>
 * Move the mm functionality over to mm/shmem.c, Christoph Rohland <cr@sap.com>
 *
 * support for audit of ipc object properties and permission changes
 * Dustin Kirkland <dustin.kirkland@us.ibm.com>
 *
 * namespaces support
 * OpenVZ, SWsoft Inc.
 * Pavel Emelianov <xemul@openvz.org>
 *
 * Better ipc lock (kern_ipc_perm.lock) handling
 * Davidlohr Bueso <davidlohr.bueso@hp.com>, June 2013.
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/shm.h>
#include <linux/init.h>
#include <linux/file.h>
#include <linux/mman.h>
#include <linux/shmem_fs.h>
#include <linux/security.h>
#include <linux/syscalls.h>
#include <linux/audit.h>
#include <linux/capability.h>
#include <linux/ptrace.h>
#include <linux/seq_file.h>
#include <linux/rwsem.h>
#include <linux/nsproxy.h>
#include <linux/mount.h>
#include <linux/ipc_namespace.h>

#include <linux/uaccess.h>

#include "util.h"

struct shmid_kernel /* private to the kernel */
{
	struct kern_ipc_perm	shm_perm;
	struct file		*shm_file;
	unsigned long		shm_nattch;
	unsigned long		shm_segsz;
	time64_t		shm_atim;
	time64_t		shm_dtim;
	time64_t		shm_ctim;
	struct pid		*shm_cprid;
	struct pid		*shm_lprid;
	struct user_struct	*mlock_user;

	/* The task that created the shm object; NULL if the task is dead. */
	struct task_struct	*shm_creator;
	struct list_head	shm_clist;	/* list by creator */
} __randomize_layout;

/* shm_mode upper byte flags */
#define SHM_DEST	01000	/* segment will be destroyed on last detach */
#define SHM_LOCKED	02000	/* segment will not be swapped */

struct shm_file_data {
	int id;
	struct ipc_namespace *ns;
	struct file *file;
	const struct vm_operations_struct *vm_ops;
};

#define shm_file_data(file) (*((struct shm_file_data **)&(file)->private_data))

static const struct file_operations shm_file_operations;
static const struct vm_operations_struct shm_vm_ops;

#define shm_ids(ns)	((ns)->ids[IPC_SHM_IDS])

#define shm_unlock(shp)			\
	ipc_unlock(&(shp)->shm_perm)

static int newseg(struct ipc_namespace *, struct ipc_params *);
static void shm_open(struct vm_area_struct *vma);
static void shm_close(struct vm_area_struct *vma);
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp);
#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it);
#endif

int shm_init_ns(struct ipc_namespace *ns)
{
	ns->shm_ctlmax = SHMMAX;
	ns->shm_ctlall = SHMALL;
	ns->shm_ctlmni = SHMMNI;
	ns->shm_rmid_forced = 0;
	ns->shm_tot = 0;
	return ipc_init_ids(&shm_ids(ns));
}
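
/*
 * Accounting note (illustrative, not new semantics): ns->shm_tot counts the
 * namespace-wide total in PAGE_SIZE units, so e.g. a 1 MiB segment on a
 * 4 KiB-page machine contributes 256 pages towards the kernel.shmall
 * (ns->shm_ctlall) limit that newseg() checks.
 */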

/*
 * Called with shm_ids.rwsem (writer) and the shp structure locked.
 * Only shm_ids.rwsem remains locked on exit.
 */
static void do_shm_rmid(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	if (shp->shm_nattch) {
		shp->shm_perm.mode |= SHM_DEST;
		/* Do not find it any more */
		ipc_set_key_private(&shm_ids(ns), &shp->shm_perm);
		shm_unlock(shp);
	} else
		shm_destroy(ns, shp);
}

#ifdef CONFIG_IPC_NS
void shm_exit_ns(struct ipc_namespace *ns)
{
	free_ipcs(ns, &shm_ids(ns), do_shm_rmid);
	idr_destroy(&ns->ids[IPC_SHM_IDS].ipcs_idr);
	rhashtable_destroy(&ns->ids[IPC_SHM_IDS].key_ht);
}
#endif

static int __init ipc_ns_init(void)
{
	const int err = shm_init_ns(&init_ipc_ns);
	WARN(err, "ipc: sysv shm_init_ns failed: %d\n", err);
	return err;
}

pure_initcall(ipc_ns_init);

void __init shm_init(void)
{
	ipc_init_proc_interface("sysvipc/shm",
#if BITS_PER_LONG <= 32
				"       key      shmid perms       size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime        rss       swap\n",
#else
				"       key      shmid perms                  size  cpid  lpid nattch   uid   gid  cuid  cgid      atime      dtime      ctime                   rss                  swap\n",
#endif
				IPC_SHM_IDS, sysvipc_shm_proc_show);
}

static inline struct shmid_kernel *shm_obtain_object(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_idr(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline struct shmid_kernel *shm_obtain_object_check(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&shm_ids(ns), id);

	if (IS_ERR(ipcp))
		return ERR_CAST(ipcp);

	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

/*
 * shm_lock_(check_) routines are called in the paths where the rwsem
 * is not necessarily held.
 */
static inline struct shmid_kernel *shm_lock(struct ipc_namespace *ns, int id)
{
	struct kern_ipc_perm *ipcp = ipc_lock(&shm_ids(ns), id);

	/*
	 * Callers of shm_lock() must validate the status of the returned ipc
	 * object pointer (as returned by ipc_lock()), and error out as
	 * appropriate.
	 */
	if (IS_ERR(ipcp))
		return (void *)ipcp;
	return container_of(ipcp, struct shmid_kernel, shm_perm);
}

static inline void shm_lock_by_ptr(struct shmid_kernel *ipcp)
{
	rcu_read_lock();
	ipc_lock_object(&ipcp->shm_perm);
}

static void shm_rcu_free(struct rcu_head *head)
{
	struct kern_ipc_perm *ptr = container_of(head, struct kern_ipc_perm,
							rcu);
	struct shmid_kernel *shp = container_of(ptr, struct shmid_kernel,
							shm_perm);
	security_shm_free(&shp->shm_perm);
	kvfree(shp);
}

static inline void shm_rmid(struct ipc_namespace *ns, struct shmid_kernel *s)
{
	list_del(&s->shm_clist);
	ipc_rmid(&shm_ids(ns), &s->shm_perm);
}


static int __shm_open(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;

	shp = shm_lock(sfd->ns, sfd->id);

	if (IS_ERR(shp))
		return PTR_ERR(shp);

	if (shp->shm_file != sfd->file) {
		/* ID was reused */
		shm_unlock(shp);
		return -EINVAL;
	}

	shp->shm_atim = ktime_get_real_seconds();
	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_nattch++;
	shm_unlock(shp);
	return 0;
}

/* This is called by fork, once for every shm attach. */
static void shm_open(struct vm_area_struct *vma)
{
	int err = __shm_open(vma);
	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	WARN_ON_ONCE(err);
}

/*
 * shm_destroy - free the struct shmid_kernel
 *
 * @ns: namespace
 * @shp: struct to free
 *
 * It has to be called with shp and shm_ids.rwsem (writer) locked,
 * but returns with shp unlocked and freed.
 */
static void shm_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	struct file *shm_file;

	shm_file = shp->shm_file;
	shp->shm_file = NULL;
	ns->shm_tot -= (shp->shm_segsz + PAGE_SIZE - 1) >> PAGE_SHIFT;
	shm_rmid(ns, shp);
	shm_unlock(shp);
	if (!is_file_hugepages(shm_file))
		shmem_lock(shm_file, 0, shp->mlock_user);
	else if (shp->mlock_user)
		user_shm_unlock(i_size_read(file_inode(shm_file)),
				shp->mlock_user);
	fput(shm_file);
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	ipc_rcu_putref(&shp->shm_perm, shm_rcu_free);
}

/*
 * shm_may_destroy - identifies whether shm segment should be destroyed now
 *
 * Returns true if and only if there are no active users of the segment and
 * one of the following is true:
 *
 * 1) shmctl(id, IPC_RMID, NULL) was called for this shp
 *
 * 2) sysctl kernel.shm_rmid_forced is set to 1.
 */
static bool shm_may_destroy(struct ipc_namespace *ns, struct shmid_kernel *shp)
{
	return (shp->shm_nattch == 0) &&
	       (ns->shm_rmid_forced ||
		(shp->shm_perm.mode & SHM_DEST));
}
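
/*
 * Illustrative lifecycle (a sketch, not additional semantics): after
 * shmctl(id, IPC_RMID, NULL) on a segment that still has attached mappings,
 * do_shm_rmid() only sets SHM_DEST and hides the key; the final
 * shm_close()/exit_shm() that drops shm_nattch to 0 then sees
 * shm_may_destroy() return true and calls shm_destroy().
 */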

/*
 * remove the attach descriptor vma.
 * free memory for segment if it is marked destroyed.
 * The descriptor has already been removed from the current->mm->mmap list
 * and will later be kfree()d.
 */
static void shm_close(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct shmid_kernel *shp;
	struct ipc_namespace *ns = sfd->ns;

	down_write(&shm_ids(ns).rwsem);
	/* remove from the list of attaches of the shm segment */
	shp = shm_lock(ns, sfd->id);

	/*
	 * We raced in the idr lookup or with shm_destroy().
	 * Either way, the ID is busted.
	 */
	if (WARN_ON_ONCE(IS_ERR(shp)))
		goto done; /* no-op */

	ipc_update_pid(&shp->shm_lprid, task_tgid(current));
	shp->shm_dtim = ktime_get_real_seconds();
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
done:
	up_write(&shm_ids(ns).rwsem);
}

/* Called with ns->shm_ids(ns).rwsem locked */
static int shm_try_destroy_orphaned(int id, void *p, void *data)
{
	struct ipc_namespace *ns = data;
	struct kern_ipc_perm *ipcp = p;
	struct shmid_kernel *shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	/*
	 * We want to destroy segments without users and with already
	 * exit'ed originating process.
	 *
	 * As shp->* are changed under rwsem, it's safe to skip shp locking.
	 */
	if (shp->shm_creator != NULL)
		return 0;

	if (shm_may_destroy(ns, shp)) {
		shm_lock_by_ptr(shp);
		shm_destroy(ns, shp);
	}
	return 0;
}

void shm_destroy_orphaned(struct ipc_namespace *ns)
{
	down_write(&shm_ids(ns).rwsem);
	if (shm_ids(ns).in_use)
		idr_for_each(&shm_ids(ns).ipcs_idr, &shm_try_destroy_orphaned, ns);
	up_write(&shm_ids(ns).rwsem);
}

/* Locking assumes this will only be called with task == current */
void exit_shm(struct task_struct *task)
{
	struct ipc_namespace *ns = task->nsproxy->ipc_ns;
	struct shmid_kernel *shp, *n;

	if (list_empty(&task->sysvshm.shm_clist))
		return;

	/*
	 * If kernel.shm_rmid_forced is not set then only keep track of
	 * which shmids are orphaned, so that a later set of the sysctl
	 * can clean them up.
	 */
	if (!ns->shm_rmid_forced) {
		down_read(&shm_ids(ns).rwsem);
		list_for_each_entry(shp, &task->sysvshm.shm_clist, shm_clist)
			shp->shm_creator = NULL;
		/*
		 * Only under read lock but we are only called on current
		 * so no entry on the list will be shared.
		 */
		list_del(&task->sysvshm.shm_clist);
		up_read(&shm_ids(ns).rwsem);
		return;
	}

	/*
	 * Destroy all already created segments, that were not yet mapped,
	 * and mark any mapped as orphan to cover the sysctl toggling.
	 * Destroy is skipped if shm_may_destroy() returns false.
	 */
	down_write(&shm_ids(ns).rwsem);
	list_for_each_entry_safe(shp, n, &task->sysvshm.shm_clist, shm_clist) {
		shp->shm_creator = NULL;

		if (shm_may_destroy(ns, shp)) {
			shm_lock_by_ptr(shp);
			shm_destroy(ns, shp);
		}
	}

	/* Remove the list head from any segments still attached. */
	list_del(&task->sysvshm.shm_clist);
	up_write(&shm_ids(ns).rwsem);
}

static vm_fault_t shm_fault(struct vm_fault *vmf)
{
	struct file *file = vmf->vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->vm_ops->fault(vmf);
}

static int shm_split(struct vm_area_struct *vma, unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->split)
		return sfd->vm_ops->split(vma, addr);

	return 0;
}

static unsigned long shm_pagesize(struct vm_area_struct *vma)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);

	if (sfd->vm_ops->pagesize)
		return sfd->vm_ops->pagesize(vma);

	return PAGE_SIZE;
}

#ifdef CONFIG_NUMA
static int shm_set_policy(struct vm_area_struct *vma, struct mempolicy *new)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	int err = 0;

	if (sfd->vm_ops->set_policy)
		err = sfd->vm_ops->set_policy(vma, new);
	return err;
}

static struct mempolicy *shm_get_policy(struct vm_area_struct *vma,
					unsigned long addr)
{
	struct file *file = vma->vm_file;
	struct shm_file_data *sfd = shm_file_data(file);
	struct mempolicy *pol = NULL;

	if (sfd->vm_ops->get_policy)
		pol = sfd->vm_ops->get_policy(vma, addr);
	else if (vma->vm_policy)
		pol = vma->vm_policy;

	return pol;
}
#endif
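
/*
 * Descriptive note: shm_fault(), shm_split(), shm_pagesize() and the NUMA
 * policy hooks above simply delegate to the vm_operations of the backing
 * shmem or hugetlbfs file (stashed in shm_file_data by shm_mmap() below),
 * so that attach/detach accounting via shm_open()/shm_close() can be
 * layered on top in shm_vm_ops.
 */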

static int shm_mmap(struct file *file, struct vm_area_struct *vma)
{
	struct shm_file_data *sfd = shm_file_data(file);
	int ret;

	/*
	 * In case of remap_file_pages() emulation, the file can represent an
	 * IPC ID that was removed, and possibly even reused by another shm
	 * segment already. Propagate this case as an error to caller.
	 */
	ret = __shm_open(vma);
	if (ret)
		return ret;

	ret = call_mmap(sfd->file, vma);
	if (ret) {
		shm_close(vma);
		return ret;
	}
	sfd->vm_ops = vma->vm_ops;
#ifdef CONFIG_MMU
	WARN_ON(!sfd->vm_ops->fault);
#endif
	vma->vm_ops = &shm_vm_ops;
	return 0;
}

static int shm_release(struct inode *ino, struct file *file)
{
	struct shm_file_data *sfd = shm_file_data(file);

	put_ipc_ns(sfd->ns);
	fput(sfd->file);
	shm_file_data(file) = NULL;
	kfree(sfd);
	return 0;
}

static int shm_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fsync)
		return -EINVAL;
	return sfd->file->f_op->fsync(sfd->file, start, end, datasync);
}

static long shm_fallocate(struct file *file, int mode, loff_t offset,
			  loff_t len)
{
	struct shm_file_data *sfd = shm_file_data(file);

	if (!sfd->file->f_op->fallocate)
		return -EOPNOTSUPP;
	return sfd->file->f_op->fallocate(file, mode, offset, len);
}

static unsigned long shm_get_unmapped_area(struct file *file,
	unsigned long addr, unsigned long len, unsigned long pgoff,
	unsigned long flags)
{
	struct shm_file_data *sfd = shm_file_data(file);

	return sfd->file->f_op->get_unmapped_area(sfd->file, addr, len,
						  pgoff, flags);
}

static const struct file_operations shm_file_operations = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

/*
 * shm_file_operations_huge is now identical to shm_file_operations,
 * but we keep it distinct for the sake of is_file_shm_hugepages().
 */
static const struct file_operations shm_file_operations_huge = {
	.mmap		= shm_mmap,
	.fsync		= shm_fsync,
	.release	= shm_release,
	.get_unmapped_area	= shm_get_unmapped_area,
	.llseek		= noop_llseek,
	.fallocate	= shm_fallocate,
};

bool is_file_shm_hugepages(struct file *file)
{
	return file->f_op == &shm_file_operations_huge;
}

static const struct vm_operations_struct shm_vm_ops = {
	.open	= shm_open,	/* callback for a new vm-area open */
	.close	= shm_close,	/* callback for when the vm-area is released */
	.fault	= shm_fault,
	.split	= shm_split,
	.pagesize = shm_pagesize,
#if defined(CONFIG_NUMA)
	.set_policy = shm_set_policy,
	.get_policy = shm_get_policy,
#endif
};

/**
 * newseg - Create a new shared memory segment
 * @ns: namespace
 * @params: ptr to the structure that contains key, size and shmflg
 *
 * Called with shm_ids.rwsem held as a writer.
 */
static int newseg(struct ipc_namespace *ns, struct ipc_params *params)
{
	key_t key = params->key;
	int shmflg = params->flg;
	size_t size = params->u.size;
	int error;
	struct shmid_kernel *shp;
	size_t numpages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	struct file *file;
	char name[13];
	vm_flags_t acctflag = 0;

	if (size < SHMMIN || size > ns->shm_ctlmax)
		return -EINVAL;

	if (numpages << PAGE_SHIFT < size)
		return -ENOSPC;

	if (ns->shm_tot + numpages < ns->shm_tot ||
			ns->shm_tot + numpages > ns->shm_ctlall)
		return -ENOSPC;

	shp = kvmalloc(sizeof(*shp), GFP_KERNEL);
	if (unlikely(!shp))
		return -ENOMEM;

	shp->shm_perm.key = key;
	shp->shm_perm.mode = (shmflg & S_IRWXUGO);
	shp->mlock_user = NULL;

	shp->shm_perm.security = NULL;
	error = security_shm_alloc(&shp->shm_perm);
	if (error) {
		kvfree(shp);
		return error;
	}

	sprintf(name, "SYSV%08x", key);
	if (shmflg & SHM_HUGETLB) {
		struct hstate *hs;
		size_t hugesize;

		hs = hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
		if (!hs) {
			error = -EINVAL;
			goto no_file;
		}
		hugesize = ALIGN(size, huge_page_size(hs));

		/* hugetlb_file_setup applies strict accounting */
		if (shmflg & SHM_NORESERVE)
			acctflag = VM_NORESERVE;
		file = hugetlb_file_setup(name, hugesize, acctflag,
				  &shp->mlock_user, HUGETLB_SHMFS_INODE,
				(shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK);
	} else {
		/*
		 * Do not allow no accounting for OVERCOMMIT_NEVER, even
		 * if it's asked for.
		 */
		if ((shmflg & SHM_NORESERVE) &&
				sysctl_overcommit_memory != OVERCOMMIT_NEVER)
			acctflag = VM_NORESERVE;
		file = shmem_kernel_file_setup(name, size, acctflag);
	}
	error = PTR_ERR(file);
	if (IS_ERR(file))
		goto no_file;

	shp->shm_cprid = get_pid(task_tgid(current));
	shp->shm_lprid = NULL;
	shp->shm_atim = shp->shm_dtim = 0;
	shp->shm_ctim = ktime_get_real_seconds();
	shp->shm_segsz = size;
	shp->shm_nattch = 0;
	shp->shm_file = file;
	shp->shm_creator = current;

	/* ipc_addid() locks shp upon success. */
	error = ipc_addid(&shm_ids(ns), &shp->shm_perm, ns->shm_ctlmni);
	if (error < 0)
		goto no_id;

	list_add(&shp->shm_clist, &current->sysvshm.shm_clist);

	/*
	 * shmid gets reported as "inode#" in /proc/pid/maps.
	 * proc-ps tools use this. Changing this will break them.
	 */
	file_inode(file)->i_ino = shp->shm_perm.id;

	ns->shm_tot += numpages;
	error = shp->shm_perm.id;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return error;

no_id:
	ipc_update_pid(&shp->shm_cprid, NULL);
	ipc_update_pid(&shp->shm_lprid, NULL);
	if (is_file_hugepages(file) && shp->mlock_user)
		user_shm_unlock(size, shp->mlock_user);
	fput(file);
no_file:
	call_rcu(&shp->shm_perm.rcu, shm_rcu_free);
	return error;
}
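
/*
 * Illustrative only (assumes the SHM_HUGE_* helpers from the uapi
 * <linux/shm.h> are available to the application): the huge page size is
 * selected by encoding its base-2 log in the SHM_HUGE_SHIFT bits of shmflg,
 * e.g.
 *
 *	shmget(key, 16UL << 20, IPC_CREAT | SHM_HUGETLB | SHM_HUGE_2MB | 0600);
 *
 * which newseg() above decodes with
 * hstate_sizelog((shmflg >> SHM_HUGE_SHIFT) & SHM_HUGE_MASK).
 */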

/*
 * Called with shm_ids.rwsem and ipcp locked.
 */
static inline int shm_more_checks(struct kern_ipc_perm *ipcp,
				struct ipc_params *params)
{
	struct shmid_kernel *shp;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	if (shp->shm_segsz < params->u.size)
		return -EINVAL;

	return 0;
}

long ksys_shmget(key_t key, size_t size, int shmflg)
{
	struct ipc_namespace *ns;
	static const struct ipc_ops shm_ops = {
		.getnew = newseg,
		.associate = security_shm_associate,
		.more_checks = shm_more_checks,
	};
	struct ipc_params shm_params;

	ns = current->nsproxy->ipc_ns;

	shm_params.key = key;
	shm_params.flg = shmflg;
	shm_params.u.size = size;

	return ipcget(ns, &shm_ids(ns), &shm_ops, &shm_params);
}

SYSCALL_DEFINE3(shmget, key_t, key, size_t, size, int, shmflg)
{
	return ksys_shmget(key, size, shmflg);
}

static inline unsigned long copy_shmid_to_user(void __user *buf, struct shmid64_ds *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shmid_ds out;

		memset(&out, 0, sizeof(out));
		ipc64_perm_to_ipc_perm(&in->shm_perm, &out.shm_perm);
		out.shm_segsz	= in->shm_segsz;
		out.shm_atime	= in->shm_atime;
		out.shm_dtime	= in->shm_dtime;
		out.shm_ctime	= in->shm_ctime;
		out.shm_cpid	= in->shm_cpid;
		out.shm_lpid	= in->shm_lpid;
		out.shm_nattch	= in->shm_nattch;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long
copy_shmid_from_user(struct shmid64_ds *out, void __user *buf, int version)
{
	switch (version) {
	case IPC_64:
		if (copy_from_user(out, buf, sizeof(*out)))
			return -EFAULT;
		return 0;
	case IPC_OLD:
	    {
		struct shmid_ds tbuf_old;

		if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
			return -EFAULT;

		out->shm_perm.uid	= tbuf_old.shm_perm.uid;
		out->shm_perm.gid	= tbuf_old.shm_perm.gid;
		out->shm_perm.mode	= tbuf_old.shm_perm.mode;

		return 0;
	    }
	default:
		return -EINVAL;
	}
}

static inline unsigned long copy_shminfo_to_user(void __user *buf, struct shminfo64 *in, int version)
{
	switch (version) {
	case IPC_64:
		return copy_to_user(buf, in, sizeof(*in));
	case IPC_OLD:
	    {
		struct shminfo out;

		if (in->shmmax > INT_MAX)
			out.shmmax = INT_MAX;
		else
			out.shmmax = (int)in->shmmax;

		out.shmmin	= in->shmmin;
		out.shmmni	= in->shmmni;
		out.shmseg	= in->shmseg;
		out.shmall	= in->shmall;

		return copy_to_user(buf, &out, sizeof(out));
	    }
	default:
		return -EINVAL;
	}
}
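
/*
 * Note on the helpers above (descriptive only): ipc_parse_version() strips
 * the IPC_64 flag from cmd, so "version" selects between the modern
 * shmid64_ds/shminfo64 layouts and the legacy IPC_OLD ones; as
 * copy_shmid_from_user() shows, the legacy IPC_SET path can only update
 * the uid, gid and mode of shm_perm.
 */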

/*
 * Calculate and add used RSS and swap pages of a shm.
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_add_rss_swap(struct shmid_kernel *shp,
	unsigned long *rss_add, unsigned long *swp_add)
{
	struct inode *inode;

	inode = file_inode(shp->shm_file);

	if (is_file_hugepages(shp->shm_file)) {
		struct address_space *mapping = inode->i_mapping;
		struct hstate *h = hstate_file(shp->shm_file);
		*rss_add += pages_per_huge_page(h) * mapping->nrpages;
	} else {
#ifdef CONFIG_SHMEM
		struct shmem_inode_info *info = SHMEM_I(inode);

		spin_lock_irq(&info->lock);
		*rss_add += inode->i_mapping->nrpages;
		*swp_add += info->swapped;
		spin_unlock_irq(&info->lock);
#else
		*rss_add += inode->i_mapping->nrpages;
#endif
	}
}

/*
 * Called with shm_ids.rwsem held as a reader
 */
static void shm_get_stat(struct ipc_namespace *ns, unsigned long *rss,
		unsigned long *swp)
{
	int next_id;
	int total, in_use;

	*rss = 0;
	*swp = 0;

	in_use = shm_ids(ns).in_use;

	for (total = 0, next_id = 0; total < in_use; next_id++) {
		struct kern_ipc_perm *ipc;
		struct shmid_kernel *shp;

		ipc = idr_find(&shm_ids(ns).ipcs_idr, next_id);
		if (ipc == NULL)
			continue;
		shp = container_of(ipc, struct shmid_kernel, shm_perm);

		shm_add_rss_swap(shp, rss, swp);

		total++;
	}
}

/*
 * This function handles some shmctl commands which require the rwsem
 * to be held in write mode.
 * NOTE: no locks must be held, the rwsem is taken inside this function.
 */
static int shmctl_down(struct ipc_namespace *ns, int shmid, int cmd,
		       struct shmid64_ds *shmid64)
{
	struct kern_ipc_perm *ipcp;
	struct shmid_kernel *shp;
	int err;

	down_write(&shm_ids(ns).rwsem);
	rcu_read_lock();

	ipcp = ipcctl_pre_down_nolock(ns, &shm_ids(ns), shmid, cmd,
				      &shmid64->shm_perm, 0);
	if (IS_ERR(ipcp)) {
		err = PTR_ERR(ipcp);
		goto out_unlock1;
	}

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	switch (cmd) {
	case IPC_RMID:
		ipc_lock_object(&shp->shm_perm);
		/* do_shm_rmid unlocks the ipc object and rcu */
		do_shm_rmid(ns, ipcp);
		goto out_up;
	case IPC_SET:
		ipc_lock_object(&shp->shm_perm);
		err = ipc_update_perm(&shmid64->shm_perm, ipcp);
		if (err)
			goto out_unlock0;
		shp->shm_ctim = ktime_get_real_seconds();
		break;
	default:
		err = -EINVAL;
		goto out_unlock1;
	}

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
out_up:
	up_write(&shm_ids(ns).rwsem);
	return err;
}
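
/*
 * Descriptive note: on success both IPC_INFO and SHM_INFO return the highest
 * currently used index in the shm IDR rather than 0; tools such as ipcs(1)
 * typically use that value as an upper bound when walking segments with
 * shmctl(index, SHM_STAT, ...).
 */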

static int shmctl_ipc_info(struct ipc_namespace *ns,
			   struct shminfo64 *shminfo)
{
	int err = security_shm_shmctl(NULL, IPC_INFO);
	if (!err) {
		memset(shminfo, 0, sizeof(*shminfo));
		shminfo->shmmni = shminfo->shmseg = ns->shm_ctlmni;
		shminfo->shmmax = ns->shm_ctlmax;
		shminfo->shmall = ns->shm_ctlall;
		shminfo->shmmin = SHMMIN;
		down_read(&shm_ids(ns).rwsem);
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (err < 0)
			err = 0;
	}
	return err;
}

static int shmctl_shm_info(struct ipc_namespace *ns,
			   struct shm_info *shm_info)
{
	int err = security_shm_shmctl(NULL, SHM_INFO);
	if (!err) {
		memset(shm_info, 0, sizeof(*shm_info));
		down_read(&shm_ids(ns).rwsem);
		shm_info->used_ids = shm_ids(ns).in_use;
		shm_get_stat(ns, &shm_info->shm_rss, &shm_info->shm_swp);
		shm_info->shm_tot = ns->shm_tot;
		shm_info->swap_attempts = 0;
		shm_info->swap_successes = 0;
		err = ipc_get_maxid(&shm_ids(ns));
		up_read(&shm_ids(ns).rwsem);
		if (err < 0)
			err = 0;
	}
	return err;
}

static int shmctl_stat(struct ipc_namespace *ns, int shmid,
			int cmd, struct shmid64_ds *tbuf)
{
	struct shmid_kernel *shp;
	int id = 0;
	int err;

	memset(tbuf, 0, sizeof(*tbuf));

	rcu_read_lock();
	if (cmd == SHM_STAT || cmd == SHM_STAT_ANY) {
		shp = shm_obtain_object(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
		id = shp->shm_perm.id;
	} else { /* IPC_STAT */
		shp = shm_obtain_object_check(ns, shmid);
		if (IS_ERR(shp)) {
			err = PTR_ERR(shp);
			goto out_unlock;
		}
	}

	/*
	 * Semantically SHM_STAT_ANY ought to be identical to
	 * that functionality provided by the /proc/sysvipc/
	 * interface. As such, only audit these calls and
	 * do not do traditional S_IRUGO permission checks on
	 * the ipc object.
	 */
	if (cmd == SHM_STAT_ANY)
		audit_ipc_obj(&shp->shm_perm);
	else {
		err = -EACCES;
		if (ipcperms(ns, &shp->shm_perm, S_IRUGO))
			goto out_unlock;
	}

	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	kernel_to_ipc64_perm(&shp->shm_perm, &tbuf->shm_perm);
	tbuf->shm_segsz	= shp->shm_segsz;
	tbuf->shm_atime	= shp->shm_atim;
	tbuf->shm_dtime	= shp->shm_dtim;
	tbuf->shm_ctime	= shp->shm_ctim;
#ifndef CONFIG_64BIT
	tbuf->shm_atime_high = shp->shm_atim >> 32;
	tbuf->shm_dtime_high = shp->shm_dtim >> 32;
	tbuf->shm_ctime_high = shp->shm_ctim >> 32;
#endif
	tbuf->shm_cpid	= pid_vnr(shp->shm_cprid);
	tbuf->shm_lpid	= pid_vnr(shp->shm_lprid);
	tbuf->shm_nattch = shp->shm_nattch;

	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	return id;

out_unlock:
	rcu_read_unlock();
	return err;
}
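
/*
 * SHM_LOCK/SHM_UNLOCK notes (descriptive only): the lock applies to the
 * whole segment through shmem_lock() on the backing file and is remembered
 * in the SHM_LOCKED mode bit; without CAP_IPC_LOCK it is limited to the
 * segment's owner or creator and requires a non-zero RLIMIT_MEMLOCK, as
 * shmctl_do_lock() below checks.
 */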

static int shmctl_do_lock(struct ipc_namespace *ns, int shmid, int cmd)
{
	struct shmid_kernel *shp;
	struct file *shm_file;
	int err;

	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock1;
	}

	audit_ipc_obj(&(shp->shm_perm));
	err = security_shm_shmctl(&shp->shm_perm, cmd);
	if (err)
		goto out_unlock1;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		err = -EIDRM;
		goto out_unlock0;
	}

	if (!ns_capable(ns->user_ns, CAP_IPC_LOCK)) {
		kuid_t euid = current_euid();

		if (!uid_eq(euid, shp->shm_perm.uid) &&
		    !uid_eq(euid, shp->shm_perm.cuid)) {
			err = -EPERM;
			goto out_unlock0;
		}
		if (cmd == SHM_LOCK && !rlimit(RLIMIT_MEMLOCK)) {
			err = -EPERM;
			goto out_unlock0;
		}
	}

	shm_file = shp->shm_file;
	if (is_file_hugepages(shm_file))
		goto out_unlock0;

	if (cmd == SHM_LOCK) {
		struct user_struct *user = current_user();

		err = shmem_lock(shm_file, 1, user);
		if (!err && !(shp->shm_perm.mode & SHM_LOCKED)) {
			shp->shm_perm.mode |= SHM_LOCKED;
			shp->mlock_user = user;
		}
		goto out_unlock0;
	}

	/* SHM_UNLOCK */
	if (!(shp->shm_perm.mode & SHM_LOCKED))
		goto out_unlock0;
	shmem_lock(shm_file, 0, shp->mlock_user);
	shp->shm_perm.mode &= ~SHM_LOCKED;
	shp->mlock_user = NULL;
	get_file(shm_file);
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();
	shmem_unlock_mapping(shm_file->f_mapping);

	fput(shm_file);
	return err;

out_unlock0:
	ipc_unlock_object(&shp->shm_perm);
out_unlock1:
	rcu_read_unlock();
	return err;
}

long ksys_shmctl(int shmid, int cmd, struct shmid_ds __user *buf)
{
	int err, version;
	struct ipc_namespace *ns;
	struct shmid64_ds sem64;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	version = ipc_parse_version(&cmd);
	ns = current->nsproxy->ipc_ns;

	switch (cmd) {
	case IPC_INFO: {
		struct shminfo64 shminfo;
		err = shmctl_ipc_info(ns, &shminfo);
		if (err < 0)
			return err;
		if (copy_shminfo_to_user(buf, &shminfo, version))
			err = -EFAULT;
		return err;
	}
	case SHM_INFO: {
		struct shm_info shm_info;
		err = shmctl_shm_info(ns, &shm_info);
		if (err < 0)
			return err;
		if (copy_to_user(buf, &shm_info, sizeof(shm_info)))
			err = -EFAULT;
		return err;
	}
	case SHM_STAT:
	case SHM_STAT_ANY:
	case IPC_STAT: {
		err = shmctl_stat(ns, shmid, cmd, &sem64);
		if (err < 0)
			return err;
		if (copy_shmid_to_user(buf, &sem64, version))
			err = -EFAULT;
		return err;
	}
	case IPC_SET:
		if (copy_shmid_from_user(&sem64, buf, version))
			return -EFAULT;
		/* fallthru */
	case IPC_RMID:
		return shmctl_down(ns, shmid, cmd, &sem64);
	case SHM_LOCK:
	case SHM_UNLOCK:
		return shmctl_do_lock(ns, shmid, cmd);
	default:
		return -EINVAL;
	}
}

SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, struct shmid_ds __user *, buf)
{
	return ksys_shmctl(shmid, cmd, buf);
}
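
/*
 * Illustrative userspace usage of shmctl() (a sketch, not part of the
 * kernel API defined here):
 *
 *	struct shmid_ds ds;
 *
 *	if (shmctl(id, IPC_STAT, &ds) == 0)
 *		... inspect ds.shm_segsz, ds.shm_nattch, ds.shm_perm ...
 *	shmctl(id, IPC_RMID, NULL);
 *
 * After IPC_RMID the segment is destroyed as soon as its attach count drops
 * to zero; see do_shm_rmid() and shm_may_destroy() above.
 */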

#ifdef CONFIG_COMPAT

struct compat_shmid_ds {
	struct compat_ipc_perm shm_perm;
	int shm_segsz;
	compat_time_t shm_atime;
	compat_time_t shm_dtime;
	compat_time_t shm_ctime;
	compat_ipc_pid_t shm_cpid;
	compat_ipc_pid_t shm_lpid;
	unsigned short shm_nattch;
	unsigned short shm_unused;
	compat_uptr_t shm_unused2;
	compat_uptr_t shm_unused3;
};

struct compat_shminfo64 {
	compat_ulong_t shmmax;
	compat_ulong_t shmmin;
	compat_ulong_t shmmni;
	compat_ulong_t shmseg;
	compat_ulong_t shmall;
	compat_ulong_t __unused1;
	compat_ulong_t __unused2;
	compat_ulong_t __unused3;
	compat_ulong_t __unused4;
};

struct compat_shm_info {
	compat_int_t used_ids;
	compat_ulong_t shm_tot, shm_rss, shm_swp;
	compat_ulong_t swap_attempts, swap_successes;
};

static int copy_compat_shminfo_to_user(void __user *buf, struct shminfo64 *in,
					int version)
{
	if (in->shmmax > INT_MAX)
		in->shmmax = INT_MAX;
	if (version == IPC_64) {
		struct compat_shminfo64 info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	} else {
		struct shminfo info;
		memset(&info, 0, sizeof(info));
		info.shmmax = in->shmmax;
		info.shmmin = in->shmmin;
		info.shmmni = in->shmmni;
		info.shmseg = in->shmseg;
		info.shmall = in->shmall;
		return copy_to_user(buf, &info, sizeof(info));
	}
}

static int put_compat_shm_info(struct shm_info *ip,
				struct compat_shm_info __user *uip)
{
	struct compat_shm_info info;

	memset(&info, 0, sizeof(info));
	info.used_ids = ip->used_ids;
	info.shm_tot = ip->shm_tot;
	info.shm_rss = ip->shm_rss;
	info.shm_swp = ip->shm_swp;
	info.swap_attempts = ip->swap_attempts;
	info.swap_successes = ip->swap_successes;
	return copy_to_user(uip, &info, sizeof(info));
}

static int copy_compat_shmid_to_user(void __user *buf, struct shmid64_ds *in,
					int version)
{
	if (version == IPC_64) {
		struct compat_shmid64_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc64_perm(&v.shm_perm, &in->shm_perm);
		v.shm_atime	 = lower_32_bits(in->shm_atime);
		v.shm_atime_high = upper_32_bits(in->shm_atime);
		v.shm_dtime	 = lower_32_bits(in->shm_dtime);
		v.shm_dtime_high = upper_32_bits(in->shm_dtime);
		v.shm_ctime	 = lower_32_bits(in->shm_ctime);
		v.shm_ctime_high = upper_32_bits(in->shm_ctime);
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	} else {
		struct compat_shmid_ds v;
		memset(&v, 0, sizeof(v));
		to_compat_ipc_perm(&v.shm_perm, &in->shm_perm);
		v.shm_perm.key = in->shm_perm.key;
		v.shm_atime = in->shm_atime;
		v.shm_dtime = in->shm_dtime;
		v.shm_ctime = in->shm_ctime;
		v.shm_segsz = in->shm_segsz;
		v.shm_nattch = in->shm_nattch;
		v.shm_cpid = in->shm_cpid;
		v.shm_lpid = in->shm_lpid;
		return copy_to_user(buf, &v, sizeof(v));
	}
}

static int copy_compat_shmid_from_user(struct shmid64_ds *out, void __user *buf,
					int version)
{
	memset(out, 0, sizeof(*out));
	if (version == IPC_64) {
		struct compat_shmid64_ds __user *p = buf;
		return get_compat_ipc64_perm(&out->shm_perm, &p->shm_perm);
	} else {
		struct compat_shmid_ds __user *p = buf;
		return get_compat_ipc_perm(&out->shm_perm, &p->shm_perm);
	}
}

long compat_ksys_shmctl(int shmid, int cmd, void __user *uptr)
{
	struct ipc_namespace *ns;
	struct shmid64_ds sem64;
	int version = compat_ipc_parse_version(&cmd);
	int err;

	ns = current->nsproxy->ipc_ns;

	if (cmd < 0 || shmid < 0)
		return -EINVAL;

	switch (cmd) {
	case IPC_INFO: {
		struct shminfo64 shminfo;
		err = shmctl_ipc_info(ns, &shminfo);
		if (err < 0)
			return err;
		if (copy_compat_shminfo_to_user(uptr, &shminfo, version))
			err = -EFAULT;
		return err;
	}
	case SHM_INFO: {
		struct shm_info shm_info;
		err = shmctl_shm_info(ns, &shm_info);
		if (err < 0)
			return err;
		if (put_compat_shm_info(&shm_info, uptr))
			err = -EFAULT;
		return err;
	}
	case IPC_STAT:
	case SHM_STAT_ANY:
	case SHM_STAT:
		err = shmctl_stat(ns, shmid, cmd, &sem64);
		if (err < 0)
			return err;
		if (copy_compat_shmid_to_user(uptr, &sem64, version))
			err = -EFAULT;
		return err;

	case IPC_SET:
		if (copy_compat_shmid_from_user(&sem64, uptr, version))
			return -EFAULT;
		/* fallthru */
	case IPC_RMID:
		return shmctl_down(ns, shmid, cmd, &sem64);
	case SHM_LOCK:
	case SHM_UNLOCK:
		return shmctl_do_lock(ns, shmid, cmd);
		break;
	default:
		return -EINVAL;
	}
	return err;
}

COMPAT_SYSCALL_DEFINE3(shmctl, int, shmid, int, cmd, void __user *, uptr)
{
	return compat_ksys_shmctl(shmid, cmd, uptr);
}
#endif
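
/*
 * Illustrative only: a typical attach from userspace either lets the kernel
 * pick the address or passes a hint together with SHM_RND so that a
 * non-SHMLBA-aligned hint is rounded down:
 *
 *	void *p = shmat(id, NULL, 0);
 *	void *q = shmat(id, hint, SHM_RND | SHM_RDONLY);
 *
 * do_shmat() below implements the rounding and the MAP_FIXED placement.
 */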

/*
 * Fix shmaddr, allocate descriptor, map shm, add attach descriptor to lists.
 *
 * NOTE! Despite the name, this is NOT a direct system call entrypoint. The
 * "raddr" thing points to kernel space, and there has to be a wrapper around
 * this.
 */
long do_shmat(int shmid, char __user *shmaddr, int shmflg,
	      ulong *raddr, unsigned long shmlba)
{
	struct shmid_kernel *shp;
	unsigned long addr = (unsigned long)shmaddr;
	unsigned long size;
	struct file *file;
	int err;
	unsigned long flags = MAP_SHARED;
	unsigned long prot;
	int acc_mode;
	struct ipc_namespace *ns;
	struct shm_file_data *sfd;
	struct path path;
	fmode_t f_mode;
	unsigned long populate = 0;

	err = -EINVAL;
	if (shmid < 0)
		goto out;

	if (addr) {
		if (addr & (shmlba - 1)) {
			if (shmflg & SHM_RND) {
				addr &= ~(shmlba - 1);  /* round down */

				/*
				 * Ensure that the round-down is non-nil
				 * when remapping. This can happen for
				 * cases when addr < shmlba.
				 */
				if (!addr && (shmflg & SHM_REMAP))
					goto out;
			} else
#ifndef __ARCH_FORCE_SHMLBA
				if (addr & ~PAGE_MASK)
#endif
					goto out;
		}

		flags |= MAP_FIXED;
	} else if ((shmflg & SHM_REMAP))
		goto out;

	if (shmflg & SHM_RDONLY) {
		prot = PROT_READ;
		acc_mode = S_IRUGO;
		f_mode = FMODE_READ;
	} else {
		prot = PROT_READ | PROT_WRITE;
		acc_mode = S_IRUGO | S_IWUGO;
		f_mode = FMODE_READ | FMODE_WRITE;
	}
	if (shmflg & SHM_EXEC) {
		prot |= PROT_EXEC;
		acc_mode |= S_IXUGO;
	}

	/*
	 * We cannot rely on the fs check since SYSV IPC does have an
	 * additional creator id...
	 */
	ns = current->nsproxy->ipc_ns;
	rcu_read_lock();
	shp = shm_obtain_object_check(ns, shmid);
	if (IS_ERR(shp)) {
		err = PTR_ERR(shp);
		goto out_unlock;
	}

	err = -EACCES;
	if (ipcperms(ns, &shp->shm_perm, acc_mode))
		goto out_unlock;

	err = security_shm_shmat(&shp->shm_perm, shmaddr, shmflg);
	if (err)
		goto out_unlock;

	ipc_lock_object(&shp->shm_perm);

	/* check if shm_destroy() is tearing down shp */
	if (!ipc_valid_object(&shp->shm_perm)) {
		ipc_unlock_object(&shp->shm_perm);
		err = -EIDRM;
		goto out_unlock;
	}

	path = shp->shm_file->f_path;
	path_get(&path);
	shp->shm_nattch++;
	size = i_size_read(d_inode(path.dentry));
	ipc_unlock_object(&shp->shm_perm);
	rcu_read_unlock();

	err = -ENOMEM;
	sfd = kzalloc(sizeof(*sfd), GFP_KERNEL);
	if (!sfd) {
		path_put(&path);
		goto out_nattch;
	}

	file = alloc_file(&path, f_mode,
			  is_file_hugepages(shp->shm_file) ?
				&shm_file_operations_huge :
				&shm_file_operations);
	err = PTR_ERR(file);
	if (IS_ERR(file)) {
		kfree(sfd);
		path_put(&path);
		goto out_nattch;
	}

	file->private_data = sfd;
	file->f_mapping = shp->shm_file->f_mapping;
	sfd->id = shp->shm_perm.id;
	sfd->ns = get_ipc_ns(ns);
	/*
	 * We need to take a reference to the real shm file to prevent the
	 * pointer from becoming stale in cases where the lifetime of the outer
	 * file extends beyond that of the shm segment. It's not usually
	 * possible, but it can happen during remap_file_pages() emulation as
	 * that unmaps the memory, then does ->mmap() via file reference only.
	 * We'll deny the ->mmap() if the shm segment was since removed, but to
	 * detect shm ID reuse we need to compare the file pointers.
	 */
	sfd->file = get_file(shp->shm_file);
	sfd->vm_ops = NULL;

	err = security_mmap_file(file, prot, flags);
	if (err)
		goto out_fput;

	if (down_write_killable(&current->mm->mmap_sem)) {
		err = -EINTR;
		goto out_fput;
	}

	if (addr && !(shmflg & SHM_REMAP)) {
		err = -EINVAL;
		if (addr + size < addr)
			goto invalid;

		if (find_vma_intersection(current->mm, addr, addr + size))
			goto invalid;
	}

	addr = do_mmap_pgoff(file, addr, size, prot, flags, 0, &populate, NULL);
	*raddr = addr;
	err = 0;
	if (IS_ERR_VALUE(addr))
		err = (long)addr;
invalid:
	up_write(&current->mm->mmap_sem);
	if (populate)
		mm_populate(addr, populate);

out_fput:
	fput(file);

out_nattch:
	down_write(&shm_ids(ns).rwsem);
	shp = shm_lock(ns, shmid);
	shp->shm_nattch--;
	if (shm_may_destroy(ns, shp))
		shm_destroy(ns, shp);
	else
		shm_unlock(shp);
	up_write(&shm_ids(ns).rwsem);
	return err;

out_unlock:
	rcu_read_unlock();
out:
	return err;
}

SYSCALL_DEFINE3(shmat, int, shmid, char __user *, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, shmaddr, shmflg, &ret, SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}

#ifdef CONFIG_COMPAT

#ifndef COMPAT_SHMLBA
#define COMPAT_SHMLBA	SHMLBA
#endif

COMPAT_SYSCALL_DEFINE3(shmat, int, shmid, compat_uptr_t, shmaddr, int, shmflg)
{
	unsigned long ret;
	long err;

	err = do_shmat(shmid, compat_ptr(shmaddr), shmflg, &ret, COMPAT_SHMLBA);
	if (err)
		return err;
	force_successful_syscall_return();
	return (long)ret;
}
#endif

/*
 * detach and kill segment if marked destroyed.
 * The work is done in shm_close.
 */
long ksys_shmdt(char __user *shmaddr)
{
	struct mm_struct *mm = current->mm;
	struct vm_area_struct *vma;
	unsigned long addr = (unsigned long)shmaddr;
	int retval = -EINVAL;
#ifdef CONFIG_MMU
	loff_t size = 0;
	struct file *file;
	struct vm_area_struct *next;
#endif

	if (addr & ~PAGE_MASK)
		return retval;

	if (down_write_killable(&mm->mmap_sem))
		return -EINTR;

	/*
	 * This function tries to be smart and unmap shm segments that
	 * were modified by partial mlock or munmap calls:
	 * - It first determines the size of the shm segment that should be
	 *   unmapped: It searches for a vma that is backed by shm and that
	 *   started at address shmaddr. It records its size and then unmaps
	 *   it.
	 * - Then it unmaps all shm vmas that started at shmaddr and that
	 *   are within the initially determined size and that are from the
	 *   same shm segment from which we determined the size.
	 * Errors from do_munmap are ignored: the function only fails if
	 * it's called with invalid parameters or if it's called to unmap
	 * a part of a vma. Both calls in this function are for full vmas,
	 * the parameters are directly copied from the vma itself and always
	 * valid - therefore do_munmap cannot fail. (famous last words?)
	 */
	/*
	 * If it had been mremap()'d, the starting address would not
	 * match the usual checks anyway. So assume all vma's are
	 * above the starting address given.
	 */
	vma = find_vma(mm, addr);

#ifdef CONFIG_MMU
	while (vma) {
		next = vma->vm_next;

		/*
		 * Check if the starting address would match, i.e. it's
		 * a fragment created by mprotect() and/or munmap(), or it
		 * otherwise starts at this address with no hassles.
		 */
		if ((vma->vm_ops == &shm_vm_ops) &&
			(vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) {

			/*
			 * Record the file of the shm segment being
			 * unmapped. With mremap(), someone could place
			 * page from another segment but with equal offsets
			 * in the range we are unmapping.
			 */
			file = vma->vm_file;
			size = i_size_read(file_inode(vma->vm_file));
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
			/*
			 * We discovered the size of the shm segment, so
			 * break out of here and fall through to the next
			 * loop that uses the size information to stop
			 * searching for matching vma's.
			 */
			retval = 0;
			vma = next;
			break;
		}
		vma = next;
	}

	/*
	 * We need look no further than the maximum address a fragment
	 * could possibly have landed at. Also cast things to loff_t to
	 * prevent overflows and make comparisons vs. equal-width types.
	 */
	size = PAGE_ALIGN(size);
	while (vma && (loff_t)(vma->vm_end - addr) <= size) {
		next = vma->vm_next;

		/* finding a matching vma now does not alter retval */
		if ((vma->vm_ops == &shm_vm_ops) &&
		    ((vma->vm_start - addr)/PAGE_SIZE == vma->vm_pgoff) &&
		    (vma->vm_file == file))
			do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		vma = next;
	}

#else	/* CONFIG_MMU */
	/* under NOMMU conditions, the exact address to be destroyed must be
	 * given
	 */
	if (vma && vma->vm_start == addr && vma->vm_ops == &shm_vm_ops) {
		do_munmap(mm, vma->vm_start, vma->vm_end - vma->vm_start, NULL);
		retval = 0;
	}

#endif

	up_write(&mm->mmap_sem);
	return retval;
}

SYSCALL_DEFINE1(shmdt, char __user *, shmaddr)
{
	return ksys_shmdt(shmaddr);
}

#ifdef CONFIG_PROC_FS
static int sysvipc_shm_proc_show(struct seq_file *s, void *it)
{
	struct pid_namespace *pid_ns = ipc_seq_pid_ns(s);
	struct user_namespace *user_ns = seq_user_ns(s);
	struct kern_ipc_perm *ipcp = it;
	struct shmid_kernel *shp;
	unsigned long rss = 0, swp = 0;

	shp = container_of(ipcp, struct shmid_kernel, shm_perm);
	shm_add_rss_swap(shp, &rss, &swp);

#if BITS_PER_LONG <= 32
#define SIZE_SPEC "%10lu"
#else
#define SIZE_SPEC "%21lu"
#endif

	seq_printf(s,
		   "%10d %10d %4o " SIZE_SPEC " %5u %5u "
		   "%5lu %5u %5u %5u %5u %10llu %10llu %10llu "
		   SIZE_SPEC " " SIZE_SPEC "\n",
		   shp->shm_perm.key,
		   shp->shm_perm.id,
		   shp->shm_perm.mode,
		   shp->shm_segsz,
		   pid_nr_ns(shp->shm_cprid, pid_ns),
		   pid_nr_ns(shp->shm_lprid, pid_ns),
		   shp->shm_nattch,
		   from_kuid_munged(user_ns, shp->shm_perm.uid),
		   from_kgid_munged(user_ns, shp->shm_perm.gid),
		   from_kuid_munged(user_ns, shp->shm_perm.cuid),
		   from_kgid_munged(user_ns, shp->shm_perm.cgid),
		   shp->shm_atim,
		   shp->shm_dtim,
		   shp->shm_ctim,
		   rss * PAGE_SIZE,
		   swp * PAGE_SIZE);

	return 0;
}
#endif