1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5 * Copyright 2020 The FreeBSD Foundation
6 * All rights reserved.
7 *
8 * Portions of this software were developed by BAE Systems, the University of
9 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11 * Computing (TC) research program.
12 *
13 * Portions of this software were developed by Konstantin Belousov
14 * under sponsorship from the FreeBSD Foundation.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 /*
39 * Support for shared swap-backed anonymous memory objects via
40 * shm_open(2), shm_rename(2), and shm_unlink(2).
41 * While most of the implementation is here, vm_mmap.c contains
42 * mapping logic changes.
43 *
44 * posixshmcontrol(1) allows users to inspect the state of the memory
45 * objects. The per-uid swap resource limit controls the total amount
46 * of memory that a user can consume for anonymous objects, including
47 * shared ones.
48 */
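/*
 * A purely illustrative userland sketch of the interface implemented
 * below (not part of the kernel); it assumes only the standard
 * shm_open(2), ftruncate(2), mmap(2), and shm_unlink(2) calls:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0) {
 *		void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
 *		    MAP_SHARED, fd, 0);
 *		if (p != MAP_FAILED)
 *			munmap(p, 4096);
 *	}
 *	close(fd);
 *	shm_unlink("/example");
 */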
49
50 #include <sys/cdefs.h>
51 #include "opt_capsicum.h"
52 #include "opt_ktrace.h"
53
54 #include <sys/param.h>
55 #include <sys/capsicum.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/file.h>
59 #include <sys/filedesc.h>
60 #include <sys/filio.h>
61 #include <sys/fnv_hash.h>
62 #include <sys/kernel.h>
63 #include <sys/limits.h>
64 #include <sys/uio.h>
65 #include <sys/signal.h>
66 #include <sys/jail.h>
67 #include <sys/ktrace.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mman.h>
71 #include <sys/mutex.h>
72 #include <sys/priv.h>
73 #include <sys/proc.h>
74 #include <sys/refcount.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/stat.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysproto.h>
82 #include <sys/systm.h>
83 #include <sys/sx.h>
84 #include <sys/time.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/user.h>
89
90 #include <security/audit/audit.h>
91 #include <security/mac/mac_framework.h>
92
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_pager.h>
103 #include <vm/swap_pager.h>
104
105 struct shm_mapping {
106 char *sm_path;
107 Fnv32_t sm_fnv;
108 struct shmfd *sm_shmfd;
109 LIST_ENTRY(shm_mapping) sm_link;
110 };
111
112 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
113 static LIST_HEAD(, shm_mapping) *shm_dictionary;
114 static struct sx shm_dict_lock;
115 static struct mtx shm_timestamp_lock;
116 static u_long shm_hash;
117 static struct unrhdr64 shm_ino_unr;
118 static dev_t shm_dev_ino;
119
120 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash])
121
122 static void shm_init(void *arg);
123 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
124 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
125 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
126 static void shm_doremove(struct shm_mapping *map);
127 static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
128 void *rl_cookie);
129 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
130 void *rl_cookie);
131 static int shm_copyin_path(struct thread *td, const char *userpath_in,
132 char **path_out);
133 static int shm_deallocate(struct shmfd *shmfd, off_t *offset,
134 off_t *length, int flags);
135
136 static fo_rdwr_t shm_read;
137 static fo_rdwr_t shm_write;
138 static fo_truncate_t shm_truncate;
139 static fo_ioctl_t shm_ioctl;
140 static fo_stat_t shm_stat;
141 static fo_close_t shm_close;
142 static fo_chmod_t shm_chmod;
143 static fo_chown_t shm_chown;
144 static fo_seek_t shm_seek;
145 static fo_fill_kinfo_t shm_fill_kinfo;
146 static fo_mmap_t shm_mmap;
147 static fo_get_seals_t shm_get_seals;
148 static fo_add_seals_t shm_add_seals;
149 static fo_fallocate_t shm_fallocate;
150 static fo_fspacectl_t shm_fspacectl;
151
152 /* File descriptor operations. */
153 struct fileops shm_ops = {
154 .fo_read = shm_read,
155 .fo_write = shm_write,
156 .fo_truncate = shm_truncate,
157 .fo_ioctl = shm_ioctl,
158 .fo_poll = invfo_poll,
159 .fo_kqfilter = invfo_kqfilter,
160 .fo_stat = shm_stat,
161 .fo_close = shm_close,
162 .fo_chmod = shm_chmod,
163 .fo_chown = shm_chown,
164 .fo_sendfile = vn_sendfile,
165 .fo_seek = shm_seek,
166 .fo_fill_kinfo = shm_fill_kinfo,
167 .fo_mmap = shm_mmap,
168 .fo_get_seals = shm_get_seals,
169 .fo_add_seals = shm_add_seals,
170 .fo_fallocate = shm_fallocate,
171 .fo_fspacectl = shm_fspacectl,
172 .fo_cmp = file_kcmp_generic,
173 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
174 };
175
176 FEATURE(posix_shm, "POSIX shared memory");
177
178 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
179 "");
180
181 static int largepage_reclaim_tries = 1;
182 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
183 CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
184 "Number of contig reclaims before giving up for default alloc policy");
185
186 #define shm_rangelock_unlock(shmfd, cookie) \
187 rangelock_unlock(&(shmfd)->shm_rl, (cookie))
188 #define shm_rangelock_rlock(shmfd, start, end) \
189 rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
190 #define shm_rangelock_tryrlock(shmfd, start, end) \
191 rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
192 #define shm_rangelock_wlock(shmfd, start, end) \
193 rangelock_wlock(&(shmfd)->shm_rl, (start), (end))
194
195 static int
196 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
197 {
198 vm_page_t m;
199 vm_pindex_t idx;
200 size_t tlen;
201 int error, offset, rv;
202
203 idx = OFF_TO_IDX(uio->uio_offset);
204 offset = uio->uio_offset & PAGE_MASK;
205 tlen = MIN(PAGE_SIZE - offset, len);
206
207 rv = vm_page_grab_valid_unlocked(&m, obj, idx,
208 VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
209 if (rv == VM_PAGER_OK)
210 goto found;
211
212 /*
213 * Read I/O without either a corresponding resident page or swap
214 * page: use zero_region. This is intended to avoid instantiating
215 * pages on read from a sparse region.
216 */
217 VM_OBJECT_WLOCK(obj);
218 m = vm_page_lookup(obj, idx);
219 if (uio->uio_rw == UIO_READ && m == NULL &&
220 !vm_pager_has_page(obj, idx, NULL, NULL)) {
221 VM_OBJECT_WUNLOCK(obj);
222 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
223 }
224
225 /*
226 * Although the tmpfs vnode lock is held here, it is
227 * nonetheless safe to sleep waiting for a free page. The
228 * pageout daemon does not need to acquire the tmpfs vnode
229 * lock to page out tobj's pages because tobj is an OBJT_SWAP
230 * type object.
231 */
232 rv = vm_page_grab_valid(&m, obj, idx,
233 VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY);
234 if (rv != VM_PAGER_OK) {
235 VM_OBJECT_WUNLOCK(obj);
236 if (bootverbose) {
237 printf("uiomove_object: vm_obj %p idx %jd "
238 "pager error %d\n", obj, idx, rv);
239 }
240 return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
241 }
242 VM_OBJECT_WUNLOCK(obj);
243
244 found:
245 error = uiomove_fromphys(&m, offset, tlen, uio);
246 if (uio->uio_rw == UIO_WRITE && error == 0)
247 vm_page_set_dirty(m);
248 vm_page_activate(m);
249 vm_page_sunbusy(m);
250
251 return (error);
252 }
253
254 int
255 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
256 {
257 ssize_t resid;
258 size_t len;
259 int error;
260
261 error = 0;
262 while ((resid = uio->uio_resid) > 0) {
263 if (obj_size <= uio->uio_offset)
264 break;
265 len = MIN(obj_size - uio->uio_offset, resid);
266 if (len == 0)
267 break;
268 error = uiomove_object_page(obj, len, uio);
269 if (error != 0 || resid == uio->uio_resid)
270 break;
271 }
272 return (error);
273 }
274
275 static u_long count_largepages[MAXPAGESIZES];
276
277 static int
278 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
279 int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
280 {
281 vm_page_t m __diagused;
282 int psind;
283
284 psind = object->un_pager.phys.data_val;
285 if (psind == 0 || pidx >= object->size)
286 return (VM_PAGER_FAIL);
287 *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
288
289 /*
290 * We only busy the first page in the superpage run. It is
291 * useless to busy the whole run since we only remove full
292 * superpages, and it takes too long to busy e.g. 512 * 512 ==
293 * 262144 pages constituting a 1G amd64 superpage.
294 */
295 m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
296 MPASS(m != NULL);
297
298 *last = *first + atop(pagesizes[psind]) - 1;
299 return (VM_PAGER_OK);
300 }
301
302 static boolean_t
303 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
304 int *before, int *after)
305 {
306 int psind;
307
308 psind = object->un_pager.phys.data_val;
309 if (psind == 0 || pindex >= object->size)
310 return (FALSE);
311 if (before != NULL) {
312 *before = pindex - rounddown2(pindex, pagesizes[psind] /
313 PAGE_SIZE);
314 }
315 if (after != NULL) {
316 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
317 pindex;
318 }
319 return (TRUE);
320 }
321
322 static void
323 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
324 vm_ooffset_t foff, struct ucred *cred)
325 {
326 }
327
328 static void
329 shm_largepage_phys_dtor(vm_object_t object)
330 {
331 int psind;
332
333 psind = object->un_pager.phys.data_val;
334 if (psind != 0) {
335 atomic_subtract_long(&count_largepages[psind],
336 object->size / (pagesizes[psind] / PAGE_SIZE));
337 vm_wire_sub(object->size);
338 } else {
339 KASSERT(object->size == 0,
340 ("largepage phys obj %p not initialized bit size %#jx > 0",
341 object, (uintmax_t)object->size));
342 }
343 }
344
345 static const struct phys_pager_ops shm_largepage_phys_ops = {
346 .phys_pg_populate = shm_largepage_phys_populate,
347 .phys_pg_haspage = shm_largepage_phys_haspage,
348 .phys_pg_ctor = shm_largepage_phys_ctor,
349 .phys_pg_dtor = shm_largepage_phys_dtor,
350 };
351
352 bool
353 shm_largepage(struct shmfd *shmfd)
354 {
355 return (shmfd->shm_object->type == OBJT_PHYS);
356 }
357
358 static void
359 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
360 {
361 struct shmfd *shm;
362 vm_size_t c;
363
364 swap_pager_freespace(obj, start, size, &c);
365 if (c == 0)
366 return;
367
368 shm = obj->un_pager.swp.swp_priv;
369 if (shm == NULL)
370 return;
371 KASSERT(shm->shm_pages >= c,
372 ("shm %p pages %jd free %jd", shm,
373 (uintmax_t)shm->shm_pages, (uintmax_t)c));
374 shm->shm_pages -= c;
375 }
376
377 static void
378 shm_page_inserted(vm_object_t obj, vm_page_t m)
379 {
380 struct shmfd *shm;
381
382 shm = obj->un_pager.swp.swp_priv;
383 if (shm == NULL)
384 return;
385 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
386 shm->shm_pages += 1;
387 }
388
389 static void
390 shm_page_removed(vm_object_t obj, vm_page_t m)
391 {
392 struct shmfd *shm;
393
394 shm = obj->un_pager.swp.swp_priv;
395 if (shm == NULL)
396 return;
397 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
398 KASSERT(shm->shm_pages >= 1,
399 ("shm %p pages %jd free 1", shm,
400 (uintmax_t)shm->shm_pages));
401 shm->shm_pages -= 1;
402 }
403 }
404
405 static struct pagerops shm_swap_pager_ops = {
406 .pgo_kvme_type = KVME_TYPE_SWAP,
407 .pgo_freespace = shm_pager_freespace,
408 .pgo_page_inserted = shm_page_inserted,
409 .pgo_page_removed = shm_page_removed,
410 };
411 static int shmfd_pager_type = -1;
412
413 static int
414 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
415 {
416 struct shmfd *shmfd;
417 off_t foffset;
418 int error;
419
420 shmfd = fp->f_data;
421 foffset = foffset_lock(fp, 0);
422 error = 0;
423 switch (whence) {
424 case L_INCR:
425 if (foffset < 0 ||
426 (offset > 0 && foffset > OFF_MAX - offset)) {
427 error = EOVERFLOW;
428 break;
429 }
430 offset += foffset;
431 break;
432 case L_XTND:
433 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
434 error = EOVERFLOW;
435 break;
436 }
437 offset += shmfd->shm_size;
438 break;
439 case L_SET:
440 break;
441 default:
442 error = EINVAL;
443 }
444 if (error == 0) {
445 if (offset < 0 || offset > shmfd->shm_size)
446 error = EINVAL;
447 else
448 td->td_uretoff.tdu_off = offset;
449 }
450 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
451 return (error);
452 }
453
454 static int
455 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
456 int flags, struct thread *td)
457 {
458 struct shmfd *shmfd;
459 void *rl_cookie;
460 int error;
461
462 shmfd = fp->f_data;
463 #ifdef MAC
464 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
465 if (error)
466 return (error);
467 #endif
468 foffset_lock_uio(fp, uio, flags);
469 rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
470 uio->uio_offset + uio->uio_resid);
471 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
472 shm_rangelock_unlock(shmfd, rl_cookie);
473 foffset_unlock_uio(fp, uio, flags);
474 return (error);
475 }
476
477 static int
478 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
479 int flags, struct thread *td)
480 {
481 struct shmfd *shmfd;
482 void *rl_cookie;
483 int error;
484 off_t size;
485
486 shmfd = fp->f_data;
487 #ifdef MAC
488 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
489 if (error)
490 return (error);
491 #endif
492 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
493 return (EINVAL);
494 foffset_lock_uio(fp, uio, flags);
495 if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
496 /*
497 * Overflow is only an error if we're supposed to expand on
498 * write. Otherwise, we'll just truncate the write to the
499 * size of the file, which can only grow up to OFF_MAX.
500 */
501 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
502 foffset_unlock_uio(fp, uio, flags);
503 return (EFBIG);
504 }
505
506 size = shmfd->shm_size;
507 } else {
508 size = uio->uio_offset + uio->uio_resid;
509 }
510 if ((flags & FOF_OFFSET) == 0)
511 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
512 else
513 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset, size);
514 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
515 error = EPERM;
516 } else {
517 error = 0;
518 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
519 size > shmfd->shm_size) {
520 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
521 }
522 if (error == 0)
523 error = uiomove_object(shmfd->shm_object,
524 shmfd->shm_size, uio);
525 }
526 shm_rangelock_unlock(shmfd, rl_cookie);
527 foffset_unlock_uio(fp, uio, flags);
528 return (error);
529 }
530
531 static int
532 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
533 struct thread *td)
534 {
535 struct shmfd *shmfd;
536 #ifdef MAC
537 int error;
538 #endif
539
540 shmfd = fp->f_data;
541 #ifdef MAC
542 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
543 if (error)
544 return (error);
545 #endif
546 return (shm_dotruncate(shmfd, length));
547 }
548
549 int
550 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
551 struct thread *td)
552 {
553 struct shmfd *shmfd;
554 struct shm_largepage_conf *conf;
555 void *rl_cookie;
556
557 shmfd = fp->f_data;
558 switch (com) {
559 case FIONBIO:
560 case FIOASYNC:
561 /*
562 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
563 * just like it would on an unlinked regular file.
564 */
565 return (0);
566 case FIOSSHMLPGCNF:
567 if (!shm_largepage(shmfd))
568 return (ENOTTY);
569 conf = data;
570 if (shmfd->shm_lp_psind != 0 &&
571 conf->psind != shmfd->shm_lp_psind)
572 return (EINVAL);
573 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
574 pagesizes[conf->psind] == 0)
575 return (EINVAL);
576 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
577 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
578 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
579 return (EINVAL);
580
581 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
582 shmfd->shm_lp_psind = conf->psind;
583 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
584 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
585 shm_rangelock_unlock(shmfd, rl_cookie);
586 return (0);
587 case FIOGSHMLPGCNF:
588 if (!shm_largepage(shmfd))
589 return (ENOTTY);
590 conf = data;
591 rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
592 conf->psind = shmfd->shm_lp_psind;
593 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
594 shm_rangelock_unlock(shmfd, rl_cookie);
595 return (0);
596 default:
597 return (ENOTTY);
598 }
599 }
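/*
 * A hedged userland sketch of driving the FIOSSHMLPGCNF/FIOGSHMLPGCNF
 * handlers above.  It assumes "fd" already refers to a SHM_LARGEPAGE
 * object (e.g. one obtained via shm_create_largepage(2)) and that page
 * size index 1 is a valid superpage size on this machine:
 *
 *	#include <sys/ioctl.h>
 *	#include <sys/mman.h>
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	if (ioctl(fd, FIOSSHMLPGCNF, &conf) == 0 &&
 *	    ioctl(fd, FIOGSHMLPGCNF, &conf) == 0)
 *		;	/* conf now reflects the configured psind and policy */
 */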
600
601 static int
602 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
603 {
604 struct shmfd *shmfd;
605 #ifdef MAC
606 int error;
607 #endif
608
609 shmfd = fp->f_data;
610
611 #ifdef MAC
612 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
613 if (error)
614 return (error);
615 #endif
616
617 /*
618 * Attempt to return sane-ish values for fstat() on a memory file
619 * descriptor.
620 */
621 bzero(sb, sizeof(*sb));
622 sb->st_blksize = PAGE_SIZE;
623 sb->st_size = shmfd->shm_size;
624 mtx_lock(&shm_timestamp_lock);
625 sb->st_atim = shmfd->shm_atime;
626 sb->st_ctim = shmfd->shm_ctime;
627 sb->st_mtim = shmfd->shm_mtime;
628 sb->st_birthtim = shmfd->shm_birthtime;
629 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */
630 sb->st_uid = shmfd->shm_uid;
631 sb->st_gid = shmfd->shm_gid;
632 mtx_unlock(&shm_timestamp_lock);
633 sb->st_dev = shm_dev_ino;
634 sb->st_ino = shmfd->shm_ino;
635 sb->st_nlink = shmfd->shm_object->ref_count;
636 if (shm_largepage(shmfd)) {
637 sb->st_blocks = shmfd->shm_object->size /
638 (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
639 } else {
640 sb->st_blocks = shmfd->shm_pages;
641 }
642
643 return (0);
644 }
645
646 static int
647 shm_close(struct file *fp, struct thread *td)
648 {
649 struct shmfd *shmfd;
650
651 shmfd = fp->f_data;
652 fp->f_data = NULL;
653 shm_drop(shmfd);
654
655 return (0);
656 }
657
658 static int
659 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out) {
660 int error;
661 char *path;
662 const char *pr_path;
663 size_t pr_pathlen;
664
665 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
666 pr_path = td->td_ucred->cr_prison->pr_path;
667
668 /* Construct a full pathname for jailed callers. */
669 pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
670     strlcpy(path, pr_path, MAXPATHLEN);
671 error = copyinstr(userpath_in, path + pr_pathlen,
672 MAXPATHLEN - pr_pathlen, NULL);
673 if (error != 0)
674 goto out;
675
676 #ifdef KTRACE
677 if (KTRPOINT(curthread, KTR_NAMEI))
678 ktrnamei(path);
679 #endif
680
681 /* Require paths to start with a '/' character. */
682 if (path[pr_pathlen] != '/') {
683 error = EINVAL;
684 goto out;
685 }
686
687 *path_out = path;
688
689 out:
690 if (error != 0)
691 free(path, M_SHMFD);
692
693 return (error);
694 }
695
696 static int
697 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
698 int end)
699 {
700 vm_page_t m;
701 int rv;
702
703 VM_OBJECT_ASSERT_WLOCKED(object);
704 KASSERT(base >= 0, ("%s: base %d", __func__, base));
705 KASSERT(end - base <= PAGE_SIZE, ("%s: base %d end %d", __func__, base,
706 end));
707
708 retry:
709 m = vm_page_grab(object, idx, VM_ALLOC_NOCREAT);
710 if (m != NULL) {
711 MPASS(vm_page_all_valid(m));
712 } else if (vm_pager_has_page(object, idx, NULL, NULL)) {
713 m = vm_page_alloc(object, idx,
714 VM_ALLOC_NORMAL | VM_ALLOC_WAITFAIL);
715 if (m == NULL)
716 goto retry;
717 vm_object_pip_add(object, 1);
718 VM_OBJECT_WUNLOCK(object);
719 rv = vm_pager_get_pages(object, &m, 1, NULL, NULL);
720 VM_OBJECT_WLOCK(object);
721 vm_object_pip_wakeup(object);
722 if (rv == VM_PAGER_OK) {
723 /*
724 * Since the page was not resident, and therefore not
725 * recently accessed, immediately enqueue it for
726 * asynchronous laundering. The current operation is
727 * not regarded as an access.
728 */
729 vm_page_launder(m);
730 } else {
731 vm_page_free(m);
732 VM_OBJECT_WUNLOCK(object);
733 return (EIO);
734 }
735 }
736 if (m != NULL) {
737 pmap_zero_page_area(m, base, end - base);
738 KASSERT(vm_page_all_valid(m), ("%s: page %p is invalid",
739 __func__, m));
740 vm_page_set_dirty(m);
741 vm_page_xunbusy(m);
742 }
743
744 return (0);
745 }
746
747 static int
748 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
749 {
750 vm_object_t object;
751 vm_pindex_t nobjsize;
752 vm_ooffset_t delta;
753 int base, error;
754
755 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
756 object = shmfd->shm_object;
757 VM_OBJECT_ASSERT_WLOCKED(object);
758 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
759 if (length == shmfd->shm_size)
760 return (0);
761 nobjsize = OFF_TO_IDX(length + PAGE_MASK);
762
763 /* Are we shrinking? If so, trim the end. */
764 if (length < shmfd->shm_size) {
765 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
766 return (EPERM);
767
768 /*
769 * Disallow any requests to shrink the size if this
770 * object is mapped into the kernel.
771 */
772 if (shmfd->shm_kmappings > 0)
773 return (EBUSY);
774
775 /*
776 * Zero the truncated part of the last page.
777 */
778 base = length & PAGE_MASK;
779 if (base != 0) {
780 error = shm_partial_page_invalidate(object,
781 OFF_TO_IDX(length), base, PAGE_SIZE);
782 if (error)
783 return (error);
784 }
785 delta = IDX_TO_OFF(object->size - nobjsize);
786
787 if (nobjsize < object->size)
788 vm_object_page_remove(object, nobjsize, object->size,
789 0);
790
791 /* Free the swap accounted for shm */
792 swap_release_by_cred(delta, object->cred);
793 object->charge -= delta;
794 } else {
795 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
796 return (EPERM);
797
798 /* Try to reserve additional swap space. */
799 delta = IDX_TO_OFF(nobjsize - object->size);
800 if (!swap_reserve_by_cred(delta, object->cred))
801 return (ENOMEM);
802 object->charge += delta;
803 }
804 shmfd->shm_size = length;
805 mtx_lock(&shm_timestamp_lock);
806 vfs_timestamp(&shmfd->shm_ctime);
807 shmfd->shm_mtime = shmfd->shm_ctime;
808 mtx_unlock(&shm_timestamp_lock);
809 object->size = nobjsize;
810 return (0);
811 }
812
813 static int
814 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
815 {
816 vm_object_t object;
817 vm_page_t m;
818 vm_pindex_t newobjsz;
819 vm_pindex_t oldobjsz __unused;
820 int aflags, error, i, psind, try;
821
822 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
823 object = shmfd->shm_object;
824 VM_OBJECT_ASSERT_WLOCKED(object);
825 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
826
827 oldobjsz = object->size;
828 newobjsz = OFF_TO_IDX(length);
829 if (length == shmfd->shm_size)
830 return (0);
831 psind = shmfd->shm_lp_psind;
832 if (psind == 0 && length != 0)
833 return (EINVAL);
834 if ((length & (pagesizes[psind] - 1)) != 0)
835 return (EINVAL);
836
837 if (length < shmfd->shm_size) {
838 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
839 return (EPERM);
840 if (shmfd->shm_kmappings > 0)
841 return (EBUSY);
842 return (ENOTSUP); /* Pages are unmanaged. */
843 #if 0
844 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
845 object->size = newobjsz;
846 shmfd->shm_size = length;
847 return (0);
848 #endif
849 }
850
851 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
852 return (EPERM);
853
854 aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
855 if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
856 aflags |= VM_ALLOC_WAITFAIL;
857 try = 0;
858
859 /*
860 * Extend shmfd and object, keeping all already fully
861 * allocated large pages intact even on error, because the
862 * dropped object lock might have allowed them to be mapped.
863 */
864 while (object->size < newobjsz) {
865 m = vm_page_alloc_contig(object, object->size, aflags,
866 pagesizes[psind] / PAGE_SIZE, 0, ~0,
867 pagesizes[psind], 0,
868 VM_MEMATTR_DEFAULT);
869 if (m == NULL) {
870 VM_OBJECT_WUNLOCK(object);
871 if (shmfd->shm_lp_alloc_policy ==
872 SHM_LARGEPAGE_ALLOC_NOWAIT ||
873 (shmfd->shm_lp_alloc_policy ==
874 SHM_LARGEPAGE_ALLOC_DEFAULT &&
875 try >= largepage_reclaim_tries)) {
876 VM_OBJECT_WLOCK(object);
877 return (ENOMEM);
878 }
879 error = vm_page_reclaim_contig(aflags,
880 pagesizes[psind] / PAGE_SIZE, 0, ~0,
881 pagesizes[psind], 0);
882 if (error == ENOMEM)
883 error = vm_wait_intr(object);
884 if (error != 0) {
885 VM_OBJECT_WLOCK(object);
886 return (error);
887 }
888 try++;
889 VM_OBJECT_WLOCK(object);
890 continue;
891 }
892 try = 0;
893 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
894 if ((m[i].flags & PG_ZERO) == 0)
895 pmap_zero_page(&m[i]);
896 vm_page_valid(&m[i]);
897 vm_page_xunbusy(&m[i]);
898 }
899 object->size += OFF_TO_IDX(pagesizes[psind]);
900 shmfd->shm_size += pagesizes[psind];
901 atomic_add_long(&count_largepages[psind], 1);
902 vm_wire_add(atop(pagesizes[psind]));
903 }
904 return (0);
905 }
906
907 static int
908 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
909 {
910 int error;
911
912 VM_OBJECT_WLOCK(shmfd->shm_object);
913 error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
914 length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
915 rl_cookie);
916 VM_OBJECT_WUNLOCK(shmfd->shm_object);
917 return (error);
918 }
919
920 int
921 shm_dotruncate(struct shmfd *shmfd, off_t length)
922 {
923 void *rl_cookie;
924 int error;
925
926 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
927 error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
928 shm_rangelock_unlock(shmfd, rl_cookie);
929 return (error);
930 }
931
932 /*
933 * shmfd object management including creation and reference counting
934 * routines.
935 */
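/*
 * A hedged in-kernel sketch of the lifecycle these routines provide: an
 * anonymous object is created with shm_alloc(), extra references are
 * taken with shm_hold(), and each reference is released with shm_drop(),
 * which frees the object once the last reference goes away:
 *
 *	struct shmfd *shmfd;
 *
 *	shmfd = shm_alloc(td->td_ucred, 0600, false);
 *	shm_dotruncate(shmfd, PAGE_SIZE);
 *	...
 *	shm_drop(shmfd);
 */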
936 struct shmfd *
937 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
938 {
939 struct shmfd *shmfd;
940 vm_object_t obj;
941
942 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
943 shmfd->shm_size = 0;
944 shmfd->shm_uid = ucred->cr_uid;
945 shmfd->shm_gid = ucred->cr_gid;
946 shmfd->shm_mode = mode;
947 if (largepage) {
948 obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops,
949 NULL, shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
950 obj->un_pager.phys.phys_priv = shmfd;
951 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
952 } else {
953 obj = vm_pager_allocate(shmfd_pager_type, NULL,
954 shmfd->shm_size, VM_PROT_DEFAULT, 0, ucred);
955 obj->un_pager.swp.swp_priv = shmfd;
956 }
957 KASSERT(obj != NULL, ("shm_create: vm_pager_allocate"));
958 VM_OBJECT_WLOCK(obj);
959 vm_object_set_flag(obj, OBJ_POSIXSHM);
960 VM_OBJECT_WUNLOCK(obj);
961 shmfd->shm_object = obj;
962 vfs_timestamp(&shmfd->shm_birthtime);
963 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
964 shmfd->shm_birthtime;
965 shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
966 refcount_init(&shmfd->shm_refs, 1);
967 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
968 rangelock_init(&shmfd->shm_rl);
969 #ifdef MAC
970 mac_posixshm_init(shmfd);
971 mac_posixshm_create(ucred, shmfd);
972 #endif
973
974 return (shmfd);
975 }
976
977 struct shmfd *
978 shm_hold(struct shmfd *shmfd)
979 {
980
981 refcount_acquire(&shmfd->shm_refs);
982 return (shmfd);
983 }
984
985 void
986 shm_drop(struct shmfd *shmfd)
987 {
988 vm_object_t obj;
989
990 if (refcount_release(&shmfd->shm_refs)) {
991 #ifdef MAC
992 mac_posixshm_destroy(shmfd);
993 #endif
994 rangelock_destroy(&shmfd->shm_rl);
995 mtx_destroy(&shmfd->shm_mtx);
996 obj = shmfd->shm_object;
997 VM_OBJECT_WLOCK(obj);
998 if (shm_largepage(shmfd))
999 obj->un_pager.phys.phys_priv = NULL;
1000 else
1001 obj->un_pager.swp.swp_priv = NULL;
1002 VM_OBJECT_WUNLOCK(obj);
1003 vm_object_deallocate(obj);
1004 free(shmfd, M_SHMFD);
1005 }
1006 }
1007
1008 /*
1009 * Determine if the credentials have sufficient permissions for a
1010 * specified combination of FREAD and FWRITE.
1011 */
1012 int
1013 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
1014 {
1015 accmode_t accmode;
1016 int error;
1017
1018 accmode = 0;
1019 if (flags & FREAD)
1020 accmode |= VREAD;
1021 if (flags & FWRITE)
1022 accmode |= VWRITE;
1023 mtx_lock(&shm_timestamp_lock);
1024 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1025 accmode, ucred);
1026 mtx_unlock(&shm_timestamp_lock);
1027 return (error);
1028 }
1029
1030 static void
1031 shm_init(void *arg)
1032 {
1033 char name[32];
1034 int i;
1035
1036 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1037 sx_init(&shm_dict_lock, "shm dictionary");
1038 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1039 new_unrhdr64(&shm_ino_unr, 1);
1040 shm_dev_ino = devfs_alloc_cdp_inode();
1041 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1042 shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1043 OBJT_SWAP);
1044 MPASS(shmfd_pager_type != -1);
1045
1046 for (i = 1; i < MAXPAGESIZES; i++) {
1047 if (pagesizes[i] == 0)
1048 break;
1049 #define M (1024 * 1024)
1050 #define G (1024 * M)
1051 if (pagesizes[i] >= G)
1052 snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1053 else if (pagesizes[i] >= M)
1054 snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1055 else
1056 snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1057 #undef G
1058 #undef M
1059 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1060 OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1061 "number of non-transient largepages allocated");
1062 }
1063 }
1064 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1065
1066 /*
1067 * Remove all shared memory objects that belong to a prison.
1068 */
1069 void
1070 shm_remove_prison(struct prison *pr)
1071 {
1072 struct shm_mapping *shmm, *tshmm;
1073 u_long i;
1074
1075 sx_xlock(&shm_dict_lock);
1076 for (i = 0; i < shm_hash + 1; i++) {
1077 LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1078 if (shmm->sm_shmfd->shm_object->cred &&
1079 shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1080 shm_doremove(shmm);
1081 }
1082 }
1083 sx_xunlock(&shm_dict_lock);
1084 }
1085
1086 /*
1087 * Dictionary management. We maintain an in-kernel dictionary to map
1088 * paths to shmfd objects. We use the FNV hash on the path to store
1089 * the mappings in a hash table.
1090 */
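/*
 * A hedged sketch of the canonical access pattern, mirroring
 * kern_shm_open2() below; all dictionary operations run under
 * shm_dict_lock and the path's FNV hash selects the bucket:
 *
 *	fnv = fnv_32_str(path, FNV1_32_INIT);
 *	sx_xlock(&shm_dict_lock);
 *	shmfd = shm_lookup(path, fnv);
 *	if (shmfd == NULL) {
 *		shmfd = shm_alloc(ucred, cmode, false);
 *		shm_insert(path, fnv, shmfd);
 *	}
 *	sx_xunlock(&shm_dict_lock);
 */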
1091 static struct shmfd *
1092 shm_lookup(char *path, Fnv32_t fnv)
1093 {
1094 struct shm_mapping *map;
1095
1096 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1097 if (map->sm_fnv != fnv)
1098 continue;
1099 if (strcmp(map->sm_path, path) == 0)
1100 return (map->sm_shmfd);
1101 }
1102
1103 return (NULL);
1104 }
1105
1106 static void
1107 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1108 {
1109 struct shm_mapping *map;
1110
1111 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1112 map->sm_path = path;
1113 map->sm_fnv = fnv;
1114 map->sm_shmfd = shm_hold(shmfd);
1115 shmfd->shm_path = path;
1116 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1117 }
1118
1119 static int
1120 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1121 {
1122 struct shm_mapping *map;
1123 int error;
1124
1125 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1126 if (map->sm_fnv != fnv)
1127 continue;
1128 if (strcmp(map->sm_path, path) == 0) {
1129 #ifdef MAC
1130 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1131 if (error)
1132 return (error);
1133 #endif
1134 error = shm_access(map->sm_shmfd, ucred,
1135 FREAD | FWRITE);
1136 if (error)
1137 return (error);
1138 shm_doremove(map);
1139 return (0);
1140 }
1141 }
1142
1143 return (ENOENT);
1144 }
1145
1146 static void
1147 shm_doremove(struct shm_mapping *map)
1148 {
1149 map->sm_shmfd->shm_path = NULL;
1150 LIST_REMOVE(map, sm_link);
1151 shm_drop(map->sm_shmfd);
1152 free(map->sm_path, M_SHMFD);
1153 free(map, M_SHMFD);
1154 }
1155
1156 int
1157 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1158 int shmflags, struct filecaps *fcaps, const char *name __unused)
1159 {
1160 struct pwddesc *pdp;
1161 struct shmfd *shmfd;
1162 struct file *fp;
1163 char *path;
1164 void *rl_cookie;
1165 Fnv32_t fnv;
1166 mode_t cmode;
1167 int error, fd, initial_seals;
1168 bool largepage;
1169
1170 if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1171 SHM_LARGEPAGE)) != 0)
1172 return (EINVAL);
1173
1174 initial_seals = F_SEAL_SEAL;
1175 if ((shmflags & SHM_ALLOW_SEALING) != 0)
1176 initial_seals &= ~F_SEAL_SEAL;
1177
1178 AUDIT_ARG_FFLAGS(flags);
1179 AUDIT_ARG_MODE(mode);
1180
1181 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1182 return (EINVAL);
1183
1184 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1185 return (EINVAL);
1186
1187 largepage = (shmflags & SHM_LARGEPAGE) != 0;
1188 if (largepage && !PMAP_HAS_LARGEPAGES)
1189 return (ENOTTY);
1190
1191 /*
1192 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1193 * If the decision is made later to allow additional seals, care must be
1194 * taken below to ensure that the seals are properly set if the shmfd
1195 * already existed -- this currently assumes that only F_SEAL_SEAL can
1196 * be set and doesn't take further precautions to ensure the validity of
1197 * the seals being added with respect to current mappings.
1198 */
1199 if ((initial_seals & ~F_SEAL_SEAL) != 0)
1200 return (EINVAL);
1201
1202 if (userpath != SHM_ANON) {
1203 error = shm_copyin_path(td, userpath, &path);
1204 if (error != 0)
1205 return (error);
1206
1207 #ifdef CAPABILITY_MODE
1208 /*
1209 * shm_open(2) is only allowed for anonymous objects.
1210 */
1211 if (CAP_TRACING(td))
1212 ktrcapfail(CAPFAIL_NAMEI, path);
1213 if (IN_CAPABILITY_MODE(td)) {
1214 free(path, M_SHMFD);
1215 return (ECAPMODE);
1216 }
1217 #endif
1218
1219 AUDIT_ARG_UPATH1_CANON(path);
1220 } else {
1221 path = NULL;
1222 }
1223
1224 pdp = td->td_proc->p_pd;
1225 cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1226
1227 /*
1228 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1229 * by POSIX. We allow it to be unset here so that an in-kernel
1230 * interface may be written as a thin layer around shm, optionally not
1231 * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally
1232 * in sys_shm_open() to keep this implementation compliant.
1233 */
1234 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1235 if (error) {
1236 free(path, M_SHMFD);
1237 return (error);
1238 }
1239
1240 /* A SHM_ANON path pointer creates an anonymous object. */
1241 if (userpath == SHM_ANON) {
1242 /* A read-only anonymous object is pointless. */
1243 if ((flags & O_ACCMODE) == O_RDONLY) {
1244 fdclose(td, fp, fd);
1245 fdrop(fp, td);
1246 return (EINVAL);
1247 }
1248 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1249 shmfd->shm_seals = initial_seals;
1250 shmfd->shm_flags = shmflags;
1251 } else {
1252 fnv = fnv_32_str(path, FNV1_32_INIT);
1253 sx_xlock(&shm_dict_lock);
1254 shmfd = shm_lookup(path, fnv);
1255 if (shmfd == NULL) {
1256 /* Object does not yet exist, create it if requested. */
1257 if (flags & O_CREAT) {
1258 #ifdef MAC
1259 error = mac_posixshm_check_create(td->td_ucred,
1260 path);
1261 if (error == 0) {
1262 #endif
1263 shmfd = shm_alloc(td->td_ucred, cmode,
1264 largepage);
1265 shmfd->shm_seals = initial_seals;
1266 shmfd->shm_flags = shmflags;
1267 shm_insert(path, fnv, shmfd);
1268 #ifdef MAC
1269 }
1270 #endif
1271 } else {
1272 free(path, M_SHMFD);
1273 error = ENOENT;
1274 }
1275 } else {
1276 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1277
1278 /*
1279 * kern_shm_open() likely shouldn't ever error out on
1280 * trying to set a seal that already exists, unlike
1281 * F_ADD_SEALS. This would break terribly as
1282 * shm_open(2) actually sets F_SEAL_SEAL to maintain
1283 * historical behavior where the underlying file could
1284 * not be sealed.
1285 */
1286 initial_seals &= ~shmfd->shm_seals;
1287
1288 /*
1289 * Object already exists, obtain a new
1290 * reference if requested and permitted.
1291 */
1292 free(path, M_SHMFD);
1293
1294 /*
1295 * initial_seals can't set additional seals if F_SEAL_SEAL
1296 * has already been set. If F_SEAL_SEAL is set,
1297 * then we've already removed that one from
1298 * initial_seals. This is currently redundant as we
1299 * only allow setting F_SEAL_SEAL at creation time, but
1300 * it's cheap to check and decreases the effort required
1301 * to allow additional seals.
1302 */
1303 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1304 initial_seals != 0)
1305 error = EPERM;
1306 else if ((flags & (O_CREAT | O_EXCL)) ==
1307 (O_CREAT | O_EXCL))
1308 error = EEXIST;
1309 else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1310 error = EINVAL;
1311 else {
1312 #ifdef MAC
1313 error = mac_posixshm_check_open(td->td_ucred,
1314 shmfd, FFLAGS(flags & O_ACCMODE));
1315 if (error == 0)
1316 #endif
1317 error = shm_access(shmfd, td->td_ucred,
1318 FFLAGS(flags & O_ACCMODE));
1319 }
1320
1321 /*
1322 * Truncate the file back to zero length if
1323 * O_TRUNC was specified and the object was
1324 * opened with read/write.
1325 */
1326 if (error == 0 &&
1327 (flags & (O_ACCMODE | O_TRUNC)) ==
1328 (O_RDWR | O_TRUNC)) {
1329 VM_OBJECT_WLOCK(shmfd->shm_object);
1330 #ifdef MAC
1331 error = mac_posixshm_check_truncate(
1332 td->td_ucred, fp->f_cred, shmfd);
1333 if (error == 0)
1334 #endif
1335 error = shm_dotruncate_locked(shmfd, 0,
1336 rl_cookie);
1337 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1338 }
1339 if (error == 0) {
1340 /*
1341 * Currently we only allow F_SEAL_SEAL to be
1342 * set initially. As noted above, this would
1343 * need to be reworked should that change.
1344 */
1345 shmfd->shm_seals |= initial_seals;
1346 shm_hold(shmfd);
1347 }
1348 shm_rangelock_unlock(shmfd, rl_cookie);
1349 }
1350 sx_xunlock(&shm_dict_lock);
1351
1352 if (error) {
1353 fdclose(td, fp, fd);
1354 fdrop(fp, td);
1355 return (error);
1356 }
1357 }
1358
1359 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1360
1361 td->td_retval[0] = fd;
1362 fdrop(fp, td);
1363
1364 return (0);
1365 }
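/*
 * A hedged userland sketch of the sealing behavior handled above: the
 * object must be created with sealing allowed (clearing the implicit
 * F_SEAL_SEAL), e.g. via memfd_create(2), before fcntl(2) F_ADD_SEALS
 * can succeed:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = memfd_create("example", MFD_ALLOW_SEALING);
 *	if (fd >= 0 && ftruncate(fd, 4096) == 0)
 *		fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK);
 */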
1366
1367 /* System calls. */
1368 #ifdef COMPAT_FREEBSD12
1369 int
1370 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1371 {
1372
1373 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1374 uap->mode, NULL));
1375 }
1376 #endif
1377
1378 int
1379 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1380 {
1381 char *path;
1382 Fnv32_t fnv;
1383 int error;
1384
1385 error = shm_copyin_path(td, uap->path, &path);
1386 if (error != 0)
1387 return (error);
1388
1389 AUDIT_ARG_UPATH1_CANON(path);
1390 fnv = fnv_32_str(path, FNV1_32_INIT);
1391 sx_xlock(&shm_dict_lock);
1392 error = shm_remove(path, fnv, td->td_ucred);
1393 sx_xunlock(&shm_dict_lock);
1394 free(path, M_SHMFD);
1395
1396 return (error);
1397 }
1398
1399 int
1400 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1401 {
1402 char *path_from = NULL, *path_to = NULL;
1403 Fnv32_t fnv_from, fnv_to;
1404 struct shmfd *fd_from;
1405 struct shmfd *fd_to;
1406 int error;
1407 int flags;
1408
1409 flags = uap->flags;
1410 AUDIT_ARG_FFLAGS(flags);
1411
1412 /*
1413 * Make sure the user passed only valid flags.
1414 * If you add a new flag, please add a new term here.
1415 */
1416 if ((flags & ~(
1417 SHM_RENAME_NOREPLACE |
1418 SHM_RENAME_EXCHANGE
1419 )) != 0) {
1420 error = EINVAL;
1421 goto out;
1422 }
1423
1424 /*
1425 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1426 * force the user to choose one or the other.
1427 */
1428 if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1429 (flags & SHM_RENAME_EXCHANGE) != 0) {
1430 error = EINVAL;
1431 goto out;
1432 }
1433
1434 /* Renaming to or from anonymous makes no sense */
1435 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1436 error = EINVAL;
1437 goto out;
1438 }
1439
1440 error = shm_copyin_path(td, uap->path_from, &path_from);
1441 if (error != 0)
1442 goto out;
1443
1444 error = shm_copyin_path(td, uap->path_to, &path_to);
1445 if (error != 0)
1446 goto out;
1447
1448 AUDIT_ARG_UPATH1_CANON(path_from);
1449 AUDIT_ARG_UPATH2_CANON(path_to);
1450
1451 /* Rename with from/to equal is a no-op */
1452 if (strcmp(path_from, path_to) == 0)
1453 goto out;
1454
1455 fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1456 fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1457
1458 sx_xlock(&shm_dict_lock);
1459
1460 fd_from = shm_lookup(path_from, fnv_from);
1461 if (fd_from == NULL) {
1462 error = ENOENT;
1463 goto out_locked;
1464 }
1465
1466 fd_to = shm_lookup(path_to, fnv_to);
1467 if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1468 error = EEXIST;
1469 goto out_locked;
1470 }
1471
1472 /*
1473 * Unconditionally prevents shm_remove from invalidating the 'from'
1474 * shm's state.
1475 */
1476 shm_hold(fd_from);
1477 error = shm_remove(path_from, fnv_from, td->td_ucred);
1478
1479 /*
1480 * One of our assumptions failed if we got ENOENT here (e.g. the
1481 * locking didn't protect us).
1482 */
1483 KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1484 path_from));
1485 if (error != 0) {
1486 shm_drop(fd_from);
1487 goto out_locked;
1488 }
1489
1490 /*
1491 * If we are exchanging, we need to ensure the shm_remove below
1492 * doesn't invalidate the dest shm's state.
1493 */
1494 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1495 shm_hold(fd_to);
1496
1497 /*
1498 * NOTE: if path_to is not already in the hash, c'est la vie;
1499 * it simply means we have nothing already at path_to to unlink.
1500 * That is the ENOENT case.
1501 *
1502 * If we somehow don't have access to unlink this guy, but
1503 * did for the shm at path_from, then relink the shm to path_from
1504 * and abort with EACCES.
1505 *
1506 * All other errors: that is weird; let's relink and abort the
1507 * operation.
1508 */
1509 error = shm_remove(path_to, fnv_to, td->td_ucred);
1510 if (error != 0 && error != ENOENT) {
1511 shm_insert(path_from, fnv_from, fd_from);
1512 shm_drop(fd_from);
1513 /* Don't free path_from now, since the hash references it */
1514 path_from = NULL;
1515 goto out_locked;
1516 }
1517
1518 error = 0;
1519
1520 shm_insert(path_to, fnv_to, fd_from);
1521
1522 /* Don't free path_to now, since the hash references it */
1523 path_to = NULL;
1524
1525 /* We kept a ref when we removed, and incremented again in insert */
1526 shm_drop(fd_from);
1527 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1528 fd_from->shm_refs));
1529
1530 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1531 shm_insert(path_from, fnv_from, fd_to);
1532 path_from = NULL;
1533 shm_drop(fd_to);
1534 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1535 fd_to->shm_refs));
1536 }
1537
1538 out_locked:
1539 sx_xunlock(&shm_dict_lock);
1540
1541 out:
1542 free(path_from, M_SHMFD);
1543 free(path_to, M_SHMFD);
1544 return (error);
1545 }
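/*
 * A hedged userland sketch of the rename semantics implemented above:
 * SHM_RENAME_NOREPLACE fails with EEXIST when the destination name is
 * already in use, while SHM_RENAME_EXCHANGE atomically swaps the two
 * names.  The paths are illustrative only:
 *
 *	#include <sys/mman.h>
 *	#include <errno.h>
 *
 *	if (shm_rename("/old", "/new", SHM_RENAME_NOREPLACE) == -1 &&
 *	    errno == EEXIST)
 *		(void)shm_rename("/old", "/new", SHM_RENAME_EXCHANGE);
 */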
1546
1547 static int
1548 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1549 vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1550 vm_ooffset_t foff, struct thread *td)
1551 {
1552 struct vmspace *vms;
1553 vm_map_entry_t next_entry, prev_entry;
1554 vm_offset_t align, mask, maxaddr;
1555 int docow, error, rv, try;
1556 bool curmap;
1557
1558 if (shmfd->shm_lp_psind == 0)
1559 return (EINVAL);
1560
1561 /* MAP_PRIVATE is disabled */
1562 if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1563 MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1564 return (EINVAL);
1565
1566 vms = td->td_proc->p_vmspace;
1567 curmap = map == &vms->vm_map;
1568 if (curmap) {
1569 error = kern_mmap_racct_check(td, map, size);
1570 if (error != 0)
1571 return (error);
1572 }
1573
1574 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1575 docow |= MAP_INHERIT_SHARE;
1576 if ((flags & MAP_NOCORE) != 0)
1577 docow |= MAP_DISABLE_COREDUMP;
1578
1579 mask = pagesizes[shmfd->shm_lp_psind] - 1;
1580 if ((foff & mask) != 0)
1581 return (EINVAL);
1582 maxaddr = vm_map_max(map);
1583 if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1584 maxaddr = MAP_32BIT_MAX_ADDR;
1585 if (size == 0 || (size & mask) != 0 ||
1586 (*addr != 0 && ((*addr & mask) != 0 ||
1587 *addr + size < *addr || *addr + size > maxaddr)))
1588 return (EINVAL);
1589
1590 align = flags & MAP_ALIGNMENT_MASK;
1591 if (align == 0) {
1592 align = pagesizes[shmfd->shm_lp_psind];
1593 } else if (align == MAP_ALIGNED_SUPER) {
1594 /*
1595 * MAP_ALIGNED_SUPER is only supported on superpage sizes,
1596 * i.e., [1, VM_NRESERVLEVEL]. shmfd->shm_lp_psind < 1 is
1597 * handled above.
1598 */
1599 if (
1600 #if VM_NRESERVLEVEL > 0
1601 shmfd->shm_lp_psind > VM_NRESERVLEVEL
1602 #else
1603 shmfd->shm_lp_psind > 1
1604 #endif
1605 )
1606 return (EINVAL);
1607 align = pagesizes[shmfd->shm_lp_psind];
1608 } else {
1609 align >>= MAP_ALIGNMENT_SHIFT;
1610 align = 1ULL << align;
1611 /* Also handles overflow. */
1612 if (align < pagesizes[shmfd->shm_lp_psind])
1613 return (EINVAL);
1614 }
1615
1616 vm_map_lock(map);
1617 if ((flags & MAP_FIXED) == 0) {
1618 try = 1;
1619 if (curmap && (*addr == 0 ||
1620 (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1621 *addr < round_page((vm_offset_t)vms->vm_daddr +
1622 lim_max(td, RLIMIT_DATA))))) {
1623 *addr = roundup2((vm_offset_t)vms->vm_daddr +
1624 lim_max(td, RLIMIT_DATA),
1625 pagesizes[shmfd->shm_lp_psind]);
1626 }
1627 again:
1628 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1629 if (rv != KERN_SUCCESS) {
1630 if (try == 1) {
1631 try = 2;
1632 *addr = vm_map_min(map);
1633 if ((*addr & mask) != 0)
1634 *addr = (*addr + mask) & ~mask;
1635 goto again;
1636 }
1637 goto fail1;
1638 }
1639 } else if ((flags & MAP_EXCL) == 0) {
1640 rv = vm_map_delete(map, *addr, *addr + size);
1641 if (rv != KERN_SUCCESS)
1642 goto fail1;
1643 } else {
1644 error = ENOSPC;
1645 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1646 goto fail;
1647 next_entry = vm_map_entry_succ(prev_entry);
1648 if (next_entry->start < *addr + size)
1649 goto fail;
1650 }
1651
1652 rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1653 prot, max_prot, docow);
1654 fail1:
1655 error = vm_mmap_to_errno(rv);
1656 fail:
1657 vm_map_unlock(map);
1658 return (error);
1659 }
1660
1661 static int
1662 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1663 vm_prot_t prot, vm_prot_t max_maxprot, int flags,
1664 vm_ooffset_t foff, struct thread *td)
1665 {
1666 struct shmfd *shmfd;
1667 vm_prot_t maxprot;
1668 int error;
1669 bool writecnt;
1670 void *rl_cookie;
1671
1672 shmfd = fp->f_data;
1673 maxprot = VM_PROT_NONE;
1674
1675 rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1676 /* FREAD should always be set. */
1677 if ((fp->f_flag & FREAD) != 0)
1678 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1679
1680 /*
1681 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1682 * mapping with a write seal applied. Private mappings are always
1683 * writeable.
1684 */
1685 if ((flags & MAP_SHARED) == 0) {
1686 if ((max_maxprot & VM_PROT_WRITE) != 0)
1687 maxprot |= VM_PROT_WRITE;
1688 writecnt = false;
1689 } else {
1690 if ((fp->f_flag & FWRITE) != 0 &&
1691 (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1692 maxprot |= VM_PROT_WRITE;
1693
1694 /*
1695 * Any mappings from a writable descriptor may be upgraded to
1696 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1697 * applied between the open and subsequent mmap(2). We want to
1698 * reject application of a write seal as long as any such
1699 * mapping exists so that the seal cannot be trivially bypassed.
1700 */
1701 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1702 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1703 error = EACCES;
1704 goto out;
1705 }
1706 }
1707 maxprot &= max_maxprot;
1708
1709 /* See comment in vn_mmap(). */
1710 if (
1711 #ifdef _LP64
1712 objsize > OFF_MAX ||
1713 #endif
1714 foff > OFF_MAX - objsize) {
1715 error = EINVAL;
1716 goto out;
1717 }
1718
1719 #ifdef MAC
1720 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1721 if (error != 0)
1722 goto out;
1723 #endif
1724
1725 mtx_lock(&shm_timestamp_lock);
1726 vfs_timestamp(&shmfd->shm_atime);
1727 mtx_unlock(&shm_timestamp_lock);
1728 vm_object_reference(shmfd->shm_object);
1729
1730 if (shm_largepage(shmfd)) {
1731 writecnt = false;
1732 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1733 maxprot, flags, foff, td);
1734 } else {
1735 if (writecnt) {
1736 vm_pager_update_writecount(shmfd->shm_object, 0,
1737 objsize);
1738 }
1739 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1740 shmfd->shm_object, foff, writecnt, td);
1741 }
1742 if (error != 0) {
1743 if (writecnt)
1744 vm_pager_release_writecount(shmfd->shm_object, 0,
1745 objsize);
1746 vm_object_deallocate(shmfd->shm_object);
1747 }
1748 out:
1749 shm_rangelock_unlock(shmfd, rl_cookie);
1750 return (error);
1751 }
1752
1753 static int
1754 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1755 struct thread *td)
1756 {
1757 struct shmfd *shmfd;
1758 int error;
1759
1760 error = 0;
1761 shmfd = fp->f_data;
1762 mtx_lock(&shm_timestamp_lock);
1763 /*
1764 * SUSv4 says that x bits of permission need not be affected.
1765 * Be consistent with our shm_open there.
1766 */
1767 #ifdef MAC
1768 error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1769 if (error != 0)
1770 goto out;
1771 #endif
1772 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1773 VADMIN, active_cred);
1774 if (error != 0)
1775 goto out;
1776 shmfd->shm_mode = mode & ACCESSPERMS;
1777 out:
1778 mtx_unlock(&shm_timestamp_lock);
1779 return (error);
1780 }
1781
1782 static int
1783 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1784 struct thread *td)
1785 {
1786 struct shmfd *shmfd;
1787 int error;
1788
1789 error = 0;
1790 shmfd = fp->f_data;
1791 mtx_lock(&shm_timestamp_lock);
1792 #ifdef MAC
1793 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1794 if (error != 0)
1795 goto out;
1796 #endif
1797 if (uid == (uid_t)-1)
1798 uid = shmfd->shm_uid;
1799 if (gid == (gid_t)-1)
1800 gid = shmfd->shm_gid;
1801 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1802 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1803 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1804 goto out;
1805 shmfd->shm_uid = uid;
1806 shmfd->shm_gid = gid;
1807 out:
1808 mtx_unlock(&shm_timestamp_lock);
1809 return (error);
1810 }
1811
1812 /*
1813 * Helper routines to allow the backing object of a shared memory file
1814 * descriptor to be mapped in the kernel.
1815 */
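/*
 * A hedged sketch of how an in-kernel consumer would use these helpers,
 * assuming "fp" is a held DTYPE_SHM file whose object is at least
 * PAGE_SIZE bytes long:
 *
 *	void *mem;
 *
 *	if (shm_map(fp, PAGE_SIZE, 0, &mem) == 0) {
 *		memset(mem, 0, PAGE_SIZE);
 *		shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */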
1816 int
1817 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1818 {
1819 struct shmfd *shmfd;
1820 vm_offset_t kva, ofs;
1821 vm_object_t obj;
1822 int rv;
1823
1824 if (fp->f_type != DTYPE_SHM)
1825 return (EINVAL);
1826 shmfd = fp->f_data;
1827 obj = shmfd->shm_object;
1828 VM_OBJECT_WLOCK(obj);
1829 /*
1830 * XXXRW: This validation is probably insufficient, and subject to
1831 * sign errors. It should be fixed.
1832 */
1833 if (offset >= shmfd->shm_size ||
1834 offset + size > round_page(shmfd->shm_size)) {
1835 VM_OBJECT_WUNLOCK(obj);
1836 return (EINVAL);
1837 }
1838
1839 shmfd->shm_kmappings++;
1840 vm_object_reference_locked(obj);
1841 VM_OBJECT_WUNLOCK(obj);
1842
1843 /* Map the object into the kernel_map and wire it. */
1844 kva = vm_map_min(kernel_map);
1845 ofs = offset & PAGE_MASK;
1846 offset = trunc_page(offset);
1847 size = round_page(size + ofs);
1848 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1849 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1850 VM_PROT_READ | VM_PROT_WRITE, 0);
1851 if (rv == KERN_SUCCESS) {
1852 rv = vm_map_wire(kernel_map, kva, kva + size,
1853 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1854 if (rv == KERN_SUCCESS) {
1855 *memp = (void *)(kva + ofs);
1856 return (0);
1857 }
1858 vm_map_remove(kernel_map, kva, kva + size);
1859 } else
1860 vm_object_deallocate(obj);
1861
1862 /* On failure, drop our mapping reference. */
1863 VM_OBJECT_WLOCK(obj);
1864 shmfd->shm_kmappings--;
1865 VM_OBJECT_WUNLOCK(obj);
1866
1867 return (vm_mmap_to_errno(rv));
1868 }
1869
1870 /*
1871 * We require the caller to unmap the entire entry. This allows us to
1872 * safely decrement shm_kmappings when a mapping is removed.
1873 */
1874 int
1875 shm_unmap(struct file *fp, void *mem, size_t size)
1876 {
1877 struct shmfd *shmfd;
1878 vm_map_entry_t entry;
1879 vm_offset_t kva, ofs;
1880 vm_object_t obj;
1881 vm_pindex_t pindex;
1882 vm_prot_t prot;
1883 boolean_t wired;
1884 vm_map_t map;
1885 int rv;
1886
1887 if (fp->f_type != DTYPE_SHM)
1888 return (EINVAL);
1889 shmfd = fp->f_data;
1890 kva = (vm_offset_t)mem;
1891 ofs = kva & PAGE_MASK;
1892 kva = trunc_page(kva);
1893 size = round_page(size + ofs);
1894 map = kernel_map;
1895 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1896 &obj, &pindex, &prot, &wired);
1897 if (rv != KERN_SUCCESS)
1898 return (EINVAL);
1899 if (entry->start != kva || entry->end != kva + size) {
1900 vm_map_lookup_done(map, entry);
1901 return (EINVAL);
1902 }
1903 vm_map_lookup_done(map, entry);
1904 if (obj != shmfd->shm_object)
1905 return (EINVAL);
1906 vm_map_remove(map, kva, kva + size);
1907 VM_OBJECT_WLOCK(obj);
1908 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1909 shmfd->shm_kmappings--;
1910 VM_OBJECT_WUNLOCK(obj);
1911 return (0);
1912 }
1913
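/*
 * Fill in a kinfo_file record for a shared memory object. Paths are
 * reported relative to the caller's jail root; when 'list' is true,
 * objects not visible from that root are rejected with EPERM so that
 * the listing sysctl can skip them.
 */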
1914 static int
1915 shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
1916 {
1917 const char *path, *pr_path;
1918 size_t pr_pathlen;
1919 bool visible;
1920
1921 sx_assert(&shm_dict_lock, SA_LOCKED);
1922 kif->kf_type = KF_TYPE_SHM;
1923 kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
1924 kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
1925 if (shmfd->shm_path != NULL) {
1926 path = shmfd->shm_path;
1927 pr_path = curthread->td_ucred->cr_prison->pr_path;
1928 if (strcmp(pr_path, "/") != 0) {
1929 /* Return the jail-rooted pathname. */
1930 pr_pathlen = strlen(pr_path);
1931 visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
1932 path[pr_pathlen] == '/';
1933 if (list && !visible)
1934 return (EPERM);
1935 if (visible)
1936 path += pr_pathlen;
1937 }
1938 strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
1939 }
1940 return (0);
1941 }
1942
1943 static int
1944 shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
1945 struct filedesc *fdp __unused)
1946 {
1947 int res;
1948
1949 sx_slock(&shm_dict_lock);
1950 res = shm_fill_kinfo_locked(fp->f_data, kif, false);
1951 sx_sunlock(&shm_dict_lock);
1952 return (res);
1953 }
1954
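/*
 * Apply new F_SEAL_* bits to the object, backing fcntl(F_ADD_SEALS).
 * Fails with EPERM once F_SEAL_SEAL has been set, and refuses
 * F_SEAL_WRITE on largepage objects or while writable mappings
 * (including kernel mappings) still exist.
 */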
1955 static int
1956 shm_add_seals(struct file *fp, int seals)
1957 {
1958 struct shmfd *shmfd;
1959 void *rl_cookie;
1960 vm_ooffset_t writemappings;
1961 int error, nseals;
1962
1963 error = 0;
1964 shmfd = fp->f_data;
1965 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1966
1967 /* Even already-set seals should result in EPERM. */
1968 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
1969 error = EPERM;
1970 goto out;
1971 }
1972 nseals = seals & ~shmfd->shm_seals;
1973 if ((nseals & F_SEAL_WRITE) != 0) {
1974 if (shm_largepage(shmfd)) {
1975 error = ENOTSUP;
1976 goto out;
1977 }
1978
1979 /*
1980 * The rangelock above prevents writable mappings from being
1981 * added after we've started applying seals. The RLOCK here
1982 * is to avoid torn reads on ILP32 arches as unmapping/reducing
1983 * writemappings will be done without a rangelock.
1984 */
1985 VM_OBJECT_RLOCK(shmfd->shm_object);
1986 writemappings = shmfd->shm_object->un_pager.swp.writemappings;
1987 VM_OBJECT_RUNLOCK(shmfd->shm_object);
1988 /* kmappings are also writable */
1989 if (writemappings > 0) {
1990 error = EBUSY;
1991 goto out;
1992 }
1993 }
1994 shmfd->shm_seals |= nseals;
1995 out:
1996 shm_rangelock_unlock(shmfd, rl_cookie);
1997 return (error);
1998 }
1999
2000 static int
2001 shm_get_seals(struct file *fp, int *seals)
2002 {
2003 struct shmfd *shmfd;
2004
2005 shmfd = fp->f_data;
2006 *seals = shmfd->shm_seals;
2007 return (0);
2008 }
2009
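/*
 * Release backing pages for the range [*offset, *offset + *length).
 * Partial pages at either edge are invalidated in place and fully
 * covered pages are removed from the object. On return, *offset and
 * *length describe the portion of the range that was not processed.
 */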
2010 static int
2011 shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
2012 {
2013 vm_object_t object;
2014 vm_pindex_t pistart, pi, piend;
2015 vm_ooffset_t off, len;
2016 int startofs, endofs, end;
2017 int error;
2018
2019 off = *offset;
2020 len = *length;
2021 KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
2022 if (off + len > shmfd->shm_size)
2023 len = shmfd->shm_size - off;
2024 object = shmfd->shm_object;
2025 startofs = off & PAGE_MASK;
2026 endofs = (off + len) & PAGE_MASK;
2027 pistart = OFF_TO_IDX(off);
2028 piend = OFF_TO_IDX(off + len);
2029 pi = OFF_TO_IDX(off + PAGE_MASK);
2030 error = 0;
2031
2032 /* Handle the case when offset is on or beyond shm size. */
2033 if ((off_t)len <= 0) {
2034 *length = 0;
2035 return (0);
2036 }
2037
2038 VM_OBJECT_WLOCK(object);
2039
2040 if (startofs != 0) {
2041 end = pistart != piend ? PAGE_SIZE : endofs;
2042 error = shm_partial_page_invalidate(object, pistart, startofs,
2043 end);
2044 if (error)
2045 goto out;
2046 off += end - startofs;
2047 len -= end - startofs;
2048 }
2049
2050 if (pi < piend) {
2051 vm_object_page_remove(object, pi, piend, 0);
2052 off += IDX_TO_OFF(piend - pi);
2053 len -= IDX_TO_OFF(piend - pi);
2054 }
2055
2056 if (endofs != 0 && pistart != piend) {
2057 error = shm_partial_page_invalidate(object, piend, 0, endofs);
2058 if (error)
2059 goto out;
2060 off += endofs;
2061 len -= endofs;
2062 }
2063
2064 out:
2065 	VM_OBJECT_WUNLOCK(object);
2066 *offset = off;
2067 *length = len;
2068 return (error);
2069 }
2070
2071 static int
2072 shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
2073 struct ucred *active_cred, struct thread *td)
2074 {
2075 void *rl_cookie;
2076 struct shmfd *shmfd;
2077 off_t off, len;
2078 int error;
2079
2080 KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
2081 KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
2082 ("shm_fspacectl: non-zero flags"));
2083 KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
2084 ("shm_fspacectl: offset/length overflow or underflow"));
2085 error = EINVAL;
2086 shmfd = fp->f_data;
2087 off = *offset;
2088 len = *length;
2089
2090 rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
2091 switch (cmd) {
2092 case SPACECTL_DEALLOC:
2093 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
2094 error = EPERM;
2095 break;
2096 }
2097 error = shm_deallocate(shmfd, &off, &len, flags);
2098 *offset = off;
2099 *length = len;
2100 break;
2101 default:
2102 __assert_unreachable();
2103 }
2104 shm_rangelock_unlock(shmfd, rl_cookie);
2105 return (error);
2106 }
2107
2108
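/*
 * fo_fallocate() backend for posix_fallocate(2): grow the object so
 * that the range [offset, offset + len) is backed; nothing is ever
 * shrunk here. ENOMEM from the grow path is reported as ENOSPC to
 * match posix_fallocate(2) semantics.
 */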
2109 static int
2110 shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
2111 {
2112 void *rl_cookie;
2113 struct shmfd *shmfd;
2114 size_t size;
2115 int error;
2116
2117 /* This assumes that the caller already checked for overflow. */
2118 error = 0;
2119 shmfd = fp->f_data;
2120 size = offset + len;
2121
2122 /*
2123 * Just grab the rangelock for the range that we may be attempting to
2124 * grow, rather than blocking read/write for regions we won't be
2125 * touching while this (potential) resize is in progress. Other
2126 	 * attempts to resize the shmfd must take a write lock from 0 to
2127 	 * OFF_MAX, so locking a range that may extend beyond the shmfd's
2128 	 * current size is not a concern. If other mechanisms are
2129 * added to grow a shmfd, this may need to be re-evaluated.
2130 */
2131 rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
2132 if (size > shmfd->shm_size)
2133 error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
2134 shm_rangelock_unlock(shmfd, rl_cookie);
2135 /* Translate to posix_fallocate(2) return value as needed. */
2136 if (error == ENOMEM)
2137 error = ENOSPC;
2138 return (error);
2139 }
2140
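/*
 * Sysctl handler backing kern.ipc.posix_shm_list: walk the shm
 * dictionary and emit a packed kinfo_file record for every named
 * object visible within the caller's jail.
 */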
2141 static int
2142 sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
2143 {
2144 struct shm_mapping *shmm;
2145 struct sbuf sb;
2146 struct kinfo_file kif;
2147 u_long i;
2148 int error, error2;
2149
2150 sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
2151 sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
2152 error = 0;
2153 sx_slock(&shm_dict_lock);
2154 for (i = 0; i < shm_hash + 1; i++) {
2155 LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
2156 error = shm_fill_kinfo_locked(shmm->sm_shmfd,
2157 &kif, true);
2158 if (error == EPERM) {
2159 error = 0;
2160 continue;
2161 }
2162 if (error != 0)
2163 break;
2164 pack_kinfo(&kif);
2165 error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
2166 0 : ENOMEM;
2167 if (error != 0)
2168 break;
2169 }
2170 }
2171 sx_sunlock(&shm_dict_lock);
2172 error2 = sbuf_finish(&sb);
2173 sbuf_delete(&sb);
2174 return (error != 0 ? error : error2);
2175 }
2176
2177 SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
2178 CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
2179 NULL, 0, sysctl_posix_shm_list, "",
2180 "POSIX SHM list");
2181
2182 int
2183 kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
2184 struct filecaps *caps)
2185 {
2186
2187 return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
2188 }
2189
2190 /*
2191 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
2192 * caller, and libc will enforce it for the traditional shm_open() call. This
2193 * allows other consumers, like memfd_create(), to opt-in for CLOEXEC. This
2194 * interface also includes a 'name' argument that is currently unused, but could
2195 * potentially be exported later via some interface for debugging purposes.
2196 * From the kernel's perspective, it is optional. Individual consumers like
2197 * memfd_create() may require it in order to be compatible with other systems
2198 * implementing the same function.
2199 */
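/*
 * For example (a sketch, not the authoritative libc code), the classic
 *	shm_open(path, O_RDWR | O_CREAT, 0600)
 * corresponds to
 *	shm_open2(path, O_RDWR | O_CREAT | O_CLOEXEC, 0600, 0, NULL),
 * while a memfd_create()-style consumer would typically pass SHM_ANON
 * together with a non-NULL name and shmflags such as SHM_GROW_ON_WRITE.
 */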
2200 int
2201 sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
2202 {
2203
2204 return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
2205 uap->shmflags, NULL, uap->name));
2206 }
2207
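/*
 * Report the path of the POSIX shm object backing 'obj' into 'path'
 * ("anon" for anonymous objects). Returns EINVAL if the object is not
 * a POSIX shm object and ENXIO if no shmfd is attached to it.
 */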
2208 int
2209 shm_get_path(struct vm_object *obj, char *path, size_t sz)
2210 {
2211 struct shmfd *shmfd;
2212 int error;
2213
2214 error = 0;
2215 shmfd = NULL;
2216 sx_slock(&shm_dict_lock);
2217 VM_OBJECT_RLOCK(obj);
2218 if ((obj->flags & OBJ_POSIXSHM) == 0) {
2219 error = EINVAL;
2220 } else {
2221 if (obj->type == shmfd_pager_type)
2222 shmfd = obj->un_pager.swp.swp_priv;
2223 else if (obj->type == OBJT_PHYS)
2224 shmfd = obj->un_pager.phys.phys_priv;
2225 if (shmfd == NULL) {
2226 error = ENXIO;
2227 } else {
2228 strlcpy(path, shmfd->shm_path == NULL ? "anon" :
2229 shmfd->shm_path, sz);
2230 }
2231 }
2232 if (error != 0)
2233 path[0] = '\0';
2234 VM_OBJECT_RUNLOCK(obj);
2235 sx_sunlock(&shm_dict_lock);
2236 return (error);
2237 }
2238