/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
 * Copyright 2020 The FreeBSD Foundation
 * All rights reserved.
 *
 * Portions of this software were developed by BAE Systems, the University of
 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
 * Computing (TC) research program.
 *
 * Portions of this software were developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

/*
 * Support for shared swap-backed anonymous memory objects via
 * shm_open(2), shm_rename(2), and shm_unlink(2).
 * While most of the implementation is here, vm_mmap.c contains
 * mapping logic changes.
 *
 * posixshmcontrol(1) allows users to inspect the state of these memory
 * objects.  The per-uid swap resource limit caps the total amount of
 * memory a user can consume for anonymous objects, shared ones
 * included.
 */
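
/*
 * Illustrative userland usage (a sketch only; the name "/myshm" is just
 * an example):
 *
 *	int fd = shm_open("/myshm", O_CREAT | O_RDWR, 0600);
 *	ftruncate(fd, 4096);
 *	void *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *	...
 *	shm_unlink("/myshm");
 */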

#include <sys/cdefs.h>
#include "opt_capsicum.h"
#include "opt_ktrace.h"

#include <sys/param.h>
#include <sys/capsicum.h>
#include <sys/conf.h>
#include <sys/fcntl.h>
#include <sys/file.h>
#include <sys/filedesc.h>
#include <sys/filio.h>
#include <sys/fnv_hash.h>
#include <sys/kernel.h>
#include <sys/limits.h>
#include <sys/uio.h>
#include <sys/signal.h>
#include <sys/jail.h>
#include <sys/ktrace.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/mman.h>
#include <sys/mutex.h>
#include <sys/priv.h>
#include <sys/proc.h>
#include <sys/refcount.h>
#include <sys/resourcevar.h>
#include <sys/rwlock.h>
#include <sys/sbuf.h>
#include <sys/stat.h>
#include <sys/syscallsubr.h>
#include <sys/sysctl.h>
#include <sys/sysproto.h>
#include <sys/systm.h>
#include <sys/sx.h>
#include <sys/time.h>
#include <sys/vmmeter.h>
#include <sys/vnode.h>
#include <sys/unistd.h>
#include <sys/user.h>

#include <security/audit/audit.h>
#include <security/mac/mac_framework.h>

#include <vm/vm.h>
#include <vm/vm_param.h>
#include <vm/pmap.h>
#include <vm/vm_extern.h>
#include <vm/vm_map.h>
#include <vm/vm_kern.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pageout.h>
#include <vm/vm_pager.h>
#include <vm/vm_radix.h>
#include <vm/swap_pager.h>

struct shm_mapping {
	char *sm_path;
	Fnv32_t sm_fnv;
	struct shmfd *sm_shmfd;
	LIST_ENTRY(shm_mapping) sm_link;
};

static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
static LIST_HEAD(, shm_mapping) *shm_dictionary;
static struct sx shm_dict_lock;
static struct mtx shm_timestamp_lock;
static u_long shm_hash;
static struct unrhdr64 shm_ino_unr;
static dev_t shm_dev_ino;

#define	SHM_HASH(fnv)	(&shm_dictionary[(fnv) & shm_hash])

static void shm_init(void *arg);
static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
static void shm_doremove(struct shm_mapping *map);
static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
    void *rl_cookie);
static int shm_copyin_path(struct thread *td, const char *userpath_in,
    char **path_out);
static int shm_deallocate(struct shmfd *shmfd, off_t *offset,
    off_t *length, int flags);

static fo_rdwr_t shm_read;
static fo_rdwr_t shm_write;
static fo_truncate_t shm_truncate;
static fo_ioctl_t shm_ioctl;
static fo_stat_t shm_stat;
static fo_close_t shm_close;
static fo_chmod_t shm_chmod;
static fo_chown_t shm_chown;
static fo_seek_t shm_seek;
static fo_fill_kinfo_t shm_fill_kinfo;
static fo_mmap_t shm_mmap;
static fo_get_seals_t shm_get_seals;
static fo_add_seals_t shm_add_seals;
static fo_fallocate_t shm_fallocate;
static fo_fspacectl_t shm_fspacectl;

/* File descriptor operations. */
const struct fileops shm_ops = {
	.fo_read = shm_read,
	.fo_write = shm_write,
	.fo_truncate = shm_truncate,
	.fo_ioctl = shm_ioctl,
	.fo_poll = invfo_poll,
	.fo_kqfilter = invfo_kqfilter,
	.fo_stat = shm_stat,
	.fo_close = shm_close,
	.fo_chmod = shm_chmod,
	.fo_chown = shm_chown,
	.fo_sendfile = vn_sendfile,
	.fo_seek = shm_seek,
	.fo_fill_kinfo = shm_fill_kinfo,
	.fo_mmap = shm_mmap,
	.fo_get_seals = shm_get_seals,
	.fo_add_seals = shm_add_seals,
	.fo_fallocate = shm_fallocate,
	.fo_fspacectl = shm_fspacectl,
	.fo_cmp = file_kcmp_generic,
	.fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
};

FEATURE(posix_shm, "POSIX shared memory");

static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
    "");

static int largepage_reclaim_tries = 1;
SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
    CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
    "Number of contig reclaims before giving up for default alloc policy");

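/* Convenience wrappers around the rangelock embedded in each shmfd. */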
#define	shm_rangelock_unlock(shmfd, cookie)				\
	rangelock_unlock(&(shmfd)->shm_rl, (cookie))
#define	shm_rangelock_rlock(shmfd, start, end)				\
	rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
#define	shm_rangelock_tryrlock(shmfd, start, end)			\
	rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
#define	shm_rangelock_wlock(shmfd, start, end)				\
	rangelock_wlock(&(shmfd)->shm_rl, (start), (end))

static int
uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
{
	struct pctrie_iter pages;
	vm_page_t m;
	vm_pindex_t idx;
	size_t tlen;
	int error, offset, rv;

	idx = OFF_TO_IDX(uio->uio_offset);
	offset = uio->uio_offset & PAGE_MASK;
	tlen = MIN(PAGE_SIZE - offset, len);

	rv = vm_page_grab_valid_unlocked(&m, obj, idx,
	    VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
	if (rv == VM_PAGER_OK)
		goto found;

	/*
	 * Read I/O without either a corresponding resident page or swap
	 * page: use zero_region.  This is intended to avoid instantiating
	 * pages on read from a sparse region.
	 */
	vm_page_iter_init(&pages, obj);
	VM_OBJECT_WLOCK(obj);
	m = vm_radix_iter_lookup(&pages, idx);
	if (uio->uio_rw == UIO_READ && m == NULL &&
	    !vm_pager_has_page(obj, idx, NULL, NULL)) {
		VM_OBJECT_WUNLOCK(obj);
		return (uiomove(__DECONST(void *, zero_region), tlen, uio));
	}

	/*
	 * Although the tmpfs vnode lock is held here, it is
	 * nonetheless safe to sleep waiting for a free page.  The
	 * pageout daemon does not need to acquire the tmpfs vnode
	 * lock to page out tobj's pages because tobj is an
	 * OBJT_SWAP type object.
	 */
	rv = vm_page_grab_valid_iter(&m, obj, idx,
	    VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY, &pages);
	if (rv != VM_PAGER_OK) {
		VM_OBJECT_WUNLOCK(obj);
		if (bootverbose) {
			printf("uiomove_object: vm_obj %p idx %jd "
			    "pager error %d\n", obj, idx, rv);
		}
		return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
	}
	VM_OBJECT_WUNLOCK(obj);

found:
	error = uiomove_fromphys(&m, offset, tlen, uio);
	if (uio->uio_rw == UIO_WRITE && error == 0)
		vm_page_set_dirty(m);
	vm_page_activate(m);
	vm_page_sunbusy(m);

	return (error);
}

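/*
 * Copy up to obj_size bytes between the object and the uio, one page at
 * a time, stopping at EOF or on the first page that makes no progress.
 */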
int
uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
{
	ssize_t resid;
	size_t len;
	int error;

	error = 0;
	while ((resid = uio->uio_resid) > 0) {
		if (obj_size <= uio->uio_offset)
			break;
		len = MIN(obj_size - uio->uio_offset, resid);
		if (len == 0)
			break;
		error = uiomove_object_page(obj, len, uio);
		if (error != 0 || resid == uio->uio_resid)
			break;
	}
	return (error);
}

static u_long count_largepages[MAXPAGESIZES];

static int
shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
    int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
{
	vm_page_t m __diagused;
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pidx >= object->size)
		return (VM_PAGER_FAIL);
	*first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);

	/*
	 * We only busy the first page in the superpage run.  It is
	 * useless to busy the whole run, since we only remove a full
	 * superpage, and it would take too long to busy e.g. the
	 * 512 * 512 == 262144 pages constituting a 1G amd64 superpage.
	 */
	m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
	MPASS(m != NULL);

	*last = *first + atop(pagesizes[psind]) - 1;
	return (VM_PAGER_OK);
}

static boolean_t
shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
    int *before, int *after)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind == 0 || pindex >= object->size)
		return (FALSE);
	if (before != NULL) {
		*before = pindex - rounddown2(pindex, pagesizes[psind] /
		    PAGE_SIZE);
	}
	if (after != NULL) {
		*after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
		    pindex;
	}
	return (TRUE);
}

static void
shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
    vm_ooffset_t foff, struct ucred *cred)
{
}

static void
shm_largepage_phys_dtor(vm_object_t object)
{
	int psind;

	psind = object->un_pager.phys.data_val;
	if (psind != 0) {
		atomic_subtract_long(&count_largepages[psind],
		    object->size / (pagesizes[psind] / PAGE_SIZE));
		vm_wire_sub(object->size);
	} else {
		KASSERT(object->size == 0,
		    ("largepage phys obj %p not initialized but size %#jx > 0",
		    object, (uintmax_t)object->size));
	}
}

static const struct phys_pager_ops shm_largepage_phys_ops = {
	.phys_pg_populate = shm_largepage_phys_populate,
	.phys_pg_haspage = shm_largepage_phys_haspage,
	.phys_pg_ctor = shm_largepage_phys_ctor,
	.phys_pg_dtor = shm_largepage_phys_dtor,
};

bool
shm_largepage(struct shmfd *shmfd)
{
	return (shmfd->shm_object->type == OBJT_PHYS);
}

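/*
 * Pager callbacks that maintain shmfd->shm_pages, the count of pages
 * (resident or swapped) backing the object; shm_stat() reports it as
 * st_blocks.
 */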
static void
shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
{
	struct shmfd *shm;
	vm_size_t c;

	swap_pager_freespace(obj, start, size, &c);
	if (c == 0)
		return;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	KASSERT(shm->shm_pages >= c,
	    ("shm %p pages %jd free %jd", shm,
	    (uintmax_t)shm->shm_pages, (uintmax_t)c));
	shm->shm_pages -= c;
}

static void
shm_page_inserted(vm_object_t obj, vm_page_t m)
{
	struct shmfd *shm;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
		shm->shm_pages += 1;
}

static void
shm_page_removed(vm_object_t obj, vm_page_t m)
{
	struct shmfd *shm;

	shm = obj->un_pager.swp.swp_priv;
	if (shm == NULL)
		return;
	if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
		KASSERT(shm->shm_pages >= 1,
		    ("shm %p pages %jd free 1", shm,
		    (uintmax_t)shm->shm_pages));
		shm->shm_pages -= 1;
	}
}

static struct pagerops shm_swap_pager_ops = {
	.pgo_kvme_type = KVME_TYPE_SWAP,
	.pgo_freespace = shm_pager_freespace,
	.pgo_page_inserted = shm_page_inserted,
	.pgo_page_removed = shm_page_removed,
};
static int shmfd_pager_type = -1;

static int
shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
{
	struct shmfd *shmfd;
	off_t foffset;
	int error;

	shmfd = fp->f_data;
	foffset = foffset_lock(fp, 0);
	error = 0;
	switch (whence) {
	case L_INCR:
		if (foffset < 0 ||
		    (offset > 0 && foffset > OFF_MAX - offset)) {
			error = EOVERFLOW;
			break;
		}
		offset += foffset;
		break;
	case L_XTND:
		if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
			error = EOVERFLOW;
			break;
		}
		offset += shmfd->shm_size;
		break;
	case L_SET:
		break;
	default:
		error = EINVAL;
	}
	if (error == 0) {
		if (offset < 0 || offset > shmfd->shm_size)
			error = EINVAL;
		else
			td->td_uretoff.tdu_off = offset;
	}
	foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
	return (error);
}

static int
shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	foffset_lock_uio(fp, uio, flags);
	rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
	    uio->uio_offset + uio->uio_resid);
	error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
	shm_rangelock_unlock(shmfd, rl_cookie);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
    int flags, struct thread *td)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	int error;
	off_t newsize;

	KASSERT((flags & FOF_OFFSET) == 0 || uio->uio_offset >= 0,
	    ("%s: negative offset", __func__));

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
		return (EINVAL);
	foffset_lock_uio(fp, uio, flags);
	if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
		/*
		 * Overflow is only an error if we're supposed to expand on
		 * write.  Otherwise, we'll just truncate the write to the
		 * size of the file, which can only grow up to OFF_MAX.
		 */
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
			foffset_unlock_uio(fp, uio, flags);
			return (EFBIG);
		}

		newsize = atomic_load_64(&shmfd->shm_size);
	} else {
		newsize = uio->uio_offset + uio->uio_resid;
	}
	if ((flags & FOF_OFFSET) == 0)
		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
	else
		rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset,
		    MAX(newsize, uio->uio_offset));
	if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
		error = EPERM;
	} else {
		error = 0;
		if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
		    newsize > shmfd->shm_size) {
			error = shm_dotruncate_cookie(shmfd, newsize,
			    rl_cookie);
		}
		if (error == 0)
			error = uiomove_object(shmfd->shm_object,
			    shmfd->shm_size, uio);
	}
	shm_rangelock_unlock(shmfd, rl_cookie);
	foffset_unlock_uio(fp, uio, flags);
	return (error);
}

static int
shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;
#ifdef MAC
	error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif
	return (shm_dotruncate(shmfd, length));
}

int
shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	struct shm_largepage_conf *conf;
	void *rl_cookie;

	shmfd = fp->f_data;
	switch (com) {
	case FIONBIO:
	case FIOASYNC:
		/*
		 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work, just like
		 * it would on an unlinked regular file.
		 */
		return (0);
	case FIOSSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		if (shmfd->shm_lp_psind != 0 &&
		    conf->psind != shmfd->shm_lp_psind)
			return (EINVAL);
		if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
		    pagesizes[conf->psind] == 0)
			return (EINVAL);
		if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
		    conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
			return (EINVAL);

		rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
		shmfd->shm_lp_psind = conf->psind;
		shmfd->shm_lp_alloc_policy = conf->alloc_policy;
		shmfd->shm_object->un_pager.phys.data_val = conf->psind;
		shm_rangelock_unlock(shmfd, rl_cookie);
		return (0);
	case FIOGSHMLPGCNF:
		if (!shm_largepage(shmfd))
			return (ENOTTY);
		conf = data;
		rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
		conf->psind = shmfd->shm_lp_psind;
		conf->alloc_policy = shmfd->shm_lp_alloc_policy;
		shm_rangelock_unlock(shmfd, rl_cookie);
		return (0);
	default:
		return (ENOTTY);
	}
}

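/*
 * Illustrative userland configuration of a largepage shmfd (a sketch;
 * assumes fd was created with SHM_LARGEPAGE via shm_open2(2), and that
 * psind 1 names the system's first superpage size):
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);
 */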
static int
shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
{
	struct shmfd *shmfd;
#ifdef MAC
	int error;
#endif

	shmfd = fp->f_data;

#ifdef MAC
	error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
	if (error)
		return (error);
#endif

	/*
	 * Attempt to return sane-ish values for fstat() on a memory file
	 * descriptor.
	 */
	bzero(sb, sizeof(*sb));
	sb->st_blksize = PAGE_SIZE;
	sb->st_size = shmfd->shm_size;
	mtx_lock(&shm_timestamp_lock);
	sb->st_atim = shmfd->shm_atime;
	sb->st_ctim = shmfd->shm_ctime;
	sb->st_mtim = shmfd->shm_mtime;
	sb->st_birthtim = shmfd->shm_birthtime;
	sb->st_mode = S_IFREG | shmfd->shm_mode;	/* XXX */
	sb->st_uid = shmfd->shm_uid;
	sb->st_gid = shmfd->shm_gid;
	mtx_unlock(&shm_timestamp_lock);
	sb->st_dev = shm_dev_ino;
	sb->st_ino = shmfd->shm_ino;
	sb->st_nlink = shmfd->shm_object->ref_count;
	if (shm_largepage(shmfd)) {
		sb->st_blocks = shmfd->shm_object->size /
		    (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
	} else {
		sb->st_blocks = shmfd->shm_pages;
	}

	return (0);
}

static int
shm_close(struct file *fp, struct thread *td)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	fp->f_data = NULL;
	shm_drop(shmfd);

	return (0);
}

static int
shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
	int error;
	char *path;
	const char *pr_path;
	size_t pr_pathlen;

	path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
	pr_path = td->td_ucred->cr_prison->pr_path;

	/* Construct a full pathname for jailed callers. */
	pr_pathlen = strcmp(pr_path, "/") == 0 ? 0 :
	    strlcpy(path, pr_path, MAXPATHLEN);
	error = copyinstr(userpath_in, path + pr_pathlen,
	    MAXPATHLEN - pr_pathlen, NULL);
	if (error != 0)
		goto out;

#ifdef KTRACE
	if (KTRPOINT(curthread, KTR_NAMEI))
		ktrnamei(path);
#endif

	/* Require paths to start with a '/' character. */
	if (path[pr_pathlen] != '/') {
		error = EINVAL;
		goto out;
	}

	*path_out = path;

out:
	if (error != 0)
		free(path, M_SHMFD);

	return (error);
}

static int
shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
    int end)
{
	int error;

	error = vm_page_grab_zero_partial(object, idx, base, end);
	if (error == EIO)
		VM_OBJECT_WUNLOCK(object);
	return (error);
}

static int
shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_pindex_t nobjsize;
	vm_ooffset_t delta;
	int base, error;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
	if (length == shmfd->shm_size)
		return (0);
	nobjsize = OFF_TO_IDX(length + PAGE_MASK);

	/* Are we shrinking?  If so, trim the end. */
	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);

		/*
		 * Disallow any requests to shrink the size if this
		 * object is mapped into the kernel.
		 */
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);

		/*
		 * Zero the truncated part of the last page.
		 */
		base = length & PAGE_MASK;
		if (base != 0) {
			error = shm_partial_page_invalidate(object,
			    OFF_TO_IDX(length), base, PAGE_SIZE);
			if (error)
				return (error);
		}
		delta = IDX_TO_OFF(object->size - nobjsize);

		if (nobjsize < object->size)
			vm_object_page_remove(object, nobjsize, object->size,
			    0);

		/* Free the swap accounted for shm */
		swap_release_by_cred(delta, object->cred);
	} else {
		if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
			return (EPERM);

		/* Try to reserve additional swap space. */
		delta = IDX_TO_OFF(nobjsize - object->size);
		if (!swap_reserve_by_cred(delta, object->cred))
			return (ENOMEM);
	}
	shmfd->shm_size = length;
	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_ctime);
	shmfd->shm_mtime = shmfd->shm_ctime;
	mtx_unlock(&shm_timestamp_lock);
	object->size = nobjsize;
	return (0);
}

static int
shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	vm_object_t object;
	vm_page_t m;
	vm_pindex_t newobjsz;
	vm_pindex_t oldobjsz __unused;
	int aflags, error, i, psind, try;

	KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
	object = shmfd->shm_object;
	VM_OBJECT_ASSERT_WLOCKED(object);
	rangelock_cookie_assert(rl_cookie, RA_WLOCKED);

	oldobjsz = object->size;
	newobjsz = OFF_TO_IDX(length);
	if (length == shmfd->shm_size)
		return (0);
	psind = shmfd->shm_lp_psind;
	if (psind == 0 && length != 0)
		return (EINVAL);
	if ((length & (pagesizes[psind] - 1)) != 0)
		return (EINVAL);

	if (length < shmfd->shm_size) {
		if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
			return (EPERM);
		if (shmfd->shm_kmappings > 0)
			return (EBUSY);
		return (ENOTSUP);	/* Pages are unmanaged. */
#if 0
		vm_object_page_remove(object, newobjsz, oldobjsz, 0);
		object->size = newobjsz;
		shmfd->shm_size = length;
		return (0);
#endif
	}

	if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
		return (EPERM);

	aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
	if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
		aflags |= VM_ALLOC_WAITFAIL;
	try = 0;

	/*
	 * Extend shmfd and object, keeping all already fully
	 * allocated large pages intact even on error, because the
	 * dropped object lock might have allowed them to be mapped.
	 */
	while (object->size < newobjsz) {
		error = sig_intr();
		if (error != 0)
			return (error);
		m = vm_page_alloc_contig(object, object->size, aflags,
		    pagesizes[psind] / PAGE_SIZE, 0, ~0,
		    pagesizes[psind], 0,
		    VM_MEMATTR_DEFAULT);
		if (m == NULL) {
			VM_OBJECT_WUNLOCK(object);
			error = sig_intr();
			if (error != 0) {
				VM_OBJECT_WLOCK(object);
				return (error);
			}
			if (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_NOWAIT ||
			    (shmfd->shm_lp_alloc_policy ==
			    SHM_LARGEPAGE_ALLOC_DEFAULT &&
			    try >= largepage_reclaim_tries)) {
				VM_OBJECT_WLOCK(object);
				return (ENOMEM);
			}
			error = vm_page_reclaim_contig(aflags,
			    pagesizes[psind] / PAGE_SIZE, 0, ~0,
			    pagesizes[psind], 0);
			if (error == ENOMEM)
				error = vm_wait_intr(object);
			if (error != 0) {
				VM_OBJECT_WLOCK(object);
				return (error);
			}
			try++;
			VM_OBJECT_WLOCK(object);
			continue;
		}
		try = 0;
		for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
			if ((m[i].flags & PG_ZERO) == 0)
				pmap_zero_page(&m[i]);
			vm_page_valid(&m[i]);
			vm_page_xunbusy(&m[i]);
		}
		object->size += OFF_TO_IDX(pagesizes[psind]);
		shmfd->shm_size += pagesizes[psind];
		atomic_add_long(&count_largepages[psind], 1);
		vm_wire_add(atop(pagesizes[psind]));
	}
	return (0);
}

static int
shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
{
	int error;

	VM_OBJECT_WLOCK(shmfd->shm_object);
	error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
	    length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
	    rl_cookie);
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	return (error);
}

int
shm_dotruncate(struct shmfd *shmfd, off_t length)
{
	void *rl_cookie;
	int error;

	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
	error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}

/*
 * shmfd object management including creation and reference counting
 * routines.
 */
struct shmfd *
shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
{
	struct shmfd *shmfd;
	vm_object_t obj;

	if (largepage) {
		obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops,
		    NULL, 0, VM_PROT_DEFAULT, 0, ucred);
	} else {
		obj = vm_pager_allocate(shmfd_pager_type, NULL, 0,
		    VM_PROT_DEFAULT, 0, ucred);
	}
	if (obj == NULL) {
		/*
		 * swap reservation limits can cause object allocation
		 * to fail.
		 */
		return (NULL);
	}

	shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
	shmfd->shm_uid = ucred->cr_uid;
	shmfd->shm_gid = ucred->cr_gid;
	shmfd->shm_mode = mode;
	if (largepage) {
		obj->un_pager.phys.phys_priv = shmfd;
		shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
	} else {
		obj->un_pager.swp.swp_priv = shmfd;
	}

	VM_OBJECT_WLOCK(obj);
	vm_object_set_flag(obj, OBJ_POSIXSHM);
	VM_OBJECT_WUNLOCK(obj);
	shmfd->shm_object = obj;
	vfs_timestamp(&shmfd->shm_birthtime);
	shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
	    shmfd->shm_birthtime;
	shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
	refcount_init(&shmfd->shm_refs, 1);
	mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
	rangelock_init(&shmfd->shm_rl);
#ifdef MAC
	mac_posixshm_init(shmfd);
	mac_posixshm_create(ucred, shmfd);
#endif

	return (shmfd);
}

struct shmfd *
shm_hold(struct shmfd *shmfd)
{

	refcount_acquire(&shmfd->shm_refs);
	return (shmfd);
}

void
shm_drop(struct shmfd *shmfd)
{
	vm_object_t obj;

	if (refcount_release(&shmfd->shm_refs)) {
#ifdef MAC
		mac_posixshm_destroy(shmfd);
#endif
		rangelock_destroy(&shmfd->shm_rl);
		mtx_destroy(&shmfd->shm_mtx);
		obj = shmfd->shm_object;
		VM_OBJECT_WLOCK(obj);
		if (shm_largepage(shmfd))
			obj->un_pager.phys.phys_priv = NULL;
		else
			obj->un_pager.swp.swp_priv = NULL;
		VM_OBJECT_WUNLOCK(obj);
		vm_object_deallocate(obj);
		free(shmfd, M_SHMFD);
	}
}

/*
 * Determine if the credentials have sufficient permissions for a
 * specified combination of FREAD and FWRITE.
 */
int
shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
{
	accmode_t accmode;
	int error;

	accmode = 0;
	if (flags & FREAD)
		accmode |= VREAD;
	if (flags & FWRITE)
		accmode |= VWRITE;
	mtx_lock(&shm_timestamp_lock);
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    accmode, ucred);
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static void
shm_init(void *arg)
{
	char name[32];
	int i;

	mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
	sx_init(&shm_dict_lock, "shm dictionary");
	shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
	new_unrhdr64(&shm_ino_unr, 1);
	shm_dev_ino = devfs_alloc_cdp_inode();
	KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
	shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
	    OBJT_SWAP);
	MPASS(shmfd_pager_type != -1);

	for (i = 1; i < MAXPAGESIZES; i++) {
		if (pagesizes[i] == 0)
			break;
#define	M	(1024 * 1024)
#define	G	(1024 * M)
		if (pagesizes[i] >= G)
			snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
		else if (pagesizes[i] >= M)
			snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
		else
			snprintf(name, sizeof(name), "%lu", pagesizes[i]);
#undef G
#undef M
		SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
		    OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
		    "number of non-transient largepages allocated");
	}
}
SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);

/*
 * Remove all shared memory objects that belong to a prison.
 */
void
shm_remove_prison(struct prison *pr)
{
	struct shm_mapping *shmm, *tshmm;
	u_long i;

	sx_xlock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
			if (shmm->sm_shmfd->shm_object->cred &&
			    shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
				shm_doremove(shmm);
		}
	}
	sx_xunlock(&shm_dict_lock);
}

/*
 * Dictionary management.  We maintain an in-kernel dictionary to map
 * paths to shmfd objects.  We use the FNV hash on the path to store
 * the mappings in a hash table.
 */
static struct shmfd *
shm_lookup(char *path, Fnv32_t fnv)
{
	struct shm_mapping *map;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0)
			return (map->sm_shmfd);
	}

	return (NULL);
}

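/*
 * Insert a mapping for the given path; takes ownership of the malloc'd
 * path (it is freed by shm_doremove()) and acquires a new reference on
 * the shmfd.
 */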
static void
shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
{
	struct shm_mapping *map;

	map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
	map->sm_path = path;
	map->sm_fnv = fnv;
	map->sm_shmfd = shm_hold(shmfd);
	shmfd->shm_path = path;
	LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
}

static int
shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
{
	struct shm_mapping *map;
	int error;

	LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
		if (map->sm_fnv != fnv)
			continue;
		if (strcmp(map->sm_path, path) == 0) {
#ifdef MAC
			error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
			if (error)
				return (error);
#endif
			error = shm_access(map->sm_shmfd, ucred,
			    FREAD | FWRITE);
			if (error)
				return (error);
			shm_doremove(map);
			return (0);
		}
	}

	return (ENOENT);
}

static void
shm_doremove(struct shm_mapping *map)
{
	map->sm_shmfd->shm_path = NULL;
	LIST_REMOVE(map, sm_link);
	shm_drop(map->sm_shmfd);
	free(map->sm_path, M_SHMFD);
	free(map, M_SHMFD);
}

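/*
 * Common implementation for shm_open(2) and the kernel interfaces
 * layered above it: copy in the path (unless SHM_ANON), allocate or
 * look up the shmfd, apply seals and flags, and install a new file
 * descriptor referencing it.
 */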
int
kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
    int shmflags, struct filecaps *fcaps, const char *name __unused,
    struct shmfd *shmfd)
{
	struct pwddesc *pdp;
	struct file *fp;
	char *path;
	void *rl_cookie;
	Fnv32_t fnv;
	mode_t cmode;
	int error, fd, initial_seals;
	bool largepage;

	if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
	    SHM_LARGEPAGE)) != 0)
		return (EINVAL);

	initial_seals = F_SEAL_SEAL;
	if ((shmflags & SHM_ALLOW_SEALING) != 0)
		initial_seals &= ~F_SEAL_SEAL;

	AUDIT_ARG_FFLAGS(flags);
	AUDIT_ARG_MODE(mode);

	if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
		return (EINVAL);

	if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC |
	    O_CLOFORK)) != 0)
		return (EINVAL);

	largepage = (shmflags & SHM_LARGEPAGE) != 0;
	if (largepage && !PMAP_HAS_LARGEPAGES)
		return (ENOTTY);

	/*
	 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
	 * If the decision is made later to allow additional seals, care must be
	 * taken below to ensure that the seals are properly set if the shmfd
	 * already existed -- this currently assumes that only F_SEAL_SEAL can
	 * be set and doesn't take further precautions to ensure the validity of
	 * the seals being added with respect to current mappings.
	 */
	if ((initial_seals & ~F_SEAL_SEAL) != 0)
		return (EINVAL);

	if (userpath != SHM_ANON) {
		error = shm_copyin_path(td, userpath, &path);
		if (error != 0)
			return (error);

#ifdef CAPABILITY_MODE
		/*
		 * shm_open(2) is only allowed for anonymous objects.
		 */
		if (CAP_TRACING(td))
			ktrcapfail(CAPFAIL_NAMEI, path);
		if (IN_CAPABILITY_MODE(td)) {
			error = ECAPMODE;
			goto outnofp;
		}
#endif

		AUDIT_ARG_UPATH1_CANON(path);
	} else {
		path = NULL;
	}

	pdp = td->td_proc->p_pd;
	cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;

	/*
	 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
	 * by POSIX.  We allow it to be unset here so that an in-kernel
	 * interface may be written as a thin layer around shm, optionally not
	 * setting CLOEXEC.  For shm_open(2), O_CLOEXEC is set unconditionally
	 * in sys_shm_open() to keep this implementation compliant.
	 */
	error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
	if (error != 0)
		goto outnofp;

	/*
	 * A SHM_ANON path pointer creates an anonymous object.  We allow other
	 * parts of the kernel to pre-populate a shmfd and then materialize an
	 * fd for it here as a means to pass data back up to userland.  This
	 * doesn't really make sense for named shm objects, but it makes plenty
	 * of sense for anonymous objects.
	 */
	if (userpath == SHM_ANON) {
		if (shmfd != NULL) {
			shm_hold(shmfd);
		} else {
			/*
			 * A read-only anonymous object is pointless, unless it
			 * was pre-populated by the kernel with the expectation
			 * that a shmfd would later be created for userland to
			 * access it through.
			 */
			if ((flags & O_ACCMODE) == O_RDONLY) {
				error = EINVAL;
				goto out;
			}
			shmfd = shm_alloc(td->td_ucred, cmode, largepage);
			if (shmfd == NULL) {
				error = ENOMEM;
				goto out;
			}

			shmfd->shm_seals = initial_seals;
			shmfd->shm_flags = shmflags;
		}
	} else {
		fnv = fnv_32_str(path, FNV1_32_INIT);
		sx_xlock(&shm_dict_lock);

		MPASS(shmfd == NULL);
		shmfd = shm_lookup(path, fnv);
		if (shmfd == NULL) {
			/* Object does not yet exist, create it if requested. */
			if (flags & O_CREAT) {
#ifdef MAC
				error = mac_posixshm_check_create(td->td_ucred,
				    path);
				if (error == 0) {
#endif
					shmfd = shm_alloc(td->td_ucred, cmode,
					    largepage);
					if (shmfd == NULL) {
						error = ENOMEM;
					} else {
						shmfd->shm_seals =
						    initial_seals;
						shmfd->shm_flags = shmflags;
						shm_insert(path, fnv, shmfd);
						path = NULL;
					}
#ifdef MAC
				}
#endif
			} else {
				error = ENOENT;
			}
		} else {
			/*
			 * Object already exists, obtain a new reference if
			 * requested and permitted.
			 */
			rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

			/*
			 * kern_shm_open() likely shouldn't ever error out on
			 * trying to set a seal that already exists, unlike
			 * F_ADD_SEALS.  This would break terribly as
			 * shm_open(2) actually sets F_SEAL_SEAL to maintain
			 * historical behavior where the underlying file could
			 * not be sealed.
			 */
			initial_seals &= ~shmfd->shm_seals;

			/*
			 * initial_seals can't set additional seals if we've
			 * already been set F_SEAL_SEAL.  If F_SEAL_SEAL is set,
			 * then we've already removed that one from
			 * initial_seals.  This is currently redundant as we
			 * only allow setting F_SEAL_SEAL at creation time, but
			 * it's cheap to check and decreases the effort required
			 * to allow additional seals.
			 */
			if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
			    initial_seals != 0)
				error = EPERM;
			else if ((flags & (O_CREAT | O_EXCL)) ==
			    (O_CREAT | O_EXCL))
				error = EEXIST;
			else if (shmflags != 0 && shmflags != shmfd->shm_flags)
				error = EINVAL;
			else {
#ifdef MAC
				error = mac_posixshm_check_open(td->td_ucred,
				    shmfd, FFLAGS(flags & O_ACCMODE));
				if (error == 0)
#endif
				error = shm_access(shmfd, td->td_ucred,
				    FFLAGS(flags & O_ACCMODE));
			}

			/*
			 * Truncate the file back to zero length if
			 * O_TRUNC was specified and the object was
			 * opened with read/write.
			 */
			if (error == 0 &&
			    (flags & (O_ACCMODE | O_TRUNC)) ==
			    (O_RDWR | O_TRUNC)) {
				VM_OBJECT_WLOCK(shmfd->shm_object);
#ifdef MAC
				error = mac_posixshm_check_truncate(
				    td->td_ucred, fp->f_cred, shmfd);
				if (error == 0)
#endif
					error = shm_dotruncate_locked(shmfd, 0,
					    rl_cookie);
				VM_OBJECT_WUNLOCK(shmfd->shm_object);
			}
			if (error == 0) {
				/*
				 * Currently we only allow F_SEAL_SEAL to be
				 * set initially.  As noted above, this would
				 * need to be reworked should that change.
				 */
				shmfd->shm_seals |= initial_seals;
				shm_hold(shmfd);
			}
			shm_rangelock_unlock(shmfd, rl_cookie);
		}
		sx_xunlock(&shm_dict_lock);

		if (error != 0)
			goto out;
	}

	finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);

	td->td_retval[0] = fd;
	fdrop(fp, td);
	free(path, M_SHMFD);

	return (0);

out:
	fdclose(td, fp, fd);
	fdrop(fp, td);
outnofp:
	free(path, M_SHMFD);

	return (error);
}

/* System calls. */
#ifdef COMPAT_FREEBSD12
int
freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
{

	return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
	    uap->mode, NULL));
}
#endif

int
sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
{
	char *path;
	Fnv32_t fnv;
	int error;

	error = shm_copyin_path(td, uap->path, &path);
	if (error != 0)
		return (error);

	AUDIT_ARG_UPATH1_CANON(path);
	fnv = fnv_32_str(path, FNV1_32_INIT);
	sx_xlock(&shm_dict_lock);
	error = shm_remove(path, fnv, td->td_ucred);
	sx_xunlock(&shm_dict_lock);
	free(path, M_SHMFD);

	return (error);
}

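/*
 * Rename a shm path, optionally exchanging the two objects
 * (SHM_RENAME_EXCHANGE) or refusing to replace an existing destination
 * (SHM_RENAME_NOREPLACE), all under the dictionary lock.
 */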
int
sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
{
	char *path_from = NULL, *path_to = NULL;
	Fnv32_t fnv_from, fnv_to;
	struct shmfd *fd_from;
	struct shmfd *fd_to;
	int error;
	int flags;

	flags = uap->flags;
	AUDIT_ARG_FFLAGS(flags);

	/*
	 * Make sure the user passed only valid flags.
	 * If you add a new flag, please add a new term here.
	 */
	if ((flags & ~(
	    SHM_RENAME_NOREPLACE |
	    SHM_RENAME_EXCHANGE
	    )) != 0) {
		error = EINVAL;
		goto out;
	}

	/*
	 * EXCHANGE and NOREPLACE don't quite make sense together.  Let's
	 * force the user to choose one or the other.
	 */
	if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
	    (flags & SHM_RENAME_EXCHANGE) != 0) {
		error = EINVAL;
		goto out;
	}

	/* Renaming to or from anonymous makes no sense */
	if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
		error = EINVAL;
		goto out;
	}

	error = shm_copyin_path(td, uap->path_from, &path_from);
	if (error != 0)
		goto out;

	error = shm_copyin_path(td, uap->path_to, &path_to);
	if (error != 0)
		goto out;

	AUDIT_ARG_UPATH1_CANON(path_from);
	AUDIT_ARG_UPATH2_CANON(path_to);

	/* Rename with from/to equal is a no-op */
	if (strcmp(path_from, path_to) == 0)
		goto out;

	fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
	fnv_to = fnv_32_str(path_to, FNV1_32_INIT);

	sx_xlock(&shm_dict_lock);

	fd_from = shm_lookup(path_from, fnv_from);
	if (fd_from == NULL) {
		error = ENOENT;
		goto out_locked;
	}

	fd_to = shm_lookup(path_to, fnv_to);
	if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
		error = EEXIST;
		goto out_locked;
	}

	/*
	 * Unconditionally prevents shm_remove from invalidating the 'from'
	 * shm's state.
	 */
	shm_hold(fd_from);
	error = shm_remove(path_from, fnv_from, td->td_ucred);

	/*
	 * One of my assumptions failed if ENOENT (e.g. locking didn't
	 * protect us)
	 */
	KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
	    path_from));
	if (error != 0) {
		shm_drop(fd_from);
		goto out_locked;
	}

	/*
	 * If we are exchanging, we need to ensure the shm_remove below
	 * doesn't invalidate the dest shm's state.
	 */
	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
		shm_hold(fd_to);

	/*
	 * NOTE: if path_to is not already in the hash, c'est la vie;
	 * it simply means we have nothing already at path_to to unlink.
	 * That is the ENOENT case.
	 *
	 * If we somehow don't have access to unlink this guy, but
	 * did for the shm at path_from, then relink the shm to path_from
	 * and abort with EACCES.
	 *
	 * All other errors: that is weird; let's relink and abort the
	 * operation.
	 */
	error = shm_remove(path_to, fnv_to, td->td_ucred);
	if (error != 0 && error != ENOENT) {
		shm_insert(path_from, fnv_from, fd_from);
		shm_drop(fd_from);
		/* Don't free path_from now, since the hash references it */
		path_from = NULL;
		goto out_locked;
	}

	error = 0;

	shm_insert(path_to, fnv_to, fd_from);

	/* Don't free path_to now, since the hash references it */
	path_to = NULL;

	/* We kept a ref when we removed, and incremented again in insert */
	shm_drop(fd_from);
	KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
	    fd_from->shm_refs));

	if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
		shm_insert(path_from, fnv_from, fd_to);
		path_from = NULL;
		shm_drop(fd_to);
		KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
		    fd_to->shm_refs));
	}

out_locked:
	sx_xunlock(&shm_dict_lock);

out:
	free(path_from, M_SHMFD);
	free(path_to, M_SHMFD);
	return (error);
}

static int
shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
    vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct vmspace *vms;
	vm_map_entry_t next_entry, prev_entry;
	vm_offset_t align, mask, maxaddr;
	int docow, error, rv, try;
	bool curmap;

	if (shmfd->shm_lp_psind == 0)
		return (EINVAL);

	/* MAP_PRIVATE is disabled */
	if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
	    MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
		return (EINVAL);

	vms = td->td_proc->p_vmspace;
	curmap = map == &vms->vm_map;
	if (curmap) {
		error = kern_mmap_racct_check(td, map, size);
		if (error != 0)
			return (error);
	}

	docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
	docow |= MAP_INHERIT_SHARE;
	if ((flags & MAP_NOCORE) != 0)
		docow |= MAP_DISABLE_COREDUMP;

	mask = pagesizes[shmfd->shm_lp_psind] - 1;
	if ((foff & mask) != 0)
		return (EINVAL);
	maxaddr = vm_map_max(map);
	if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
		maxaddr = MAP_32BIT_MAX_ADDR;
	if (size == 0 || (size & mask) != 0 ||
	    (*addr != 0 && ((*addr & mask) != 0 ||
	    *addr + size < *addr || *addr + size > maxaddr)))
		return (EINVAL);

	align = flags & MAP_ALIGNMENT_MASK;
	if (align == 0) {
		align = pagesizes[shmfd->shm_lp_psind];
	} else if (align == MAP_ALIGNED_SUPER) {
		/*
		 * MAP_ALIGNED_SUPER is only supported on superpage sizes,
		 * i.e., [1, VM_NRESERVLEVEL].  shmfd->shm_lp_psind < 1 is
		 * handled above.
		 */
		if (
#if VM_NRESERVLEVEL > 0
		    shmfd->shm_lp_psind > VM_NRESERVLEVEL
#else
		    shmfd->shm_lp_psind > 1
#endif
		    )
			return (EINVAL);
		align = pagesizes[shmfd->shm_lp_psind];
	} else {
		align >>= MAP_ALIGNMENT_SHIFT;
		align = 1ULL << align;
		/* Also handles overflow. */
		if (align < pagesizes[shmfd->shm_lp_psind])
			return (EINVAL);
	}

	vm_map_lock(map);
	if ((flags & MAP_FIXED) == 0) {
		try = 1;
		if (curmap && (*addr == 0 ||
		    (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
		    *addr < round_page((vm_offset_t)vms->vm_daddr +
		    lim_max(td, RLIMIT_DATA))))) {
			*addr = roundup2((vm_offset_t)vms->vm_daddr +
			    lim_max(td, RLIMIT_DATA),
			    pagesizes[shmfd->shm_lp_psind]);
		}
again:
		rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
		if (rv != KERN_SUCCESS) {
			if (try == 1) {
				try = 2;
				*addr = vm_map_min(map);
				if ((*addr & mask) != 0)
					*addr = (*addr + mask) & ~mask;
				goto again;
			}
			goto fail1;
		}
	} else if ((flags & MAP_EXCL) == 0) {
		rv = vm_map_delete(map, *addr, *addr + size);
		if (rv != KERN_SUCCESS)
			goto fail1;
	} else {
		error = ENOSPC;
		if (vm_map_lookup_entry(map, *addr, &prev_entry))
			goto fail;
		next_entry = vm_map_entry_succ(prev_entry);
		if (next_entry->start < *addr + size)
			goto fail;
	}

	rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
	    prot, max_prot, docow);
fail1:
	error = vm_mmap_to_errno(rv);
fail:
	vm_map_unlock(map);
	return (error);
}

static int
shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
    vm_prot_t prot, vm_prot_t max_maxprot, int flags,
    vm_ooffset_t foff, struct thread *td)
{
	struct shmfd *shmfd;
	vm_prot_t maxprot;
	int error;
	bool writecnt;
	void *rl_cookie;

	shmfd = fp->f_data;
	maxprot = VM_PROT_NONE;

	rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
	/* FREAD should always be set. */
	if ((fp->f_flag & FREAD) != 0)
		maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;

	/*
	 * If FWRITE is set, we can allow VM_PROT_WRITE unless it's a shared
	 * mapping with a write seal applied.  Private mappings are always
	 * writable.
	 */
	if ((flags & MAP_SHARED) == 0) {
		if ((max_maxprot & VM_PROT_WRITE) != 0)
			maxprot |= VM_PROT_WRITE;
		writecnt = false;
	} else {
		if ((fp->f_flag & FWRITE) != 0 &&
		    (shmfd->shm_seals & F_SEAL_WRITE) == 0)
			maxprot |= VM_PROT_WRITE;

		/*
		 * Any mappings from a writable descriptor may be upgraded to
		 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
		 * applied between the open and subsequent mmap(2).  We want to
		 * reject application of a write seal as long as any such
		 * mapping exists so that the seal cannot be trivially bypassed.
		 */
		writecnt = (maxprot & VM_PROT_WRITE) != 0;
		if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
			error = EACCES;
			goto out;
		}
	}
	maxprot &= max_maxprot;

	/* See comment in vn_mmap(). */
	if (
#ifdef _LP64
	    objsize > OFF_MAX ||
#endif
	    foff > OFF_MAX - objsize) {
		error = EINVAL;
		goto out;
	}

#ifdef MAC
	error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
	if (error != 0)
		goto out;
#endif

	mtx_lock(&shm_timestamp_lock);
	vfs_timestamp(&shmfd->shm_atime);
	mtx_unlock(&shm_timestamp_lock);
	vm_object_reference(shmfd->shm_object);

	if (shm_largepage(shmfd)) {
		writecnt = false;
		error = shm_mmap_large(shmfd, map, addr, objsize, prot,
		    maxprot, flags, foff, td);
	} else {
		if (writecnt) {
			vm_pager_update_writecount(shmfd->shm_object, 0,
			    objsize);
		}
		error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
		    shmfd->shm_object, foff, writecnt, td);
	}
	if (error != 0) {
		if (writecnt)
			vm_pager_release_writecount(shmfd->shm_object, 0,
			    objsize);
		vm_object_deallocate(shmfd->shm_object);
	}
out:
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}

static int
shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
	/*
	 * SUSv4 says that x bits of permission need not be affected.
	 * Be consistent with our shm_open there.
	 */
#ifdef MAC
	error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
	if (error != 0)
		goto out;
#endif
	error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
	    VADMIN, active_cred);
	if (error != 0)
		goto out;
	shmfd->shm_mode = mode & ACCESSPERMS;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

static int
shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
    struct thread *td)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = fp->f_data;
	mtx_lock(&shm_timestamp_lock);
#ifdef MAC
	error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
	if (error != 0)
		goto out;
#endif
	if (uid == (uid_t)-1)
		uid = shmfd->shm_uid;
	if (gid == (gid_t)-1)
		gid = shmfd->shm_gid;
	if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
	    (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
	    (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
		goto out;
	shmfd->shm_uid = uid;
	shmfd->shm_gid = gid;
out:
	mtx_unlock(&shm_timestamp_lock);
	return (error);
}

/*
 * Helper routines to allow the backing object of a shared memory file
 * descriptor to be mapped in the kernel.
 */
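/*
 * Illustrative in-kernel usage (a sketch only): map the first page of a
 * shm file descriptor and release it again.
 *
 *	void *mem;
 *	if (shm_map(fp, PAGE_SIZE, 0, &mem) == 0) {
 *		...
 *		shm_unmap(fp, mem, PAGE_SIZE);
 *	}
 */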
1830 int
shm_map(struct file * fp,size_t size,off_t offset,void ** memp)1831 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1832 {
1833 struct shmfd *shmfd;
1834 vm_offset_t kva, ofs;
1835 vm_object_t obj;
1836 int rv;
1837
1838 if (fp->f_type != DTYPE_SHM)
1839 return (EINVAL);
1840 shmfd = fp->f_data;
1841 obj = shmfd->shm_object;
1842 VM_OBJECT_WLOCK(obj);
1843 /*
1844 * XXXRW: This validation is probably insufficient, and subject to
1845 * sign errors. It should be fixed.
1846 */
1847 if (offset >= shmfd->shm_size ||
1848 offset + size > round_page(shmfd->shm_size)) {
1849 VM_OBJECT_WUNLOCK(obj);
1850 return (EINVAL);
1851 }
1852
1853 shmfd->shm_kmappings++;
1854 vm_object_reference_locked(obj);
1855 VM_OBJECT_WUNLOCK(obj);
1856
1857 /* Map the object into the kernel_map and wire it. */
1858 kva = vm_map_min(kernel_map);
1859 ofs = offset & PAGE_MASK;
1860 offset = trunc_page(offset);
1861 size = round_page(size + ofs);
1862 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1863 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1864 VM_PROT_READ | VM_PROT_WRITE, 0);
1865 if (rv == KERN_SUCCESS) {
1866 rv = vm_map_wire(kernel_map, kva, kva + size,
1867 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1868 if (rv == KERN_SUCCESS) {
1869 *memp = (void *)(kva + ofs);
1870 return (0);
1871 }
1872 vm_map_remove(kernel_map, kva, kva + size);
1873 } else
1874 vm_object_deallocate(obj);
1875
1876 /* On failure, drop our mapping reference. */
1877 VM_OBJECT_WLOCK(obj);
1878 shmfd->shm_kmappings--;
1879 VM_OBJECT_WUNLOCK(obj);
1880
1881 return (vm_mmap_to_errno(rv));
1882 }

/*
 * We require the caller to unmap the entire entry.  This allows us to
 * safely decrement shm_kmappings when a mapping is removed.
 */
int
shm_unmap(struct file *fp, void *mem, size_t size)
{
	struct shmfd *shmfd;
	vm_map_entry_t entry;
	vm_offset_t kva, ofs;
	vm_object_t obj;
	vm_pindex_t pindex;
	vm_prot_t prot;
	boolean_t wired;
	vm_map_t map;
	int rv;

	if (fp->f_type != DTYPE_SHM)
		return (EINVAL);
	shmfd = fp->f_data;
	kva = (vm_offset_t)mem;
	ofs = kva & PAGE_MASK;
	kva = trunc_page(kva);
	size = round_page(size + ofs);
	map = kernel_map;
	rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
	    &obj, &pindex, &prot, &wired);
	if (rv != KERN_SUCCESS)
		return (EINVAL);
	if (entry->start != kva || entry->end != kva + size) {
		vm_map_lookup_done(map, entry);
		return (EINVAL);
	}
	vm_map_lookup_done(map, entry);
	if (obj != shmfd->shm_object)
		return (EINVAL);
	vm_map_remove(map, kva, kva + size);
	VM_OBJECT_WLOCK(obj);
	KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
	shmfd->shm_kmappings--;
	VM_OBJECT_WUNLOCK(obj);
	return (0);
}
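
/*
 * A minimal round-trip sketch (hypothetical consumer, not part of this
 * file): the same (mem, size) pair passed out of shm_map() must be
 * handed back to shm_unmap(), since anything else fails the entry
 * bounds check above.
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error != 0)
 *		return (error);
 *	memcpy(buf, mem, len);
 *	error = shm_unmap(fp, mem, len);
 */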

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}
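
/*
 * For example, with pr_path "/jails/a" and shm_path "/jails/a/work",
 * a jailed observer sees "/work".  A path outside the jail root is
 * hidden with EPERM when listing, or reported unmodified otherwise.
 */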

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		if (shm_largepage(shmfd)) {
			error = ENOTSUP;
			goto out;
		}

		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}
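
/*
 * A userspace sketch of the EBUSY behavior above (hypothetical, but
 * using only documented interfaces): sealing against writes fails
 * while a writable mapping exists and succeeds once it is gone.
 *
 *	int fd = memfd_create("demo", MFD_ALLOW_SEALING);
 *	void *p;
 *
 *	ftruncate(fd, getpagesize());
 *	p = mmap(NULL, getpagesize(), PROT_READ | PROT_WRITE,
 *	    MAP_SHARED, fd, 0);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	(fails with EBUSY)
 *	munmap(p, getpagesize());
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_WRITE);	(now succeeds)
 */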

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}

static int
shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
{
	vm_object_t object;
	vm_pindex_t pistart, pi, piend;
	vm_ooffset_t off, len;
	int startofs, endofs, end;
	int error;

	off = *offset;
	len = *length;
	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
	if (off + len > shmfd->shm_size)
		len = shmfd->shm_size - off;
	object = shmfd->shm_object;
	startofs = off & PAGE_MASK;
	endofs = (off + len) & PAGE_MASK;
	pistart = OFF_TO_IDX(off);
	piend = OFF_TO_IDX(off + len);
	pi = OFF_TO_IDX(off + PAGE_MASK);
	error = 0;

	/* Handle the case when offset is on or beyond shm size. */
	if ((off_t)len <= 0) {
		*length = 0;
		return (0);
	}

	VM_OBJECT_WLOCK(object);

	if (startofs != 0) {
		end = pistart != piend ? PAGE_SIZE : endofs;
		error = shm_partial_page_invalidate(object, pistart, startofs,
		    end);
		if (error)
			goto out;
		off += end - startofs;
		len -= end - startofs;
	}

	if (pi < piend) {
		vm_object_page_remove(object, pi, piend, 0);
		off += IDX_TO_OFF(piend - pi);
		len -= IDX_TO_OFF(piend - pi);
	}

	if (endofs != 0 && pistart != piend) {
		error = shm_partial_page_invalidate(object, piend, 0, endofs);
		if (error)
			goto out;
		off += endofs;
		len -= endofs;
	}

out:
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	*offset = off;
	*length = len;
	return (error);
}
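
/*
 * A worked example of the split above, assuming 4 KB pages: for
 * off = 0x1800 and len = 0x2000 (range [0x1800, 0x3800)), the tail of
 * page 1 ([0x800, 0x1000)) and the head of page 3 ([0, 0x800)) are
 * zeroed via shm_partial_page_invalidate(), while page 2, the only
 * fully covered page, is removed outright.
 */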

static int
shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
    struct ucred *active_cred, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	off_t off, len;
	int error;

	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
	    ("shm_fspacectl: non-zero flags"));
	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
	    ("shm_fspacectl: offset/length overflow or underflow"));
	error = EINVAL;
	shmfd = fp->f_data;
	off = *offset;
	len = *length;

	rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
	switch (cmd) {
	case SPACECTL_DEALLOC:
		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
			error = EPERM;
			break;
		}
		error = shm_deallocate(shmfd, &off, &len, flags);
		*offset = off;
		*length = len;
		break;
	default:
		__assert_unreachable();
	}
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}
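
/*
 * This is reached via fspacectl(2); a userspace sketch punching a hole
 * through the first two pages of a shared memory object might look
 * like the following (the trailing NULL declines the "range remaining"
 * report):
 *
 *	struct spacectl_range range = {
 *		.r_offset = 0,
 *		.r_len = 2 * getpagesize(),
 *	};
 *
 *	error = fspacectl(fd, SPACECTL_DEALLOC, &range, 0, NULL);
 */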

static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	size_t size;
	int error;

	/* This assumes that the caller already checked for overflow. */
	error = 0;
	shmfd = fp->f_data;
	size = offset + len;

	/*
	 * Just grab the rangelock for the range that we may be attempting to
	 * grow, rather than blocking read/write for regions we won't be
	 * touching while this (potential) resize is in progress.  Other
	 * attempts to resize the shmfd will have to take a write lock from 0 to
	 * OFF_MAX, so this being potentially beyond the current usable range of
	 * the shmfd is not necessarily a concern.  If other mechanisms are
	 * added to grow a shmfd, this may need to be re-evaluated.
	 */
	rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
	if (size > shmfd->shm_size)
		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
	shm_rangelock_unlock(shmfd, rl_cookie);
	/* Translate to posix_fallocate(2) return value as needed. */
	if (error == ENOMEM)
		error = ENOSPC;
	return (error);
}
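
/*
 * A userspace sketch of the path above: posix_fallocate(2) on a shm
 * descriptor grows the object as needed, and exhaustion of the swap
 * reservation is reported as ENOSPC rather than ENOMEM.  Note that
 * posix_fallocate(2) returns the error number instead of setting errno:
 *
 *	if ((error = posix_fallocate(fd, 0, 1024 * 1024)) != 0)
 *		errc(1, error, "posix_fallocate");
 */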

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif = {};
	u_long i;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM) {
				error = 0;
				continue;
			}
			if (error != 0)
				break;
			pack_kinfo(&kif);
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}
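
/*
 * The exported list is a packed array of struct kinfo_file records;
 * each record's kf_structsize gives its length.  posixshmcontrol(1)
 * consumes it, and a minimal reader sketch (error handling omitted)
 * might fetch it with:
 *
 *	size_t len;
 *	void *buf;
 *
 *	sysctlbyname("kern.ipc.posix_shm_list", NULL, &len, NULL, 0);
 *	buf = malloc(len);
 *	sysctlbyname("kern.ipc.posix_shm_list", buf, &len, NULL, 0);
 */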

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");

int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL, NULL));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but
 * could potentially be exported later via some interface for debugging
 * purposes.  From the kernel's perspective, it is optional.  Individual
 * consumers like memfd_create() may require it in order to be compatible with
 * other systems implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, NULL, uap->name, NULL));
}
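
/*
 * For illustration (a sketch of the libc side, not authoritative): the
 * traditional wrapper is roughly
 *
 *	int
 *	shm_open(const char *path, int flags, mode_t mode)
 *	{
 *		return (__sys_shm_open2(path, flags | O_CLOEXEC, mode, 0,
 *		    NULL));
 *	}
 *
 * while memfd_create(3) passes SHM_ANON, a non-NULL name, and shmflags
 * such as SHM_ALLOW_SEALING and SHM_GROW_ON_WRITE depending on its
 * MFD_* arguments.
 */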

int
shm_get_path(struct vm_object *obj, char *path, size_t sz)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = NULL;
	sx_slock(&shm_dict_lock);
	VM_OBJECT_RLOCK(obj);
	if ((obj->flags & OBJ_POSIXSHM) == 0) {
		error = EINVAL;
	} else {
		if (obj->type == shmfd_pager_type)
			shmfd = obj->un_pager.swp.swp_priv;
		else if (obj->type == OBJT_PHYS)
			shmfd = obj->un_pager.phys.phys_priv;
		if (shmfd == NULL) {
			error = ENXIO;
		} else {
			strlcpy(path, shmfd->shm_path == NULL ? "anon" :
			    shmfd->shm_path, sz);
		}
	}
	if (error != 0)
		path[0] = '\0';
	VM_OBJECT_RUNLOCK(obj);
	sx_sunlock(&shm_dict_lock);
	return (error);
}
