1 /*-
2 * SPDX-License-Identifier: BSD-2-Clause
3 *
4 * Copyright (c) 2006, 2011, 2016-2017 Robert N. M. Watson
5 * Copyright 2020 The FreeBSD Foundation
6 * All rights reserved.
7 *
8 * Portions of this software were developed by BAE Systems, the University of
9 * Cambridge Computer Laboratory, and Memorial University under DARPA/AFRL
10 * contract FA8650-15-C-7558 ("CADETS"), as part of the DARPA Transparent
11 * Computing (TC) research program.
12 *
13 * Portions of this software were developed by Konstantin Belousov
14 * under sponsorship from the FreeBSD Foundation.
15 *
16 * Redistribution and use in source and binary forms, with or without
17 * modification, are permitted provided that the following conditions
18 * are met:
19 * 1. Redistributions of source code must retain the above copyright
20 * notice, this list of conditions and the following disclaimer.
21 * 2. Redistributions in binary form must reproduce the above copyright
22 * notice, this list of conditions and the following disclaimer in the
23 * documentation and/or other materials provided with the distribution.
24 *
25 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
26 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
27 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
28 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
29 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
30 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
31 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
32 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
33 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
34 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
35 * SUCH DAMAGE.
36 */
37
38 /*
39 * Support for shared swap-backed anonymous memory objects via
40 * shm_open(2), shm_rename(2), and shm_unlink(2).
41 * While most of the implementation is here, vm_mmap.c contains
42 * mapping logic changes.
43 *
44 * posixshmcontrol(1) allows users to inspect the state of the memory
45 * objects. The per-uid swap resource limit controls the total amount
46 * of memory that a user can consume for anonymous objects, including
47 * shared ones.
48 */
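
/*
 * A minimal userland sketch of the lifecycle implemented below; this is
 * illustrative only (it is not kernel code), error handling is elided,
 * and the object name "/example" is hypothetical:
 *
 *	#include <sys/mman.h>
 *	#include <fcntl.h>
 *	#include <unistd.h>
 *
 *	int fd = shm_open("/example", O_RDWR | O_CREAT, 0600);
 *	ftruncate(fd, 4096);		// serviced by shm_dotruncate()
 *	char *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);			// serviced by shm_mmap()
 *	p[0] = 1;
 *	munmap(p, 4096);
 *	close(fd);
 *	shm_unlink("/example");		// drops the name; the object is
 *					// destroyed with its last reference
 */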
49
50 #include <sys/cdefs.h>
51 #include "opt_capsicum.h"
52 #include "opt_ktrace.h"
53
54 #include <sys/param.h>
55 #include <sys/capsicum.h>
56 #include <sys/conf.h>
57 #include <sys/fcntl.h>
58 #include <sys/file.h>
59 #include <sys/filedesc.h>
60 #include <sys/filio.h>
61 #include <sys/fnv_hash.h>
62 #include <sys/kernel.h>
63 #include <sys/limits.h>
64 #include <sys/uio.h>
65 #include <sys/signal.h>
66 #include <sys/jail.h>
67 #include <sys/ktrace.h>
68 #include <sys/lock.h>
69 #include <sys/malloc.h>
70 #include <sys/mman.h>
71 #include <sys/mutex.h>
72 #include <sys/priv.h>
73 #include <sys/proc.h>
74 #include <sys/refcount.h>
75 #include <sys/resourcevar.h>
76 #include <sys/rwlock.h>
77 #include <sys/sbuf.h>
78 #include <sys/stat.h>
79 #include <sys/syscallsubr.h>
80 #include <sys/sysctl.h>
81 #include <sys/sysproto.h>
82 #include <sys/systm.h>
83 #include <sys/sx.h>
84 #include <sys/time.h>
85 #include <sys/vmmeter.h>
86 #include <sys/vnode.h>
87 #include <sys/unistd.h>
88 #include <sys/user.h>
89
90 #include <security/audit/audit.h>
91 #include <security/mac/mac_framework.h>
92
93 #include <vm/vm.h>
94 #include <vm/vm_param.h>
95 #include <vm/pmap.h>
96 #include <vm/vm_extern.h>
97 #include <vm/vm_map.h>
98 #include <vm/vm_kern.h>
99 #include <vm/vm_object.h>
100 #include <vm/vm_page.h>
101 #include <vm/vm_pageout.h>
102 #include <vm/vm_pager.h>
103 #include <vm/vm_radix.h>
104 #include <vm/swap_pager.h>
105
106 struct shm_mapping {
107 char *sm_path;
108 Fnv32_t sm_fnv;
109 struct shmfd *sm_shmfd;
110 LIST_ENTRY(shm_mapping) sm_link;
111 };
112
113 static MALLOC_DEFINE(M_SHMFD, "shmfd", "shared memory file descriptor");
114 static LIST_HEAD(, shm_mapping) *shm_dictionary;
115 static struct sx shm_dict_lock;
116 static struct mtx shm_timestamp_lock;
117 static u_long shm_hash;
118 static struct unrhdr64 shm_ino_unr;
119 static dev_t shm_dev_ino;
120
121 #define SHM_HASH(fnv) (&shm_dictionary[(fnv) & shm_hash])
122
123 static void shm_init(void *arg);
124 static void shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd);
125 static struct shmfd *shm_lookup(char *path, Fnv32_t fnv);
126 static int shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred);
127 static void shm_doremove(struct shm_mapping *map);
128 static int shm_dotruncate_cookie(struct shmfd *shmfd, off_t length,
129 void *rl_cookie);
130 static int shm_dotruncate_locked(struct shmfd *shmfd, off_t length,
131 void *rl_cookie);
132 static int shm_copyin_path(struct thread *td, const char *userpath_in,
133 char **path_out);
134 static int shm_deallocate(struct shmfd *shmfd, off_t *offset,
135 off_t *length, int flags);
136
137 static fo_rdwr_t shm_read;
138 static fo_rdwr_t shm_write;
139 static fo_truncate_t shm_truncate;
140 static fo_ioctl_t shm_ioctl;
141 static fo_stat_t shm_stat;
142 static fo_close_t shm_close;
143 static fo_chmod_t shm_chmod;
144 static fo_chown_t shm_chown;
145 static fo_seek_t shm_seek;
146 static fo_fill_kinfo_t shm_fill_kinfo;
147 static fo_mmap_t shm_mmap;
148 static fo_get_seals_t shm_get_seals;
149 static fo_add_seals_t shm_add_seals;
150 static fo_fallocate_t shm_fallocate;
151 static fo_fspacectl_t shm_fspacectl;
152
153 /* File descriptor operations. */
154 const struct fileops shm_ops = {
155 .fo_read = shm_read,
156 .fo_write = shm_write,
157 .fo_truncate = shm_truncate,
158 .fo_ioctl = shm_ioctl,
159 .fo_poll = invfo_poll,
160 .fo_kqfilter = invfo_kqfilter,
161 .fo_stat = shm_stat,
162 .fo_close = shm_close,
163 .fo_chmod = shm_chmod,
164 .fo_chown = shm_chown,
165 .fo_sendfile = vn_sendfile,
166 .fo_seek = shm_seek,
167 .fo_fill_kinfo = shm_fill_kinfo,
168 .fo_mmap = shm_mmap,
169 .fo_get_seals = shm_get_seals,
170 .fo_add_seals = shm_add_seals,
171 .fo_fallocate = shm_fallocate,
172 .fo_fspacectl = shm_fspacectl,
173 .fo_cmp = file_kcmp_generic,
174 .fo_flags = DFLAG_PASSABLE | DFLAG_SEEKABLE,
175 };
176
177 FEATURE(posix_shm, "POSIX shared memory");
178
179 static SYSCTL_NODE(_vm, OID_AUTO, largepages, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
180 "");
181
182 static int largepage_reclaim_tries = 1;
183 SYSCTL_INT(_vm_largepages, OID_AUTO, reclaim_tries,
184 CTLFLAG_RWTUN, &largepage_reclaim_tries, 0,
185 "Number of contig reclaims before giving up for default alloc policy");
186
187 #define shm_rangelock_unlock(shmfd, cookie) \
188 rangelock_unlock(&(shmfd)->shm_rl, (cookie))
189 #define shm_rangelock_rlock(shmfd, start, end) \
190 rangelock_rlock(&(shmfd)->shm_rl, (start), (end))
191 #define shm_rangelock_tryrlock(shmfd, start, end) \
192 rangelock_tryrlock(&(shmfd)->shm_rl, (start), (end))
193 #define shm_rangelock_wlock(shmfd, start, end) \
194 rangelock_wlock(&(shmfd)->shm_rl, (start), (end))
195
196 static int
197 uiomove_object_page(vm_object_t obj, size_t len, struct uio *uio)
198 {
199 struct pctrie_iter pages;
200 vm_page_t m;
201 vm_pindex_t idx;
202 size_t tlen;
203 int error, offset, rv;
204
205 idx = OFF_TO_IDX(uio->uio_offset);
206 offset = uio->uio_offset & PAGE_MASK;
207 tlen = MIN(PAGE_SIZE - offset, len);
208
209 rv = vm_page_grab_valid_unlocked(&m, obj, idx,
210 VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY | VM_ALLOC_NOCREAT);
211 if (rv == VM_PAGER_OK)
212 goto found;
213
214 /*
215 * Read I/O without either a corresponding resident page or swap
216 * page: use zero_region. This is intended to avoid instantiating
217 * pages on read from a sparse region.
218 */
219 vm_page_iter_init(&pages, obj);
220 VM_OBJECT_WLOCK(obj);
221 m = vm_radix_iter_lookup(&pages, idx);
222 if (uio->uio_rw == UIO_READ && m == NULL &&
223 !vm_pager_has_page(obj, idx, NULL, NULL)) {
224 VM_OBJECT_WUNLOCK(obj);
225 return (uiomove(__DECONST(void *, zero_region), tlen, uio));
226 }
227
228 /*
229 * Although the tmpfs vnode lock is held here, it is
230 * nonetheless safe to sleep waiting for a free page. The
231 * pageout daemon does not need to acquire the tmpfs vnode
232 * lock to page out tobj's pages because tobj is an OBJT_SWAP
233 * type object.
234 */
235 rv = vm_page_grab_valid_iter(&m, obj, idx,
236 VM_ALLOC_NORMAL | VM_ALLOC_SBUSY | VM_ALLOC_IGN_SBUSY, &pages);
237 if (rv != VM_PAGER_OK) {
238 VM_OBJECT_WUNLOCK(obj);
239 if (bootverbose) {
240 printf("uiomove_object: vm_obj %p idx %jd "
241 "pager error %d\n", obj, idx, rv);
242 }
243 return (rv == VM_PAGER_AGAIN ? ENOSPC : EIO);
244 }
245 VM_OBJECT_WUNLOCK(obj);
246
247 found:
248 error = uiomove_fromphys(&m, offset, tlen, uio);
249 if (uio->uio_rw == UIO_WRITE && error == 0)
250 vm_page_set_dirty(m);
251 vm_page_activate(m);
252 vm_page_sunbusy(m);
253
254 return (error);
255 }
256
257 int
258 uiomove_object(vm_object_t obj, off_t obj_size, struct uio *uio)
259 {
260 ssize_t resid;
261 size_t len;
262 int error;
263
264 error = 0;
265 while ((resid = uio->uio_resid) > 0) {
266 if (obj_size <= uio->uio_offset)
267 break;
268 len = MIN(obj_size - uio->uio_offset, resid);
269 if (len == 0)
270 break;
271 error = uiomove_object_page(obj, len, uio);
272 if (error != 0 || resid == uio->uio_resid)
273 break;
274 }
275 return (error);
276 }
277
278 static u_long count_largepages[MAXPAGESIZES];
279
280 static int
281 shm_largepage_phys_populate(vm_object_t object, vm_pindex_t pidx,
282 int fault_type, vm_prot_t max_prot, vm_pindex_t *first, vm_pindex_t *last)
283 {
284 vm_page_t m __diagused;
285 int psind;
286
287 psind = object->un_pager.phys.data_val;
288 if (psind == 0 || pidx >= object->size)
289 return (VM_PAGER_FAIL);
290 *first = rounddown2(pidx, pagesizes[psind] / PAGE_SIZE);
291
292 /*
293 * We only busy the first page in the superpage run. It is
294 * useless to busy the whole run since we only remove full
295 * superpages, and it takes too long to busy e.g. 512 * 512 ==
296 * 262144 pages constituting a 1G amd64 superpage.
297 */
298 m = vm_page_grab(object, *first, VM_ALLOC_NORMAL | VM_ALLOC_NOCREAT);
299 MPASS(m != NULL);
300
301 *last = *first + atop(pagesizes[psind]) - 1;
302 return (VM_PAGER_OK);
303 }
304
305 static boolean_t
306 shm_largepage_phys_haspage(vm_object_t object, vm_pindex_t pindex,
307 int *before, int *after)
308 {
309 int psind;
310
311 psind = object->un_pager.phys.data_val;
312 if (psind == 0 || pindex >= object->size)
313 return (FALSE);
314 if (before != NULL) {
315 *before = pindex - rounddown2(pindex, pagesizes[psind] /
316 PAGE_SIZE);
317 }
318 if (after != NULL) {
319 *after = roundup2(pindex, pagesizes[psind] / PAGE_SIZE) -
320 pindex;
321 }
322 return (TRUE);
323 }
324
325 static void
326 shm_largepage_phys_ctor(vm_object_t object, vm_prot_t prot,
327 vm_ooffset_t foff, struct ucred *cred)
328 {
329 }
330
331 static void
332 shm_largepage_phys_dtor(vm_object_t object)
333 {
334 int psind;
335
336 psind = object->un_pager.phys.data_val;
337 if (psind != 0) {
338 atomic_subtract_long(&count_largepages[psind],
339 object->size / (pagesizes[psind] / PAGE_SIZE));
340 vm_wire_sub(object->size);
341 } else {
342 KASSERT(object->size == 0,
343 ("largepage phys obj %p not initialized bit size %#jx > 0",
344 object, (uintmax_t)object->size));
345 }
346 }
347
348 static const struct phys_pager_ops shm_largepage_phys_ops = {
349 .phys_pg_populate = shm_largepage_phys_populate,
350 .phys_pg_haspage = shm_largepage_phys_haspage,
351 .phys_pg_ctor = shm_largepage_phys_ctor,
352 .phys_pg_dtor = shm_largepage_phys_dtor,
353 };
354
355 bool
356 shm_largepage(struct shmfd *shmfd)
357 {
358 return (shmfd->shm_object->type == OBJT_PHYS);
359 }
360
361 static void
362 shm_pager_freespace(vm_object_t obj, vm_pindex_t start, vm_size_t size)
363 {
364 struct shmfd *shm;
365 vm_size_t c;
366
367 swap_pager_freespace(obj, start, size, &c);
368 if (c == 0)
369 return;
370
371 shm = obj->un_pager.swp.swp_priv;
372 if (shm == NULL)
373 return;
374 KASSERT(shm->shm_pages >= c,
375 ("shm %p pages %jd free %jd", shm,
376 (uintmax_t)shm->shm_pages, (uintmax_t)c));
377 shm->shm_pages -= c;
378 }
379
380 static void
381 shm_page_inserted(vm_object_t obj, vm_page_t m)
382 {
383 struct shmfd *shm;
384
385 shm = obj->un_pager.swp.swp_priv;
386 if (shm == NULL)
387 return;
388 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL))
389 shm->shm_pages += 1;
390 }
391
392 static void
393 shm_page_removed(vm_object_t obj, vm_page_t m)
394 {
395 struct shmfd *shm;
396
397 shm = obj->un_pager.swp.swp_priv;
398 if (shm == NULL)
399 return;
400 if (!vm_pager_has_page(obj, m->pindex, NULL, NULL)) {
401 KASSERT(shm->shm_pages >= 1,
402 ("shm %p pages %jd free 1", shm,
403 (uintmax_t)shm->shm_pages));
404 shm->shm_pages -= 1;
405 }
406 }
407
408 static struct pagerops shm_swap_pager_ops = {
409 .pgo_kvme_type = KVME_TYPE_SWAP,
410 .pgo_freespace = shm_pager_freespace,
411 .pgo_page_inserted = shm_page_inserted,
412 .pgo_page_removed = shm_page_removed,
413 };
414 static int shmfd_pager_type = -1;
415
416 static int
417 shm_seek(struct file *fp, off_t offset, int whence, struct thread *td)
418 {
419 struct shmfd *shmfd;
420 off_t foffset;
421 int error;
422
423 shmfd = fp->f_data;
424 foffset = foffset_lock(fp, 0);
425 error = 0;
426 switch (whence) {
427 case L_INCR:
428 if (foffset < 0 ||
429 (offset > 0 && foffset > OFF_MAX - offset)) {
430 error = EOVERFLOW;
431 break;
432 }
433 offset += foffset;
434 break;
435 case L_XTND:
436 if (offset > 0 && shmfd->shm_size > OFF_MAX - offset) {
437 error = EOVERFLOW;
438 break;
439 }
440 offset += shmfd->shm_size;
441 break;
442 case L_SET:
443 break;
444 default:
445 error = EINVAL;
446 }
447 if (error == 0) {
448 if (offset < 0 || offset > shmfd->shm_size)
449 error = EINVAL;
450 else
451 td->td_uretoff.tdu_off = offset;
452 }
453 foffset_unlock(fp, offset, error != 0 ? FOF_NOUPDATE : 0);
454 return (error);
455 }
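
/*
 * Note that, unlike lseek(2) on a regular file, seeking beyond the
 * current size is rejected above with EINVAL. An illustrative sketch
 * (fd is an open shared memory descriptor):
 *
 *	ftruncate(fd, 4096);
 *	lseek(fd, 4096, SEEK_SET);	// succeeds: offset at EOF
 *	lseek(fd, 8192, SEEK_SET);	// fails with EINVAL
 */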
456
457 static int
458 shm_read(struct file *fp, struct uio *uio, struct ucred *active_cred,
459 int flags, struct thread *td)
460 {
461 struct shmfd *shmfd;
462 void *rl_cookie;
463 int error;
464
465 shmfd = fp->f_data;
466 #ifdef MAC
467 error = mac_posixshm_check_read(active_cred, fp->f_cred, shmfd);
468 if (error)
469 return (error);
470 #endif
471 foffset_lock_uio(fp, uio, flags);
472 rl_cookie = shm_rangelock_rlock(shmfd, uio->uio_offset,
473 uio->uio_offset + uio->uio_resid);
474 error = uiomove_object(shmfd->shm_object, shmfd->shm_size, uio);
475 shm_rangelock_unlock(shmfd, rl_cookie);
476 foffset_unlock_uio(fp, uio, flags);
477 return (error);
478 }
479
480 static int
481 shm_write(struct file *fp, struct uio *uio, struct ucred *active_cred,
482 int flags, struct thread *td)
483 {
484 struct shmfd *shmfd;
485 void *rl_cookie;
486 int error;
487 off_t newsize;
488
489 KASSERT((flags & FOF_OFFSET) == 0 || uio->uio_offset >= 0,
490 ("%s: negative offset", __func__));
491
492 shmfd = fp->f_data;
493 #ifdef MAC
494 error = mac_posixshm_check_write(active_cred, fp->f_cred, shmfd);
495 if (error)
496 return (error);
497 #endif
498 if (shm_largepage(shmfd) && shmfd->shm_lp_psind == 0)
499 return (EINVAL);
500 foffset_lock_uio(fp, uio, flags);
501 if (uio->uio_resid > OFF_MAX - uio->uio_offset) {
502 /*
503 * Overflow is only an error if we're supposed to expand on
504 * write. Otherwise, we'll just truncate the write to the
505 * size of the file, which can only grow up to OFF_MAX.
506 */
507 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0) {
508 foffset_unlock_uio(fp, uio, flags);
509 return (EFBIG);
510 }
511
512 newsize = atomic_load_64(&shmfd->shm_size);
513 } else {
514 newsize = uio->uio_offset + uio->uio_resid;
515 }
516 if ((flags & FOF_OFFSET) == 0)
517 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
518 else
519 rl_cookie = shm_rangelock_wlock(shmfd, uio->uio_offset,
520 MAX(newsize, uio->uio_offset));
521 if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
522 error = EPERM;
523 } else {
524 error = 0;
525 if ((shmfd->shm_flags & SHM_GROW_ON_WRITE) != 0 &&
526 newsize > shmfd->shm_size) {
527 error = shm_dotruncate_cookie(shmfd, newsize,
528 rl_cookie);
529 }
530 if (error == 0)
531 error = uiomove_object(shmfd->shm_object,
532 shmfd->shm_size, uio);
533 }
534 shm_rangelock_unlock(shmfd, rl_cookie);
535 foffset_unlock_uio(fp, uio, flags);
536 return (error);
537 }
538
539 static int
540 shm_truncate(struct file *fp, off_t length, struct ucred *active_cred,
541 struct thread *td)
542 {
543 struct shmfd *shmfd;
544 #ifdef MAC
545 int error;
546 #endif
547
548 shmfd = fp->f_data;
549 #ifdef MAC
550 error = mac_posixshm_check_truncate(active_cred, fp->f_cred, shmfd);
551 if (error)
552 return (error);
553 #endif
554 return (shm_dotruncate(shmfd, length));
555 }
556
557 int
558 shm_ioctl(struct file *fp, u_long com, void *data, struct ucred *active_cred,
559 struct thread *td)
560 {
561 struct shmfd *shmfd;
562 struct shm_largepage_conf *conf;
563 void *rl_cookie;
564
565 shmfd = fp->f_data;
566 switch (com) {
567 case FIONBIO:
568 case FIOASYNC:
569 /*
570 * Allow fcntl(fd, F_SETFL, O_NONBLOCK) to work,
571 * just like it would on an unlinked regular file.
572 */
573 return (0);
574 case FIOSSHMLPGCNF:
575 if (!shm_largepage(shmfd))
576 return (ENOTTY);
577 conf = data;
578 if (shmfd->shm_lp_psind != 0 &&
579 conf->psind != shmfd->shm_lp_psind)
580 return (EINVAL);
581 if (conf->psind <= 0 || conf->psind >= MAXPAGESIZES ||
582 pagesizes[conf->psind] == 0)
583 return (EINVAL);
584 if (conf->alloc_policy != SHM_LARGEPAGE_ALLOC_DEFAULT &&
585 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_NOWAIT &&
586 conf->alloc_policy != SHM_LARGEPAGE_ALLOC_HARD)
587 return (EINVAL);
588
589 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
590 shmfd->shm_lp_psind = conf->psind;
591 shmfd->shm_lp_alloc_policy = conf->alloc_policy;
592 shmfd->shm_object->un_pager.phys.data_val = conf->psind;
593 shm_rangelock_unlock(shmfd, rl_cookie);
594 return (0);
595 case FIOGSHMLPGCNF:
596 if (!shm_largepage(shmfd))
597 return (ENOTTY);
598 conf = data;
599 rl_cookie = shm_rangelock_rlock(shmfd, 0, OFF_MAX);
600 conf->psind = shmfd->shm_lp_psind;
601 conf->alloc_policy = shmfd->shm_lp_alloc_policy;
602 shm_rangelock_unlock(shmfd, rl_cookie);
603 return (0);
604 default:
605 return (ENOTTY);
606 }
607 }
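
/*
 * An illustrative userland sketch of how the FIOSSHMLPGCNF case above is
 * reached; the page size index 1 is an assumption (indices are MD and
 * should be discovered via getpagesizes(3)), and error handling is
 * elided:
 *
 *	struct shm_largepage_conf conf = {
 *		.psind = 1,		// e.g. 2M superpages on amd64
 *		.alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT,
 *	};
 *	int fd = shm_open2(SHM_ANON, O_CREAT | O_RDWR, 0600,
 *	    SHM_LARGEPAGE, NULL);
 *	ioctl(fd, FIOSSHMLPGCNF, &conf);	// must precede ftruncate()
 *	ftruncate(fd, 2 * 1024 * 1024);
 *
 * The shm_create_largepage(3) wrapper performs an equivalent sequence.
 */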
608
609 static int
610 shm_stat(struct file *fp, struct stat *sb, struct ucred *active_cred)
611 {
612 struct shmfd *shmfd;
613 #ifdef MAC
614 int error;
615 #endif
616
617 shmfd = fp->f_data;
618
619 #ifdef MAC
620 error = mac_posixshm_check_stat(active_cred, fp->f_cred, shmfd);
621 if (error)
622 return (error);
623 #endif
624
625 /*
626 * Attempt to return sane-ish values for fstat() on a memory file
627 * descriptor.
628 */
629 bzero(sb, sizeof(*sb));
630 sb->st_blksize = PAGE_SIZE;
631 sb->st_size = shmfd->shm_size;
632 mtx_lock(&shm_timestamp_lock);
633 sb->st_atim = shmfd->shm_atime;
634 sb->st_ctim = shmfd->shm_ctime;
635 sb->st_mtim = shmfd->shm_mtime;
636 sb->st_birthtim = shmfd->shm_birthtime;
637 sb->st_mode = S_IFREG | shmfd->shm_mode; /* XXX */
638 sb->st_uid = shmfd->shm_uid;
639 sb->st_gid = shmfd->shm_gid;
640 mtx_unlock(&shm_timestamp_lock);
641 sb->st_dev = shm_dev_ino;
642 sb->st_ino = shmfd->shm_ino;
643 sb->st_nlink = shmfd->shm_object->ref_count;
644 if (shm_largepage(shmfd)) {
645 sb->st_blocks = shmfd->shm_object->size /
646 (pagesizes[shmfd->shm_lp_psind] >> PAGE_SHIFT);
647 } else {
648 sb->st_blocks = shmfd->shm_pages;
649 }
650
651 return (0);
652 }
653
654 static int
655 shm_close(struct file *fp, struct thread *td)
656 {
657 struct shmfd *shmfd;
658
659 shmfd = fp->f_data;
660 fp->f_data = NULL;
661 shm_drop(shmfd);
662
663 return (0);
664 }
665
666 static int
667 shm_copyin_path(struct thread *td, const char *userpath_in, char **path_out)
{
668 int error;
669 char *path;
670 const char *pr_path;
671 size_t pr_pathlen;
672
673 path = malloc(MAXPATHLEN, M_SHMFD, M_WAITOK);
674 pr_path = td->td_ucred->cr_prison->pr_path;
675
676 /* Construct a full pathname for jailed callers. */
677 pr_pathlen = strcmp(pr_path, "/") ==
678 0 ? 0 : strlcpy(path, pr_path, MAXPATHLEN);
679 error = copyinstr(userpath_in, path + pr_pathlen,
680 MAXPATHLEN - pr_pathlen, NULL);
681 if (error != 0)
682 goto out;
683
684 #ifdef KTRACE
685 if (KTRPOINT(curthread, KTR_NAMEI))
686 ktrnamei(path);
687 #endif
688
689 /* Require paths to start with a '/' character. */
690 if (path[pr_pathlen] != '/') {
691 error = EINVAL;
692 goto out;
693 }
694
695 *path_out = path;
696
697 out:
698 if (error != 0)
699 free(path, M_SHMFD);
700
701 return (error);
702 }
703
704 static int
705 shm_partial_page_invalidate(vm_object_t object, vm_pindex_t idx, int base,
706 int end)
707 {
708 int error;
709
710 error = vm_page_grab_zero_partial(object, idx, base, end);
711 if (error == EIO)
712 VM_OBJECT_WUNLOCK(object);
713 return (error);
714 }
715
716 static int
717 shm_dotruncate_locked(struct shmfd *shmfd, off_t length, void *rl_cookie)
718 {
719 vm_object_t object;
720 vm_pindex_t nobjsize;
721 vm_ooffset_t delta;
722 int base, error;
723
724 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
725 object = shmfd->shm_object;
726 VM_OBJECT_ASSERT_WLOCKED(object);
727 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
728 if (length == shmfd->shm_size)
729 return (0);
730 nobjsize = OFF_TO_IDX(length + PAGE_MASK);
731
732 /* Are we shrinking? If so, trim the end. */
733 if (length < shmfd->shm_size) {
734 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
735 return (EPERM);
736
737 /*
738 * Disallow any requests to shrink the size if this
739 * object is mapped into the kernel.
740 */
741 if (shmfd->shm_kmappings > 0)
742 return (EBUSY);
743
744 /*
745 * Zero the truncated part of the last page.
746 */
747 base = length & PAGE_MASK;
748 if (base != 0) {
749 error = shm_partial_page_invalidate(object,
750 OFF_TO_IDX(length), base, PAGE_SIZE);
751 if (error)
752 return (error);
753 }
754 delta = IDX_TO_OFF(object->size - nobjsize);
755
756 if (nobjsize < object->size)
757 vm_object_page_remove(object, nobjsize, object->size,
758 0);
759
760 /* Free the swap accounted for shm */
761 swap_release_by_cred(delta, object->cred);
762 object->charge -= delta;
763 } else {
764 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
765 return (EPERM);
766
767 /* Try to reserve additional swap space. */
768 delta = IDX_TO_OFF(nobjsize - object->size);
769 if (!swap_reserve_by_cred(delta, object->cred))
770 return (ENOMEM);
771 object->charge += delta;
772 }
773 shmfd->shm_size = length;
774 mtx_lock(&shm_timestamp_lock);
775 vfs_timestamp(&shmfd->shm_ctime);
776 shmfd->shm_mtime = shmfd->shm_ctime;
777 mtx_unlock(&shm_timestamp_lock);
778 object->size = nobjsize;
779 return (0);
780 }
781
782 static int
783 shm_dotruncate_largepage(struct shmfd *shmfd, off_t length, void *rl_cookie)
784 {
785 vm_object_t object;
786 vm_page_t m;
787 vm_pindex_t newobjsz;
788 vm_pindex_t oldobjsz __unused;
789 int aflags, error, i, psind, try;
790
791 KASSERT(length >= 0, ("shm_dotruncate: length < 0"));
792 object = shmfd->shm_object;
793 VM_OBJECT_ASSERT_WLOCKED(object);
794 rangelock_cookie_assert(rl_cookie, RA_WLOCKED);
795
796 oldobjsz = object->size;
797 newobjsz = OFF_TO_IDX(length);
798 if (length == shmfd->shm_size)
799 return (0);
800 psind = shmfd->shm_lp_psind;
801 if (psind == 0 && length != 0)
802 return (EINVAL);
803 if ((length & (pagesizes[psind] - 1)) != 0)
804 return (EINVAL);
805
806 if (length < shmfd->shm_size) {
807 if ((shmfd->shm_seals & F_SEAL_SHRINK) != 0)
808 return (EPERM);
809 if (shmfd->shm_kmappings > 0)
810 return (EBUSY);
811 return (ENOTSUP); /* Pages are unmanaged. */
812 #if 0
813 vm_object_page_remove(object, newobjsz, oldobjsz, 0);
814 object->size = newobjsz;
815 shmfd->shm_size = length;
816 return (0);
817 #endif
818 }
819
820 if ((shmfd->shm_seals & F_SEAL_GROW) != 0)
821 return (EPERM);
822
823 aflags = VM_ALLOC_NORMAL | VM_ALLOC_ZERO;
824 if (shmfd->shm_lp_alloc_policy == SHM_LARGEPAGE_ALLOC_NOWAIT)
825 aflags |= VM_ALLOC_WAITFAIL;
826 try = 0;
827
828 /*
829 * Extend shmfd and object, keeping all already fully
830 * allocated large pages intact even on error, because the dropped
831 * object lock might have allowed them to be mapped.
832 */
833 while (object->size < newobjsz) {
834 m = vm_page_alloc_contig(object, object->size, aflags,
835 pagesizes[psind] / PAGE_SIZE, 0, ~0,
836 pagesizes[psind], 0,
837 VM_MEMATTR_DEFAULT);
838 if (m == NULL) {
839 VM_OBJECT_WUNLOCK(object);
840 if (shmfd->shm_lp_alloc_policy ==
841 SHM_LARGEPAGE_ALLOC_NOWAIT ||
842 (shmfd->shm_lp_alloc_policy ==
843 SHM_LARGEPAGE_ALLOC_DEFAULT &&
844 try >= largepage_reclaim_tries)) {
845 VM_OBJECT_WLOCK(object);
846 return (ENOMEM);
847 }
848 error = vm_page_reclaim_contig(aflags,
849 pagesizes[psind] / PAGE_SIZE, 0, ~0,
850 pagesizes[psind], 0);
851 if (error == ENOMEM)
852 error = vm_wait_intr(object);
853 if (error != 0) {
854 VM_OBJECT_WLOCK(object);
855 return (error);
856 }
857 try++;
858 VM_OBJECT_WLOCK(object);
859 continue;
860 }
861 try = 0;
862 for (i = 0; i < pagesizes[psind] / PAGE_SIZE; i++) {
863 if ((m[i].flags & PG_ZERO) == 0)
864 pmap_zero_page(&m[i]);
865 vm_page_valid(&m[i]);
866 vm_page_xunbusy(&m[i]);
867 }
868 object->size += OFF_TO_IDX(pagesizes[psind]);
869 shmfd->shm_size += pagesizes[psind];
870 atomic_add_long(&count_largepages[psind], 1);
871 vm_wire_add(atop(pagesizes[psind]));
872 }
873 return (0);
874 }
875
876 static int
877 shm_dotruncate_cookie(struct shmfd *shmfd, off_t length, void *rl_cookie)
878 {
879 int error;
880
881 VM_OBJECT_WLOCK(shmfd->shm_object);
882 error = shm_largepage(shmfd) ? shm_dotruncate_largepage(shmfd,
883 length, rl_cookie) : shm_dotruncate_locked(shmfd, length,
884 rl_cookie);
885 VM_OBJECT_WUNLOCK(shmfd->shm_object);
886 return (error);
887 }
888
889 int
890 shm_dotruncate(struct shmfd *shmfd, off_t length)
891 {
892 void *rl_cookie;
893 int error;
894
895 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
896 error = shm_dotruncate_cookie(shmfd, length, rl_cookie);
897 shm_rangelock_unlock(shmfd, rl_cookie);
898 return (error);
899 }
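
/*
 * Truncation interacts with file sealing (see shm_add_seals() below):
 * once F_SEAL_SHRINK is set, shrinking fails with EPERM, and once
 * F_SEAL_GROW is set, growing fails likewise. An illustrative userland
 * sketch, error handling elided:
 *
 *	int fd = shm_open2(SHM_ANON, O_CREAT | O_RDWR, 0600,
 *	    SHM_ALLOW_SEALING, NULL);
 *	ftruncate(fd, 8192);
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_SHRINK);
 *	ftruncate(fd, 4096);		// fails with EPERM
 *	ftruncate(fd, 16384);		// growing is still permitted
 */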
900
901 /*
902 * shmfd object management including creation and reference counting
903 * routines.
904 */
905 struct shmfd *
906 shm_alloc(struct ucred *ucred, mode_t mode, bool largepage)
907 {
908 struct shmfd *shmfd;
909 vm_object_t obj;
910
911 if (largepage) {
912 obj = phys_pager_allocate(NULL, &shm_largepage_phys_ops,
913 NULL, 0, VM_PROT_DEFAULT, 0, ucred);
914 } else {
915 obj = vm_pager_allocate(shmfd_pager_type, NULL, 0,
916 VM_PROT_DEFAULT, 0, ucred);
917 }
918 if (obj == NULL) {
919 /*
920 * swap reservation limits can cause object allocation
921 * to fail.
922 */
923 return (NULL);
924 }
925
926 shmfd = malloc(sizeof(*shmfd), M_SHMFD, M_WAITOK | M_ZERO);
927 shmfd->shm_uid = ucred->cr_uid;
928 shmfd->shm_gid = ucred->cr_gid;
929 shmfd->shm_mode = mode;
930 if (largepage) {
931 obj->un_pager.phys.phys_priv = shmfd;
932 shmfd->shm_lp_alloc_policy = SHM_LARGEPAGE_ALLOC_DEFAULT;
933 } else {
934 obj->un_pager.swp.swp_priv = shmfd;
935 }
936
937 VM_OBJECT_WLOCK(obj);
938 vm_object_set_flag(obj, OBJ_POSIXSHM);
939 VM_OBJECT_WUNLOCK(obj);
940 shmfd->shm_object = obj;
941 vfs_timestamp(&shmfd->shm_birthtime);
942 shmfd->shm_atime = shmfd->shm_mtime = shmfd->shm_ctime =
943 shmfd->shm_birthtime;
944 shmfd->shm_ino = alloc_unr64(&shm_ino_unr);
945 refcount_init(&shmfd->shm_refs, 1);
946 mtx_init(&shmfd->shm_mtx, "shmrl", NULL, MTX_DEF);
947 rangelock_init(&shmfd->shm_rl);
948 #ifdef MAC
949 mac_posixshm_init(shmfd);
950 mac_posixshm_create(ucred, shmfd);
951 #endif
952
953 return (shmfd);
954 }
955
956 struct shmfd *
957 shm_hold(struct shmfd *shmfd)
958 {
959
960 refcount_acquire(&shmfd->shm_refs);
961 return (shmfd);
962 }
963
964 void
965 shm_drop(struct shmfd *shmfd)
966 {
967 vm_object_t obj;
968
969 if (refcount_release(&shmfd->shm_refs)) {
970 #ifdef MAC
971 mac_posixshm_destroy(shmfd);
972 #endif
973 rangelock_destroy(&shmfd->shm_rl);
974 mtx_destroy(&shmfd->shm_mtx);
975 obj = shmfd->shm_object;
976 VM_OBJECT_WLOCK(obj);
977 if (shm_largepage(shmfd))
978 obj->un_pager.phys.phys_priv = NULL;
979 else
980 obj->un_pager.swp.swp_priv = NULL;
981 VM_OBJECT_WUNLOCK(obj);
982 vm_object_deallocate(obj);
983 free(shmfd, M_SHMFD);
984 }
985 }
986
987 /*
988 * Determine if the credentials have sufficient permissions for a
989 * specified combination of FREAD and FWRITE.
990 */
991 int
992 shm_access(struct shmfd *shmfd, struct ucred *ucred, int flags)
993 {
994 accmode_t accmode;
995 int error;
996
997 accmode = 0;
998 if (flags & FREAD)
999 accmode |= VREAD;
1000 if (flags & FWRITE)
1001 accmode |= VWRITE;
1002 mtx_lock(&shm_timestamp_lock);
1003 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1004 accmode, ucred);
1005 mtx_unlock(&shm_timestamp_lock);
1006 return (error);
1007 }
1008
1009 static void
1010 shm_init(void *arg)
1011 {
1012 char name[32];
1013 int i;
1014
1015 mtx_init(&shm_timestamp_lock, "shm timestamps", NULL, MTX_DEF);
1016 sx_init(&shm_dict_lock, "shm dictionary");
1017 shm_dictionary = hashinit(1024, M_SHMFD, &shm_hash);
1018 new_unrhdr64(&shm_ino_unr, 1);
1019 shm_dev_ino = devfs_alloc_cdp_inode();
1020 KASSERT(shm_dev_ino > 0, ("shm dev inode not initialized"));
1021 shmfd_pager_type = vm_pager_alloc_dyn_type(&shm_swap_pager_ops,
1022 OBJT_SWAP);
1023 MPASS(shmfd_pager_type != -1);
1024
1025 for (i = 1; i < MAXPAGESIZES; i++) {
1026 if (pagesizes[i] == 0)
1027 break;
1028 #define M (1024 * 1024)
1029 #define G (1024 * M)
1030 if (pagesizes[i] >= G)
1031 snprintf(name, sizeof(name), "%luG", pagesizes[i] / G);
1032 else if (pagesizes[i] >= M)
1033 snprintf(name, sizeof(name), "%luM", pagesizes[i] / M);
1034 else
1035 snprintf(name, sizeof(name), "%lu", pagesizes[i]);
1036 #undef G
1037 #undef M
1038 SYSCTL_ADD_ULONG(NULL, SYSCTL_STATIC_CHILDREN(_vm_largepages),
1039 OID_AUTO, name, CTLFLAG_RD, &count_largepages[i],
1040 "number of non-transient largepages allocated");
1041 }
1042 }
1043 SYSINIT(shm_init, SI_SUB_SYSV_SHM, SI_ORDER_ANY, shm_init, NULL);
1044
1045 /*
1046 * Remove all shared memory objects that belong to a prison.
1047 */
1048 void
1049 shm_remove_prison(struct prison *pr)
1050 {
1051 struct shm_mapping *shmm, *tshmm;
1052 u_long i;
1053
1054 sx_xlock(&shm_dict_lock);
1055 for (i = 0; i < shm_hash + 1; i++) {
1056 LIST_FOREACH_SAFE(shmm, &shm_dictionary[i], sm_link, tshmm) {
1057 if (shmm->sm_shmfd->shm_object->cred &&
1058 shmm->sm_shmfd->shm_object->cred->cr_prison == pr)
1059 shm_doremove(shmm);
1060 }
1061 }
1062 sx_xunlock(&shm_dict_lock);
1063 }
1064
1065 /*
1066 * Dictionary management. We maintain an in-kernel dictionary to map
1067 * paths to shmfd objects. We use the FNV hash on the path to store
1068 * the mappings in a hash table.
1069 */
1070 static struct shmfd *
1071 shm_lookup(char *path, Fnv32_t fnv)
1072 {
1073 struct shm_mapping *map;
1074
1075 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1076 if (map->sm_fnv != fnv)
1077 continue;
1078 if (strcmp(map->sm_path, path) == 0)
1079 return (map->sm_shmfd);
1080 }
1081
1082 return (NULL);
1083 }
1084
1085 static void
1086 shm_insert(char *path, Fnv32_t fnv, struct shmfd *shmfd)
1087 {
1088 struct shm_mapping *map;
1089
1090 map = malloc(sizeof(struct shm_mapping), M_SHMFD, M_WAITOK);
1091 map->sm_path = path;
1092 map->sm_fnv = fnv;
1093 map->sm_shmfd = shm_hold(shmfd);
1094 shmfd->shm_path = path;
1095 LIST_INSERT_HEAD(SHM_HASH(fnv), map, sm_link);
1096 }
1097
1098 static int
1099 shm_remove(char *path, Fnv32_t fnv, struct ucred *ucred)
1100 {
1101 struct shm_mapping *map;
1102 int error;
1103
1104 LIST_FOREACH(map, SHM_HASH(fnv), sm_link) {
1105 if (map->sm_fnv != fnv)
1106 continue;
1107 if (strcmp(map->sm_path, path) == 0) {
1108 #ifdef MAC
1109 error = mac_posixshm_check_unlink(ucred, map->sm_shmfd);
1110 if (error)
1111 return (error);
1112 #endif
1113 error = shm_access(map->sm_shmfd, ucred,
1114 FREAD | FWRITE);
1115 if (error)
1116 return (error);
1117 shm_doremove(map);
1118 return (0);
1119 }
1120 }
1121
1122 return (ENOENT);
1123 }
1124
1125 static void
1126 shm_doremove(struct shm_mapping *map)
1127 {
1128 map->sm_shmfd->shm_path = NULL;
1129 LIST_REMOVE(map, sm_link);
1130 shm_drop(map->sm_shmfd);
1131 free(map->sm_path, M_SHMFD);
1132 free(map, M_SHMFD);
1133 }
1134
1135 int
1136 kern_shm_open2(struct thread *td, const char *userpath, int flags, mode_t mode,
1137 int shmflags, struct filecaps *fcaps, const char *name __unused)
1138 {
1139 struct pwddesc *pdp;
1140 struct shmfd *shmfd;
1141 struct file *fp;
1142 char *path;
1143 void *rl_cookie;
1144 Fnv32_t fnv;
1145 mode_t cmode;
1146 int error, fd, initial_seals;
1147 bool largepage;
1148
1149 if ((shmflags & ~(SHM_ALLOW_SEALING | SHM_GROW_ON_WRITE |
1150 SHM_LARGEPAGE)) != 0)
1151 return (EINVAL);
1152
1153 initial_seals = F_SEAL_SEAL;
1154 if ((shmflags & SHM_ALLOW_SEALING) != 0)
1155 initial_seals &= ~F_SEAL_SEAL;
1156
1157 AUDIT_ARG_FFLAGS(flags);
1158 AUDIT_ARG_MODE(mode);
1159
1160 if ((flags & O_ACCMODE) != O_RDONLY && (flags & O_ACCMODE) != O_RDWR)
1161 return (EINVAL);
1162
1163 if ((flags & ~(O_ACCMODE | O_CREAT | O_EXCL | O_TRUNC | O_CLOEXEC)) != 0)
1164 return (EINVAL);
1165
1166 largepage = (shmflags & SHM_LARGEPAGE) != 0;
1167 if (largepage && !PMAP_HAS_LARGEPAGES)
1168 return (ENOTTY);
1169
1170 /*
1171 * Currently only F_SEAL_SEAL may be set when creating or opening shmfd.
1172 * If the decision is made later to allow additional seals, care must be
1173 * taken below to ensure that the seals are properly set if the shmfd
1174 * already existed -- this currently assumes that only F_SEAL_SEAL can
1175 * be set and doesn't take further precautions to ensure the validity of
1176 * the seals being added with respect to current mappings.
1177 */
1178 if ((initial_seals & ~F_SEAL_SEAL) != 0)
1179 return (EINVAL);
1180
1181 if (userpath != SHM_ANON) {
1182 error = shm_copyin_path(td, userpath, &path);
1183 if (error != 0)
1184 return (error);
1185
1186 #ifdef CAPABILITY_MODE
1187 /*
1188 * shm_open(2) is only allowed for anonymous objects.
1189 */
1190 if (CAP_TRACING(td))
1191 ktrcapfail(CAPFAIL_NAMEI, path);
1192 if (IN_CAPABILITY_MODE(td)) {
1193 error = ECAPMODE;
1194 goto outnofp;
1195 }
1196 #endif
1197
1198 AUDIT_ARG_UPATH1_CANON(path);
1199 } else {
1200 path = NULL;
1201 }
1202
1203 pdp = td->td_proc->p_pd;
1204 cmode = (mode & ~pdp->pd_cmask) & ACCESSPERMS;
1205
1206 /*
1207 * shm_open(2) created shm should always have O_CLOEXEC set, as mandated
1208 * by POSIX. We allow it to be unset here so that an in-kernel
1209 * interface may be written as a thin layer around shm, optionally not
1210 * setting CLOEXEC. For shm_open(2), O_CLOEXEC is set unconditionally
1211 * in sys_shm_open() to keep this implementation compliant.
1212 */
1213 error = falloc_caps(td, &fp, &fd, flags & O_CLOEXEC, fcaps);
1214 if (error != 0)
1215 goto outnofp;
1216
1217 /* A SHM_ANON path pointer creates an anonymous object. */
1218 if (userpath == SHM_ANON) {
1219 /* A read-only anonymous object is pointless. */
1220 if ((flags & O_ACCMODE) == O_RDONLY) {
1221 error = EINVAL;
1222 goto out;
1223 }
1224 shmfd = shm_alloc(td->td_ucred, cmode, largepage);
1225 if (shmfd == NULL) {
1226 error = ENOMEM;
1227 goto out;
1228 }
1229 shmfd->shm_seals = initial_seals;
1230 shmfd->shm_flags = shmflags;
1231 } else {
1232 fnv = fnv_32_str(path, FNV1_32_INIT);
1233 sx_xlock(&shm_dict_lock);
1234 shmfd = shm_lookup(path, fnv);
1235 if (shmfd == NULL) {
1236 /* Object does not yet exist, create it if requested. */
1237 if (flags & O_CREAT) {
1238 #ifdef MAC
1239 error = mac_posixshm_check_create(td->td_ucred,
1240 path);
1241 if (error == 0) {
1242 #endif
1243 shmfd = shm_alloc(td->td_ucred, cmode,
1244 largepage);
1245 if (shmfd == NULL) {
1246 error = ENOMEM;
1247 } else {
1248 shmfd->shm_seals =
1249 initial_seals;
1250 shmfd->shm_flags = shmflags;
1251 shm_insert(path, fnv, shmfd);
1252 path = NULL;
1253 }
1254 #ifdef MAC
1255 }
1256 #endif
1257 } else {
1258 error = ENOENT;
1259 }
1260 } else {
1261 /*
1262 * Object already exists, obtain a new reference if
1263 * requested and permitted.
1264 */
1265 rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);
1266
1267 /*
1268 * kern_shm_open() likely shouldn't ever error out on
1269 * trying to set a seal that already exists, unlike
1270 * F_ADD_SEALS. This would break terribly as
1271 * shm_open(2) actually sets F_SEAL_SEAL to maintain
1272 * historical behavior where the underlying file could
1273 * not be sealed.
1274 */
1275 initial_seals &= ~shmfd->shm_seals;
1276
1277 /*
1278 * initial_seals can't set additional seals if
1279 * F_SEAL_SEAL has already been set. If F_SEAL_SEAL is
1280 * set, then we've already removed that one from
1281 * initial_seals. This is currently redundant as we
1282 * only allow setting F_SEAL_SEAL at creation time, but
1283 * it's cheap to check and decreases the effort required
1284 * to allow additional seals.
1285 */
1286 if ((shmfd->shm_seals & F_SEAL_SEAL) != 0 &&
1287 initial_seals != 0)
1288 error = EPERM;
1289 else if ((flags & (O_CREAT | O_EXCL)) ==
1290 (O_CREAT | O_EXCL))
1291 error = EEXIST;
1292 else if (shmflags != 0 && shmflags != shmfd->shm_flags)
1293 error = EINVAL;
1294 else {
1295 #ifdef MAC
1296 error = mac_posixshm_check_open(td->td_ucred,
1297 shmfd, FFLAGS(flags & O_ACCMODE));
1298 if (error == 0)
1299 #endif
1300 error = shm_access(shmfd, td->td_ucred,
1301 FFLAGS(flags & O_ACCMODE));
1302 }
1303
1304 /*
1305 * Truncate the file back to zero length if
1306 * O_TRUNC was specified and the object was
1307 * opened with read/write.
1308 */
1309 if (error == 0 &&
1310 (flags & (O_ACCMODE | O_TRUNC)) ==
1311 (O_RDWR | O_TRUNC)) {
1312 VM_OBJECT_WLOCK(shmfd->shm_object);
1313 #ifdef MAC
1314 error = mac_posixshm_check_truncate(
1315 td->td_ucred, fp->f_cred, shmfd);
1316 if (error == 0)
1317 #endif
1318 error = shm_dotruncate_locked(shmfd, 0,
1319 rl_cookie);
1320 VM_OBJECT_WUNLOCK(shmfd->shm_object);
1321 }
1322 if (error == 0) {
1323 /*
1324 * Currently we only allow F_SEAL_SEAL to be
1325 * set initially. As noted above, this would
1326 * need to be reworked should that change.
1327 */
1328 shmfd->shm_seals |= initial_seals;
1329 shm_hold(shmfd);
1330 }
1331 shm_rangelock_unlock(shmfd, rl_cookie);
1332 }
1333 sx_xunlock(&shm_dict_lock);
1334
1335 if (error != 0)
1336 goto out;
1337 }
1338
1339 finit(fp, FFLAGS(flags & O_ACCMODE), DTYPE_SHM, shmfd, &shm_ops);
1340
1341 td->td_retval[0] = fd;
1342 fdrop(fp, td);
1343 free(path, M_SHMFD);
1344
1345 return (0);
1346
1347 out:
1348 fdclose(td, fp, fd);
1349 fdrop(fp, td);
1350 outnofp:
1351 free(path, M_SHMFD);
1352
1353 return (error);
1354 }
1355
1356 /* System calls. */
1357 #ifdef COMPAT_FREEBSD12
1358 int
1359 freebsd12_shm_open(struct thread *td, struct freebsd12_shm_open_args *uap)
1360 {
1361
1362 return (kern_shm_open(td, uap->path, uap->flags | O_CLOEXEC,
1363 uap->mode, NULL));
1364 }
1365 #endif
1366
1367 int
1368 sys_shm_unlink(struct thread *td, struct shm_unlink_args *uap)
1369 {
1370 char *path;
1371 Fnv32_t fnv;
1372 int error;
1373
1374 error = shm_copyin_path(td, uap->path, &path);
1375 if (error != 0)
1376 return (error);
1377
1378 AUDIT_ARG_UPATH1_CANON(path);
1379 fnv = fnv_32_str(path, FNV1_32_INIT);
1380 sx_xlock(&shm_dict_lock);
1381 error = shm_remove(path, fnv, td->td_ucred);
1382 sx_xunlock(&shm_dict_lock);
1383 free(path, M_SHMFD);
1384
1385 return (error);
1386 }
1387
1388 int
1389 sys_shm_rename(struct thread *td, struct shm_rename_args *uap)
1390 {
1391 char *path_from = NULL, *path_to = NULL;
1392 Fnv32_t fnv_from, fnv_to;
1393 struct shmfd *fd_from;
1394 struct shmfd *fd_to;
1395 int error;
1396 int flags;
1397
1398 flags = uap->flags;
1399 AUDIT_ARG_FFLAGS(flags);
1400
1401 /*
1402 * Make sure the user passed only valid flags.
1403 * If you add a new flag, please add a new term here.
1404 */
1405 if ((flags & ~(
1406 SHM_RENAME_NOREPLACE |
1407 SHM_RENAME_EXCHANGE
1408 )) != 0) {
1409 error = EINVAL;
1410 goto out;
1411 }
1412
1413 /*
1414 * EXCHANGE and NOREPLACE don't quite make sense together. Let's
1415 * force the user to choose one or the other.
1416 */
1417 if ((flags & SHM_RENAME_NOREPLACE) != 0 &&
1418 (flags & SHM_RENAME_EXCHANGE) != 0) {
1419 error = EINVAL;
1420 goto out;
1421 }
1422
1423 /* Renaming to or from anonymous makes no sense */
1424 if (uap->path_from == SHM_ANON || uap->path_to == SHM_ANON) {
1425 error = EINVAL;
1426 goto out;
1427 }
1428
1429 error = shm_copyin_path(td, uap->path_from, &path_from);
1430 if (error != 0)
1431 goto out;
1432
1433 error = shm_copyin_path(td, uap->path_to, &path_to);
1434 if (error != 0)
1435 goto out;
1436
1437 AUDIT_ARG_UPATH1_CANON(path_from);
1438 AUDIT_ARG_UPATH2_CANON(path_to);
1439
1440 /* Rename with from/to equal is a no-op */
1441 if (strcmp(path_from, path_to) == 0)
1442 goto out;
1443
1444 fnv_from = fnv_32_str(path_from, FNV1_32_INIT);
1445 fnv_to = fnv_32_str(path_to, FNV1_32_INIT);
1446
1447 sx_xlock(&shm_dict_lock);
1448
1449 fd_from = shm_lookup(path_from, fnv_from);
1450 if (fd_from == NULL) {
1451 error = ENOENT;
1452 goto out_locked;
1453 }
1454
1455 fd_to = shm_lookup(path_to, fnv_to);
1456 if ((flags & SHM_RENAME_NOREPLACE) != 0 && fd_to != NULL) {
1457 error = EEXIST;
1458 goto out_locked;
1459 }
1460
1461 /*
1462 * Unconditionally prevents shm_remove from invalidating the 'from'
1463 * shm's state.
1464 */
1465 shm_hold(fd_from);
1466 error = shm_remove(path_from, fnv_from, td->td_ucred);
1467
1468 /*
1469 * One of our assumptions failed if we got ENOENT (e.g., the locking
1470 * didn't protect us).
1471 */
1472 KASSERT(error != ENOENT, ("Our shm disappeared during shm_rename: %s",
1473 path_from));
1474 if (error != 0) {
1475 shm_drop(fd_from);
1476 goto out_locked;
1477 }
1478
1479 /*
1480 * If we are exchanging, we need to ensure the shm_remove below
1481 * doesn't invalidate the dest shm's state.
1482 */
1483 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL)
1484 shm_hold(fd_to);
1485
1486 /*
1487 * NOTE: if path_to is not already in the hash, c'est la vie;
1488 * it simply means we have nothing already at path_to to unlink.
1489 * That is the ENOENT case.
1490 *
1491 * If we somehow don't have access to unlink this guy, but
1492 * did for the shm at path_from, then relink the shm to path_from
1493 * and abort with EACCES.
1494 *
1495 * All other errors: that is weird; let's relink and abort the
1496 * operation.
1497 */
1498 error = shm_remove(path_to, fnv_to, td->td_ucred);
1499 if (error != 0 && error != ENOENT) {
1500 shm_insert(path_from, fnv_from, fd_from);
1501 shm_drop(fd_from);
1502 /* Don't free path_from now, since the hash references it */
1503 path_from = NULL;
1504 goto out_locked;
1505 }
1506
1507 error = 0;
1508
1509 shm_insert(path_to, fnv_to, fd_from);
1510
1511 /* Don't free path_to now, since the hash references it */
1512 path_to = NULL;
1513
1514 /* We kept a ref when we removed, and incremented again in insert */
1515 shm_drop(fd_from);
1516 KASSERT(fd_from->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1517 fd_from->shm_refs));
1518
1519 if ((flags & SHM_RENAME_EXCHANGE) != 0 && fd_to != NULL) {
1520 shm_insert(path_from, fnv_from, fd_to);
1521 path_from = NULL;
1522 shm_drop(fd_to);
1523 KASSERT(fd_to->shm_refs > 0, ("Expected >0 refs; got: %d\n",
1524 fd_to->shm_refs));
1525 }
1526
1527 out_locked:
1528 sx_xunlock(&shm_dict_lock);
1529
1530 out:
1531 free(path_from, M_SHMFD);
1532 free(path_to, M_SHMFD);
1533 return (error);
1534 }
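
/*
 * An illustrative userland sketch of the atomic-replace pattern this
 * syscall enables; the names are hypothetical (see shm_rename(2)):
 *
 *	int fd = shm_open("/db.tmp", O_RDWR | O_CREAT | O_EXCL, 0600);
 *	// ... populate the new object via ftruncate/mmap ...
 *	shm_rename("/db.tmp", "/db", 0);	// atomically replaces "/db"
 *
 * With SHM_RENAME_EXCHANGE the two names swap their objects instead;
 * with SHM_RENAME_NOREPLACE the call fails with EEXIST if "/db" already
 * exists.
 */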
1535
1536 static int
1537 shm_mmap_large(struct shmfd *shmfd, vm_map_t map, vm_offset_t *addr,
1538 vm_size_t size, vm_prot_t prot, vm_prot_t max_prot, int flags,
1539 vm_ooffset_t foff, struct thread *td)
1540 {
1541 struct vmspace *vms;
1542 vm_map_entry_t next_entry, prev_entry;
1543 vm_offset_t align, mask, maxaddr;
1544 int docow, error, rv, try;
1545 bool curmap;
1546
1547 if (shmfd->shm_lp_psind == 0)
1548 return (EINVAL);
1549
1550 /* MAP_PRIVATE is disabled */
1551 if ((flags & ~(MAP_SHARED | MAP_FIXED | MAP_EXCL |
1552 MAP_NOCORE | MAP_32BIT | MAP_ALIGNMENT_MASK)) != 0)
1553 return (EINVAL);
1554
1555 vms = td->td_proc->p_vmspace;
1556 curmap = map == &vms->vm_map;
1557 if (curmap) {
1558 error = kern_mmap_racct_check(td, map, size);
1559 if (error != 0)
1560 return (error);
1561 }
1562
1563 docow = shmfd->shm_lp_psind << MAP_SPLIT_BOUNDARY_SHIFT;
1564 docow |= MAP_INHERIT_SHARE;
1565 if ((flags & MAP_NOCORE) != 0)
1566 docow |= MAP_DISABLE_COREDUMP;
1567
1568 mask = pagesizes[shmfd->shm_lp_psind] - 1;
1569 if ((foff & mask) != 0)
1570 return (EINVAL);
1571 maxaddr = vm_map_max(map);
1572 if ((flags & MAP_32BIT) != 0 && maxaddr > MAP_32BIT_MAX_ADDR)
1573 maxaddr = MAP_32BIT_MAX_ADDR;
1574 if (size == 0 || (size & mask) != 0 ||
1575 (*addr != 0 && ((*addr & mask) != 0 ||
1576 *addr + size < *addr || *addr + size > maxaddr)))
1577 return (EINVAL);
1578
1579 align = flags & MAP_ALIGNMENT_MASK;
1580 if (align == 0) {
1581 align = pagesizes[shmfd->shm_lp_psind];
1582 } else if (align == MAP_ALIGNED_SUPER) {
1583 /*
1584 * MAP_ALIGNED_SUPER is only supported on superpage sizes,
1585 * i.e., [1, VM_NRESERVLEVEL]. shmfd->shm_lp_psind < 1 is
1586 * handled above.
1587 */
1588 if (
1589 #if VM_NRESERVLEVEL > 0
1590 shmfd->shm_lp_psind > VM_NRESERVLEVEL
1591 #else
1592 shmfd->shm_lp_psind > 1
1593 #endif
1594 )
1595 return (EINVAL);
1596 align = pagesizes[shmfd->shm_lp_psind];
1597 } else {
1598 align >>= MAP_ALIGNMENT_SHIFT;
1599 align = 1ULL << align;
1600 /* Also handles overflow. */
1601 if (align < pagesizes[shmfd->shm_lp_psind])
1602 return (EINVAL);
1603 }
1604
1605 vm_map_lock(map);
1606 if ((flags & MAP_FIXED) == 0) {
1607 try = 1;
1608 if (curmap && (*addr == 0 ||
1609 (*addr >= round_page((vm_offset_t)vms->vm_taddr) &&
1610 *addr < round_page((vm_offset_t)vms->vm_daddr +
1611 lim_max(td, RLIMIT_DATA))))) {
1612 *addr = roundup2((vm_offset_t)vms->vm_daddr +
1613 lim_max(td, RLIMIT_DATA),
1614 pagesizes[shmfd->shm_lp_psind]);
1615 }
1616 again:
1617 rv = vm_map_find_aligned(map, addr, size, maxaddr, align);
1618 if (rv != KERN_SUCCESS) {
1619 if (try == 1) {
1620 try = 2;
1621 *addr = vm_map_min(map);
1622 if ((*addr & mask) != 0)
1623 *addr = (*addr + mask) & mask;
1624 goto again;
1625 }
1626 goto fail1;
1627 }
1628 } else if ((flags & MAP_EXCL) == 0) {
1629 rv = vm_map_delete(map, *addr, *addr + size);
1630 if (rv != KERN_SUCCESS)
1631 goto fail1;
1632 } else {
1633 error = ENOSPC;
1634 if (vm_map_lookup_entry(map, *addr, &prev_entry))
1635 goto fail;
1636 next_entry = vm_map_entry_succ(prev_entry);
1637 if (next_entry->start < *addr + size)
1638 goto fail;
1639 }
1640
1641 rv = vm_map_insert(map, shmfd->shm_object, foff, *addr, *addr + size,
1642 prot, max_prot, docow);
1643 fail1:
1644 error = vm_mmap_to_errno(rv);
1645 fail:
1646 vm_map_unlock(map);
1647 return (error);
1648 }
1649
1650 static int
1651 shm_mmap(struct file *fp, vm_map_t map, vm_offset_t *addr, vm_size_t objsize,
1652 vm_prot_t prot, vm_prot_t max_maxprot, int flags,
1653 vm_ooffset_t foff, struct thread *td)
1654 {
1655 struct shmfd *shmfd;
1656 vm_prot_t maxprot;
1657 int error;
1658 bool writecnt;
1659 void *rl_cookie;
1660
1661 shmfd = fp->f_data;
1662 maxprot = VM_PROT_NONE;
1663
1664 rl_cookie = shm_rangelock_rlock(shmfd, 0, objsize);
1665 /* FREAD should always be set. */
1666 if ((fp->f_flag & FREAD) != 0)
1667 maxprot |= VM_PROT_EXECUTE | VM_PROT_READ;
1668
1669 /*
1670 * If FWRITE's set, we can allow VM_PROT_WRITE unless it's a shared
1671 * mapping with a write seal applied. Private mappings are always
1672 * writeable.
1673 */
1674 if ((flags & MAP_SHARED) == 0) {
1675 if ((max_maxprot & VM_PROT_WRITE) != 0)
1676 maxprot |= VM_PROT_WRITE;
1677 writecnt = false;
1678 } else {
1679 if ((fp->f_flag & FWRITE) != 0 &&
1680 (shmfd->shm_seals & F_SEAL_WRITE) == 0)
1681 maxprot |= VM_PROT_WRITE;
1682
1683 /*
1684 * Any mappings from a writable descriptor may be upgraded to
1685 * VM_PROT_WRITE with mprotect(2), unless a write-seal was
1686 * applied between the open and subsequent mmap(2). We want to
1687 * reject application of a write seal as long as any such
1688 * mapping exists so that the seal cannot be trivially bypassed.
1689 */
1690 writecnt = (maxprot & VM_PROT_WRITE) != 0;
1691 if (!writecnt && (prot & VM_PROT_WRITE) != 0) {
1692 error = EACCES;
1693 goto out;
1694 }
1695 }
1696 maxprot &= max_maxprot;
1697
1698 /* See comment in vn_mmap(). */
1699 if (
1700 #ifdef _LP64
1701 objsize > OFF_MAX ||
1702 #endif
1703 foff > OFF_MAX - objsize) {
1704 error = EINVAL;
1705 goto out;
1706 }
1707
1708 #ifdef MAC
1709 error = mac_posixshm_check_mmap(td->td_ucred, shmfd, prot, flags);
1710 if (error != 0)
1711 goto out;
1712 #endif
1713
1714 mtx_lock(&shm_timestamp_lock);
1715 vfs_timestamp(&shmfd->shm_atime);
1716 mtx_unlock(&shm_timestamp_lock);
1717 vm_object_reference(shmfd->shm_object);
1718
1719 if (shm_largepage(shmfd)) {
1720 writecnt = false;
1721 error = shm_mmap_large(shmfd, map, addr, objsize, prot,
1722 maxprot, flags, foff, td);
1723 } else {
1724 if (writecnt) {
1725 vm_pager_update_writecount(shmfd->shm_object, 0,
1726 objsize);
1727 }
1728 error = vm_mmap_object(map, addr, objsize, prot, maxprot, flags,
1729 shmfd->shm_object, foff, writecnt, td);
1730 }
1731 if (error != 0) {
1732 if (writecnt)
1733 vm_pager_release_writecount(shmfd->shm_object, 0,
1734 objsize);
1735 vm_object_deallocate(shmfd->shm_object);
1736 }
1737 out:
1738 shm_rangelock_unlock(shmfd, rl_cookie);
1739 return (error);
1740 }
1741
1742 static int
1743 shm_chmod(struct file *fp, mode_t mode, struct ucred *active_cred,
1744 struct thread *td)
1745 {
1746 struct shmfd *shmfd;
1747 int error;
1748
1749 error = 0;
1750 shmfd = fp->f_data;
1751 mtx_lock(&shm_timestamp_lock);
1752 /*
1753 * SUSv4 says that x bits of permission need not be affected.
1754 * Be consistent with our shm_open there.
1755 */
1756 #ifdef MAC
1757 error = mac_posixshm_check_setmode(active_cred, shmfd, mode);
1758 if (error != 0)
1759 goto out;
1760 #endif
1761 error = vaccess(VREG, shmfd->shm_mode, shmfd->shm_uid, shmfd->shm_gid,
1762 VADMIN, active_cred);
1763 if (error != 0)
1764 goto out;
1765 shmfd->shm_mode = mode & ACCESSPERMS;
1766 out:
1767 mtx_unlock(&shm_timestamp_lock);
1768 return (error);
1769 }
1770
1771 static int
1772 shm_chown(struct file *fp, uid_t uid, gid_t gid, struct ucred *active_cred,
1773 struct thread *td)
1774 {
1775 struct shmfd *shmfd;
1776 int error;
1777
1778 error = 0;
1779 shmfd = fp->f_data;
1780 mtx_lock(&shm_timestamp_lock);
1781 #ifdef MAC
1782 error = mac_posixshm_check_setowner(active_cred, shmfd, uid, gid);
1783 if (error != 0)
1784 goto out;
1785 #endif
1786 if (uid == (uid_t)-1)
1787 uid = shmfd->shm_uid;
1788 if (gid == (gid_t)-1)
1789 gid = shmfd->shm_gid;
1790 if (((uid != shmfd->shm_uid && uid != active_cred->cr_uid) ||
1791 (gid != shmfd->shm_gid && !groupmember(gid, active_cred))) &&
1792 (error = priv_check_cred(active_cred, PRIV_VFS_CHOWN)))
1793 goto out;
1794 shmfd->shm_uid = uid;
1795 shmfd->shm_gid = gid;
1796 out:
1797 mtx_unlock(&shm_timestamp_lock);
1798 return (error);
1799 }
1800
1801 /*
1802 * Helper routines to allow the backing object of a shared memory file
1803 * descriptor to be mapped in the kernel.
1804 */
1805 int
1806 shm_map(struct file *fp, size_t size, off_t offset, void **memp)
1807 {
1808 struct shmfd *shmfd;
1809 vm_offset_t kva, ofs;
1810 vm_object_t obj;
1811 int rv;
1812
1813 if (fp->f_type != DTYPE_SHM)
1814 return (EINVAL);
1815 shmfd = fp->f_data;
1816 obj = shmfd->shm_object;
1817 VM_OBJECT_WLOCK(obj);
1818 /*
1819 * XXXRW: This validation is probably insufficient, and subject to
1820 * sign errors. It should be fixed.
1821 */
1822 if (offset >= shmfd->shm_size ||
1823 offset + size > round_page(shmfd->shm_size)) {
1824 VM_OBJECT_WUNLOCK(obj);
1825 return (EINVAL);
1826 }
1827
1828 shmfd->shm_kmappings++;
1829 vm_object_reference_locked(obj);
1830 VM_OBJECT_WUNLOCK(obj);
1831
1832 /* Map the object into the kernel_map and wire it. */
1833 kva = vm_map_min(kernel_map);
1834 ofs = offset & PAGE_MASK;
1835 offset = trunc_page(offset);
1836 size = round_page(size + ofs);
1837 rv = vm_map_find(kernel_map, obj, offset, &kva, size, 0,
1838 VMFS_OPTIMAL_SPACE, VM_PROT_READ | VM_PROT_WRITE,
1839 VM_PROT_READ | VM_PROT_WRITE, 0);
1840 if (rv == KERN_SUCCESS) {
1841 rv = vm_map_wire(kernel_map, kva, kva + size,
1842 VM_MAP_WIRE_SYSTEM | VM_MAP_WIRE_NOHOLES);
1843 if (rv == KERN_SUCCESS) {
1844 *memp = (void *)(kva + ofs);
1845 return (0);
1846 }
1847 vm_map_remove(kernel_map, kva, kva + size);
1848 } else
1849 vm_object_deallocate(obj);
1850
1851 /* On failure, drop our mapping reference. */
1852 VM_OBJECT_WLOCK(obj);
1853 shmfd->shm_kmappings--;
1854 VM_OBJECT_WUNLOCK(obj);
1855
1856 return (vm_mmap_to_errno(rv));
1857 }
1858
1859 /*
1860 * We require the caller to unmap the entire entry. This allows us to
1861 * safely decrement shm_kmappings when a mapping is removed.
1862 */
1863 int
1864 shm_unmap(struct file *fp, void *mem, size_t size)
1865 {
1866 struct shmfd *shmfd;
1867 vm_map_entry_t entry;
1868 vm_offset_t kva, ofs;
1869 vm_object_t obj;
1870 vm_pindex_t pindex;
1871 vm_prot_t prot;
1872 boolean_t wired;
1873 vm_map_t map;
1874 int rv;
1875
1876 if (fp->f_type != DTYPE_SHM)
1877 return (EINVAL);
1878 shmfd = fp->f_data;
1879 kva = (vm_offset_t)mem;
1880 ofs = kva & PAGE_MASK;
1881 kva = trunc_page(kva);
1882 size = round_page(size + ofs);
1883 map = kernel_map;
1884 rv = vm_map_lookup(&map, kva, VM_PROT_READ | VM_PROT_WRITE, &entry,
1885 &obj, &pindex, &prot, &wired);
1886 if (rv != KERN_SUCCESS)
1887 return (EINVAL);
1888 if (entry->start != kva || entry->end != kva + size) {
1889 vm_map_lookup_done(map, entry);
1890 return (EINVAL);
1891 }
1892 vm_map_lookup_done(map, entry);
1893 if (obj != shmfd->shm_object)
1894 return (EINVAL);
1895 vm_map_remove(map, kva, kva + size);
1896 VM_OBJECT_WLOCK(obj);
1897 KASSERT(shmfd->shm_kmappings > 0, ("shm_unmap: object not mapped"));
1898 shmfd->shm_kmappings--;
1899 VM_OBJECT_WUNLOCK(obj);
1900 return (0);
1901 }
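
/*
 * A minimal sketch of how an in-kernel consumer might pair these helpers,
 * assuming it already holds a struct file of type DTYPE_SHM.  Note that
 * shm_unmap() must be passed exactly the address and size the matching
 * shm_map() call produced, since partial unmaps are rejected above:
 *
 *	void *mem;
 *	int error;
 *
 *	error = shm_map(fp, len, off, &mem);
 *	if (error == 0) {
 *		... access len bytes at mem ...
 *		error = shm_unmap(fp, mem, len);
 *	}
 */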

static int
shm_fill_kinfo_locked(struct shmfd *shmfd, struct kinfo_file *kif, bool list)
{
	const char *path, *pr_path;
	size_t pr_pathlen;
	bool visible;

	sx_assert(&shm_dict_lock, SA_LOCKED);
	kif->kf_type = KF_TYPE_SHM;
	kif->kf_un.kf_file.kf_file_mode = S_IFREG | shmfd->shm_mode;
	kif->kf_un.kf_file.kf_file_size = shmfd->shm_size;
	if (shmfd->shm_path != NULL) {
		path = shmfd->shm_path;
		pr_path = curthread->td_ucred->cr_prison->pr_path;
		if (strcmp(pr_path, "/") != 0) {
			/* Return the jail-rooted pathname. */
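			/*
			 * For example (hypothetical paths): with pr_path
			 * "/jails/j1" and shm_path "/jails/j1/foo", the
			 * reported path becomes "/foo".  An object rooted
			 * outside the jail is skipped entirely when
			 * listing, and otherwise reported with its full
			 * path for a single-descriptor query.
			 */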
			pr_pathlen = strlen(pr_path);
			visible = strncmp(path, pr_path, pr_pathlen) == 0 &&
			    path[pr_pathlen] == '/';
			if (list && !visible)
				return (EPERM);
			if (visible)
				path += pr_pathlen;
		}
		strlcpy(kif->kf_path, path, sizeof(kif->kf_path));
	}
	return (0);
}

static int
shm_fill_kinfo(struct file *fp, struct kinfo_file *kif,
    struct filedesc *fdp __unused)
{
	int res;

	sx_slock(&shm_dict_lock);
	res = shm_fill_kinfo_locked(fp->f_data, kif, false);
	sx_sunlock(&shm_dict_lock);
	return (res);
}

static int
shm_add_seals(struct file *fp, int seals)
{
	struct shmfd *shmfd;
	void *rl_cookie;
	vm_ooffset_t writemappings;
	int error, nseals;

	error = 0;
	shmfd = fp->f_data;
	rl_cookie = shm_rangelock_wlock(shmfd, 0, OFF_MAX);

	/* Even already-set seals should result in EPERM. */
	if ((shmfd->shm_seals & F_SEAL_SEAL) != 0) {
		error = EPERM;
		goto out;
	}
	nseals = seals & ~shmfd->shm_seals;
	if ((nseals & F_SEAL_WRITE) != 0) {
		if (shm_largepage(shmfd)) {
			error = ENOTSUP;
			goto out;
		}

		/*
		 * The rangelock above prevents writable mappings from being
		 * added after we've started applying seals.  The RLOCK here
		 * is to avoid torn reads on ILP32 arches as unmapping/reducing
		 * writemappings will be done without a rangelock.
		 */
		VM_OBJECT_RLOCK(shmfd->shm_object);
		writemappings = shmfd->shm_object->un_pager.swp.writemappings;
		VM_OBJECT_RUNLOCK(shmfd->shm_object);
		/* kmappings are also writable */
		if (writemappings > 0) {
			error = EBUSY;
			goto out;
		}
	}
	shmfd->shm_seals |= nseals;
out:
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}
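
/*
 * Seals are applied from userspace through fcntl(2); a minimal sketch
 * (illustrative, error handling elided):
 *
 *	int fd = shm_open(SHM_ANON, O_RDWR, 0600);
 *
 *	ftruncate(fd, getpagesize());
 *	fcntl(fd, F_ADD_SEALS, F_SEAL_GROW | F_SEAL_SHRINK | F_SEAL_SEAL);
 *
 * After F_SEAL_SEAL is set, any further F_ADD_SEALS request fails with
 * EPERM, per the check above; F_SEAL_WRITE additionally requires that no
 * writable mappings (including kernel mappings) exist.
 */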

static int
shm_get_seals(struct file *fp, int *seals)
{
	struct shmfd *shmfd;

	shmfd = fp->f_data;
	*seals = shmfd->shm_seals;
	return (0);
}

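/*
 * Worked example for the page arithmetic in shm_deallocate() below,
 * assuming a 4K page size: for *offset = 5000 and *length = 10000,
 * startofs = 904, endofs = 2712, pistart = 1, pi = 2, and piend = 3.
 * The tail of page 1 (bytes 904-4095) and the head of page 3 (bytes
 * 0-2711) are zeroed via shm_partial_page_invalidate(), while page 2
 * is removed outright by vm_object_page_remove().
 */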
static int
shm_deallocate(struct shmfd *shmfd, off_t *offset, off_t *length, int flags)
{
	vm_object_t object;
	vm_pindex_t pistart, pi, piend;
	vm_ooffset_t off, len;
	int startofs, endofs, end;
	int error;

	off = *offset;
	len = *length;
	KASSERT(off + len <= (vm_ooffset_t)OFF_MAX, ("off + len overflows"));
	if (off + len > shmfd->shm_size)
		len = shmfd->shm_size - off;
	object = shmfd->shm_object;
	startofs = off & PAGE_MASK;
	endofs = (off + len) & PAGE_MASK;
	pistart = OFF_TO_IDX(off);
	piend = OFF_TO_IDX(off + len);
	pi = OFF_TO_IDX(off + PAGE_MASK);
	error = 0;

	/* Handle the case when the offset is at or beyond the shm size. */
	if ((off_t)len <= 0) {
		*length = 0;
		return (0);
	}

	VM_OBJECT_WLOCK(object);

	if (startofs != 0) {
		end = pistart != piend ? PAGE_SIZE : endofs;
		error = shm_partial_page_invalidate(object, pistart, startofs,
		    end);
		if (error)
			goto out;
		off += end - startofs;
		len -= end - startofs;
	}

	if (pi < piend) {
		vm_object_page_remove(object, pi, piend, 0);
		off += IDX_TO_OFF(piend - pi);
		len -= IDX_TO_OFF(piend - pi);
	}

	if (endofs != 0 && pistart != piend) {
		error = shm_partial_page_invalidate(object, piend, 0, endofs);
		if (error)
			goto out;
		off += endofs;
		len -= endofs;
	}

out:
	VM_OBJECT_WUNLOCK(shmfd->shm_object);
	*offset = off;
	*length = len;
	return (error);
}

static int
shm_fspacectl(struct file *fp, int cmd, off_t *offset, off_t *length, int flags,
    struct ucred *active_cred, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	off_t off, len;
	int error;

	KASSERT(cmd == SPACECTL_DEALLOC, ("shm_fspacectl: Invalid cmd"));
	KASSERT((flags & ~SPACECTL_F_SUPPORTED) == 0,
	    ("shm_fspacectl: non-zero flags"));
	KASSERT(*offset >= 0 && *length > 0 && *length <= OFF_MAX - *offset,
	    ("shm_fspacectl: offset/length overflow or underflow"));
	error = EINVAL;
	shmfd = fp->f_data;
	off = *offset;
	len = *length;

	rl_cookie = shm_rangelock_wlock(shmfd, off, off + len);
	switch (cmd) {
	case SPACECTL_DEALLOC:
		if ((shmfd->shm_seals & F_SEAL_WRITE) != 0) {
			error = EPERM;
			break;
		}
		error = shm_deallocate(shmfd, &off, &len, flags);
		*offset = off;
		*length = len;
		break;
	default:
		__assert_unreachable();
	}
	shm_rangelock_unlock(shmfd, rl_cookie);
	return (error);
}
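
/*
 * Hole punching reaches this handler via fspacectl(2).  A sketch of the
 * userspace side (error handling elided):
 *
 *	struct spacectl_range rqsr = { .r_offset = off, .r_len = len };
 *
 *	fspacectl(fd, SPACECTL_DEALLOC, &rqsr, 0, &rqsr);
 *
 * On return the range reflects any work that remains; F_SEAL_WRITE on the
 * descriptor causes the request to fail with EPERM, as enforced above.
 */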

static int
shm_fallocate(struct file *fp, off_t offset, off_t len, struct thread *td)
{
	void *rl_cookie;
	struct shmfd *shmfd;
	size_t size;
	int error;

	/* This assumes that the caller already checked for overflow. */
	error = 0;
	shmfd = fp->f_data;
	size = offset + len;

	/*
	 * Just grab the rangelock for the range that we may be attempting to
	 * grow, rather than blocking read/write for regions we won't be
	 * touching while this (potential) resize is in progress.  Other
	 * attempts to resize the shmfd will have to take a write lock from 0
	 * to OFF_MAX, so this being potentially beyond the current usable
	 * range of the shmfd is not necessarily a concern.  If other
	 * mechanisms are added to grow a shmfd, this may need to be
	 * re-evaluated.
	 */
	rl_cookie = shm_rangelock_wlock(shmfd, offset, size);
	if (size > shmfd->shm_size)
		error = shm_dotruncate_cookie(shmfd, size, rl_cookie);
	shm_rangelock_unlock(shmfd, rl_cookie);
	/* Translate to posix_fallocate(2) return value as needed. */
	if (error == ENOMEM)
		error = ENOSPC;
	return (error);
}
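
/*
 * Illustrative userspace counterpart: this handler backs posix_fallocate(2)
 * on POSIX shm descriptors, so preallocating (and implicitly growing) an
 * object looks like (hypothetical name, error handling elided):
 *
 *	int fd = shm_open("/myshm", O_RDWR | O_CREAT, 0600);
 *
 *	if (posix_fallocate(fd, 0, 1024 * 1024) != 0)
 *		... handle ENOSPC, EPERM on a growth-sealed object, etc. ...
 */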

static int
sysctl_posix_shm_list(SYSCTL_HANDLER_ARGS)
{
	struct shm_mapping *shmm;
	struct sbuf sb;
	struct kinfo_file kif;
	u_long i;
	int error, error2;

	sbuf_new_for_sysctl(&sb, NULL, sizeof(struct kinfo_file) * 5, req);
	sbuf_clear_flags(&sb, SBUF_INCLUDENUL);
	error = 0;
	sx_slock(&shm_dict_lock);
	for (i = 0; i < shm_hash + 1; i++) {
		LIST_FOREACH(shmm, &shm_dictionary[i], sm_link) {
			error = shm_fill_kinfo_locked(shmm->sm_shmfd,
			    &kif, true);
			if (error == EPERM) {
				error = 0;
				continue;
			}
			if (error != 0)
				break;
			pack_kinfo(&kif);
			error = sbuf_bcat(&sb, &kif, kif.kf_structsize) == 0 ?
			    0 : ENOMEM;
			if (error != 0)
				break;
		}
	}
	sx_sunlock(&shm_dict_lock);
	error2 = sbuf_finish(&sb);
	sbuf_delete(&sb);
	return (error != 0 ? error : error2);
}

SYSCTL_PROC(_kern_ipc, OID_AUTO, posix_shm_list,
    CTLFLAG_RD | CTLFLAG_PRISON | CTLFLAG_MPSAFE | CTLTYPE_OPAQUE,
    NULL, 0, sysctl_posix_shm_list, "",
    "POSIX SHM list");

int
kern_shm_open(struct thread *td, const char *path, int flags, mode_t mode,
    struct filecaps *caps)
{

	return (kern_shm_open2(td, path, flags, mode, 0, caps, NULL));
}

/*
 * This version of the shm_open() interface leaves CLOEXEC behavior up to the
 * caller, and libc will enforce it for the traditional shm_open() call.  This
 * allows other consumers, like memfd_create(), to opt in to CLOEXEC.  This
 * interface also includes a 'name' argument that is currently unused, but
 * could potentially be exported later via some interface for debugging
 * purposes.  From the kernel's perspective, it is optional.  Individual
 * consumers like memfd_create() may require it in order to be compatible
 * with other systems implementing the same function.
 */
int
sys_shm_open2(struct thread *td, struct shm_open2_args *uap)
{

	return (kern_shm_open2(td, uap->path, uap->flags, uap->mode,
	    uap->shmflags, NULL, uap->name));
}
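
/*
 * A sketch of how the libc wrappers are expected to layer on top of this
 * syscall (simplified; the actual libc sources differ in detail):
 *
 *	int
 *	shm_open(const char *path, int flags, mode_t mode)
 *	{
 *		return (__sys_shm_open2(path, flags | O_CLOEXEC, mode, 0,
 *		    NULL));
 *	}
 *
 * memfd_create() instead passes SHM_ANON with its own shmflags and supplies
 * the (otherwise-unused) name for debugging.
 */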

int
shm_get_path(struct vm_object *obj, char *path, size_t sz)
{
	struct shmfd *shmfd;
	int error;

	error = 0;
	shmfd = NULL;
	sx_slock(&shm_dict_lock);
	VM_OBJECT_RLOCK(obj);
	if ((obj->flags & OBJ_POSIXSHM) == 0) {
		error = EINVAL;
	} else {
		if (obj->type == shmfd_pager_type)
			shmfd = obj->un_pager.swp.swp_priv;
		else if (obj->type == OBJT_PHYS)
			shmfd = obj->un_pager.phys.phys_priv;
		if (shmfd == NULL) {
			error = ENXIO;
		} else {
			strlcpy(path, shmfd->shm_path == NULL ? "anon" :
			    shmfd->shm_path, sz);
		}
	}
	if (error != 0)
		path[0] = '\0';
	VM_OBJECT_RUNLOCK(obj);
	sx_sunlock(&shm_dict_lock);
	return (error);
}
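
/*
 * Illustrative in-kernel use: given a VM object suspected to back a POSIX
 * shm segment, recover a printable name (anonymous objects read "anon"):
 *
 *	char path[MAXPATHLEN];
 *
 *	if (shm_get_path(obj, path, sizeof(path)) == 0)
 *		printf("backed by %s\n", path);
 */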