xref: /freebsd/lib/libthr/thread/thr_pshared.c (revision 22cf89c938886d14f5796fc49f9f020c23ea8eaf)
/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include "namespace.h"
#include <stdlib.h>
#include "un-namespace.h"

#include "thr_private.h"

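/*
 * Process-shared synchronization objects are backed by kernel-managed
 * "off-pages" obtained through UMTX_OP_SHM, keyed by the address of
 * the user-visible lock object.  This per-process hash caches the
 * key -> off-page mapping so that repeated operations on the same
 * object do not have to re-query the kernel and re-mmap the shared
 * page.
 */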
struct psh {
	LIST_ENTRY(psh) link;
	void *key;
	void *val;
};

LIST_HEAD(pshared_hash_head, psh);
#define	HASH_SIZE	128
static struct pshared_hash_head pshared_hash[HASH_SIZE];
#define	PSHARED_KEY_HASH(key)	(((unsigned long)(key) >> 8) % HASH_SIZE)
/* XXXKIB: lock could be split into per-hash chain locks if it appears contended */
static struct urwlock pshared_lock = DEFAULT_URWLOCK;
static int page_size;

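/*
 * Initialize the key -> off-page hash and the rwlock protecting it.
 * Called once during libthr initialization.
 */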
void
__thr_pshared_init(void)
{
	int i;

	page_size = getpagesize();
	THR_ASSERT(page_size >= THR_PAGE_SIZE_MIN,
	    "THR_PAGE_SIZE_MIN is too large");

	_thr_urwlock_init(&pshared_lock);
	for (i = 0; i < HASH_SIZE; i++)
		LIST_INIT(&pshared_hash[i]);
}

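/*
 * Lock wrappers for pshared_lock.  The lock level is raised while the
 * lock is held, so the thread is treated as being in a critical
 * section; pshared_unlock() lowers it again and calls _thr_ast() to
 * process any events that were deferred meanwhile.
 */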
static void
pshared_rlock(struct pthread *curthread)
{

	curthread->locklevel++;
	_thr_rwl_rdlock(&pshared_lock);
}

static void
pshared_wlock(struct pthread *curthread)
{

	curthread->locklevel++;
	_thr_rwl_wrlock(&pshared_lock);
}

static void
pshared_unlock(struct pthread *curthread)
{

	_thr_rwl_unlock(&pshared_lock);
	curthread->locklevel--;
	_thr_ast(curthread);
}

/*
 * Among all the processes sharing a lock, only one executes
 * pthread_lock_destroy().  The other processes still have the key
 * hashed and the off-page mapped.
 *
 * Mitigate the problem by periodically checking the liveness of all
 * hashed keys.  Right now this is done on each
 * pthread_lock_destroy(), but it may be done less often if that
 * proves too time-consuming.
 */
static void
pshared_gc(struct pthread *curthread)
{
	struct pshared_hash_head *hd;
	struct psh *h, *h1;
	int error, i;

	pshared_wlock(curthread);
	for (i = 0; i < HASH_SIZE; i++) {
		hd = &pshared_hash[i];
		LIST_FOREACH_SAFE(h, hd, link, h1) {
			error = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_ALIVE,
			    h->val, NULL);
			if (error == 0)
				continue;
			LIST_REMOVE(h, link);
			munmap(h->val, page_size);
			free(h);
		}
	}
	pshared_unlock(curthread);
}

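/*
 * Look up the off-page mapped for the given key.  Returns NULL if the
 * key is not hashed.  The caller must hold pshared_lock.
 */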
static void *
pshared_lookup(void *key)
{
	struct pshared_hash_head *hd;
	struct psh *h;

	hd = &pshared_hash[PSHARED_KEY_HASH(key)];
	LIST_FOREACH(h, hd, link) {
		if (h->key == key)
			return (h->val);
	}
	return (NULL);
}

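/*
 * Record the key -> off-page mapping.  If another thread won the race
 * to insert the same key, *val is updated to the already-hashed
 * off-page and the caller's new mapping is unmapped.  Returns 0 only
 * when a hash entry cannot be allocated.  The caller must hold
 * pshared_lock exclusively.
 */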
static int
pshared_insert(void *key, void **val)
{
	struct pshared_hash_head *hd;
	struct psh *h;

	hd = &pshared_hash[PSHARED_KEY_HASH(key)];
	LIST_FOREACH(h, hd, link) {
		/*
		 * When the key already exists in the hash, we should
		 * return either the new (just mapped) or the old
		 * (hashed) val, and the other val must be unmapped to
		 * avoid an address space leak.
		 *
		 * If two threads lock the same object before it is
		 * stored in pshared_hash, the val inserted by the
		 * first thread must be returned and the second val
		 * unmapped (the ordering is provided by
		 * pshared_lock()).  Otherwise, if we unmapped the
		 * value obtained from the hash, the first thread
		 * might operate on an unmapped off-page object.
		 *
		 * There is still an issue: if the hashed key was
		 * unmapped and another page was later mapped at the
		 * same key address, the hash would return the old
		 * val.  I decided to handle the race of simultaneous
		 * hash insertion, leaving the unlikely remap problem
		 * unaddressed.
		 */
		if (h->key == key) {
			if (h->val != *val) {
				munmap(*val, page_size);
				*val = h->val;
			}
			return (1);
		}
	}

	h = malloc(sizeof(*h));
	if (h == NULL)
		return (0);
	h->key = key;
	h->val = *val;
	LIST_INSERT_HEAD(hd, h, link);
	return (1);
}

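/*
 * Remove the hash entry for the given key and return its off-page, or
 * NULL if the key was not hashed.  The caller must hold pshared_lock
 * exclusively.
 */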
static void *
pshared_remove(void *key)
{
	struct pshared_hash_head *hd;
	struct psh *h;
	void *val;

	hd = &pshared_hash[PSHARED_KEY_HASH(key)];
	LIST_FOREACH(h, hd, link) {
		if (h->key == key) {
			LIST_REMOVE(h, link);
			val = h->val;
			free(h);
			return (val);
		}
	}
	return (NULL);
}

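/*
 * Unmap the off-page, if any, and ask the kernel to destroy the
 * shared object backing the key.
 */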
static void
pshared_clean(void *key, void *val)
{

	if (val != NULL)
		munmap(val, page_size);
	_umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_DESTROY, key, NULL);
}

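/*
 * Drop any off-page hashed for the key and release it.
 */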
static void
pshared_destroy(struct pthread *curthread, void *key)
{
	void *val;

	pshared_wlock(curthread);
	val = pshared_remove(key);
	pshared_unlock(curthread);
	pshared_clean(key, val);
}

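/*
 * Return the off-page for the pshared object identified by key,
 * mapping it into this process as needed.  With doalloc set, any
 * stale mapping for the key is destroyed first and a new shared
 * object is created (UMTX_SHM_CREAT); otherwise an existing mapping
 * is taken from the hash or looked up in the kernel
 * (UMTX_SHM_LOOKUP).  Returns NULL on failure.
 */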
void *
__thr_pshared_offpage(void *key, int doalloc)
{
	struct pthread *curthread;
	void *res;
	int fd, ins_done;

	curthread = _get_curthread();
	if (doalloc) {
		pshared_destroy(curthread, key);
		res = NULL;
	} else {
		pshared_rlock(curthread);
		res = pshared_lookup(key);
		pshared_unlock(curthread);
		if (res != NULL)
			return (res);
	}
	fd = _umtx_op(NULL, UMTX_OP_SHM, doalloc ? UMTX_SHM_CREAT :
	    UMTX_SHM_LOOKUP, key, NULL);
	if (fd == -1)
		return (NULL);
	res = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	close(fd);
	if (res == MAP_FAILED)
		return (NULL);
	pshared_wlock(curthread);
	ins_done = pshared_insert(key, &res);
	pshared_unlock(curthread);
	if (!ins_done) {
		pshared_clean(key, res);
		res = NULL;
	}
	return (res);
}

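/*
 * Destroy the off-page for the key and opportunistically garbage
 * collect entries whose shared objects are no longer alive, e.g.
 * because another process already destroyed them.
 */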
void
__thr_pshared_destroy(void *key)
{
	struct pthread *curthread;

	curthread = _get_curthread();
	pshared_destroy(curthread, key);
	pshared_gc(curthread);
}

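/*
 * Fork interlock: pshared_lock is read-locked across fork() so that
 * the hash is in a consistent state when the address space is copied.
 */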
void
__thr_pshared_atfork_pre(void)
{

	_thr_rwl_rdlock(&pshared_lock);
}

void
__thr_pshared_atfork_post(void)
{

	_thr_rwl_unlock(&pshared_lock);
}