Lines matching the query full:ss

115 ss->exit = true; \
120 ss->exit = true; \
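
The two macro-continuation lines above (115 and 120) appear to belong to error-handling macros that raise ss->exit before the failing process bails out, so that any sibling spinning on the flag (see child_wait_p3_write() below) stops waiting. A minimal sketch of a macro with that shape; the name CHILD_FAIL and the warn()/_exit() reporting are assumptions, not the test's actual macro:

#include <err.h>
#include <stdbool.h>
#include <unistd.h>

/*
 * Hypothetical failure macro: raise the shared exit flag first so that
 * waiters can bail out, then report and terminate the failing process.
 */
#define CHILD_FAIL(ss, msg)			\
	do {					\
		(ss)->exit = true;		\
		warn("%s", (msg));		\
		_exit(1);			\
	} while (0)
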
126 static void child(struct shared_state *ss, int depth);
129 child_fork(struct shared_state *ss, int depth) in child_fork() argument
135 child(ss, depth); in child_fork()
140 child_fault(struct shared_state *ss) in child_fault() argument
144 for (i = 0; i < ss->len; i += ss->pagesize) in child_fault()
145 (void)((volatile char *)ss->p)[i]; in child_fault()
149 child_write(struct shared_state *ss, int val, size_t len) in child_write() argument
153 for (i = 0; i < len; i += ss->pagesize) in child_write()
154 ((int *)ss->p)[i / sizeof(int)] = val; in child_write()
159 child_wait_p3_write(struct shared_state *ss) in child_wait_p3_write() argument
161 while (!ss->p3_did_write) { in child_wait_p3_write()
162 if (ss->exit) in child_wait_p3_write()
170 child_verify(struct shared_state *ss, int depth, int newval, int oldval) in child_verify() argument
175 for (i = 0; i < ss->len; i += ss->pagesize) { in child_verify()
176 expectval = i < ss->modlen ? newval : oldval; in child_verify()
177 foundval = ((int *)ss->p)[i / sizeof(int)]; in child_verify()
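
child_fault(), child_write(), and child_verify() above all walk the mapping one page at a time: fault a page in read-only, stamp the first int of each page, or compare that int against the value expected for that offset. A standalone sketch of the same per-page pattern; the names fault_pages/write_pages/verify_pages are illustrative, and assert() stands in for the test's own failure reporting:

#include <assert.h>
#include <stddef.h>

/* Read one byte per page so every page is faulted in (cf. child_fault()). */
static void
fault_pages(const char *p, size_t len, size_t pagesize)
{
	for (size_t i = 0; i < len; i += pagesize)
		(void)((volatile const char *)p)[i];
}

/* Write a marker into the first int of every page (cf. child_write()). */
static void
write_pages(char *p, size_t len, size_t pagesize, int val)
{
	for (size_t i = 0; i < len; i += pagesize)
		*(int *)(p + i) = val;
}

/* Expect newval in the rewritten prefix and oldval past it (cf. child_verify()). */
static void
verify_pages(const char *p, size_t len, size_t modlen, size_t pagesize,
    int newval, int oldval)
{
	for (size_t i = 0; i < len; i += pagesize)
		assert(*(const int *)(p + i) == (i < modlen ? newval : oldval));
}
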
186 child(struct shared_state *ss, int depth) in child() argument
197 ss->p = mmap(NULL, ss->len, PROT_READ | PROT_WRITE, in child()
199 if (ss->p == MAP_FAILED) in child()
203 child_write(ss, mypid, ss->len); in child()
204 if (!ss->lazy_cow) { in child()
205 if (mlock(ss->p, ss->len) == -1) in child()
207 if (mprotect(ss->p, ss->len, PROT_READ) == -1) in child()
210 if (ss->block_xfer) { in child()
216 child_fork(ss, 4); in child()
223 if (minherit(ss->p, ss->len, INHERIT_COPY) != 0) in child()
226 child_fork(ss, depth + 1); in child()
228 child_wait_p3_write(ss); in child()
229 child_verify(ss, depth, mypid, mypid); in child()
230 if (!ss->collapse) { in child()
232 while (!ss->exit) in child()
242 if (minherit(ss->p, ss->len, INHERIT_SHARE) != 0) in child()
251 child_fault(ss); in child()
252 oldval = atomic_load_acq_int(ss->p); in child()
255 pid = child_fork(ss, depth + 1); in child()
256 if (ss->collapse) { in child()
258 while (!ss->exiting[1] || in child()
259 (ss->block_xfer && !ss->exiting[4])) in child()
268 child_wait_p3_write(ss); in child()
269 child_verify(ss, depth, pid, oldval); in child()
270 ss->okay = true; in child()
271 ss->exit = true; in child()
279 if (!ss->lazy_cow) { in child()
280 if (mlock(ss->p, ss->len) == -1) in child()
282 if (mprotect(ss->p, ss->len, PROT_READ | PROT_WRITE) == in child()
293 child_write(ss, mypid, ss->modlen); in child()
294 ss->p3_did_write = true; in child()
299 oldval = atomic_load_acq_int(ss->p); in child()
300 child_wait_p3_write(ss); in child()
301 child_verify(ss, depth, oldval, oldval); in child()
308 ss->exiting[depth] = true; in child()
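
Within child(), minherit(2) switches the mapping between INHERIT_COPY and INHERIT_SHARE before the next fork, which decides whether the forked child gets a copy-on-write snapshot (a new shadow object) or keeps sharing the parent's pages. A condensed sketch of that pattern outside the test harness; the MAP_SHARED | MAP_ANON flags are an assumption, since the mmap() flag argument is not among the matched lines above:

#include <sys/mman.h>
#include <sys/types.h>
#include <err.h>
#include <stdbool.h>
#include <stddef.h>
#include <unistd.h>

/*
 * Map an anonymous region and choose how a future child will inherit it:
 * INHERIT_COPY gives the child a copy-on-write copy, INHERIT_SHARE keeps
 * parent and child backed by the same pages.
 */
static pid_t
fork_with_inheritance(size_t len, bool share)
{
	char *p;
	pid_t pid;

	p = mmap(NULL, len, PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANON, -1, 0);
	if (p == MAP_FAILED)
		err(1, "mmap");
	if (minherit(p, len, share ? INHERIT_SHARE : INHERIT_COPY) != 0)
		err(1, "minherit");
	pid = fork();
	if (pid == -1)
		err(1, "fork");
	if (pid == 0) {
		/*
		 * The child's write is visible to the parent only in the
		 * INHERIT_SHARE case; under INHERIT_COPY it lands in the
		 * child's private copy-on-write pages.
		 */
		*(int *)p = (int)getpid();
		_exit(0);
	}
	return (pid);
}
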
316 struct shared_state *ss; in do_one_shared_shadow_inval() local
328 ss = mmap(NULL, sizeof(*ss), PROT_READ | PROT_WRITE, in do_one_shared_shadow_inval()
330 ATF_REQUIRE(ss != MAP_FAILED); in do_one_shared_shadow_inval()
332 ss->len = len; in do_one_shared_shadow_inval()
333 ss->modlen = (flags & FLAG_FULLMOD) ? ss->len : ss->len / 2; in do_one_shared_shadow_inval()
334 ss->pagesize = pagesize; in do_one_shared_shadow_inval()
335 ss->collapse = (flags & FLAG_COLLAPSE) != 0; in do_one_shared_shadow_inval()
336 ss->block_xfer = (flags & FLAG_BLOCK_XFER) != 0; in do_one_shared_shadow_inval()
337 ss->lazy_cow = lazy_cow; in do_one_shared_shadow_inval()
342 child(ss, 1); in do_one_shared_shadow_inval()
351 ATF_REQUIRE(ss->okay); in do_one_shared_shadow_inval()
353 ATF_REQUIRE(munmap(ss, sizeof(*ss)) == 0); in do_one_shared_shadow_inval()
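
do_one_shared_shadow_inval() keeps the coordination flags in a struct shared_state that is itself obtained from mmap(), so every forked process operates on the same copy and ATF_REQUIRE(ss->okay) observes what the children recorded. A plausible reconstruction of that control block and its mapping, inferred only from the field accesses in the matches above; declaration order, exact field types, the size of exiting[], and the MAP_SHARED | MAP_ANON flags are guesses:

#include <sys/mman.h>
#include <stdbool.h>
#include <stddef.h>

struct shared_state {
	void	*p;		/* the mapping under test */
	size_t	 len;		/* size of that mapping */
	size_t	 modlen;	/* prefix rewritten late in the test */
	size_t	 pagesize;
	bool	 collapse;	/* FLAG_COLLAPSE */
	bool	 block_xfer;	/* FLAG_BLOCK_XFER */
	bool	 lazy_cow;
	bool	 okay;		/* set once verification succeeds */
	bool	 exit;		/* tells every process to wind down */
	bool	 p3_did_write;	/* the writing child has stamped its pages */
	bool	 exiting[5];	/* per-depth flags; indices 1-4 appear above */
};

/* The control block must be shared across fork(), hence mmap() rather
 * than malloc(). */
static struct shared_state *
map_shared_state(void)
{
	void *p;

	p = mmap(NULL, sizeof(struct shared_state), PROT_READ | PROT_WRITE,
	    MAP_SHARED | MAP_ANON, -1, 0);
	return (p == MAP_FAILED ? NULL : p);
}
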