#!/bin/sh

#
# Copyright (c) 2015 EMC Corp.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

# Bug 198163 - Kernel panic in vm_reserv_populate()
# Test scenario by: ikosarev@accesssoftek.com
# http://people.freebsd.org/~pho/stress/log/kostik771.txt
# Fixed by r280238

. ../default.cfg

uname -a | egrep -q "i386|amd64" || exit 0
odir=`pwd`
cd /tmp
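# Extract the C++ source appended after the "EOF" marker below and build it.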
sed '1,/^EOF/d' < $odir/$0 > vm_reserv_populate.cc
rm -f /tmp/vm_reserv_populate
mycc -o vm_reserv_populate -Wall -Wextra -g -O2 vm_reserv_populate.cc ||
    exit 1
rm -f vm_reserv_populate.cc

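# Run the swap test case in the background to add memory pressure while the
# fork test runs; kill any leftover swap processes once it is done.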
(cd $odir/../testcases/swap; ./swap -t 5m -i 20 -h -l 100 > /dev/null) &
./vm_reserv_populate
while pgrep -q swap; do
	pkill -9 swap
done
rm vm_reserv_populate
exit 0
EOF
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <signal.h>
#include <errno.h>
#include <stdarg.h>
#include <string.h>
#include <assert.h>

#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>

#define INLINE inline
#define NOINLINE __attribute__((noinline))
#define SYSCALL(name) SYS_ ## name
#define internal_syscall __syscall

typedef unsigned char u8;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef unsigned long uptr;

struct atomic_uint32_t {
  typedef u32 Type;
  volatile Type val_dont_use;
};

struct atomic_uintptr_t {
  typedef uptr Type;
  volatile Type val_dont_use;
};

uptr internal_sched_yield() {
  return internal_syscall(SYSCALL(sched_yield));
}

enum memory_order {
  memory_order_relaxed = 1 << 0,
  memory_order_consume = 1 << 1,
  memory_order_acquire = 1 << 2,
  memory_order_release = 1 << 3,
  memory_order_acq_rel = 1 << 4,
  memory_order_seq_cst = 1 << 5
};

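// Busy-wait helper: PAUSE relaxes the CPU inside spin loops; the empty asm
// statements act as compiler barriers.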
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}

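// Minimal x86/amd64 atomic load: aligned loads narrower than 8 bytes (or any
// width on 64-bit) are naturally atomic; a 64-bit load on i386 goes through
// an MMX register to stay atomic.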
template<typename T>
NOINLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  assert(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  assert(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that processor respects data dependencies
      // (and that compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV load is enough for seq_cst; the required
      // fence is carried by the seq_cst store side.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}

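// Matching atomic store: release needs only a compiler barrier on x86;
// seq_cst adds a full barrier via __sync_synchronize().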
template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  assert(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  assert(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}

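// Compare-and-swap built on the GCC/Clang __sync builtin; on failure the
// value actually observed is written back to *cmp, as in the C11 interface.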
template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
                                           typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo __unused) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

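// One million slots (about 4 MB on i386, 8 MB on amd64); bit 0 of each slot
// serves as a tiny spinlock.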
const u32 kTabSizeLog = 20;
const int kTabSize = 1 << kTabSizeLog;

static atomic_uintptr_t tab[kTabSize];

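// Lock every slot, fork(), then unlock every slot in both parent and child.
// Dirtying the whole table around repeated forks, with swap running in the
// background, is presumably what exercises the copy-on-write and superpage
// reservation path where the panic described above was seen.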
int x_fork(void) {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uintptr_t *p = &tab[i];
    for (int j = 0;; j++) {
      uptr cmp = atomic_load(p, memory_order_relaxed);
      if ((cmp & 1) == 0 &&
          atomic_compare_exchange_weak(p, &cmp, cmp | 1, memory_order_acquire))
        break;
      if (j < 10)
        proc_yield(10);
      else
        internal_sched_yield();
    }
  }

  int pid = fork();

  for (int i = 0; i < kTabSize; ++i) {
    atomic_uintptr_t *p = &tab[i];
    uptr s = atomic_load(p, memory_order_relaxed);
    atomic_store(p, (s & ~1UL), memory_order_release);
  }

  return pid;
}

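// Fork once more: the parent waits for its child, the child simply returns.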
void test() {
  pid_t pid = x_fork();
  if (pid) {
    pid_t p;
    while ((p = wait(NULL)) == -1) { }
  }
}

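// Fork 1000 children, each of which forks again via test(), then reap them.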
int main() {
  const int kChildren = 1000;
  for (int i = 0; i < kChildren; ++i) {
    pid_t pid = x_fork();
    if (!pid) {
      test();
      return 0;
    }
  }

  sleep(5);

  for (int i = 0; i < kChildren; ++i) {
    pid_t p;
    while ((p = wait(NULL)) == -1) {  }
  }

  return 0;
}
