#!/bin/sh

#
# Copyright (c) 2015 EMC Corp.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
#    notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
#    notice, this list of conditions and the following disclaimer in the
#    documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
# OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
# SUCH DAMAGE.
#

# Bug 198163 - Kernel panic in vm_reserv_populate()
# Test scenario by: ikosarev@accesssoftek.com
# http://people.freebsd.org/~pho/stress/log/kostik771.txt
# Fixed by r280238

. ../default.cfg
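
# Extract and build the C++ program embedded after the "EOF" marker
# below, run it while the swap test applies memory pressure in the
# background, then clean up.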

uname -a | egrep -q "i386|amd64" || exit 0
odir=`pwd`
cd /tmp
sed '1,/^EOF/d' < $odir/$0 > vm_reserv_populate.cc
rm -f /tmp/vm_reserv_populate
mycc -o vm_reserv_populate -Wall -Wextra -g -O2 vm_reserv_populate.cc ||
    exit 1
rm -f vm_reserv_populate.cc

(cd $odir/../testcases/swap; ./swap -t 5m -i 20 -h -l 100 > /dev/null) &
./vm_reserv_populate
while pgrep -q swap; do
	pkill -9 swap
done
rm vm_reserv_populate
exit 0
EOF
#include <pthread.h>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <sys/time.h>
#include <signal.h>
#include <errno.h>
#include <stdarg.h>
#include <string.h>
#include <assert.h>

#include <sys/syscall.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <fcntl.h>

#define INLINE inline
#define NOINLINE __attribute__((noinline))
#define SYSCALL(name) SYS_ ## name
#define internal_syscall __syscall

typedef unsigned char u8;
typedef unsigned int u32;
typedef unsigned long long u64;
typedef unsigned long uptr;

struct atomic_uint32_t {
  typedef u32 Type;
  volatile Type val_dont_use;
};

struct atomic_uintptr_t {
  typedef uptr Type;
  volatile Type val_dont_use;
};

uptr internal_sched_yield() {
  return internal_syscall(SYSCALL(sched_yield));
}

enum memory_order {
  memory_order_relaxed = 1 << 0,
  memory_order_consume = 1 << 1,
  memory_order_acquire = 1 << 2,
  memory_order_release = 1 << 3,
  memory_order_acq_rel = 1 << 4,
  memory_order_seq_cst = 1 << 5
};

// Busy-wait hint: compiler barriers around a run of "pause"
// instructions.
INLINE void proc_yield(int cnt) {
  __asm__ __volatile__("" ::: "memory");
  for (int i = 0; i < cnt; i++)
    __asm__ __volatile__("pause");
  __asm__ __volatile__("" ::: "memory");
}
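
// The helpers below are hand-rolled x86 atomics in the style of the
// LLVM sanitizer runtime: plain aligned loads and stores plus compiler
// barriers, with MMX moves for 64-bit accesses on 32-bit targets.
// They are used like the C++11 atomics they imitate, e.g.:
//
//   atomic_uintptr_t a = {0};
//   uptr v = atomic_load(&a, memory_order_acquire);
//   atomic_store(&a, v | 1, memory_order_release);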
template<typename T>
NOINLINE typename T::Type atomic_load(
    const volatile T *a, memory_order mo) {
  assert(mo & (memory_order_relaxed | memory_order_consume
      | memory_order_acquire | memory_order_seq_cst));
  assert(!((uptr)a % sizeof(*a)));
  typename T::Type v;

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned loads are atomic.
    if (mo == memory_order_relaxed) {
      v = a->val_dont_use;
    } else if (mo == memory_order_consume) {
      // Assume that the processor respects data dependencies
      // (and that the compiler won't break them).
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    } else if (mo == memory_order_acquire) {
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      // On x86 loads are implicitly acquire.
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // On x86 a plain MOV is enough for a seq_cst load;
      // the fence is paid on the store side.
      __asm__ __volatile__("" ::: "memory");
      v = a->val_dont_use;
      __asm__ __volatile__("" ::: "memory");
    }
  } else {
    // 64-bit load on a 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"  // (ptr could be read-only)
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (v)
        : "m" (a->val_dont_use)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
  }
  return v;
}

template<typename T>
INLINE void atomic_store(volatile T *a, typename T::Type v, memory_order mo) {
  assert(mo & (memory_order_relaxed | memory_order_release
      | memory_order_seq_cst));
  assert(!((uptr)a % sizeof(*a)));

  if (sizeof(*a) < 8 || sizeof(void*) == 8) {
    // Assume that aligned stores are atomic.
    if (mo == memory_order_relaxed) {
      a->val_dont_use = v;
    } else if (mo == memory_order_release) {
      // On x86 stores are implicitly release.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __asm__ __volatile__("" ::: "memory");
    } else {  // seq_cst
      // A seq_cst store needs a full barrier after the MOV.
      __asm__ __volatile__("" ::: "memory");
      a->val_dont_use = v;
      __sync_synchronize();
    }
  } else {
    // 64-bit store on a 32-bit platform.
    __asm__ __volatile__(
        "movq %1, %%mm0;"  // Use mmx reg for 64-bit atomic moves
        "movq %%mm0, %0;"
        "emms;"            // Empty mmx state/Reset FP regs
        : "=m" (a->val_dont_use)
        : "m" (v)
        : // mark the FP stack and mmx registers as clobbered
          "st", "st(1)", "st(2)", "st(3)", "st(4)", "st(5)", "st(6)", "st(7)",
#ifdef __MMX__
          "mm0", "mm1", "mm2", "mm3", "mm4", "mm5", "mm6", "mm7",
#endif  // #ifdef __MMX__
          "memory");
    if (mo == memory_order_seq_cst)
      __sync_synchronize();
  }
}

template<typename T>
INLINE bool atomic_compare_exchange_strong(volatile T *a,
                                           typename T::Type *cmp,
                                           typename T::Type xchg,
                                           memory_order mo __unused) {
  typedef typename T::Type Type;
  Type cmpv = *cmp;
  Type prev = __sync_val_compare_and_swap(&a->val_dont_use, cmpv, xchg);
  if (prev == cmpv)
    return true;
  *cmp = prev;
  return false;
}

template<typename T>
INLINE bool atomic_compare_exchange_weak(volatile T *a,
                                         typename T::Type *cmp,
                                         typename T::Type xchg,
                                         memory_order mo) {
  return atomic_compare_exchange_strong(a, cmp, xchg, mo);
}

const u32 kTabSizeLog = 20;
const int kTabSize = 1 << kTabSizeLog;

static atomic_uintptr_t tab[kTabSize];
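
// Take bit 0 of every table entry as a tiny spinlock, fork while
// holding all of them, then release them again (in both the parent
// and the child, each on its own copy-on-write copy).  Scanning the
// multi-megabyte tab[] array dirties pages around each fork(), which
// is what exercises the kernel's reservation code.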
int x_fork(void) {
  for (int i = 0; i < kTabSize; ++i) {
    atomic_uintptr_t *p = &tab[i];
    for (int j = 0;; j++) {
      uptr cmp = atomic_load(p, memory_order_relaxed);
      if ((cmp & 1) == 0 &&
          atomic_compare_exchange_weak(p, &cmp, cmp | 1,
          memory_order_acquire))
        break;
      if (j < 10)
        proc_yield(10);
      else
        internal_sched_yield();
    }
  }

  int pid = fork();

  for (int i = 0; i < kTabSize; ++i) {
    atomic_uintptr_t *p = &tab[i];
    uptr s = atomic_load(p, memory_order_relaxed);
    atomic_store(p, (s & ~1UL), memory_order_release);
  }

  return pid;
}
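
// main() forks kChildren children and each child forks once more via
// test(), so up to 2 * kChildren short-lived processes coexist, all
// sharing tab[] copy-on-write.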
void test() {
  pid_t pid = x_fork();
  if (pid) {
    pid_t p;
    while ((p = wait(NULL)) == -1) { }
  }
}

int main() {
  const int kChildren = 1000;
  for (int i = 0; i < kChildren; ++i) {
    pid_t pid = x_fork();
    if (!pid) {
      test();
      return 0;
    }
  }

  sleep(5);

  for (int i = 0; i < kChildren; ++i) {
    pid_t p;
    while ((p = wait(NULL)) == -1) { }
  }

  return 0;
}