# $Id: Makefile,v 1.54 2020/05/17 17:26:14 sjg Exp $
#
# $NetBSD: Makefile,v 1.58 2020/05/17 12:36:26 rillig Exp $
#
# Unit tests for make(1)
# The main targets are:
#
# all:	run all the tests
# test:	run 'all', and compare to expected results
# accept: move generated output to expected results
#
# Adding a test case.
# Each feature should get its own set of tests in its own suitably
# named makefile (*.mk), with its own set of expected results (*.exp),
# and it should be added to the TESTNAMES list.
# (A commented example sketch appears at the end of this file.)
#

.MAIN: all

.-include "Makefile.config"

UNIT_TESTS:=	${.PARSEDIR}
.PATH: ${UNIT_TESTS}

# Each test is in a sub-makefile.
# Keep the list sorted.
TESTNAMES= \
	comment \
	cond-late \
	cond1 \
	cond2 \
	dollar \
	doterror \
	dotwait \
	error \
	export \
	export-all \
	export-env \
	forloop \
	forsubst \
	hash \
	include-main \
	misc \
	moderrs \
	modmatch \
	modmisc \
	modorder \
	modts \
	modword \
	order \
	posix \
	qequals \
	sunshcmd \
	sysv \
	ternary \
	unexport \
	unexport-env \
	varcmd \
	varmisc \
	varmod-edge \
	varquote \
	varshell

# These tests were broken by reverting POSIX changes.
STRICT_POSIX_TESTS = \
	escape \
	impsrc \
	phony-end \
	posix1 \
	suffixes

# Override make flags for certain tests
flags.doterror=
flags.order=	-j1

OUTFILES= ${TESTNAMES:S/$/.out/}

all: ${OUTFILES}

CLEANFILES += *.rawout *.out *.status *.tmp *.core
CLEANFILES += obj*.[och] lib*.a		# posix1.mk
CLEANFILES += issue* .[ab]*		# suffixes.mk
CLEANRECURSIVE += dir dummy		# posix1.mk

clean:
	rm -f ${CLEANFILES}
.if !empty(CLEANRECURSIVE)
	rm -rf ${CLEANRECURSIVE}
.endif

TEST_MAKE?= ${.MAKE}
TOOL_SED?= sed
TOOL_TR?= tr
TOOL_DIFF?= diff

.if defined(.PARSEDIR)
# ensure consistent results from sort(1)
LC_ALL= C
LANG= C
.export LANG LC_ALL
.endif

# Some tests need extra post-processing.
SED_CMDS.varshell = -e 's,^[a-z]*sh: ,,' \
	-e '/command/s,No such.*,not found,'

# The tests are actually done with sub-makes.
.SUFFIXES: .mk .rawout .out
.mk.rawout:
	@echo ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC}
	-@cd ${.OBJDIR} && \
	{ ${TEST_MAKE} ${flags.${.TARGET:R}:U-k} -f ${.IMPSRC} \
	  2>&1 ; echo $$? >${.TARGET:R}.status ; } > ${.TARGET}.tmp
	@mv ${.TARGET}.tmp ${.TARGET}

# We always pretend .MAKE was called 'make'
# and strip ${.CURDIR}/ from the output
# and replace anything after 'stopped in' with unit-tests
# so the results can be compared.
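#
# For example, assuming ${TEST_MAKE:T} expands to 'make' (the path shown is
# only illustrative), a raw output line such as
#	make[1]: stopped in /usr/src/usr.bin/make/unit-tests
# comes out of the sed pipeline below as
#	make: stopped in unit-tests
# so the result matches the checked-in *.exp files on any machine.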
.rawout.out:
	@echo postprocess ${.TARGET}
	@${TOOL_SED} -e 's,^${TEST_MAKE:T:C/\./\\\./g}[][0-9]*:,make:,' \
	  -e 's,${TEST_MAKE:C/\./\\\./g},make,' \
	  -e '/stopped/s, /.*, unit-tests,' \
	  -e 's,${.CURDIR:C/\./\\\./g}/,,g' \
	  -e 's,${UNIT_TESTS:C/\./\\\./g}/,,g' ${SED_CMDS.${.TARGET:T:R}} \
	  < ${.IMPSRC} > ${.TARGET}.tmp
	@echo "exit status `cat ${.TARGET:R}.status`" >> ${.TARGET}.tmp
	@mv ${.TARGET}.tmp ${.TARGET}

# Compare all output files
test: ${OUTFILES} .PHONY
	@failed= ; \
	for test in ${TESTNAMES}; do \
	  ${TOOL_DIFF} ${DIFF_FLAGS} ${UNIT_TESTS}/$${test}.exp $${test}.out \
	  || failed="$${failed}$${failed:+ }$${test}" ; \
	done ; \
	if [ -n "$${failed}" ]; then \
	  echo "Failed tests: $${failed}" ; false ; \
	else \
	  echo "All tests passed" ; \
	fi

accept:
	@for test in ${TESTNAMES}; do \
	  cmp -s ${UNIT_TESTS}/$${test}.exp $${test}.out \
	  || { echo "Replacing $${test}.exp" ; \
	       cp $${test}.out ${UNIT_TESTS}/$${test}.exp ; } \
	done

.if exists(${TEST_MAKE})
${TESTNAMES:S/$/.rawout/}: ${TEST_MAKE}
.endif

.-include <obj.mk>
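
# Example sketch of adding a new test case (everything below is commented out
# and purely illustrative; the test name "newtest" is hypothetical):
#
# 1. Create ${UNIT_TESTS}/newtest.mk exercising the feature, e.g.
#
#	all:
#		@echo newtest
#
# 2. Add "newtest" to the sorted TESTNAMES list above.
# 3. Run "make test" (the new test fails until its .exp exists), review the
#    generated newtest.out, then run "make accept" to install it as
#    ${UNIT_TESTS}/newtest.exp.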