#
# Common code used to run regression tests for usr.bin/make.

#
# Output a message and exit with an error.
#
fatal()
{
	echo "fatal: $*" >&2
	exit 1
}

make_is_fmake() {
	# This test is not very reliable but works for now: the old fmake
	# does have a -v option while bmake doesn't.
	${MAKE_PROG} -f Makefile.non-existent -v 2>&1 | \
	    grep -q "cannot open.*non-existent"
}

#
# Check whether the working directory exists - it must.
#
ensure_workdir()
{
	if [ ! -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} does not exist."
	fi
}

#
# Make sure all tests have been run
#
ensure_run()
{
	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
			     ! -f ${OUTPUT_DIR}/stdout.${N} -o \
			     ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
				echo "Test ${SUBDIR}/${N} not yet run"
				FAIL=yes
			fi
		fi
		N=$((N + 1))
	done

	if [ ! -z "${FAIL}" ] ; then
		exit 1
	fi
}

#
# Output usage message.
#
print_usage()
{
	echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
	echo " setup	- set up the working directory"
	echo " run	- run the tests"
	echo " show	- show test results"
	echo " compare	- compare actual and expected results"
	echo " diff	- diff actual and expected results"
	echo " reset	- reset the test to its initial state"
	echo " clean	- delete working and output directory"
	echo " test	- setup + run + compare"
	echo " prove	- setup + run + compare + clean"
	echo " desc	- print short description"
	echo " update	- update the expected results with the current results"
	echo " help	- show this information"
}

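#
# Example invocation (illustrative): "sh <test-script> test" runs
# setup + run + compare for that test.
#
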
#
# Return 0 if we should skip the test, 1 otherwise.
#
skip_test()
{
	eval skip=\${TEST_${1}_SKIP}
	if [ -z "${skip}" ] ; then
		return 1
	else
		return 0
	fi
}

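#
# A test can mark test <n> as skipped by setting TEST_<n>_SKIP to the
# reason, e.g. (illustrative): TEST_2_SKIP="not supported on this system"
#
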
#
# Common function for setup and reset.
#
common_setup()
{
	#
	# If a Makefile.test exists in the source directory and the working
	# directory has no Makefile yet, copy it over.
	#
	if [ -e ${SRC_DIR}/Makefile.test -a ! -e ${WORK_DIR}/Makefile ] ; then
		cp ${SRC_DIR}/Makefile.test ${WORK_DIR}/Makefile
	fi

	#
	# If the TEST_MAKE_DIRS variable is set, create those directories.
	# The value is assumed to be pairs of directory names and modes.
	#
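	# Example (illustrative): TEST_MAKE_DIRS="objdir 755"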
	set -- ${TEST_MAKE_DIRS}
	while [ $# -ne 0 ] ; do
		if [ ! -d ${WORK_DIR}/${1} ] ; then
			mkdir -p -m ${2} ${WORK_DIR}/${1}
		else
			chmod ${2} ${WORK_DIR}/${1}
		fi
		shift ; shift
	done

	#
	# If the TEST_COPY_FILES variable is set, copy those files over to
	# the working directory. The value is assumed to be pairs of
	# filenames and modes.
	#
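	# Example (illustrative): TEST_COPY_FILES="Makefile.test 644 data.in 444"
	# (a source file named Makefile.test is installed as Makefile)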
	set -- ${TEST_COPY_FILES}
	while [ $# -ne 0 ] ; do
		local dstname="$(echo ${1} | sed -e 's,Makefile.test,Makefile,')"
		if [ ! -e ${WORK_DIR}/${dstname} ] ; then
			cp ${SRC_DIR}/${1} ${WORK_DIR}/${dstname}
		fi
		chmod ${2} ${WORK_DIR}/${dstname}
		shift ; shift
	done

	#
	# If the TEST_TOUCH variable is set, it is taken to be a list
	# of pairs of filenames and arguments to touch(1). The arguments
	# to touch must be surrounded by single quotes if there is more
	# than one argument.
	#
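	# Example (illustrative): TEST_TOUCH="file1 '' file2 '-t 200501010101'"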
	eval set -- ${TEST_TOUCH}
	while [ $# -ne 0 ] ; do
		eval touch ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# Now create the links listed in TEST_LINKS. The value is assumed
	# to be pairs of source and target names, relative to the working
	# directory.
	#
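	# Example (illustrative): TEST_LINKS="file1 link1"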
	eval set -- ${TEST_LINKS}
	while [ $# -ne 0 ] ; do
		eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
		shift ; shift
	done
}

#
# Set up the test. This creates the working and output directories and
# populates them with files. Finally, the setup_test() function is called.
#
eval_setup()
{
	#
	# Check whether the working directory exists. If it does, exit
	# fatally so that we don't clobber a test the user is working on.
	#
	if [ -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} already exists."
	fi

	#
	# Now create it and the output directory
	#
	mkdir -p ${WORK_DIR}
	rm -rf ${OUTPUT_DIR}
	mkdir -p ${OUTPUT_DIR}

	#
	# Common stuff
	#
	common_setup

	#
	# Finally, execute the test's setup function.
	#
	setup_test
}

#
# Default setup_test function does nothing. This may be overridden by
# the test. The ':' is needed because an empty function body is a
# syntax error in sh.
#
setup_test()
{
	:
}

#
# Reset the test. Here we need to rely on information from the test.
# We execute the same steps as in the setup, but try not to clobber
# existing files: the output directory is cleaned, the common setup
# steps are re-run, all files and directories listed in the
# TEST_CLEAN_FILES variable are removed, and finally the reset_test()
# function is called.
#
eval_reset()
{
	ensure_workdir

	#
	# Clean the output directory
	#
	rm -rf ${OUTPUT_DIR}/*

	#
	# Common stuff
	#
	common_setup

	#
	# Remove the files listed in TEST_CLEAN_FILES.
	#
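	# Example (illustrative): TEST_CLEAN_FILES="file1 dir1"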
	for f in ${TEST_CLEAN_FILES} ; do
		rm -rf ${WORK_DIR}/${f}
	done

	#
	# Execute the test's reset function.
	#
	reset_test
}

#
# Default reset_test function does nothing. This may be overridden by
# the test. The ':' is needed because an empty function body is a
# syntax error in sh.
#
reset_test()
{
	:
}

#
# Clean the test. This simply removes the working and output directories.
#
eval_clean()
{
	#
	# If you have special cleaning needs, set TEST_CLEANUP and provide
	# a 'cleanup' shell script in the source directory.
	#
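	# Example (illustrative): TEST_CLEANUP=yes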
	if [ -n "${TEST_CLEANUP}" ] ; then
		. ${SRC_DIR}/cleanup
	fi
	if [ -z "${NO_TEST_CLEANUP}" ] ; then
		rm -rf ${WORK_DIR}
		rm -rf ${OUTPUT_DIR}
	fi
}

#
# Run the test.
#
eval_run()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			( cd ${WORK_DIR} ;
			  exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
			  run_test ${N}
			  echo $? >${OUTPUT_DIR}/status.${N}
			)
		fi
		N=$((N + 1))
	done
}

#
# Default run_test() function. It can be replaced by a user-specified
# regression test. The argument to this function is the test number.
#
run_test()
{
	eval args=\${TEST_${1}-test${1}}
	${MAKE_PROG} $args
}

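#
# The arguments passed to make for test <n> are taken from TEST_<n> and
# default to the target "test<n>".
# Example (illustrative): TEST_1="-f Makefile.test target1"
#
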
#
# Show test results.
#
eval_show()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			echo "=== Test ${N} Status =================="
			cat ${OUTPUT_DIR}/status.${N}
			echo ".......... Stdout .................."
			cat ${OUTPUT_DIR}/stdout.${N}
			echo ".......... Stderr .................."
			cat ${OUTPUT_DIR}/stderr.${N}
		fi
		N=$((N + 1))
	done
}

#
# Compare results with expected results
#
eval_compare()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	echo "1..${TEST_N}"
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		fail=
		todo=
		skip=
		if ! skip_test ${N} ; then
			do_compare stdout ${N} || fail="${fail}stdout "
			do_compare stderr ${N} || fail="${fail}stderr "
			do_compare status ${N} || fail="${fail}status "
			eval todo=\${TEST_${N}_TODO}
		else
			eval skip=\${TEST_${N}_SKIP}
		fi
		msg=
		if [ ! -z "$fail" ]; then
			msg="${msg}not "
		fi
		msg="${msg}ok ${N} ${SUBDIR}/${N}"
		if [ ! -z "$fail" -o ! -z "$todo" -o ! -z "$skip" ]; then
			msg="${msg} # "
		fi
		if [ ! -z "$skip" ] ; then
			msg="${msg}skip ${skip}; "
		fi
		if [ ! -z "$todo" ] ; then
			msg="${msg}TODO ${todo}; "
		fi
		if [ ! -z "$fail" ] ; then
			msg="${msg}reason: ${fail}"
		fi
		echo "${msg}"
		N=$((N + 1))
	done
}

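#
# eval_compare emits TAP output, e.g. (illustrative):
#
#	1..2
#	ok 1 some/test/1
#	not ok 2 some/test/2 # reason: stdout
#
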
#
# Check if the test result is the same as the expected result.
#
# $1	Input file
# $2	Test number
#
do_compare()
{
	local EXPECTED RESULT
	EXPECTED="${SRC_DIR}/expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	if [ -f $EXPECTED ]; then
		# Normalize the program name to "make:" so the expected
		# output does not depend on MAKE_PROG.
		sed -e "s,^$(basename $MAKE_PROG):,make:," $RESULT | \
		    diff -u $EXPECTED -
		#diff -q $EXPECTED - 1>/dev/null 2>/dev/null
		return $?
	else
		return 1	# FAIL
	fi
}

#
# Diff current and expected results
#
eval_diff()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			FAIL=
			do_diff stdout ${N}
			do_diff stderr ${N}
			do_diff status ${N}
		fi
		N=$((N + 1))
	done
}

#
# Show the differences between the test result and the expected result.
#
# $1	Input file
# $2	Test number
#
do_diff()
{
	local EXPECTED RESULT
	EXPECTED="${SRC_DIR}/expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	echo diff -u $EXPECTED $RESULT
	if [ -f $EXPECTED ]; then
		diff -u $EXPECTED $RESULT
	else
		echo "${EXPECTED} does not exist"
	fi
}

#
# Update expected results
#
eval_update()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
			cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
			cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
		fi
		N=$((N + 1))
	done
}

#
# Print description
#
eval_desc()
{
	echo "${SUBDIR}: ${DESC}"
}

#
# Run the test: setup + run + compare.
#
eval_test()
{
	eval_setup
	eval_run
	eval_compare
}

#
# Run the test for prove(1): setup + run + compare + clean.
#
eval_prove()
{
	eval_setup
	eval_run
	eval_compare
	eval_clean
}

#
# Main function. Execute the command(s) given on the command line.
#
eval_cmd()
{
	if [ $# -eq 0 ] ; then
		# if no arguments are given, default to 'prove'
		set -- prove
	fi

	if ! make_is_fmake ; then
		for i in $(jot ${TEST_N:-1}) ; do
			eval TEST_${i}_SKIP=\"make is not fmake\"
		done
	fi

	for i
	do
		case $i in

		setup | run | compare | diff | clean | reset | show | \
		test | prove | desc | update)
			eval eval_$i
			;;
		help | *)
			print_usage
			;;
		esac
	done
}

##############################################################################
#
# Main code
#

#
# Determine our sub-directory. Argh.
#
SRC_DIR=$(dirname $0)
SRC_BASE=$(cd ${SRC_DIR} ; while [ ! -f common.sh ] ; do cd .. ; done ; pwd)
SUBDIR=$(echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@")

#
# Construct the working directory
#
WORK_DIR=$(pwd)/work/${SUBDIR}
OUTPUT_DIR=${WORK_DIR}.OUTPUT

#
# The make program to use
#
MAKE_PROG=${MAKE_PROG:-/usr/bin/make}
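
#
# A test script that sources this file typically looks like this
# (illustrative sketch; the names and values are hypothetical):
#
#	DESC="Example test."
#	TEST_N=2
#	TEST_2_TODO="known to fail"
#	. $(dirname $0)/../common.sh
#	eval_cmd "$@"
#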