# $FreeBSD$
#
# Common code used to run regression tests for usr.bin/make.

#
# Output a message and exit with an error.
#
fatal()
{
	echo "fatal: $*" >&2
	exit 1
}

make_is_fmake() {
	# This test is not very reliable but works for now: the old fmake
	# does have a -v option while bmake doesn't.
	${MAKE_PROG} -f Makefile.non-existent -v 2>&1 | \
	    grep -q "cannot open.*non-existent"
}

#
# Check whether the working directory exists - it must.
#
ensure_workdir()
{
	if [ ! -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} does not exist."
	fi
}

#
# Make sure all tests have been run
#
ensure_run()
{
	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	FAIL=
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			if [ ! -f ${OUTPUT_DIR}/status.${N} -o \
			     ! -f ${OUTPUT_DIR}/stdout.${N} -o \
			     ! -f ${OUTPUT_DIR}/stderr.${N} ] ; then
				echo "Test ${SUBDIR}/${N} not yet run"
				FAIL=yes
			fi
		fi
		N=$((N + 1))
	done

	if [ ! -z "${FAIL}" ] ; then
		exit 1
	fi
}

#
# Output usage message.
#
print_usage()
{
	echo "Usage: sh -v -m <path> -w <dir> $0 command(s)"
	echo " setup	- set up working directory"
	echo " run	- run the tests"
	echo " show	- show test results"
	echo " compare	- compare actual and expected results"
	echo " diff	- diff actual and expected results"
	echo " reset	- reset the test to its initial state"
	echo " clean	- delete working and output directory"
	echo " test	- setup + run + compare"
	echo " prove	- setup + run + compare + clean"
	echo " desc	- print short description"
	echo " update	- update the expected results with the current results"
	echo " help	- show this information"
}

#
# Return 0 if we should skip the test, 1 otherwise.
#
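# A test marks its case <n> as skipped by setting TEST_<n>_SKIP to the
# reason for skipping, e.g. (hypothetical value):
#
#	TEST_2_SKIP="not supported on this platform"
#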
skip_test()
{
	eval skip=\${TEST_${1}_SKIP}
	if [ -z "${skip}" ] ; then
		return 1
	else
		return 0
	fi
}

#
# Common function for setup and reset.
#
common_setup()
{
	#
	# If a Makefile.test exists in the source directory, copy it to
	# the working directory as Makefile.
	#
	if [ -e ${SRC_DIR}/Makefile.test -a ! -e ${WORK_DIR}/Makefile ] ; then
		cp ${SRC_DIR}/Makefile.test ${WORK_DIR}/Makefile
	fi

	#
	# If the TEST_MAKE_DIRS variable is set, create those directories.
	# The value is assumed to be pairs of directory names and modes.
	#
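	# A minimal example with hypothetical values:
	#
	#	TEST_MAKE_DIRS="objdir 755 logdir 700"
	#
	# creates (or re-modes) ${WORK_DIR}/objdir with mode 755 and
	# ${WORK_DIR}/logdir with mode 700.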
	set -- ${TEST_MAKE_DIRS}
	while [ $# -ne 0 ] ; do
		if [ ! -d ${WORK_DIR}/${1} ] ; then
			mkdir -p -m ${2} ${WORK_DIR}/${1}
		else
			chmod ${2} ${WORK_DIR}/${1}
		fi
		shift ; shift
	done

	#
	# If the TEST_COPY_FILES variable is set, copy those files over to
	# the working directory. The value is assumed to be pairs of
	# filenames and modes.
	#
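	# A minimal example with hypothetical values:
	#
	#	TEST_COPY_FILES="Makefile.test 644 sub/extra.mk 444"
	#
	# Note that a source file called Makefile.test is installed in the
	# working directory under the name Makefile (see the sed below).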
	set -- ${TEST_COPY_FILES}
	while [ $# -ne 0 ] ; do
		local dstname="$(echo ${1} | sed -e 's,Makefile.test,Makefile,')"
		if [ ! -e ${WORK_DIR}/${dstname} ] ; then
			cp ${SRC_DIR}/${1} ${WORK_DIR}/${dstname}
		fi
		chmod ${2} ${WORK_DIR}/${dstname}
		shift ; shift
	done

	#
	# If the TEST_TOUCH variable is set, it is taken to be a list
	# of pairs of filenames and arguments to touch(1). The arguments
	# to touch must be surrounded by single quotes if there is more
	# than one argument.
	#
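	# A minimal example with hypothetical values:
	#
	#	TEST_TOUCH="older '-t 200501010000' newer ''"
	#
	# touches ${WORK_DIR}/older with an explicit timestamp and
	# ${WORK_DIR}/newer with the current time.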
	eval set -- ${TEST_TOUCH}
	while [ $# -ne 0 ] ; do
		eval touch ${2} ${WORK_DIR}/${1}
		shift ; shift
	done

	#
	# Now create the hard links listed in TEST_LINKS. The value is
	# assumed to be pairs of existing and new filenames, relative to
	# the working directory.
	#
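	# A minimal example with hypothetical values:
	#
	#	TEST_LINKS="main.mk alias.mk"
	#
	# links ${WORK_DIR}/main.mk to ${WORK_DIR}/alias.mk.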
	eval set -- ${TEST_LINKS}
	while [ $# -ne 0 ] ; do
		eval ln ${WORK_DIR}/${1} ${WORK_DIR}/${2}
		shift ; shift
	done
}

#
# Set up the test. This creates the working and output directories and
# populates them with files, then calls the test's setup_test() function.
#
eval_setup()
{
	#
	# Check whether the working directory exists. If it does, exit
	# fatally so that we don't clobber a test the user is working on.
	#
	if [ -d ${WORK_DIR} ] ; then
		fatal "working directory ${WORK_DIR} already exists."
	fi

	#
	# Now create it and the output directory
	#
	mkdir -p ${WORK_DIR}
	rm -rf ${OUTPUT_DIR}
	mkdir -p ${OUTPUT_DIR}

	#
	# Common stuff
	#
	common_setup

	#
	# Finally, execute the test's own setup function.
	#
	setup_test
}

#
# Default setup_test function does nothing. This may be overridden by
# the test.
#
setup_test()
{
}

#
# Reset the test. Here we need to rely on information from the test.
# We execute the same steps as in the setup, but try not to clobber
# existing files: the common setup is repeated, all files and
# directories listed in the TEST_CLEAN_FILES variable are removed and
# finally the test's reset_test() function is called.
#
eval_reset()
{
	ensure_workdir

	#
	# Clean the output directory
	#
	rm -rf ${OUTPUT_DIR}/*

	#
	# Common stuff
	#
	common_setup

	#
	# Remove the files listed in TEST_CLEAN_FILES.
	#
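	# Both plain names and glob patterns may be listed; all are taken
	# relative to the working directory, e.g. (hypothetical values):
	#
	#	TEST_CLEAN_FILES="Makefile.tmp *.out"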
	for f in ${TEST_CLEAN_FILES} ; do
		rm -rf ${WORK_DIR}/${f}
	done

	#
	# Execute test's function
	#
	reset_test
}

#
# Default reset_test function does nothing. This may be overridden by
# the test.
#
reset_test()
{
}

#
# Clean up after the test. This removes the working and output
# directories.
#
eval_clean()
{
	#
	# If you have special cleaning needs, set TEST_CLEANUP and provide
	# a 'cleanup' shell script in the source directory; it is sourced
	# before the directories are removed.
	#
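	# For example, a test that creates scratch files outside the
	# working directory could set (hypothetical):
	#
	#	TEST_CLEANUP=yes
	#
	# and remove those files in its ${SRC_DIR}/cleanup script.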
	if [ -n "${TEST_CLEANUP}" ] ; then
		. ${SRC_DIR}/cleanup
	fi
	if [ -z "${NO_TEST_CLEANUP}" ] ; then
		rm -rf ${WORK_DIR}
		rm -rf ${OUTPUT_DIR}
	fi
}

#
# Run the test. Each test case is executed in the working directory
# with its stdout, stderr and exit status captured in the output
# directory.
#
eval_run()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			( cd ${WORK_DIR} ;
			  exec 1>${OUTPUT_DIR}/stdout.${N} 2>${OUTPUT_DIR}/stderr.${N}
			  run_test ${N}
			  echo $? >${OUTPUT_DIR}/status.${N}
			)
		fi
		N=$((N + 1))
	done
}

#
# Default run_test() function. It may be overridden by the regression
# test. The argument to this function is the test number; the arguments
# for ${MAKE_PROG} are taken from TEST_<n>, which defaults to the
# target test<n>.
#
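# For example, a test could override the arguments for its first case
# with (hypothetical values):
#
#	TEST_1="-B -f Makefile.alt alt-target"
#
# which runs '${MAKE_PROG} -B -f Makefile.alt alt-target' instead of
# the default '${MAKE_PROG} test1'.
#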
run_test()
{
	eval args=\${TEST_${1}-test${1}}
	${MAKE_PROG} $args
}

#
# Show test results.
#
eval_show()
{
	ensure_workdir

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			echo "=== Test ${N} Status =================="
			cat ${OUTPUT_DIR}/status.${N}
			echo ".......... Stdout .................."
			cat ${OUTPUT_DIR}/stdout.${N}
			echo ".......... Stderr .................."
			cat ${OUTPUT_DIR}/stderr.${N}
		fi
		N=$((N + 1))
	done
}

#
# Compare the actual results with the expected results, emitting one
# line of TAP (Test Anything Protocol) output per test case.
#
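# For a test with TEST_N=2 whose second case fails on stdout, the
# output would look like this (with a hypothetical subdirectory name):
#
#	1..2
#	ok 1 shell/builtin/1
#	not ok 2 shell/builtin/2 # reason: stdout
#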
eval_compare()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	echo "1..${TEST_N}"
	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		fail=
		todo=
		skip=
		if ! skip_test ${N} ; then
			do_compare stdout ${N} || fail="${fail}stdout "
			do_compare stderr ${N} || fail="${fail}stderr "
			do_compare status ${N} || fail="${fail}status "
			eval todo=\${TEST_${N}_TODO}
		else
			eval skip=\${TEST_${N}_SKIP}
		fi
		msg=
		if [ ! -z "$fail" ]; then
			msg="${msg}not "
		fi
		msg="${msg}ok ${N} ${SUBDIR}/${N}"
		if [ ! -z "$fail" -o ! -z "$todo" -o ! -z "$skip" ]; then
			msg="${msg} # "
		fi
		if [ ! -z "$skip" ] ; then
			msg="${msg}skip ${skip}; "
		fi
		if [ ! -z "$todo" ] ; then
			msg="${msg}TODO ${todo}; "
		fi
		if [ ! -z "$fail" ] ; then
			msg="${msg}reason: ${fail}"
		fi
		echo ${msg}
		N=$((N + 1))
	done
}

#
# Check whether the test result is the same as the expected result.
# Returns 0 if they match and non-zero otherwise.
#
# $1	Input file (stdout, stderr or status)
# $2	Test number
#
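# For example, 'do_compare stdout 1' checks ${OUTPUT_DIR}/stdout.1
# against ${SRC_DIR}/expected.stdout.1. The program name in the output
# is rewritten to plain 'make:' first, so that the expected files do
# not depend on the name of the make binary under test.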
do_compare()
{
	local EXPECTED RESULT
	EXPECTED="${SRC_DIR}/expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	if [ -f $EXPECTED ]; then
		sed -e "s,^$(basename $MAKE_PROG):,make:," $RESULT | \
		    diff -u $EXPECTED -
		return $?
	else
		return 1	# FAIL
	fi
}

#
# Diff current and expected results
#
eval_diff()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			do_diff stdout ${N}
			do_diff stderr ${N}
			do_diff status ${N}
		fi
		N=$((N + 1))
	done
}

#
# Show the differences between the actual and the expected result.
#
# $1	Input file (stdout, stderr or status)
# $2	Test number
#
do_diff()
{
	local EXPECTED RESULT
	EXPECTED="${SRC_DIR}/expected.$1.$2"
	RESULT="${OUTPUT_DIR}/$1.$2"

	echo diff -u $EXPECTED $RESULT
	if [ -f $EXPECTED ]; then
		diff -u $EXPECTED $RESULT
	else
		echo "${EXPECTED} does not exist"
	fi
}

#
# Update the expected results with the current results. Note that the
# expected.* files are written to the current directory, so this is
# meant to be run from the test's source directory.
#
eval_update()
{
	ensure_workdir
	ensure_run

	if [ -z "${TEST_N}" ] ; then
		TEST_N=1
	fi

	N=1
	while [ ${N} -le ${TEST_N} ] ; do
		if ! skip_test ${N} ; then
			cp ${OUTPUT_DIR}/stdout.${N} expected.stdout.${N}
			cp ${OUTPUT_DIR}/stderr.${N} expected.stderr.${N}
			cp ${OUTPUT_DIR}/status.${N} expected.status.${N}
		fi
		N=$((N + 1))
	done
}

#
# Print description
#
eval_desc()
{
	echo "${SUBDIR}: ${DESC}"
}

#
# Run the full test: setup + run + compare.
#
eval_test()
{
	eval_setup
	eval_run
	eval_compare
}

#
# Run the test for prove(1): setup + run + compare + clean.
#
eval_prove()
{
	eval_setup
	eval_run
	eval_compare
	eval_clean
}

#
# Main function. Execute the command(s) given on the command line.
#
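# Typical invocations from a test's source directory (the script name
# and the path to the make binary are hypothetical):
#
#	sh test.t			# same as 'sh test.t prove'
#	sh test.t setup run compare clean
#	MAKE_PROG=/usr/obj/make sh test.t test
#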
eval_cmd()
{
	if [ $# -eq 0 ] ; then
		# If no arguments are given, default to 'prove'.
		set -- prove
	fi

	if ! make_is_fmake ; then
		for i in $(jot ${TEST_N:-1}) ; do
			eval TEST_${i}_SKIP=\"make is not fmake\"
		done
	fi

	for i
	do
		case $i in

		setup | run | compare | diff | clean | reset | show | \
		test | prove | desc | update)
			eval eval_$i
			;;
		help | *)
			print_usage
			;;
		esac
	done
}

##############################################################################
#
# Main code
#

#
# Determine our sub-directory. Argh.
#
SRC_DIR=$(dirname $0)
SRC_BASE=$(cd ${SRC_DIR} ; while [ ! -f common.sh ] ; do cd .. ; done ; pwd)
SUBDIR=$(echo ${SRC_DIR} | sed "s@${SRC_BASE}/@@")
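# For example, a test script in a (hypothetical) subdirectory
# archives/fmake of the tests tree ends up with SRC_BASE set to the
# directory containing common.sh and SUBDIR set to "archives/fmake";
# SUBDIR then names both the working directory and the TAP test cases.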

#
# Construct working directory
#
WORK_DIR=$(pwd)/work/${SUBDIR}
OUTPUT_DIR=${WORK_DIR}.OUTPUT

#
# Make to use
#
MAKE_PROG=${MAKE_PROG:-/usr/bin/make}