Commit c29d75a1 authored by Charles Ferguson

Add support for common directories for artifacts. Merge JUnit files.

We now have common directories for the artifacts produced, the logs
we generate and the environment files that we use. These fixed
directories mean that an automation system can more readily rely on
the locations provided.
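
As an illustration, these locations are driven by the usual 'key: value'
entries in project.config; the values shown here are hypothetical:

    environment_dir: .env
    artifact_dir: build/artifacts
    log_dir: build/logs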

We have a tidier mechanism for reporting the results in XML, by
writing multiple files, and then merging them. We could still do a
lot more with this, but it works pretty well right now.
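
As a sketch, the per-run files are merged with the new junit-xml script;
the paths here are illustrative:

    junit-xml --output artifacts/test-results.xml \
        artifacts/test-results-0.xml artifacts/test-results-1.xml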

This change covers all the components, but so far it has only really
been tested with the Python code.
parent 3b7c0a3f
......@@ -50,12 +50,12 @@ function config() {
local key="$1"
local default="${2:-}"
if [[ ! -f "${root}/project.config" ]] ; then
if [[ ! -f "${config_file}" ]] ; then
# No configuration file
echo "${default}"
elif grep -q "^${key}: \?" "${root}/project.config" ; then
elif grep -q "^${key}: \?" "${config_file}" ; then
# Key found, so we can return it
grep "^${key}: \?" "${root}/project.config" | sed "s/^${key}: *//" || true
grep "^${key}: \?" "${config_file}" | sed "s/^${key}: *//" || true
else
# Key not found in configuration file
echo "${default}"
......@@ -220,7 +220,20 @@ function report_file() {
# @param $2 message to display
function report_failure() {
local filename="$1"
local message="$2"
local message="${2:-FAILED}"
echo " $filename: $message"
}
##
# Report that we have succeeded
#
# @param $1 filename being looked at
# @param $2 message to display
function report_success() {
local filename="$1"
local message="${2:-Success}"
echo " $filename: $message"
}
......@@ -301,7 +314,37 @@ function all_setup_docs() {
}
##
# Merge all the test result files into a single file,
# and clear out the original files.
function merge_test_results() {
local n=0
local test_files=()
local tf
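# Each test run writes its results to 'test-results-<n>.xml' in the
# artifact directory (see the per-language run functions); collect
# whichever of those files actually exist.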
while [[ "$n" -lt "$tests_total" ]] ; do
tf="${artifact_dir}/test-results-$n.xml"
if [[ -f "$tf" ]] ; then
test_files+=("$tf")
fi
n=$(( n + 1 ))
done
# Merge all those files together
if [[ "${#test_files[@]}" == 0 ]]; then
echo "No JUnit XML files generated"
else
echo "Merging ${#test_files[@]} files into '${artifact_dir}/test-results.xml'"
"${scripts}/junit-xml" --output "${artifact_dir}/test-results.xml" "${test_files[@]}"
fi
# And clear up the files that we now no longer need.
if [[ "${#test_files[@]}" != 0 ]] ; then
rm "${test_files[@]}"
fi
}
function all_end_test() {
merge_test_results
if [[ "$tests_failed" != "0" ]] ; then
echo "<<< Tests FAILED ($tests_failed test files failed, $tests_passed test files passed)"
exit 1
......@@ -312,6 +355,9 @@ function all_end_test() {
}
function all_end_coverage() {
# If the file writing fails, it isn't fatal
( merge_test_results || true )
echo "Overall Coverage: ${coverage_percentage}% (limit is ${coverage_limit}%)"
if [[ "${coverage_percentage}" != '<unknown>' && \
"${coverage_percentage%.*}" -lt "${coverage_limit}" ]] ; then
......@@ -512,6 +558,16 @@ function run_action() {
exit 1
fi
# Ensure the directories we'll use are present and (where necessary)
# cleaned.
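# The ':-..._UNSET' fallbacks mean that an unset variable expands to a
# harmless, non-existent name, rather than letting 'rm -rf' act on an
# empty (and therefore dangerous) path.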
if [[ "$(uname -s)" == 'Darwin' ]] ; then
# On OSX, '--one-file-system' does not exist.
rm -rf "${artifact_dir:-ARTIFACT_DIR_UNSET}" "${log_dir:-LOG_DIR_UNSET}"
else
rm -rf --one-file-system "${artifact_dir:-ARTIFACT_DIR_UNSET}" "${log_dir:-LOG_DIR_UNSET}"
fi
mkdir -p "${environment_dir}" "${artifact_dir}" "${log_dir}"
"all_setup_${action}"
for lang in $languages ; do
if active "$lang" "$action" ; then
......@@ -534,6 +590,13 @@ function run_action() {
fi
done
# Remove any directories we did not use
for dir in "${environment_dir}" "${artifact_dir}" "${log_dir}" ; do
if [[ -d "$dir" ]] ; then
rmdir "$dir" 2> /dev/null || true
fi
done
# Report the results of the action, and call exit appropriately.
"all_end_${action}"
}
......@@ -547,3 +610,24 @@ for file in $(expand_filenames "${scripts}"'/common.*') ; do
done
actions="test coverage lint docs"
# General configuration for all tools.
config_file="${root}/project.config"
# The 'environment' directory is where we store the necessary tools and
# resources to make it possible to isolate the actions.
export environment_dir="${root:-ROOT_UNSET}/$(config environment_dir '.env')"
# The 'artifact' directory is where the output of the action is stored.
# Results of tests, or built objects, would go here.
# This is the content that an automation system might choose to preserve.
export artifact_dir="${root:-ROOT_UNSET}/$(config artifact_dir 'artifacts')"
# The 'log' directory is where files related to the action, but not its
# direct output, are placed: transient logs, and data used as part of
# the operation, go here.
# This content might be used for diagnosing problems, but need not be
# retained beyond the lifetime of the action.
export log_dir="${root:-ROOT_UNSET}/$(config log_dir 'ci-logs')"
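# For example, with root set to /work/project and no overrides in
# project.config, these resolve to /work/project/.env,
# /work/project/artifacts and /work/project/ci-logs.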
......@@ -69,8 +69,11 @@ function perl_switch_lint_checks() {
#
# @param $@ CPAN requirements to install (either filenames, or names of modules preceded by a '+')
function perl_environment() {
"${scripts}/perl-env-setup" -e "${root}/perllib" "$@" < /dev/null
source "${scripts}/perl-env" -e "${root}/perllib"
# Taking input from /dev/null means that the input isn't a tty, so we avoid
# printing messages.
"${scripts}/perl-env-setup" -l "${log_dir}/cpan_output.txt" \
-e "${environment_dir}/perllib" "$@" < /dev/null
source "${scripts}/perl-env" -e "${environment_dir}/perllib"
}
function perl_tool_requirements() {
......@@ -127,18 +130,7 @@ function perl_setup_coverage() {
ignore_options="${ignore_options},+ignore_re,${pattern}"
done
export PERL5OPT="MDevel::Cover=-db,cover_db$ignore_options,-silent,1"
# Clear away anything left.
if [[ "$(uname -s)" == 'Darwin' ]] ; then
# On OSX, '--one-file-system' does not exist.
rm -rf cover_db test_results
else
rm -rf --one-file-system cover_db test_results
fi
# Ensure that the results directory exists.
mkdir test_results
export PERL5OPT="MDevel::Cover=-db,${log_dir}/cover_db$ignore_options,-silent,1"
}
function perl_setup_lint() {
......@@ -176,7 +168,7 @@ function perl_run_lint() {
message="+"
done
if ! perlcritic --profile perlcriticrc \
if ! perlcritic --profile "${root}/perlcriticrc" \
--color \
$(config -e lint_perl_files '*.pl *.pm') \
2>&1 | output_filter "Linting Perl files"; then
......@@ -197,18 +189,18 @@ function perl_process_coverage() {
echo '--- Generating HTML coverage report ---'
cover -report html_basic \
-outputdir test_results/coverage \
-outputdir "${artifact_dir}/perl-coverage" \
+ignore_re 'test-' \
cover_db > test_results/Coverage.txt
"${log_dir}/cover_db" > "${artifact_dir}/perl-coverage.txt"
echo '--- Generating text coverage report ---'
cover -report text \
-outputdir test_results/coverage \
-outputdir "${artifact_dir}/perl-coverage" \
+ignore_re 'test-' \
cover_db > test_results/CoverageFull.txt
"${log_dir}/cover_db" > "${artifact_dir}/perl-coveragefull.txt"
# And read the amount of code covered
coverage_percentage=$(grep Total test_results/Coverage.txt | awk 'END { print $3 }')
coverage_percentage=$(grep Total "${artifact_dir}/perl-coverage.txt" | awk 'END { print $3 }')
echo "Perl Coverage: ${coverage_percentage}%"
}
......
......@@ -108,9 +108,9 @@ function python_switch_any_3() {
# @param $@ Requirements files to install
function python_environment() {
# Set up and enter the Virtual env.
"${scripts}/python-env-setup" ${PYTHON_SWITCH} -e "${root}/${VENV_DIR}" \
"${scripts}/python-env-setup" ${PYTHON_SWITCH} -e "${environment_dir}/${VENV_DIR}" \
"$@" < /dev/null
source "${scripts}/python-env" -e "${root}/${VENV_DIR}"
source "${scripts}/python-env" -e "${environment_dir}/${VENV_DIR}"
}
function python_tool_requirements() {
......@@ -159,11 +159,6 @@ function python_setup_test() {
function python_setup_coverage() {
python_setup_test "$@"
# Clear out any previous coverage data.
if [[ -f .coverage ]] ; then
rm -f .coverage
fi
}
function python_setup_lint() {
......@@ -180,7 +175,9 @@ function python_run_test() {
local switches="$*"
for test in $(config -e test_python_files '*_test.py') ; do
if "$PYTHON_TOOL" "$test" -v $switches 2>&1 \
if "$PYTHON_TOOL" "$test" \
--with-xunit --xunit-file "${artifact_dir}/test-results-${tests_total}.xml" \
-v $switches 2>&1 \
| output_filter "Test '$test'..." ; then
tests_passed=$(( tests_passed + 1 ))
else
......@@ -192,12 +189,16 @@ function python_run_test() {
}
function python_run_coverage() {
python_run_test --with-coverage --cover-branches
# We need to set the 'COVERAGE_FILE' variable so that the output from the
# coverage is written to the log directory, rather than the root of the
# project. This is also needed when processing the results.
COVERAGE_FILE="${log_dir}/python-coverage" \
python_run_test --with-coverage --cover-branches
}
function python_run_lint() {
if ! pylint --reports no \
--rcfile pylintrc \
--rcfile "${root}/pylintrc" \
$(config -e lint_python_files '*.py') 2>&1 \
| output_filter "Linting Python files" ; then
lint_failed=$(( lint_failed+1 ))
......@@ -211,6 +212,7 @@ function python_run_docs() {
if ! "${scripts}/python-build-docs" --initial-tag "$(config version_git_tag none)" \
--major-version "$(config version 1.0)" \
--paths "$(config docs_python_files '*.py')" \
--output-dir "${artifact_dir}/python-docs" \
| output_filter ; then
docs_failed=$(( docs_failed+1 ))
else
......@@ -236,8 +238,22 @@ function python_process_coverage() {
ignore_options="${ignore_options},${pattern}"
done
coverage_percentage=$(coverage report --omit "${ignore_options}" \
| tail -1 \
# Generate the text report
COVERAGE_FILE="${log_dir}/python-coverage" \
coverage report --omit "${ignore_options}" \
> "${artifact_dir}/python-coverage.txt"
# And the HTML report
COVERAGE_FILE="${log_dir}/python-coverage" \
coverage html --dir "${artifact_dir}/python-coverage" \
--omit "${ignore_options}"
# And the XML report
COVERAGE_FILE="${log_dir}/python-coverage" \
coverage xml -o "${artifact_dir}/coverage.xml" \
--omit "${ignore_options}"
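# Pull the overall percentage from the final 'TOTAL' line of the text
# report; a (hypothetical) line such as 'TOTAL 1234 56 90%' yields 90.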
coverage_percentage=$(tail "${artifact_dir}/python-coverage.txt" \
| sed -n 's/^.*[^0-9]\([0-9][0-9\.]*\)%.*$/\1/ p' \
| awk 'END { print int($1) }')
echo "Python coverage: ${coverage_percentage}%"
......
#!/usr/bin/env python
"""
Process the JUnit XML files.
The intention of this script is to take a number of JUnit XML files, each
containing an arbitrary number of test suites, and put them all together
in a single file.
"""
import argparse
import sys
import xml.etree.ElementTree as ET
import copy
def expect_int(value):
"""
Expect an integer to be passed, but if it isn't, we'll return None rather than
generate exceptions.
"""
if value is not None:
try:
value = int(value)
except ValueError:
value = None
return value
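# For example: expect_int('3') returns 3; expect_int('3.5') and
# expect_int(None) both return None.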
def expect_float(value):
"""
Expect a float to be passed, but if it isn't, we'll return None rather than
generate exceptions.
"""
if value is not None:
try:
value = float(value)
except ValueError:
value = None
return value
def sum_or_none(iterable):
"""
Expect to be able to sum the values, but return 'None' if all the values were 'None'.
"""
total = None
try:
for value in iterable:
if value is not None:
if total is None:
total = value
else:
total += value
except (TypeError, ValueError):
total = None
return total
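# For example: sum_or_none([1, None, 2]) returns 3, while
# sum_or_none([None, None]) returns None.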
class TestXML(object):
def __init__(self, xmlfile=None):
if xmlfile is not None:
tree = ET.parse(xmlfile)
root = tree.getroot()
if root.tag == 'testsuites':
testsuites = root.findall('./testsuite')
self.suites = [TestSuite(suite_node) for suite_node in testsuites]
elif root.tag == 'testsuite':
self.suites = [TestSuite(root)]
else:
# Cannot interpret it, so giving up
self.suites = None
else:
self.suites = []
def __add__(self, other):
if not isinstance(other, TestXML):
raise TypeError("Cannot add a TestXML to a '%r'" % (other,))
new = TestXML(None)
new.suites = copy.deepcopy(self.suites)
new.suites.extend(other.suites)
return new
def __iadd__(self, other):
if not isinstance(other, TestXML):
raise TypeError("Cannot add a TestXML to a '%r'" % (other,))
self.suites.extend(other.suites)
return self
def xml(self):
root = ET.Element('testsuites')
# pylint: disable=C0326
n_tests = sum_or_none(suite.n_tests for suite in self.suites)
n_disabled = sum_or_none(suite.n_disabled for suite in self.suites)
n_skip = sum_or_none(suite.n_skip for suite in self.suites)
n_errors = sum_or_none(suite.n_errors for suite in self.suites)
n_failures = sum_or_none(suite.n_failures for suite in self.suites)
if n_tests is not None:
root.set('tests', str(n_tests))
if n_disabled is not None:
root.set('disabled', str(n_disabled))
if n_errors is not None:
root.set('errors', str(n_errors))
if n_skip is not None:
root.set('skipped', str(n_skip))
if n_failures is not None:
root.set('failures', str(n_failures))
for suite in self.suites:
root.append(suite.xml())
return ET.ElementTree(root)
class TestSuite(object):
"""
<testsuite name="" <!-- Full (class) name of the test for non-aggregated testsuite documents.
Class name without the package for aggregated testsuites documents. Required -->
tests="" <!-- The total number of tests in the suite, required. -->
disabled="" <!-- the total number of disabled tests in the suite. optional -->
errors="" <!-- The total number of tests in the suite that errored. An errored test is one that had
an unanticipated problem, for example an unchecked throwable; or a problem with the
implementation of the test. optional -->
failures="" <!-- The total number of tests in the suite that failed. A failure is a test which the code
has explicitly failed by using the mechanisms for that purpose. e.g., via an
assertEquals. optional -->
hostname="" <!-- Host on which the tests were executed. 'localhost' should be used if the hostname
cannot be determined. optional -->
id="" <!-- Starts at 0 for the first testsuite and is incremented by 1 for each following
testsuite -->
package="" <!-- Derived from testsuite/@name in the non-aggregated documents. optional -->
skipped="" <!-- The total number of skipped tests. optional -->
time="" <!-- Time taken (in seconds) to execute the tests in the suite. optional -->
timestamp="" <!-- when the test was executed in ISO 8601 format (2014-01-21T16:17:18).
Timezone may not be specified. optional -->
>
"""
def __init__(self, suite):
self.name = suite.attrib.get('name', None)
self.package = suite.attrib.get('package', None)
self.hostname = suite.attrib.get('hostname', None)
# id is actually a string
self.id = suite.attrib.get('id', None)
self.time = expect_float(suite.attrib.get('time', None))
self.timestamp = suite.attrib.get('timestamp', None)
# PHPunit also reports 'file'
self.file = suite.attrib.get('file', None)
# Counts, which might not be specified
self.n_tests = expect_int(suite.attrib.get('tests', None))
self.n_disabled = expect_int(suite.attrib.get('disabled', None))
self.n_errors = expect_int(suite.attrib.get('errors', None))
self.n_failures = expect_int(suite.attrib.get('failures', None))
# nose xunit generates 'skip'
# specs I've seen use 'skipped'
# nose2 junitxml documentation says it generates 'skips'
# nose2 junitxml implementation uses 'skipped'
# mocha junit generates 'skipped'
# tap-xunit generates 'skipped'
# xcpretty does not output skipped counts
# xmlrunner unittest xml reporting generates 'skipped'
self.n_skip = expect_int(suite.attrib.get('skip', suite.attrib.get('skipped', None)))
# PHPunit reports assertions
self.n_assertions = expect_int(suite.attrib.get('assertions', None))
self.testcases = suite.findall('./testcase')
self.testcases = copy.deepcopy(self.testcases)
if self.n_tests is None:
# We don't know the number of tests, so we will need to count them
# And we cannot determine which tests were disabled, as there is no
# indication of that in the file.
self.n_tests = (self.n_disabled or 0) + len(self.testcases)
if self.n_errors is None:
# We don't know the number of errors, so we will need to count them
self.n_errors = len(suite.findall('./testcase/error'))
if self.n_skip is None:
# We don't know the number of skipped tests, so we will need to count them
self.n_skip = len(suite.findall('./testcase/skipped'))
if self.n_failures is None:
# We don't know the number of failures, so we will need to count them
self.n_failures = len(suite.findall('./testcase/failure'))
self.system_out = suite.find('./system-out')
if self.system_out is not None:
self.system_out = copy.deepcopy(self.system_out)
self.system_err = suite.find('./system-err')
if self.system_err is not None:
self.system_err = copy.deepcopy(self.system_err)
self.properties = suite.find('./properties')
if self.properties is not None:
self.properties = copy.deepcopy(self.properties)
def xml(self):
suite = ET.Element('testsuite')
if self.n_tests is not None:
suite.set('tests', str(self.n_tests))
if self.n_skip is not None:
suite.set('skipped', str(self.n_skip))
if self.n_errors is not None:
suite.set('errors', str(self.n_errors))
if self.n_disabled is not None:
suite.set('disabled', str(self.n_disabled))
if self.n_failures is not None:
suite.set('failures', str(self.n_failures))
if self.n_assertions is not None:
suite.set('assertions', str(self.n_assertions))
if self.name is not None:
suite.set('name', self.name)
if self.file is not None:
suite.set('file', self.file)
if self.package is not None:
suite.set('package', self.package)
if self.hostname is not None:
suite.set('hostname', self.hostname)
if self.id is not None:
suite.set('id', self.id)
if self.time is not None:
suite.set('time', str(self.time))
if self.timestamp is not None:
suite.set('timestamp', self.timestamp)
if self.properties is not None:
suite.append(self.properties)
# NOTE: In the testcases, 'classname' is the field that is in the specification.
# PHPunit uses 'class'.
suite.extend(self.testcases)
if self.system_out is not None:
suite.append(self.system_out)
if self.system_err is not None:
suite.append(self.system_err)
return suite
def main():
usage = "junit-xml <options> {<junit-files>}*"
parser = argparse.ArgumentParser(usage=usage,
formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('--output', type=str,
help="Specify the JUnit output file", default='test-results.xml')
parser.add_argument('files', nargs='+',
help='JUnit files to merge together')
options = parser.parse_args()
txml = TestXML()
for xmlfile in options.files:
txml += TestXML(xmlfile)
xml = txml.xml()
xml.write(options.output, encoding='UTF-8', xml_declaration=True)
return 0
if __name__ == '__main__':
sys.exit(main())
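
For reference, the merged document written by this script has a single
'testsuites' root carrying the aggregated counts; a minimal sketch, with
hypothetical numbers:

    <?xml version='1.0' encoding='UTF-8'?>
    <testsuites tests="12" errors="0" failures="1" skipped="2">
        <testsuite name="example" tests="12" failures="1" skipped="2">...</testsuite>
    </testsuites>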
......@@ -4,7 +4,7 @@
#
_PERLLIB_SHELL=false
_PERLLIB_ENVDIR=perllib
_PERLLIB_ENVDIR=".env/perllib"
_PERLLIB_COMMAND=()
function _perllib_parse() {
......
......@@ -16,6 +16,9 @@ set -eo pipefail
# This is a Camel symbol, to indicate this is Perl.
environment_icon="🐪 "
# The logfile we will write to
log_file='output_cpan.txt'
##
# Convert a given path into one that starts from the root.
......@@ -75,6 +78,7 @@ Optional arguments:
-v verbose; be more noisy about what is being done
-e <env-dir> path to the directory in which to create the
environment (default 'perllib')
-l <log-file> path to the file into which the log is written
-h show this help message and exit
Positional arguments:
......@@ -254,29 +258,29 @@ function install_requirements() {
debug " Now required defs: ${required_defs[@]}"
done
echo "Ready to install CPAN modules:" >> output_cpan.txt
echo "Ready to install CPAN modules:" >> "${log_file}"
for module in "${required_defs[@]}" ; do
echo " $module" >> output_cpan.txt
echo " $module" >> "${log_file}"
done
echo $'\nInstalling modules...\n' >> output_cpan.txt
echo $'\nInstalling modules...\n' >> "${log_file}"
if [ "${#required_defs[@]}" != '0' ] ; then
if ! cpanm --skip-satisfied -n "${required_defs[@]}" >> output_cpan.txt 2>&1 ; then
if ! cpanm --skip-satisfied -n "${required_defs[@]}" >> "${log_file}" 2>&1 ; then
echo "CPAN installation failed" >&2
sed 's/^/ /' < output_cpan.txt
sed 's/^/ /' < "${log_file}"
exit 1
fi
fi
echo $'\nChecking installation...\n' >> output_cpan.txt
echo $'\nChecking installation...\n' >> "${log_file}"
failed=false
for module in "${required_defs[@]}" ; do
if ! check_module "${module}" 2>> output_cpan.txt; then
echo " $module was not installed properly" >> output_cpan.txt
if ! check_module "${module}" 2>> "${log_file}"; then