#!/bin/sh

# uncomment and change this to limit builds, e.g.,
#export BOARDS="samr21-xpro native"
# and / or
#export APPS="examples/hello-world tests/unittests"

QUICKBUILD_BOARDS="
    adafruit-itsybitsy-m4
    atmega256rfr2-xpro
    esp32-wroom-32
    esp32s3-devkit
    frdm-k64f
    hifive1b
    msb-430
    msba2
    native
    nrf52840dk
    qn9080dk
    samr21-xpro
    stk3200
    stm32f429i-disc1"

# this configures boards that are available via pifleet
case "${CI_MURDOCK_PROJECT}" in
    riot)
        : ${TEST_BOARDS_AVAILABLE:="esp32-wroom-32 samr21-xpro"}
        ;;
    riot-staging)
        : ${TEST_BOARDS_AVAILABLE:=""}
        ;;
    *)
        : ${TEST_BOARDS_AVAILABLE:=""}
        ;;
esac

#: ${EMULATED_BOARDS_AVAILABLE:="microbit"}

# temporarily disabling llvm builds until https://github.com/RIOT-OS/RIOT/pull/15595
# is in
: ${TEST_BOARDS_LLVM_COMPILE:="iotlab-m3 native nrf52dk mulle nucleo-f401re samr21-xpro slstk3402a"}

: ${TEST_WITH_CONFIG_SUPPORTED:="examples/suit_update tests/drivers/at86rf2xx_aes"}

export RIOT_CI_BUILD=1
export CC_NOCOLOR=1
export STATIC_TESTS=0
export CFLAGS_DBG=""
export DLCACHE_DIR=${DLCACHE_DIR:-~/.dlcache}
export ENABLE_TEST_CACHE=${ENABLE_TEST_CACHE:-1}
export MURDOCK_REDIS_HOST=${MURDOCK_REDIS_HOST:-127.0.0.1}

NIGHTLY=${NIGHTLY:-0}

check_label() {
    local label="${1}"
    [ -z "${CI_PULL_LABELS}" ] && return 1
    echo "${CI_PULL_LABELS}" | grep -q "${label}"
    return $?
}
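
# Example (illustrative): with CI_PULL_LABELS containing the PR's labels, e.g.
# "CI: run tests", `check_label "CI: run tests"` returns 0, while
# `check_label "CI: full build"` returns 1.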

# true if "$2" starts with "$1", false otherwise
startswith() {
    case "${2}" in
        ${1}*) true ;;
        *) false ;;
    esac
}
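
# Example (illustrative): `startswith "gh-readonly-queue/" "gh-readonly-queue/master/pr-123"`
# returns 0, `startswith "gh-readonly-queue/" "master"` returns 1. Note that "$1"
# is used as a shell pattern, so glob characters in it are significant.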

# this function returns 0 when this build is building a merge queue branch.
is_merge_queue_build() {
    startswith "gh-readonly-queue/" "${CI_BUILD_BRANCH}"
}

# fullbuild logic
# non-full-builds are those where can_fast_ci_run might reduce the build
# automatically
if [ -z "${FULL_BUILD}" ]; then
    if [ "${NIGHTLY}" = 1 ]; then
        FULL_BUILD=1
    elif check_label "CI: full build"; then
        # full build if requested by label
        FULL_BUILD=1
    else
        FULL_BUILD=0
    fi
    export FULL_BUILD
fi


# quickbuild logic
# a "quickbuild" is only building a representative subset of all build
# configurations.
if [ -z "${QUICK_BUILD}" ]; then
    export QUICK_BUILD=0
    if is_merge_queue_build; then
        # always do full build for merge queue branches
        true
    elif [ ${FULL_BUILD} -eq 1 ]; then
        # full build if building nightly or full build requested by label
        true
    else
        export QUICK_BUILD=1
    fi
fi

# This is a workaround for a bug in CCACHE which interacts very badly with
# some features of RIOT and of murdock. The result is that ccache is
# ineffective (i.e. objects are never reused, resulting in an extreme cache miss
# rate) and murdock becomes slow.
#
# - CCACHE thinks that -gz by itself enables debugging, which is not true.
#   see https://github.com/ccache/ccache/issues/464
# - When debug info is included, CCACHE hashes the file paths, as these
#   influence the debug information (the name of compile units and/or their
#   "comp_dir" attribute)
# - RIOT does not set -fdebug-prefix-map. This is not that easy as it may not
#   be supported in every toolchain (some are quite old).
# - Murdock builds PRs in different directories each time.
#
# It is only the combination of these factors which causes this bug.
export OPTIONAL_CFLAGS_BLACKLIST="-gz"
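
# Sketch of the alternative mentioned above (not enabled here, since older
# toolchains may lack the flag): mapping the checkout path to a fixed prefix
# would make debug info path-independent and ccache-friendly, e.g.
#
#   CFLAGS += -fdebug-prefix-map=$(RIOTBASE)=/riot
#
# The variable (RIOTBASE) and target prefix are illustrative assumptions.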

DWQ_ENV="-E BOARDS -E APPS -E NIGHTLY -E RUN_TESTS -E ENABLE_TEST_CACHE
    -E TEST_HASH -E CI_PULL_LABELS -ECI_BASE_BRANCH -ECI_BASE_COMMIT
    -EPKG_USE_MIRROR -EAPPS_CHANGED -EBOARDS_CHANGED -ESTATIC_TESTS
    -E CI_MURDOCK_PROJECT -EFULL_BUILD -EQUICK_BUILD"

if [ ${NIGHTLY} -eq 1 ]; then
    export PKG_USE_MIRROR=0
fi

CFCR_ARGS="--upstreambranch ${CI_BASE_COMMIT}"

# if RUN_TESTS is unset (e.g., not passed from the outside),
# set to 1 if NIGHTLY=1 or if the label "CI: run tests" is set,
# otherwise set 0.
if [ -z "$RUN_TESTS" ]; then
    if [ "$NIGHTLY" = "1" ] || check_label "CI: run tests" ; then
        RUN_TESTS=1
    else
        RUN_TESTS=0
    fi
fi

[ "$ENABLE_TEST_CACHE" = "1" ] && {
    check_label "CI: disable test cache" && export ENABLE_TEST_CACHE=0
}

error() {
    echo "$@"
    exit 1
}

# if MURDOCK_HOOK is set, this function will execute it and pass on all its
# parameters. Should the hook script exit with a non-zero exit code, hook()
# makes this script exit with an error, too.
# hook() will be called from different locations of this script.
# currently, the only caller is "run_test", which calls "hook run_test_pre".
# More hooks will be added as needed.
hook() {
    if [ -n "${MURDOCK_HOOK}" ]; then
        echo "- executing hook $1"
        "${MURDOCK_HOOK}" "$@" || {
            error "$0: hook \"${MURDOCK_HOOK} $@\" failed!"
        }
        echo "- hook $1 finished"
    fi
}
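
# Example (illustrative; the hook script path is an assumption): with
# MURDOCK_HOOK=dist/tools/ci/murdock_hook.sh exported, "hook run_test_pre"
# runs that script with "run_test_pre" as its first argument and aborts the
# build if the script fails.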

# true if word "$1" is in list of words "$2", false otherwise
# uses grep -w, thus only alphanum and "_" count as word boundaries
# (word "def" matches "abc-def")
is_in_list() {
    [ $# -ne 2 ] && return 1

    local needle="$1"
    local haystack="$2"

    echo "$haystack" | grep -q -w "$needle"
}
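
# Examples (illustrative):
#   is_in_list "native" "msba2 native samr21-xpro"   # -> 0 (true)
#   is_in_list "nrf52"  "nrf52dk nrf52840dk"         # -> 1 (no whole-word match)
#   is_in_list "def"    "abc-def"                    # -> 0 ("-" is a word boundary)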

# grep that doesn't return error on empty input
_grep() {
    grep "$@"
    true
}

_greplist() {
    if [ $# -eq 0 ]; then
        echo cat
    else
        echo -n "_grep -E ($1"
        shift
        for i in $*; do
            echo -n "|$i"
        done
        echo ")"
    fi
}
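
# _greplist emits the name of a filter command which callers expand and run via
# $(...): `$(_greplist native msba2)` filters stdin through `_grep -E (native|msba2)`,
# while `$(_greplist)` with no arguments falls back to a plain `cat` (no filtering).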

# get list of all app directories
get_apps() {
    make -f makefiles/app_dirs.inc.mk info-applications \
        | $(_greplist $APPS) | sort
}

# take app dir as parameter, print all boards that are supported
# Only print for boards in $BOARDS.
get_supported_boards() {
    local appdir=$1
    local only_changed=0

    if [ -n "$APPS_CHANGED" ]; then
        if is_in_list "$appdir" "${APPS_CHANGED}"; then
            # this app has changed -> build for all boards
            true
        else
            # this is not a changed app -> build for changed boards
            only_changed=1
        fi
    else
        if [ -n "$BOARDS_CHANGED" ]; then
            # no changed apps, some changed boards -> build for changed boards
            only_changed=1
        else
            # no changed apps list, no changed boards list -> build all boards
            true
        fi
    fi

    BOARDS_=${BOARDS}
    if [ $only_changed -eq 1 ]; then
        # if can_fast_ci_run figured out changes to specific boards,
        # only consider those
        export BOARDS="$BOARDS_CHANGED"
    elif [ -z "${BOARDS}" -a ${QUICK_BUILD} -eq 1 ]; then
        # quickbuild board filter is applied here
        export BOARDS="${QUICKBUILD_BOARDS}"
    fi

    local boards="$(make --no-print-directory -C$appdir info-boards-supported 2>/dev/null || echo broken)"

    export BOARDS="${BOARDS_}"

    if [ "$boards" = broken ]; then
        echo "makefile_broken"
        return
    fi

    for board in $boards
    do
        echo $board
    done | $(_greplist $BOARDS)
}
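
# Output is one supported board name per line, filtered by $BOARDS, e.g.
# (illustrative):
#   native
#   samr21-xpro
# or the single word "makefile_broken" if the application's Makefile errors out.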

get_supported_toolchains() {
    local appdir=$1
    local board=$2
    local toolchains="gnu"

    if is_in_list "${board}" "${TEST_BOARDS_LLVM_COMPILE}"; then
        toolchains="$(make -s --no-print-directory -C${appdir} BOARD=${board} \
            info-toolchains-supported 2> /dev/null | grep -o -e "llvm" -e "gnu")"
    fi
    echo "${toolchains}"
}

# given an app dir as parameter, print "$appdir $board:$toolchain" for each
# supported board and toolchain. Only print for boards in $BOARDS.
# if extra args are given, they will be prepended to each output line.
get_app_board_toolchain_pairs() {
    local appdir=$1
    local boards="$(get_supported_boards $appdir)"

    # collect extra arguments into prefix variable
    shift
    local prefix="$*"

    if [ "$boards" = makefile_broken ]; then
        echo "$0 error \"error: ${DWQ_WORKER}: get_supported_boards failed in $appdir\""
        return
    fi

    for board in ${boards}
    do
        for toolchain in $(get_supported_toolchains $appdir $board)
        do
            echo $prefix $appdir $board:$toolchain
        done
    done | $(_greplist $BOARDS)
}
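
# Example output line (illustrative), with "$0 compile" passed as prefix by
# get_compile_jobs below:
#   ./.murdock compile examples/hello-world samr21-xpro:gnu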

# use dwqc to create full "appdir board toolchain" compile job list
get_compile_jobs() {
    check_label "CI: skip compile test" && return
    update_changed_modules || return

    get_apps | \
        maybe_filter_changed_apps | \
        dwqc ${DWQ_ENV} --queue default-first -s \
            ${DWQ_JOBID:+--subjob} \
            "$0 get_app_board_toolchain_pairs \${1} $0 compile"
}

print_worker() {
    [ -n "$DWQ_WORKER" ] && \
        echo "-- running on worker ${DWQ_WORKER} thread ${DWQ_WORKER_THREAD}, build number $DWQ_WORKER_BUILDNUM."
}

test_hash_calc() {
    local bindir=$1

    # Why cut twice?
    # "test-input-hash.sha1" contains one "<hash> <filename>" entry per line.
    # We need to filter out the filename, as it contains the full path name,
    # which differs depending on the build machine.
    #
    # After piping through sha1sum, we get "<hash> -". " -" has to go so we save the
    # hassle of escaping the resulting hash.

    cat ${bindir}/test-input-hash.sha1 | cut -f1 -d' ' | sha1sum | cut -f1 -d' '
}
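
# Example (illustrative): given a test-input-hash.sha1 such as
#   3f2a... /tmp/build/app.elf
#   9b01... /tmp/build/riotbuild.h
# only the hash column is fed to sha1sum, so identical binaries built in
# different directories yield the same test hash.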

test_cache_get() {
    test "${ENABLE_TEST_CACHE}" = "1" || return 1
    test -n "$(redis-cli -h ${MURDOCK_REDIS_HOST} get $1)" > /dev/null
}

test_cache_put() {
    redis-cli -h ${MURDOCK_REDIS_HOST} set "$1" ok
}
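
# The test result cache is a plain key/value store in redis: test_cache_put
# stores the value "ok" under the test hash, and test_cache_get succeeds if
# any value exists for that hash (and test caching is enabled).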

# compile one app for one board with one toolchain. delete intermediates.
compile() {
    local appdir=$1
    local board=$(echo $2 | cut -f 1 -d':')
    local toolchain=$(echo $2 | cut -f 2 -d':')

    [ "$board" = "makefile_broken" ] && {
        echo "$0: There seems to be a problem in \"$appdir\" while getting supported boards!"
        echo "$0: testing \"make -C$appdir info-boards-supported\"..."
        make -C$appdir info-boards-supported && echo "$0: success. no idea what's wrong." || echo "$0: failed!"
        exit 1
    }

    # set build directory. CI ensures only one build at a time in $(pwd).
    export BINDIR="$(pwd)/build"
    export PKGDIRBASE="${BINDIR}/pkg"

    # Pre-build cleanup
    rm -rf ${BINDIR}

    print_worker

    # sanity checks
    [ $# -ne 2 ] && error "$0: compile: invalid parameters (expected \$appdir \$board:\$toolchain)"
    [ ! -d "$appdir" ] && error "$0: compile: error: application directory \"$appdir\" doesn't exist"

    # use our checkout as ccache temporary folder.
    # On CI, this is a tmpfs, so using that instead of the `/cache/.ccache` volume
    # reduces disk IO.
    export CCACHE_TEMPDIR="$(pwd)/.ccache/tmp"

    BOARD=${board} make -C${appdir} clean
    CCACHE_BASEDIR="$(pwd)" BOARD=$board TOOLCHAIN=$toolchain RIOT_CI_BUILD=1 \
        make -C${appdir} all test-input-hash -j${JOBS:-4}
    RES=$?

    # test hash is used to cache test results
    test_hash=$(test_hash_calc "$BINDIR")

    # run tests
    if [ $RES -eq 0 ]; then
        if is_in_list "$board" "$EMULATED_BOARDS_AVAILABLE"; then
            EMULATED=1
        else
            EMULATED=0
        fi

        if [ $RUN_TESTS -eq 1 -o "$board" = "native" -o "$EMULATED" = "1" ]; then
            if [ -f "${BINDIR}/.test" ]; then
                if [ "$board" = "native" -o "${EMULATED}" = "1" ]; then
                    EMULATE=${EMULATED} BOARD=$board make -C${appdir} test
                    RES=$?
                elif is_in_list "$board" "$TEST_BOARDS_AVAILABLE"; then
                    echo "-- test_hash=$test_hash"
                    if test_cache_get $test_hash; then
                        echo "-- skipping test due to positive cache hit"
                    else
                        BOARD=$board TOOLCHAIN=$toolchain TEST_HASH=$test_hash \
                            make -C${appdir} test-murdock
                        RES=$?
                    fi
                fi
            fi
        fi
    fi

    # log some build stats
    if [ -d ${BINDIR} ]
    then
        echo "{\"build/\": $(du -s ${BINDIR} | cut -f1)}"
        # cleanup
        rm -rf ${BINDIR}
    fi

    return $RES
}

test_job() {
    local appdir=$1
    local board=$(echo $2 | cut -f 1 -d':')
    local toolchain=$(echo $2 | cut -f 2 -d':')

    # interpret any extra arguments as file names.
    # They will be sent along with the job to the test worker
    # and stored in the application's binary folder.
    shift 2
    local files=""
    for filename in "$@"; do
        # check if the file is within $(BINDIR)
        if startswith "${BINDIR}" "${filename}"; then
            # get path relative to $(BINDIR)
            local relpath="$(realpath --relative-to ${BINDIR} ${filename})"
        else
            error "$0: error: extra test files not within \${BINDIR}!"
        fi

        # set remote file path.
        # currently, the test workers build in the default build path.
        local remote_bindir="${appdir}/bin/${board}"
        files="${files} --file ${filename}:${remote_bindir}/${relpath}"
    done

    dwqc \
        ${DWQ_ENV} \
        ${DWQ_JOBID:+--subjob} \
        --queue ${TEST_QUEUE:-$board} \
        --maxfail 1 \
        $files \
        "./.murdock run_test $appdir $board:$toolchain"
}

run_test() {
    local appdir=$1
    local board=$(echo $2 | cut -f 1 -d':')
    local toolchain=$(echo $2 | cut -f 2 -d':')
    print_worker
    echo "-- executing tests for $appdir on $board (compiled with $toolchain toolchain):"
    hook run_test_pre

    # do flashing and building of termdeps simultaneously
    BOARD=$board TOOLCHAIN=${toolchain} make -C$appdir flash-only termdeps -j2

    # now run the actual test
    if is_in_list "${appdir}" "${TEST_WITH_CONFIG_SUPPORTED}"; then
        BOARD=${board} TOOLCHAIN=${toolchain} make -C${appdir} test-with-config
    else
        BOARD=$board TOOLCHAIN=${toolchain} make -C$appdir test
    fi
    RES=$?

    if [ $RES -eq 0 -a -n "$TEST_HASH" ]; then
        echo -n "-- saving test result to cache: "
        test_cache_put $TEST_HASH
    fi

    return $RES
}

basename_list () {
    for path in $*; do
        basename $path
    done
}
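
# Example (illustrative): `basename_list "boards/native boards/msba2"` prints
#   native
#   msba2
# This strips the "boards/" prefix from can_fast_ci_run.py's output below.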

# calls out to can_fast_ci_run.py.
#
# returns 1 if nothing should be built.
# otherwise returns 0, potentially modifying BOARDS, APPS, BOARDS_CHANGED, APPS_CHANGED.
update_changed_modules() {
    # early out if there's no base commit info
    if [ -z "${CI_BASE_COMMIT}" ]; then
        return 0
    fi

    # early out if a full build is requested
    if [ $FULL_BUILD -eq 1 ]; then
        return 0
    fi

    # if these are set, just build what's requested.
    if [ -n "$BOARDS" -o -n "$APPS" ]; then
        return 0
    fi

    # do full build if requested by label
    check_label "CI: full build" && return 0

    # call out to can_fast_ci_run.py
    # that script will output e.g.,
    # APPS_CHANGED="foo/bar foo/barfoo"
    # BOARDS_CHANGED="foo/bar foo/barfoo"
    # just eval that output, so we set those variables directly in this shell.
    eval $(dist/tools/ci/can_fast_ci_run.py \
        ${CFCR_ARGS} \
        --changed-boards --changed-apps \
        2>/dev/null || echo CFCR_ERROR=1)

    # if this errors, it means we cannot skip any builds.
    if [ -n "$CFCR_ERROR" ]; then
        return 0
    fi

    # if can_fast_ci_run exits 0 but doesn't output any changed apps or boards,
    # it means we can skip all compile jobs.
    if [ -z "$APPS_CHANGED" -a -z "$BOARDS_CHANGED" ]; then
        return 1
    fi

    # can_fast_ci_run.py outputs "boards/fooboard", but the rest of this script
    # expects a board name without the leading "boards/".
    if [ -n "$BOARDS_CHANGED" ]; then
        export BOARDS_CHANGED="$(basename_list $BOARDS_CHANGED)"
    fi

    # if only boards changed (no apps), filter by just setting BOARDS;
    # if only apps changed (no boards), filter by just setting APPS.
    if [ -z "$APPS_CHANGED" -a -n "$BOARDS_CHANGED" ]; then
        export BOARDS="$(echo $BOARDS_CHANGED | $(_greplist ${BOARDS}))"
        unset BOARDS_CHANGED
    elif [ -n "$APPS_CHANGED" -a -z "$BOARDS_CHANGED" ]; then
        export APPS="$(echo $APPS_CHANGED | $(_greplist ${APPS}))"
        unset APPS_CHANGED
    fi

    export APPS_CHANGED
    export BOARDS_CHANGED
}

maybe_filter_changed_apps() {
    # if no boards have changes, only a subset of the apps need to be built.
    # else, all apps need to be passed, as they'll need to be built for
    # the changed boards.
    if [ -n "$BOARDS_CHANGED" ]; then
        cat
    else
        $(_greplist $APPS_CHANGED)
    fi
}

# execute static tests
static_tests() {
    print_worker
    build_filter_status

    [ "$STATIC_TESTS" = "1" ] && \
        ./dist/tools/ci/static_tests.sh
    true
}

build_filter_status() {
    echo "--- can_fast_ci_run:"
    if [ $FULL_BUILD -eq 1 ]; then
        echo "--- doing full build."
        return
    fi

    dist/tools/ci/can_fast_ci_run.py ${CFCR_ARGS} --explain --json --changed-boards --changed-apps
    if [ -n "$MURDOCK_TEST_CHANGE_FILTER" ]; then
        echo MURDOCK_TEST_CHANGE_FILTER=$MURDOCK_TEST_CHANGE_FILTER
    fi

    echo ""
    update_changed_modules

    if [ -n "$CFCR_ERROR" ]; then
        echo "-- can_fast_ci_run.py exited non-zero"
    fi
}

get_non_compile_jobs() {
    echo "$0 static_tests"
}

get_jobs() {
    get_non_compile_jobs
    get_compile_jobs
}
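
# Dispatch: the script's arguments are executed as a command, so each function
# doubles as a CLI entry point, e.g. (illustrative):
#   ./.murdock get_jobs
#   ./.murdock compile examples/hello-world samr21-xpro:gnu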
$*