Add tests, include HDFS

Björn Busse 2021-03-06 22:37:01 +01:00
parent a7957b045a
commit 65c915a07b
4 changed files with 249 additions and 47 deletions

tests/hbase-setup.sh

@@ -2,46 +2,20 @@
set -ueo pipefail
HBASE_VERSION="2.4.1"
HBASE_FILE="hbase-${HBASE_VERSION}-bin.tar.gz"
HBASE_URL="https://downloads.apache.org/hbase/${HBASE_VERSION}/${HBASE_FILE}"
HBASE_VERSION="0.96.1.1"
HBASE_FILE="hbase-${HBASE_VERSION}-hadoop2-bin.tar.gz"
#HBASE_URL="https://downloads.apache.org/hbase/${HBASE_VERSION}/${HBASE_FILE}"
HBASE_URL="https://archive.apache.org/dist/hbase/hbase-${HBASE_VERSION}/${HBASE_FILE}"
HBASE_FILE_CKSUM="5afb643c2391461619516624168e042b42a66e25217a3319552264c6af522e3a21a5212bfcba759b7b976794648ef13ee7b5a415f33cdb89bba43d40162aa685"
HBASE_CONFIG="hbase/conf/hbase-site.xml"
HBASE_CONFIG_FILE="hbase/conf/hbase-site.xml"
HBASE_TEST_SUITE_EXECUTABLE="hbase/bin/hbase"
declare -a DEPS=("java")
check_dependencies() {
for i in "${DEPS[@]}"
do
if [[ -z $(which "${i}") ]]; then
error "Could not find ${i}"
exit 1
fi
done
}
source setup.sh
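# setup.sh (added below in this commit) provides the shared helpers used
# here: check_dependencies, compare_checksum, extract_archive and write_file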
prepare_hbase() {
if ! [ -f "$HBASE_TEST_SUITE_EXECUTABLE" ]; then
if [ -f "$HBASE_FILE" ]; then
CKSUM="$(sha512 -q ${HBASE_FILE})"
if [ "$CKSUM" = "$HBASE_FILE_CKSUM" ]; then
printf "HBase archive exists\n"
fi
else
printf "Downloading %s\n" "$1"
curl -LO "${1}"
fi
printf "Extracting HBase archive\n"
tar xfz ${HBASE_FILE}
mv -f hbase-${HBASE_VERSION} hbase/
fi
}
create_config() {
printf "Writing HBase config\n"
cat <<EOF > "$2"
create_hbase_config() {
# read -d '' returns non-zero at EOF, so guard it under set -ueo pipefail
read -r -d '' CONFIG <<EOF || true
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
@@ -59,8 +33,34 @@ create_config() {
</property>
</configuration>
EOF
echo "$CONFIG"
}
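# Download, verify, and unpack the HBase release unless hbase/bin/hbase is
# already present; a checksum mismatch aborts so a re-run can redownload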
prepare_hbase() {
    if ! [ -f "$HBASE_TEST_SUITE_EXECUTABLE" ]; then
        if [ -f "$HBASE_FILE" ]; then
            printf "HBase archive exists\n"
            if compare_checksum "$HBASE_FILE" "$HBASE_FILE_CKSUM"; then
                extract_archive "$HBASE_FILE" "$HBASE_VERSION"
                mv -f hbase-"${HBASE_VERSION}" hbase/
                return
            else
                printf "HBase archive has wrong checksum (%s)\n" "$HBASE_FILE"
                printf "Execute script again to redownload file\n"
                # Drop the corrupt archive so the next run actually redownloads it
                rm -f "$HBASE_FILE"
                exit 1
            fi
        fi
        printf "Downloading %s\n" "$1"
        curl -LO "${1}"
        if compare_checksum "$HBASE_FILE" "$HBASE_FILE_CKSUM"; then
            extract_archive "$HBASE_FILE" "$HBASE_VERSION"
            mv -f hbase-"${HBASE_VERSION}" hbase/
        else
            printf "Downloaded file has wrong checksum (%s)\n" "$HBASE_FILE"
            exit 1
        fi
    fi
}
check_dependencies
prepare_hbase "${HBASE_URL}"
create_config "/tmp" ${HBASE_CONFIG}
HBASE_CONFIG=$(create_hbase_config "/tmp")
write_file "${HBASE_CONFIG_FILE}" "${HBASE_CONFIG}"
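# The test suite below invokes this script from tests/ as ./hbase-setup.sh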

tests/hdfs-setup.sh Executable file

@@ -0,0 +1,103 @@
#!/usr/bin/env bash
set -ueo pipefail
HADOOP_VERSION="2.10.1"
HADOOP_FILE="hadoop-$HADOOP_VERSION.tar.gz"
HADOOP_URL="https://artfiles.org/apache.org/hadoop/common/hadoop-${HADOOP_VERSION}/${HADOOP_FILE}"
HADOOP_FILE_CKSUM="2460e02cd1f80dfed7a8981bbc934c095c0a341435118bec781fd835ec2ebdc5543a03d92d24f2ddeebdfe1c2c460065ba1d394ed9a73cbb2020b40a8d8b5e07"
HDFS_CONFIG_FILE="hadoop/etc/hadoop/hdfs-site.xml"
HDFS_CONFIG_FILE_CORE="hadoop/etc/hadoop/core-site.xml"
HDFS_CONFIG_FILE_MAPRED="hadoop/etc/hadoop/mapred-site.xml"
HDFS_TEST_SUITE_EXECUTABLE="hadoop/bin/hdfs"
source setup.sh
declare -a DEPS=("java")
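# Three config generators follow, one per Hadoop config file: core-site.xml
# (default filesystem URI), mapred-site.xml (job tracker address) and
# hdfs-site.xml (namenode/datanode storage directories)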
create_hdfs_core_config() {
#printf "Writing HDFS core-site.xml config\n"
read -r -d '' CONFIG <<EOF || true
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://$1</value>
</property>
</configuration>
EOF
echo "$CONFIG"
}
create_hdfs_mapred_config() {
#printf "Writing HDFS mapred-site.xml config\n"
read -r -d '' CONFIG <<EOF || true
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>mapreduce.jobtracker.address</name>
<value>$1</value>
</property>
</configuration>
EOF
echo "$CONFIG"
}
create_hdfs_config() {
#printf "Writing HDFS hdfs-site.xml config\n"
read -r -d '' CONFIG <<EOF || true
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.defaultFS</name>
<value>hdfs://$1</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>/tmp/hdfs/namenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>/tmp/hdfs/datanode</value>
</property>
</configuration>
EOF
echo "$CONFIG"
}
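# Same download-verify-extract flow as prepare_hbase in hbase-setup.sh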
prepare_hadoop() {
    if ! [ -f "$HDFS_TEST_SUITE_EXECUTABLE" ]; then
        printf "Setting up Hadoop\n"
        if [ -f "$HADOOP_FILE" ]; then
            printf "Hadoop archive exists\n"
            if compare_checksum "$HADOOP_FILE" "$HADOOP_FILE_CKSUM"; then
                extract_archive "$HADOOP_FILE" "$HADOOP_VERSION"
                mv -f hadoop-"${HADOOP_VERSION}" hadoop/
                return
            else
                printf "Hadoop archive has wrong checksum (%s)\n" "$HADOOP_FILE"
                printf "Execute script again to redownload file\n"
                # Drop the corrupt archive so the next run actually redownloads it
                rm -f "$HADOOP_FILE"
                exit 1
            fi
        fi
        printf "Downloading %s\n" "$1"
        curl -LO "${1}"
        if compare_checksum "$HADOOP_FILE" "$HADOOP_FILE_CKSUM"; then
            extract_archive "$HADOOP_FILE" "$HADOOP_VERSION"
            mv -f hadoop-"${HADOOP_VERSION}" hadoop/
        else
            printf "Downloaded file has wrong checksum (%s)\n" "$HADOOP_FILE"
            exit 1
        fi
    fi
}
check_dependencies
prepare_hadoop "${HADOOP_URL}"
HDFS_CONFIG=$(create_hdfs_config "127.0.0.1:8020")
HDFS_CONFIG_CORE=$(create_hdfs_core_config "127.0.0.1:8020")
HDFS_CONFIG_MAPRED=$(create_hdfs_mapred_config "127.0.0.1:8021")
write_file "${HDFS_CONFIG_FILE}" "${HDFS_CONFIG}"
write_file "${HDFS_CONFIG_FILE_CORE}" "${HDFS_CONFIG_CORE}"
write_file "${HDFS_CONFIG_FILE_MAPRED}" "${HDFS_CONFIG_MAPRED}"
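# Port layout assumed by the values above: 8020 namenode IPC (fs.defaultFS),
# 8021 job tracker, 50070 namenode web UI (probed by test_hdfs_up below)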

tests/setup.sh Normal file

@@ -0,0 +1,47 @@
#!/usr/bin/env bash
check_dependencies() {
    for i in "${DEPS[@]}"
    do
        # command -v is a shell builtin and works where which(1) is missing
        if ! command -v "${i}" > /dev/null; then
            printf "Could not find %s\n" "${i}" >&2
            exit 1
        fi
    done
}
extract_archive() {
    printf "Extracting %s archive\n" "$1"
    if ! tar xfz "${1}"; then
        printf "Failed to extract archive: %s\n" "$1"
        exit 1
    fi
}
compare_checksum() {
    # Compare the sha512 digest of file $1 against the expected digest $2.
    # The result is the function's exit status (not echoed), so callers can
    # write 'if compare_checksum ...' directly.
    # Note: sha512(1) is the BSD tool; on Linux use: sha512sum "$1" | cut -d' ' -f1
    local cksum
    cksum=$(sha512 -q "${1}")
    if ! [ "$cksum" = "$2" ]; then
        printf "File has wrong checksum (%s)\n" "$1"
        return 1
    fi
    return 0
}
write_file() {
    printf "Writing %s\n" "$1"
    printf "%s" "$2" > "$1"
}
run() {
    # Start command $1 in the background and echo its PID so callers can
    # capture it with $(run ...); status messages go to stderr to keep
    # stdout clean for the PID
    printf "Starting %s\n" "$2" >&2
    $1 > /dev/null 2>&1 &  # $1 is unquoted on purpose so the command word-splits
    local pid=$!
    if kill -0 "$pid" 2> /dev/null; then
        printf "Started %s successfully\n" "$2" >&2
    else
        printf "Failed to start %s\n" "$2" >&2
        exit 1
    fi
    echo "$pid"
}
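# Typical usage from a setup script (an illustrative sketch; the MY_* names
# are hypothetical):
#
#   source setup.sh
#   declare -a DEPS=("java")
#   check_dependencies
#   if ! [ -f "$MY_FILE" ]; then
#       curl -LO "$MY_URL"
#   fi
#   if compare_checksum "$MY_FILE" "$MY_CKSUM"; then
#       extract_archive "$MY_FILE" "$MY_VERSION"
#   fi
#   PID=$(run "./bin/my-daemon start" "my-daemon")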


@@ -2,32 +2,68 @@
HBASE_TIME_STARTUP=15
HBASE_EXPORTER_TIME_STARTUP=60
HBASE_CMD="./bin/hbase-daemon.sh --config conf start master"
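# Set HDFS_FORMAT=true on a first run to format the namenode storage
# directory before the namenode starts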
HDFS_FORMAT=false
HDFS_CMD="./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode"
HDFS_CMD_FORMAT="./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode -format"
source setup.sh
setup_suite() {
export JAVA_HOME=${JAVA_HOME:-"/usr/local"}
export HADOOP_PREFIX="$(pwd)/hadoop"
# Setup HBase
./hbase-setup.sh
if ! ./hbase-setup.sh; then
printf "Failed to setup HBase to run test suite\n"
exit 1
fi
# Run HBase
cd hbase || exit
printf "Starting HBase in pseudo-distributed mode\n"
./bin/hbase-daemon.sh --config conf start master
# Setup HDFS
if ! ./hdfs-setup.sh; then
printf "Failed to setup HDFS to run test suite\n"
exit 1
fi
# Start HDFS
if [ true = "$HDFS_FORMAT" ]; then
# Formatting must finish before the namenode starts, so run it in the foreground
printf "Formatting HDFS namenode directory\n"
$HDFS_CMD_FORMAT > /dev/null 2>&1
fi
HDFS_PID=$(run "$HDFS_CMD" "HDFS")
# Start HBase: hbase-daemon.sh daemonizes on its own, so capture its output
# to detect a master that is already running
cd hbase/ || exit
r=$($HBASE_CMD 2>&1 || true)
if [[ "$r" == *"Stop it first."* ]]; then
printf "HBase is already running. Stop it manually first, then run the script again\n"
exit 1
fi
sleep $HBASE_TIME_STARTUP
# Run exporter
# Start exporter
run_exporter
printf "Waiting %ss to gather exporter values\n" ${HBASE_EXPORTER_TIME_STARTUP}
sleep $HBASE_EXPORTER_TIME_STARTUP
}
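# run_exporter is called with cwd tests/hbase, hence the cd back to the
# repository root where the hbase-exporter binary lives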
run_exporter() {
cd ../../ || exit
printf "Starting hbase-exporter\n"
./hbase-exporter --zookeeper-server="${ZK_SERVER:-"127.0.0.1"}" \
--hbase-pseudo-distributed=True \
--hbase-table="foo" > /dev/null 2>&1 &
PID=$!
printf "Waiting %ss to gather exporter values\n" ${HBASE_EXPORTER_TIME_STARTUP}
sleep $HBASE_EXPORTER_TIME_STARTUP
}
test_hdfs_up() {
assert "curl -s http://127.0.0.1:50070 > /dev/null" "HDFS: Namenode UI down"
# The namenode IPC port speaks Hadoop RPC, not HTTP, so probe it with nc
assert "nc -n -w1 127.0.0.1 8020" "HDFS: IPC down"
}
test_hbase_running() {
assert "nc -n -w1 \"${1:-\"127.0.0.1\"}\" \"${2:-\"16010\"}\""
assert "nc -n -w1 \"${1:-\"127.0.0.1\"}\" \"${2:-\"16010\"}\"" "HBase: Not running"
}
test_hbase_zk_running() {
@@ -59,8 +95,8 @@ test_hbase_exporter_export_zk_connection_count() {
test_hbase_exporter_export_zk_has_leader() {
r=$(curl -s http://127.0.0.1:9010 | grep '^zookeeper_has_leader' | cut -d " " -f2)
assert_not_equals "0.0" "$r" "exporer: Zookeeper has no leader"
assert_not_equals "" "$r" "exporer: Zookeeper has no leader"
assert_not_equals "0.0" "$r" "exporter: Zookeeper has no leader"
assert_not_equals "" "$r" "exporter: Zookeeper has no leader"
}
test_hbase_exporter_export_regionserver_live() {
@@ -75,8 +111,24 @@ test_hbase_exporter_export_regionserver_dead() {
assert_not_equals "" "$r" "exporter: HBase - Dead regionservers"
}
test_hbase_exporter_export_hdfs_datanodes_live() {
r=$(curl -s http://127.0.0.1:9010 | grep '^hdfs_datanodes_live' | cut -d " " -f2)
assert_not_equals "0.0" "$r" "exporter: HDFS - No data nodes"
assert_not_equals "" "$r" "exporter: HDFS - No data nodes"
}
test_hbase_exporter_export_hdfs_datanodes_dead() {
r=$(curl -s http://127.0.0.1:9010 | grep '^hdfs_datanodes_dead' | cut -d " " -f2)
assert_equals "0.0" "$r" "exporter: HDFS - Dead data nodes"
assert_not_equals "" "$r" "exporter: HDFS - Dead data nodes"
}
teardown_suite() {
printf "Stopping hbase-exporter (%s)\n" "$PID"
kill $PID
if ! kill $PID > /dev/null 2>&1; then
printf "Failed to send SIGTERM to %s\n" "$PID"
fi
printf "Stopping HBase\n"
./tests/hbase/bin/hbase-daemon.sh stop master
}