tests: Enable HA for HDFS
parent 65c915a07b
commit 60c854c771
@@ -4,9 +4,10 @@ set -ueo pipefail
 HBASE_VERSION="0.96.1.1"
 HBASE_FILE="hbase-${HBASE_VERSION}-hadoop2-bin.tar.gz"
+HBASE_DIR="hbase-${HBASE_VERSION}-hadoop2"
 #HBASE_URL="https://downloads.apache.org/hbase/${HBASE_VERSION}/${HBASE_FILE}"
 HBASE_URL="https://archive.apache.org/dist/hbase/hbase-${HBASE_VERSION}/${HBASE_FILE}"
-HBASE_FILE_CKSUM="5afb643c2391461619516624168e042b42a66e25217a3319552264c6af522e3a21a5212bfcba759b7b976794648ef13ee7b5a415f33cdb89bba43d40162aa685"
+HBASE_FILE_CKSUM="1625453f839f7d8c86078a131af9731f6df28c59e58870db84913dcbc640d430253134a825de7cec247ea1f0cf232435765e00844ee2e4faf31aeb356955c478"
 HBASE_CONFIG_FILE="hbase/conf/hbase-site.xml"
 HBASE_TEST_SUITE_EXECUTABLE="hbase/bin/hbase"
 
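Note: the new HBASE_DIR matches the tarball's top-level directory, and the pinned SHA-512 is updated alongside it. For reference, a minimal stand-alone check of the archive, assuming the BSD-style `sha512 -q` binary this suite already relies on (on Linux, `sha512sum` would be the analogue):

	# Sketch only: verify the download by hand, the same way compare_checksum does.
	HBASE_FILE="hbase-0.96.1.1-hadoop2-bin.tar.gz"
	EXPECTED="1625453f839f7d8c86078a131af9731f6df28c59e58870db84913dcbc640d430253134a825de7cec247ea1f0cf232435765e00844ee2e4faf31aeb356955c478"
	if [ "$(sha512 -q "${HBASE_FILE}")" = "${EXPECTED}" ]; then
		printf "Checksum OK\n"
	else
		printf "Checksum mismatch, delete the file and redownload\n"
	fi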
@@ -42,7 +43,8 @@ prepare_hbase() {
 		printf "HBase archive exists\n"
 		if compare_checksum $HBASE_FILE $HBASE_FILE_CKSUM; then
 			extract_archive $HBASE_FILE $HBASE_VERSION
-			mv -f hbase-"${VERSION}" hbase/
+			mv -f "${HBASE_DIR}" hbase
+			return
 		else
 			printf "HBase archive has wrong checksum (%s)\n" "$1"
 			printf "Execute script again to redownload file\n"
@@ -55,7 +57,7 @@ prepare_hbase() {
 
 	if compare_checksum $HBASE_FILE $HBASE_FILE_CKSUM; then
 		extract_archive $HBASE_FILE $HBASE_VERSION
-		mv -f hbase-${HBASE_VERSION} hbase/
+		mv -f ${HBASE_DIR} hbase
 	fi
 fi
}
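Both rename paths now move the directory the archive actually unpacks to (the old `hbase-"${VERSION}"` referenced a variable that does not exist in these scripts). A sketch of the intended flow, assuming extract_archive boils down to a plain tar extraction:

	tar -xzf "hbase-0.96.1.1-hadoop2-bin.tar.gz"   # unpacks to hbase-0.96.1.1-hadoop2/
	mv -f "hbase-0.96.1.1-hadoop2" hbase           # normalize to the path the suite expects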
@@ -9,6 +9,7 @@ HADOOP_FILE_CKSUM="2460e02cd1f80dfed7a8981bbc934c095c0a341435118bec781fd835ec2eb
 HDFS_CONFIG_FILE="hadoop/etc/hadoop/hdfs-site.xml"
 HDFS_CONFIG_FILE_CORE="hadoop/etc/hadoop/core-site.xml"
+HDFS_CONFIG_FILE_MAPRED="hadoop/etc/hadoop/mapred-site.xml"
 HDFS_CONFIG_DATANODES="localhost"
 HDFS_TEST_SUITE_EXECUTABLE="hadoop/bin/hdfs"
 
 source setup.sh
@@ -23,7 +24,7 @@ create_hdfs_core_config() {
 <configuration>
   <property>
     <name>fs.defaultFS</name>
-    <value>hdfs://$1</value>
+    <value>hdfs://$2</value>
   </property>
 </configuration>
 EOF
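Clients now address the logical nameservice instead of a single NameNode. With the arguments passed at the bottom of this change, the rendered core-site.xml value becomes hdfs://test-cluster; a quick way to eyeball the generator's output:

	# Hypothetical smoke test of the generator with the arguments used below:
	HDFS_CONFIG_CORE=$(create_hdfs_core_config "127.0.0.1:8020" "test-cluster")
	echo "$HDFS_CONFIG_CORE" | grep -A1 "fs.defaultFS"   # expect <value>hdfs://test-cluster</value>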
@@ -51,9 +52,33 @@ create_hdfs_config() {
 <?xml version="1.0" encoding="UTF-8"?>
 <?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
 <configuration>
+  <property>
+    <name>dfs.nameservices</name>
+    <value>$2</value>
+  </property>
+  <property>
+    <name>dfs.ha.namenodes.$2</name>
+    <value>nn1,nn2</value>
+  </property>
   <property>
     <name>fs.defaultFS</name>
-    <value>hdfs://$1</value>
+    <value>hdfs://$2</value>
   </property>
+  <property>
+    <name>dfs.namenode.rpc-address.$2.nn1</name>
+    <value>localhost:8020</value>
+  </property>
+  <property>
+    <name>dfs.namenode.rpc-address.$2.nn2</name>
+    <value>master-1:8020</value>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address.$2.nn1</name>
+    <value>localhost:50070</value>
+  </property>
+  <property>
+    <name>dfs.namenode.http-address.$2.nn2</name>
+    <value>master-1:50070</value>
+  </property>
   <property>
     <name>dfs.namenode.name.dir</name>
@@ -62,7 +87,15 @@ create_hdfs_config() {
   <property>
     <name>dfs.datanode.data.dir</name>
     <value>/tmp/hdfs/datanode</value>
   </property>
+  <property>
+    <name>dfs.namenode.shared.edits.dir</name>
+    <value>file:///tmp/hadoop</value>
+  </property>
+  <property>
+    <name>ha.zookeeper.quorum</name>
+    <value>127.0.0.1:2181</value>
+  </property>
 </configuration>
 EOF
 echo "$CONFIG"
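Taken together, the generated hdfs-site.xml now declares the nameservice ($2), its two NameNode IDs (nn1 on localhost, nn2 on master-1), their RPC and HTTP endpoints, an NFS-style shared edits directory, and a ZooKeeper quorum for failover coordination. A full HA client setup usually also sets dfs.client.failover.proxy.provider.<nameservice>; that is not part of this change. Once both NameNodes are up, their roles could be checked with the stock Hadoop 2.x tooling:

	# Sketch, assuming the standard haadmin CLI that ships with Hadoop 2.x:
	./hadoop/bin/hdfs --config hadoop/etc/hadoop haadmin -getServiceState nn1   # "active" or "standby"
	./hadoop/bin/hdfs --config hadoop/etc/hadoop haadmin -getServiceState nn2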
@@ -95,9 +128,10 @@ prepare_hadoop() {
 
 check_dependencies
 prepare_hadoop ${HADOOP_URL}
-HDFS_CONFIG=$(create_hdfs_config "127.0.0.1:8020")
-HDFS_CONFIG_CORE=$(create_hdfs_core_config "127.0.0.1:8020")
+HDFS_CONFIG=$(create_hdfs_config "127.0.0.1:8020" "test-cluster")
+HDFS_CONFIG_CORE=$(create_hdfs_core_config "127.0.0.1:8020" "test-cluster")
 HDFS_CONFIG_MAPRED=$(create_hdfs_mapred_config "127.0.0.1:8021")
 write_file ${HDFS_CONFIG_FILE} "${HDFS_CONFIG}"
 write_file ${HDFS_CONFIG_FILE_CORE} "${HDFS_CONFIG_CORE}"
+write_file ${HDFS_CONFIG_FILE_MAPRED} "${HDFS_CONFIG_MAPRED}"
 write_file ${HDFS_CONFIG_DATANODES} "localhost"
@@ -22,7 +22,6 @@ compare_checksum() {
 	local r
 	CKSUM=$(sha512 -q "${1}")
 	if ! [ "$CKSUM" = "$2" ]; then
-		printf "File has wrong checksum (%s)\n" "$1"
 		r=1
 	else
 		r=0
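compare_checksum now reports only through its return code and leaves the error message to the caller, which has better context (see the HBase-specific message above). The call shape this enables:

	# Caller-side error reporting, as prepare_hbase does it:
	if ! compare_checksum "${HBASE_FILE}" "${HBASE_FILE_CKSUM}"; then
		printf "HBase archive has wrong checksum (%s)\n" "${HBASE_FILE}"
	fi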
@@ -36,12 +35,14 @@ write_file() {
 }
 
 run() {
+	local pid
 	printf "Starting %s\n" "$2"
 	if $($1 > /dev/null 2>&1 &); then
 		printf "Started %s successfully\n" "$2"
+		pid=$!
 	else
 		printf "Failed to start %s\n" "$2"
-		exit 1
+		pid="-1"
 	fi
-	echo "$!"
+	echo "$pid"
 }
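run() now hands the caller a pid over stdout ("-1" on failure) instead of exiting the whole script. Since the progress printfs share stdout, a caller wanting the pid would grab the last line; and because the command is backgrounded inside a command substitution, treat this as a sketch of the calling convention rather than guaranteed pid plumbing:

	# Illustrative only; NAMENODE_PID is a hypothetical variable name.
	NAMENODE_PID=$(run "$HDFS_CMD_NAMENODE" "HDFS Namenode" | tail -n 1)
	[ "$NAMENODE_PID" != "-1" ] || exit 1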
@@ -4,7 +4,8 @@ HBASE_TIME_STARTUP=15
 HBASE_EXPORTER_TIME_STARTUP=60
 HBASE_CMD="./bin/hbase-daemon.sh --config conf start master"
 HDFS_FORMAT=false
-HDFS_CMD="./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode"
+HDFS_CMD_NAMENODE="./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode"
+HDFS_CMD_DATANODE="./hadoop/bin/hdfs --config hadoop/etc/hadoop datanode"
 HDFS_CMD_FORMAT="./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode -format"
 
 source setup.sh
@@ -31,7 +32,8 @@ setup_suite() {
 		r=run $HDFS_CMD_FORMAT "HDFS_FORMAT"
 	fi
 
-	run "$HDFS_CMD" "HDFS"
+	run "$HDFS_CMD_NAMENODE" "HDFS Namenode"
+	run "$HDFS_CMD_DATANODE" "HDFS Datanode"
 
 	# Start HBase
 	cd hbase/ || exit
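The suite now starts the DataNode explicitly alongside the NameNode instead of a single catch-all HDFS process. Unrolled, the startup sequence amounts to:

	# Optional one-time format, gated by HDFS_FORMAT above:
	./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode -format
	# Then the daemons, in dependency order (run() backgrounds them):
	./hadoop/bin/hdfs --config hadoop/etc/hadoop namenode   # RPC on localhost:8020
	./hadoop/bin/hdfs --config hadoop/etc/hadoop datanode   # registers with the namenode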