大数据集群一键启动脚本
Posted 厨 神
tags:
篇首语:本文由小常识网(cha138.com)小编为大家整理,主要介绍了大数据集群一键启动脚本相关的知识,希望对你有一定的参考价值。
1.一键启动
大数据集群启动脚本,包括zk,hadoop,spark,kafka,hbase,redis,clickhouse。
vi startall.sh
#!/bin/bash
# startall.sh — bring up the whole big-data stack in dependency order:
# ZooKeeper -> Hadoop -> Hive -> Spark history server -> Kafka -> HBase
# -> Redis -> ClickHouse, then print jps on every node.
SPARK_HOME=/opt/module/spark
REDIS_HOME=/opt/module/redis

/bin/zk_shtd.sh start
/bin/hadoop_shtd.sh start
/bin/hive_shtd.sh start
"$SPARK_HOME/sbin/start-history-server.sh"
/bin/kafka_shtd.sh start
/bin/hbase_shtd.sh start
# Redis runs in the foreground unless daemonize is enabled in redis.conf.
"$REDIS_HOME/bin/redis-server" "$REDIS_HOME/etc/redis.conf"
/bin/clickhouse-server_shtd.sh start
# Show the Java processes on every node as a final sanity check.
/bin/jpsall
2.zk
vi /bin/zk_shtd.sh
#!/bin/bash
# zk_shtd.sh — run "zkServer.sh <start|stop|status>" on every cluster node.
ZK_HOME=/opt/module/zookeeper
names=(master slave1 slave2)

# Require exactly one sub-command argument.
if [ $# -lt 1 ]
then
	echo "No Args Input..."
	exit ;
fi

case $1 in
"start"|"stop"|"status")
	# BUGFIX: "$names[@]" expanded to the literal string "master[@]";
	# "${names[@]}" is required to iterate over the whole array.
	for i in "${names[@]}"
	do
		echo "===================== $i ======================="
		ssh "$i" "$ZK_HOME/bin/zkServer.sh $1"
	done
;;
*)
	echo "Input Args Error..."
;;
esac
3. hadoop
vi /bin/hadoop_shtd.sh
#!/bin/bash
# hadoop_shtd.sh — start/stop HDFS (on master), YARN (on slave1) and the
# MapReduce history server (on master).
HADOOP_HOME=/opt/module/hadoop
names=(master slave1 slave2)

if [ $# -lt 1 ]
then
	echo "No Args Input..."
	exit ;
fi

# BUGFIX: $names[0] expanded to "master[0]" (invalid host); array
# elements need the ${names[0]} form.
case $1 in
"start")
	echo " =================== START hadoop ==================="
	echo " --------------- start hdfs ---------------"
	ssh "${names[0]}" "$HADOOP_HOME/sbin/start-dfs.sh"
	echo " --------------- start yarn ---------------"
	ssh "${names[1]}" "$HADOOP_HOME/sbin/start-yarn.sh"
	echo " --------------- start historyserver ---------------"
	ssh "${names[0]}" "$HADOOP_HOME/bin/mapred --daemon start historyserver"
;;
"stop")
	# Stop in reverse order of startup.
	echo " =================== STOP hadoop ==================="
	echo " --------------- stop historyserver ---------------"
	ssh "${names[0]}" "$HADOOP_HOME/bin/mapred --daemon stop historyserver"
	echo " ---------------stop yarn ---------------"
	ssh "${names[1]}" "$HADOOP_HOME/sbin/stop-yarn.sh"
	echo " ---------------stop hdfs ---------------"
	ssh "${names[0]}" "$HADOOP_HOME/sbin/stop-dfs.sh"
;;
*)
	echo "Input Args Error..."
;;
esac
4. hive
vi /bin/hive_shtd.sh
#!/bin/bash
# hive_shtd.sh — start/stop/restart/status for Hive Metastore and HiveServer2.
HIVE_HOME=/opt/module/hive
HIVE_LOG_DIR=$HIVE_HOME/logs
if [ ! -d "$HIVE_LOG_DIR" ]
then
	mkdir -p "$HIVE_LOG_DIR"
fi

# check_process <name> <port>
# Echoes the pid(s) of processes matching <name>; returns 0 only when a
# process matching <name> is also the owner of listening <port>.
# BUGFIX: the original function definitions had no { } body (bash syntax
# error), and awk 'print $2' lacked the mandatory { } action braces, so
# no pid was ever extracted.
function check_process()
{
	pid=$(ps -ef 2>/dev/null | grep -v grep | grep -i "$1" | awk '{print $2}')
	ppid=$(netstat -nltp 2>/dev/null | grep "$2" | awk '{print $7}' | cut -d '/' -f 1)
	echo $pid
	[[ "$pid" =~ "$ppid" ]] && [ "$ppid" ] && return 0 || return 1
}

# Start metastore (port 9083) and hiveserver2 (port 10000) if not running.
function hive_start()
{
	metapid=$(check_process HiveMetastore 9083)
	cmd="nohup hive --service metastore >$HIVE_LOG_DIR/metastore.log 2>&1 &"
	# Wait for HDFS to leave safe mode before starting hiveserver2.
	cmd=$cmd" hdfs dfsadmin -safemode wait >/dev/null 2>&1"
	[ -z "$metapid" ] && eval $cmd || echo "Metastroe START"
	server2pid=$(check_process HiveServer2 10000)
	cmd="nohup hive --service hiveserver2 >$HIVE_LOG_DIR/hiveServer2.log 2>&1 &"
	[ -z "$server2pid" ] && eval $cmd || echo "HiveServer2 START"
}

# Kill both services if their pids can be found.
function hive_stop()
{
	metapid=$(check_process HiveMetastore 9083)
	[ "$metapid" ] && kill $metapid || echo "Metastore stop"
	server2pid=$(check_process HiveServer2 10000)
	[ "$server2pid" ] && kill $server2pid || echo "HiveServer2 stop"
}

case $1 in
"start")
	hive_start
	;;
"stop")
	hive_stop
	;;
"restart")
	hive_stop
	sleep 2
	hive_start
	;;
"status")
	check_process HiveMetastore 9083 >/dev/null && echo "Metastore status" || echo "Metastore status"
	check_process HiveServer2 10000 >/dev/null && echo "HiveServer2 status" || echo "HiveServer2 status"
	;;
*)
	echo Invalid Args!
	echo 'Usage: '$(basename $0)' start|stop|restart|status'
	;;
esac
--------------------------------------------------------
第二种,只有启动
#!/bin/bash
# Minimal Hive startup: background metastore then hiveserver2.
HIVE_HOME=/opt/module/hive
# BUGFIX: the redirections below fail if the logs directory does not
# exist yet — create it first.
mkdir -p "$HIVE_HOME/logs"
#start metastore
$HIVE_HOME/bin/hive --service metastore >>$HIVE_HOME/logs/metastore.log 2>&1 &
echo "start metastore"
$HIVE_HOME/bin/hiveserver2 >>$HIVE_HOME/logs/hiveserver2.log 2>&1 &
echo "start hiveserver2"
5.kafka
vi /bin/kafka_shtd.sh
#!/bin/bash
# kafka_shtd.sh — start/stop the Kafka broker on every cluster node.
KAFKA_HOME=/opt/module/kafka
names=(master slave1 slave2)

if [ $# -lt 1 ]
then
	echo "Input Args Error....."
	exit
fi

case $1 in
start)
	# Give ZooKeeper time to come up before starting the brokers.
	sleep 10
	;;
esac

# BUGFIX: "$names[@]" expanded to the single literal "master[@]";
# "${names[@]}" iterates over every host.
for i in "${names[@]}"
do
	case $1 in
	start)
		echo "==================START $i KAFKA==================="
		ssh "$i" "$KAFKA_HOME/bin/kafka-server-start.sh -daemon $KAFKA_HOME/config/server.properties"
		;;
	stop)
		echo "==================STOP $i KAFKA==================="
		ssh "$i" "$KAFKA_HOME/bin/kafka-server-stop.sh stop"
		;;
	*)
		echo "Input Args Error....."
		exit
		;;
	esac
done
6.hbase
vi /bin/hbase_shtd.sh
#!/bin/bash
# hbase_shtd.sh — start or stop the HBase cluster from this node.
HBASE_HOME=/opt/module/hbase

if [ $# -lt 1 ]
then
	echo "Input Args Error....."
	exit
fi

case $1 in
start)
	# Pause so HDFS/ZooKeeper are ready before HBase comes up.
	sleep 10
	echo "==================START HBASE==================="
	$HBASE_HOME/bin/start-hbase.sh
	;;
stop)
	echo "==================STOP HBASE==================="
	$HBASE_HOME/bin/stop-hbase.sh
	;;
*)
	echo "Input Args Error....."
	exit
	;;
esac
7.clickhouse
vi /bin/clickhouse-server_shtd.sh
#!/bin/bash
# clickhouse-server_shtd.sh — start/stop/status for the local ClickHouse
# server via its init script / systemd unit.
if [ $# -lt 1 ]
then
	echo "Input Args Error....."
	exit
fi

case $1 in
start)
	echo "==================START clickhouse==================="
	/etc/init.d/clickhouse-server start
	;;
stop)
	echo "==================STOP clickhouse==================="
	/etc/init.d/clickhouse-server stop
	;;
status)
	# BUGFIX: banner previously said "STOP clickhouse" in this branch.
	echo "==================STATUS clickhouse==================="
	systemctl status clickhouse-server
	;;
*)
	echo "Input Args Error....."
	exit
	;;
esac
8.xsync
vi /bin/xsync
注意:若提示 rsync: command not found,需先安装 rsync:yum install rsync -y
#!/bin/bash
# xsync — rsync the given files/directories to every cluster node,
# preserving their absolute paths (symlinks are resolved with cd -P).
names=(master slave1 slave2)

if [ $# -lt 1 ]
then
	echo not enough argument1
	exit;
fi

# BUGFIX: "$names[@]" expanded to the literal "master[@]" so nothing was
# synced to the real hosts; also quote "$@"/"$file" so paths containing
# spaces survive word-splitting.
for host in "${names[@]}"
do
	echo ========$host=========
	for file in "$@"
	do
		if [ -e "$file" ]
		then
			# Absolute parent directory and bare filename of the target.
			pdir=$(cd -P "$(dirname "$file")";pwd)
			fname=$(basename "$file")
			ssh "$host" "mkdir -p $pdir"
			rsync -av "$pdir/$fname" "$host:$pdir"
		else
			echo $file does not exists!
		fi
	done
done
9.jpsall
vi /bin/jpsall
#!/bin/bash
# jpsall — print the Java process list (jps) of every cluster node.
names=(master slave1 slave2)
# BUGFIX: "$names[@]" expanded to the single literal host "master[@]";
# "${names[@]}" iterates over the whole array.
for host in "${names[@]}"
do
	echo "========$host========="
	ssh "$host" "jps"
done
以上是关于大数据集群一键启动脚本的主要内容,如果未能解决你的问题,请参考以下文章
使用 docker 一键搭建 hadoop,hbase, hive 等大数据集群
使用 docker 一键搭建 hadoop,hbase, hive 等大数据集群