Hadoop Ecosystem Setup (3 Nodes) - 06. HBase Configuration
Posted by zcf5522
# http://archive.apache.org/dist/hbase/1.2.4/
# ================================================================== Install HBase
tar -zxvf ~/hbase-1.2.4-bin.tar.gz -C /usr/local
rm -r ~/hbase-1.2.4-bin.tar.gz
# Configure environment variables
# ==================================================================node1 node2 node3
vi /etc/profile

# Add below the line "export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL":
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
# ==================================================================node1
# Reload the environment variables
source /etc/profile

# Verify the result
echo $HBASE_HOME

vi $HBASE_HOME/conf/hbase-env.sh

export JAVA_HOME=/usr/java/jdk1.8.0_111
#export HBASE_CLASSPATH=/usr/local/hbase-1.2.4/conf   # needed by Flume
export JAVA_CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export HADOOP_HOME=/usr/local/hadoop-2.7.6
export HADOOP_CONF_DIR=/usr/local/hadoop-2.7.6/etc/hadoop
#export HBASE_OPTS="-XX:+UseConcMarkSweepGC"
export HBASE_OPTS="$HBASE_OPTS -XX:+HeapDumpOnOutOfMemoryError -XX:+UseConcMarkSweepGC -XX:+CMSIncrementalMode"
export HBASE_LOG_DIR=/usr/local/hbase-1.2.4/logs
export HBASE_PID_DIR=/usr/local/hbase-1.2.4/pids
# Use the external ZooKeeper ensemble instead of the one bundled with HBase
export HBASE_MANAGES_ZK=false
# export TZ="Asia/Shanghai"

vi $HBASE_HOME/conf/regionservers

node1
node2
node3

mkdir $HBASE_HOME/tmp
cp $HADOOP_HOME/etc/hadoop/hdfs-site.xml $HBASE_HOME/conf/
cp $HADOOP_HOME/etc/hadoop/core-site.xml $HBASE_HOME/conf/
# Remove the duplicate SLF4J binding so it does not clash with Hadoop's
rm -r $HBASE_HOME/lib/slf4j-log4j12-1.7.5.jar
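Before moving on, a quick sanity check confirms that the config copies and the jar removal took effect. This is only a minimal sketch using standard shell tools:

# Sketch: confirm the copied Hadoop configs are in place
ls -l $HBASE_HOME/conf/hdfs-site.xml $HBASE_HOME/conf/core-site.xml
# slf4j-log4j12-1.7.5.jar should no longer appear in this listing
ls $HBASE_HOME/lib | grep slf4j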
# Configure hbase-site.xml
vi $HBASE_HOME/conf/hbase-site.xml
<configuration>
  <property>
    <name>hbase.rootdir</name>
    <value>hdfs://appcluster/hbase</value>
  </property>
  <property>
    <name>hbase.cluster.distributed</name>
    <value>true</value>
  </property>
  <property>
    <name>hbase.master.port</name>
    <value>16000</value>
  </property>
  <!-- Default HMaster HTTP port -->
  <property>
    <name>hbase.master.info.port</name>
    <value>16010</value>
  </property>
  <property>
    <name>hbase.regionserver.port</name>
    <value>16020</value>
  </property>
  <!-- Default HRegionServer HTTP port -->
  <property>
    <name>hbase.regionserver.info.port</name>
    <value>16030</value>
  </property>
  <property>
    <name>hbase.tmp.dir</name>
    <value>/usr/local/hbase-1.2.4/tmp</value>
  </property>
  <property>
    <name>hbase.zookeeper.quorum</name>
    <value>node1,node2,node3</value>
  </property>
  <!-- Must match the dataDir configured for ZooKeeper -->
  <property>
    <name>hbase.zookeeper.property.dataDir</name>
    <value>/usr/local/zookeeper-3.4.12/data</value>
  </property>
  <property>
    <name>dfs.replication</name>
    <value>2</value>
  </property>
</configuration>
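A malformed hbase-site.xml will keep HBase from starting, so it is worth validating the file before distributing it. A minimal sketch, assuming xmllint (from libxml2) is installed:

# Sketch: check that hbase-site.xml is well-formed XML (assumes xmllint is available)
xmllint --noout $HBASE_HOME/conf/hbase-site.xml && echo "hbase-site.xml is well-formed"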
# ==================================================================node1
scp -r $HBASE_HOME node2:/usr/local/
scp -r $HBASE_HOME node3:/usr/local/
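The same distribution step can be written as a loop, which scales better if more nodes are added later; a sketch relying on the passwordless SSH already set up for this cluster:

# Sketch: copy the HBase directory to every remaining node in one loop
for h in node2 node3; do
  scp -r $HBASE_HOME "$h":/usr/local/
done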
# ==================================================================node2 node3
# Reload the environment variables
source /etc/profile

# Verify the result
echo $HBASE_HOME
# Startup
# ==================================================================node1 node2 node3
# Start ZooKeeper
zkServer.sh start
zkServer.sh status

# ==================================================================node1
# Start all Hadoop daemons
$HADOOP_HOME/sbin/start-all.sh
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc

# ==================================================================node1
# To enable HBase HA (High Availability), start a master here first
$HBASE_HOME/bin/hbase-daemon.sh start master

# Start HBase (start-hbase.sh)
$HBASE_HOME/bin/start-hbase.sh

# ==================================================================node2
# Start the backup HMaster for HA
$HBASE_HOME/bin/hbase-daemon.sh start master

# Verify that HA started successfully
jps

# ==================================================================node1
jps
#2785 DataNode
#3377 DFSZKFailoverController
#2515 QuorumPeerMain
#3158 ResourceManager
#4328 Jps
#3788 HMaster
#2685 NameNode
#2973 JournalNode
#3261 NodeManager

# ==================================================================node2
jps
#2547 NodeManager
#2758 ResourceManager
#2409 JournalNode
#2170 QuorumPeerMain
#2315 DataNode
#2987 HRegionServer
#3196 HMaster
#3341 Jps
#2686 DFSZKFailoverController
#2255 NameNode
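Instead of eyeballing the jps output, the expected daemon list can be asserted per node. A sketch for node1, matching the output shown above (adjust the expected list for node2 and node3):

# Sketch: verify the expected daemons are running on node1
expected="QuorumPeerMain NameNode DataNode JournalNode DFSZKFailoverController ResourceManager NodeManager HMaster"
for proc in $expected; do
  jps | grep -qw "$proc" && echo "OK:      $proc" || echo "MISSING: $proc"
done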
# Web UI access
# http://node1:16010
# http://node2:16010/master-status
# http://node2:16030/rs-status
# http://node3:16030/rs-status
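On a headless cluster the same pages can be probed from the shell; a sketch assuming curl is installed:

# Sketch: confirm the web UIs respond with HTTP 200 (assumes curl)
curl -s -o /dev/null -w "node1 master: %{http_code}\n" http://node1:16010/master-status
curl -s -o /dev/null -w "node2 master: %{http_code}\n" http://node2:16010/master-status
curl -s -o /dev/null -w "node3 rs:     %{http_code}\n" http://node3:16030/rs-status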
# High availability test
# Kill the active NameNode (2255 is the node2 NameNode PID from the jps output above)
kill -9 2255

$HBASE_HOME/bin/hbase shell

# In the hbase shell (the same session can be scripted; see the sketch at the end of this post):
# Create a table
> create 'test','address'

# Insert records
> put 'test','row1','address:province','sichuan'
> put 'test','row2','address:city','chengdu'

# Read a record
> get 'test','row1'

# Count the rows in the table
> count 'test'

# Scan all records
> scan 'test'

# Delete a record
> delete 'test','row1','address'

# Drop a table (disable it first)
> disable 'test'
> drop 'test'

# ==================================================================node1
# Stop the running daemons
$HBASE_HOME/bin/stop-hbase.sh
$HADOOP_HOME/sbin/stop-all.sh

# ==================================================================node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop

# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc

shutdown -h now

# Take a VM snapshot: hbase
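As noted above, the hbase shell session can also be driven non-interactively, which makes it reusable as a smoke test after every restart. A sketch feeding the same commands through a heredoc:

# Sketch: run the table smoke test non-interactively
$HBASE_HOME/bin/hbase shell <<'EOF'
create 'test','address'
put 'test','row1','address:province','sichuan'
get 'test','row1'
count 'test'
disable 'test'
drop 'test'
exit
EOF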