Hadoop ecosystem setup (3 nodes) - 07. Hive configuration

Posted by zcf5522

http://archive.apache.org/dist/hive/hive-2.1.1/

# ==================================================================Install Hive

tar -zxvf apache-hive-2.1.1-bin.tar.gz -C /usr/local
mv /usr/local/apache-hive-2.1.1-bin /usr/local/hive-2.1.1
rm -r ~/apache-hive-2.1.1-bin.tar.gz


cp ~/mysql-connector-java-5.1.46.jar /usr/local/hive-2.1.1/lib/

# Configure environment variables
# ==================================================================node1 node2 node3

vi /etc/profile

# Add the following below the line: export PATH USER LOGNAME MAIL HOSTNAME HISTSIZE HISTCONTROL
export JAVA_HOME=/usr/java/jdk1.8.0_111
export ZOOKEEPER_HOME=/usr/local/zookeeper-3.4.12
export HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export MYSQL_HOME=/usr/local/mysql
export HBASE_HOME=/usr/local/hbase-1.2.4
export HIVE_HOME=/usr/local/hive-2.1.1

export PATH=$PATH:$JAVA_HOME/bin:$JAVA_HOME/jre/bin:$ZOOKEEPER_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$MYSQL_HOME/bin:$HBASE_HOME/bin:$HIVE_HOME/bin
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native

# ==================================================================node1

# Apply the environment variables
source /etc/profile

# Check the result
echo $HIVE_HOME

mkdir -p $HIVE_HOME/data/hive
mkdir -p $HIVE_HOME/data/hive/operation_logs
mkdir -p $HIVE_HOME/data/resources


# Configure Hive
cp $HIVE_HOME/conf/hive-env.sh.template $HIVE_HOME/conf/hive-env.sh
cp $HIVE_HOME/conf/hive-default.xml.template $HIVE_HOME/conf/hive-site.xml
cp $HIVE_HOME/conf/hive-exec-log4j2.properties.template $HIVE_HOME/conf/hive-exec-log4j2.properties
cp $HIVE_HOME/conf/hive-log4j2.properties.template $HIVE_HOME/conf/hive-log4j2.properties
# Replace ${system:java.io.tmpdir}/${system:user.name} with the local path /usr/local/hive-2.1.1/data/hive
# Replace ${system:java.io.tmpdir}/${hive.session.id}_resources with the local path /usr/local/hive-2.1.1/data/resources
# Replace ${system:java.io.tmpdir}/${system:user.name}/operation_logs with the local path /usr/local/hive-2.1.1/data/hive/operation_logs

vi $HIVE_HOME/conf/hive-site.xml

# Press Esc, then run:
:%s#${system:java.io.tmpdir}/${system:user.name}#/usr/local/hive-2.1.1/data/hive#

:%s#${system:java.io.tmpdir}/${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#
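
# A non-interactive alternative to the vi substitutions above (a sketch,
# assuming GNU sed; -i.bak keeps a backup of the original file):
# sed -i.bak \
#   -e 's#\${system:java.io.tmpdir}/\${system:user.name}#/usr/local/hive-2.1.1/data/hive#g' \
#   -e 's#\${system:java.io.tmpdir}/\${hive.session.id}_resources#/usr/local/hive-2.1.1/data/resources#g' \
#   $HIVE_HOME/conf/hive-site.xml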

# Press Esc, type /hive.exec.scratchdir to search; once found, press Insert and edit the <value>

<property>
    <name>hive.exec.scratchdir</name>
    <value>/hive/tmp</value>
</property>

<property>
    <name>hive.metastore.warehouse.dir</name>
    <value>/hive/warehouse</value>
</property>

<!-- Connect to the hive database in MySQL over JDBC -->
<property>
    <name>javax.jdo.option.ConnectionURL</name>
    <value>jdbc:mysql://node1:3306/hive?createDatabaseIfNotExist=true&amp;useSSL=false</value>
</property>

<!-- JDBC driver for MySQL -->
<property>
    <name>javax.jdo.option.ConnectionDriverName</name>
    <value>com.mysql.jdbc.Driver</value>
</property>

<!-- MySQL username -->
<property>
    <name>javax.jdo.option.ConnectionUserName</name>
    <value>hive</value>
</property>

<!-- MySQL user password -->
<property>
    <name>javax.jdo.option.ConnectionPassword</name>
    <value>Hive-123</value>
</property>

<!-- HiveServer2 HA -->
<property>
    <name>hive.server2.support.dynamic.service.discovery</name>
    <value>true</value>
</property>

<property>
    <name>hive.zookeeper.quorum</name>
    <value>node1:2181,node2:2181,node3:2181</value>
</property>

<!-- Hive web UI (HWI); left unconfigured for now -->
<property>
    <name>hive.hwi.war.file</name>
    <value>/usr/local/hive-2.1.1/lib/hive-hwi-2.1.1.jar</value>
</property>
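
# The JDBC settings above assume a MySQL account hive / Hive-123 that can
# connect from the Hive nodes and create the hive database. If it does not
# exist yet, a minimal sketch (run in the mysql client on node1; the '%' host
# scope and the blanket grant are assumptions, tighten as needed):
# mysql -uroot -p
# > CREATE USER 'hive'@'%' IDENTIFIED BY 'Hive-123';
# > GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%';
# > FLUSH PRIVILEGES;
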
# Copy the required jars from hbase/lib into hive/lib
cp $HBASE_HOME/lib/hbase-client-1.2.4.jar $HBASE_HOME/lib/hbase-common-1.2.4.jar $HIVE_HOME/lib


# Sync the jline version between Hive and Hadoop
cp $HIVE_HOME/lib/jline-2.12.jar $HADOOP_HOME/share/hadoop/yarn/lib


# Check the existing version
# cd $HADOOP_HOME/share/hadoop/yarn/lib
# find ./ -name "*jline*jar"

# Remove the old jline 0.9
# rm jline-0.9.94.jar


# Copy the JDK's tools.jar into hive/lib
cp $JAVA_HOME/lib/tools.jar $HIVE_HOME/lib

# rm -f $HIVE_HOME/lib/log4j-slf4j-impl-2.4.1.jar


vi $HIVE_HOME/conf/hive-env.sh

HADOOP_HOME=/usr/local/hadoop/hadoop-2.7.6
export HIVE_HOME=/usr/local/hive-2.1.1
export HIVE_CONF_DIR=/usr/local/hive-2.1.1/conf
export HIVE_AUX_JARS_PATH=/usr/local/hive-2.1.1/lib

 

# If hadoop.proxyuser.root.groups was not configured in Hadoop earlier, configure it now

# ==================================================================node1 node2 node3
# If you get permission errors, add this to Hadoop's core-site.xml:
vi $HADOOP_HOME/etc/hadoop/core-site.xml

<property>
	<name>hadoop.proxyuser.root.groups</name>
	<value>*</value>
</property>
<property>
	<name>hadoop.proxyuser.root.hosts</name>
	<value>*</value>
</property>
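
# Instead of editing core-site.xml on every node, it can be changed once on
# node1 and copied out (a convenience sketch; paths follow this series' layout):
# scp $HADOOP_HOME/etc/hadoop/core-site.xml node2:$HADOOP_HOME/etc/hadoop/
# scp $HADOOP_HOME/etc/hadoop/core-site.xml node3:$HADOOP_HOME/etc/hadoop/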


# ==================================================================node1
# Refresh the configuration as the superuser
yarn rmadmin -refreshSuperUserGroupsConfiguration
hdfs dfsadmin -refreshSuperUserGroupsConfiguration


# ==================================================================node1 node2
# If the NameNode is HA-enabled, run this on both the active and standby NameNodes
hdfs dfsadmin -fs hdfs://appcluster -refreshSuperUserGroupsConfiguration

# ==================================================================node2 node3

# Apply the environment variables
source /etc/profile

# Check the result
echo $HIVE_HOME

# ==================================================================node1

$HIVE_HOME/bin/schematool -initSchema -dbType mysql
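
# Optional sanity check that the metastore schema was created (credentials as
# configured in hive-site.xml above):
# $HIVE_HOME/bin/schematool -info -dbType mysql
# mysql -uhive -pHive-123 -h node1 -e "use hive; show tables;"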


scp -r $HIVE_HOME node2:/usr/local/
scp -r $HIVE_HOME node3:/usr/local/

# Start the stack

# ==================================================================node1 node2 node3
# Start ZooKeeper
zkServer.sh start
zkServer.sh status


# ==================================================================node1
# Start all Hadoop processes
$HADOOP_HOME/sbin/start-all.sh


$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc


# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh start resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh start zkfc


# ==================================================================node1
# For HBase HA (High Availability)
$HBASE_HOME/bin/hbase-daemon.sh start master

# Start HBase (start-hbase.sh)
$HBASE_HOME/bin/start-hbase.sh


# ==================================================================node2
# Enable HBase HA (backup master on node2)
$HBASE_HOME/bin/hbase-daemon.sh start master
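
# Quick process check across the cluster (assuming the passwordless ssh already
# used for scp in this series; jps ships with the JDK):
# for h in node1 node2 node3; do echo "== $h"; ssh $h $JAVA_HOME/bin/jps; done
# node1 and node2 should each show an HMaster; the region-server nodes show HRegionServer.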


# ==================================================================node1
$HIVE_HOME/bin/hiveserver2


# ==================================================================node2
$HIVE_HOME/bin/hiveserver2
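
# hiveserver2 runs in the foreground; to keep it alive after the shell closes,
# a common pattern (an assumption, not part of the original steps) is:
# nohup $HIVE_HOME/bin/hiveserver2 > /dev/null 2>&1 &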


# ==================================================================node1
zkCli.sh

ls /hiveserver2

get /hiveserver2/serverUri=node1:10000;version=2.1.1;sequence=0000000000


$HIVE_HOME/bin/beeline -u "jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2" -n root -p 123456

# $HIVE_HOME/bin/beeline
# > !connect jdbc:hive2://node1:2181,node2:2181,node3:2181/;serviceDiscoveryMode=zooKeeper;zooKeeperNamespace=hiveserver2 root "123456"
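
# Each live hiveserver2 instance registers one serverUri=... child under
# /hiveserver2; this can also be checked without an interactive zkCli session
# (quorum addresses as configured above):
# zkCli.sh -server node1:2181 ls /hiveserver2
# Stopping one instance removes its znode; new beeline sessions then go to the
# surviving server.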


> create external table user_info(user_id int comment 'userID', user_name string comment 'userName') row format delimited fields terminated by '\t' lines terminated by '\n';

> show tables;
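
# Optional: confirm the table type and its HDFS location (standard HiveQL):
> describe formatted user_info;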

mkdir /root/hive

vi /root/hive/user_info.txt

1001	zhangsan
1002	lisi
1003	wangwu


> load data local inpath '/root/hive/user_info.txt' into table user_info;

> select * from user_info;

> quit;


hdfs dfs -ls /

hdfs dfs -ls /hive/warehouse


hdfs dfs -cat /hive/warehouse/user_info/user_info.txt


hadoop fs -mkdir /hive_input_data


vi /root/hive/user_info.txt

1001	zhangsan
1002	lisi
1003	wangwu
1004	liuliu
1005	qiqi


hadoop fs -put /root/hive/user_info.txt /hive_input_data


hdfs dfs -ls /hive_input_data

# hdfs dfs -chmod 777 /hive_input_data

> select * from user_info;

> load data inpath '/hive_input_data/user_info.txt' overwrite into table user_info;

> select * from user_info;
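
# Note: load data inpath (without "local") moves the file within HDFS, so the
# source directory should now be empty:
hdfs dfs -ls /hive_input_data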


# ==================================================================node1
# Stop the running processes
$HBASE_HOME/bin/stop-hbase.sh


$HADOOP_HOME/sbin/stop-all.sh


# ==================================================================node1 node2 node3
# Stop ZooKeeper
zkServer.sh stop


# ==================================================================node1
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc


# ==================================================================node2
$HADOOP_HOME/sbin/yarn-daemon.sh stop resourcemanager
$HADOOP_HOME/sbin/hadoop-daemon.sh stop zkfc



shutdown -h now
# Take a snapshot: hive_hiveserver2 cluster

 


