
Hadoop Online Upgrade Procedure

Posted by gefieder on 2014-8-12 11:12:05
Guiding questions:

1. What preparation does an online Hadoop upgrade require?
2. How is the .bash_profile modified?
3. How are the JDBC drivers (oracle, mysql) and the lzo jar handled?
4. What steps does the Hadoop cluster upgrade consist of?
5. What are the upgrade commands for hive, hbase, zookeeper, and so on?





-- ############################################################################ --
-- Part 1: Preparation

http://funshion-hadoop01:50090/status.jsp
http://funshion-hadoop0:8088/cluster/apps/RUNNING
http://funshion-hadoop0:50070/dfshealth.html#tab-overview
http://funshion-hadoop0:19888/jobhistory/app

----------------------------------------------------------------------------------
-- 1. Package upload: upload the java, hadoop, hive, sqoop, hbase, etc. installation packages to the relevant nodes and clients:
-- All packages have been uploaded.
hadoop:    hadoop-2.4.1-Linux64-20140802.tar.gz
hbase:     hbase-0.98.3-hadoop2-bin-hadoop2.4.1.tar.gz
hive:      apache-hive-0.14.0-SNAPSHOT-bin-hadoop2.4.1-Linux64.20140808.tar.gz
sqoop:     sqoop-1.4.4.bin__hadoop-2.4.1.tar.gz
java:      jdk-7u65-linux-x64.tar.gz
zookeeper: zookeeper-3.4.6.tar.gz
Some of these packages can be found in the forum's download thread "hadoop family, storm, spark, Linux, flume, etc. jar and installation package collection (continuously updated)".

-- Clients:
192.168.117.147/61/210/211/30/33/201

147, 61:  hadoop OK, hive OK, java OK, sqoop OK
210, 211: hadoop OK, hbase OK, hive OK, java OK
30:       hadoop OK, java OK
201, 33:  hadoop OK, hive OK, java OK

-- Done on all nodes
-- hadoop
cd /home/hadoop/software
tar -xvf hadoop-2.4.1-Linux64-20140802.tar.gz
rm -rf /usr/local/hadoop-2.4.1
mv /home/hadoop/software/hadoop-2.4.1 /usr/local/
chown -R hadoop.hadoop /usr/local/hadoop-2.4.1
ls -l /usr/local |grep hadoop-2.4
ls -l /usr/java/ |grep jdk1.7

-- Done on all nodes
-- java
cd /home/hadoop/software
tar -xvf jdk-7u65-linux-x64.tar.gz
rm -rf /usr/java/jdk1.7.0_65
mv /home/hadoop/software/jdk1.7.0_65 /usr/java/
chown -R root.root /usr/java/jdk1.7.0_65
ls -l /usr/java |grep jdk1.7

-- Done on all nodes
-- hive
cd /home/hadoop/software
tar -xvf apache-hive-0.14.0-SNAPSHOT-bin-hadoop2.4.1-Linux64.20140808.tar.gz
rm -rf /usr/local/apache-hive-0.14.0-SNAPSHOT-bin
mv /home/hadoop/software/apache-hive-0.14.0-SNAPSHOT-bin /usr/local/
chown -R hadoop.hadoop /usr/local/apache-hive-0.14.0-SNAPSHOT-bin
ls -l /usr/local |grep hive


-- Done on all nodes
-- sqoop
cd /home/hadoop/software
tar -xvf sqoop-1.4.4.bin__hadoop-2.4.1.tar.gz
rm -rf /usr/local/sqoop-1.4.4.bin__hadoop-2.4.1
mv /home/hadoop/software/sqoop-1.4.4.bin__hadoop-2.4.1 /usr/local
chown -R hadoop.hadoop /usr/local/sqoop-1.4.4.bin__hadoop-2.4.1
ls -l /usr/local |grep sqoop

-- Done on all nodes
-- hbase
cd /home/hadoop/software
tar -xvf hbase-0.98.3-hadoop2-bin-hadoop2.4.1.tar.gz
rm -rf /usr/local/hbase-0.98.3-hadoop2
mv /home/hadoop/software/hbase-0.98.3-hadoop2 /usr/local/
chown -R hadoop.hadoop /usr/local/hbase-0.98.3-hadoop2
ls -l /usr/local |grep hbase

-- Done on all nodes
-- zookeeper
cd /home/hadoop/software
tar -xvf zookeeper-3.4.6.tar.gz
rm -rf /usr/local/zookeeper-3.4.6
mv /home/hadoop/software/zookeeper-3.4.6 /usr/local/
chown -R hadoop.hadoop /usr/local/zookeeper-3.4.6
ls -l /usr/local |grep zookeeper

----------------------------------------------------------------------------------
-- 2. Create the relevant local directories and HDFS directories (per the hadoop configuration files):
-- Directory notes:
-- 2.1 Directory-related parameters (before the upgrade): hadoop-env.sh, mapred-env.sh, yarn-env.sh
-- To do: export JAVA_HOME=/usr/java/latest       -- the symlink will be created before go-live, pointing at jdk1.7
-- To do: export HADOOP_LOG_DIR=/hadoop-disk6/hadoop/logs  -- log directory (namenode config; remember to change it)
-- To do: export HADOOP_LOG_DIR=/home/hadoop/logs          -- log directory (datanode config)
-- Done:  export HADOOP_PID_DIR=/home/hadoop/pids          -- pid directory (same on namenode and datanode)

-- 2.2 hdfs-site.xml (the directories below already exist, nothing more to do)
-- Done: dfs.name.dir                /hadoop-disk[1-3]/hdfs/name
-- Done: dfs.data.dir                /hadoop-disk[1-12]/hdfs/data
-- Done: fs.checkpoint.dir           /disk[1-3]/hdfs/tmp_namesecondary

-- 2.3 mapred-site.xml
-- Done: mapreduce.jobhistory.intermediate-done-dir   /hadoop_logs/mr-history/tmp  -- HDFS directory, already created
-- Done: mapreduce.jobhistory.done-dir                /hadoop_logs/mr-history/done -- HDFS directory, already created
-- Done: yarn.app.mapreduce.am.staging-dir            /hadoop_logs/yarn_userstag   -- HDFS directory, already created
-- To do: LD_LIBRARY_PATH=/usr/local/hadoop/lzo/lib    -- hadoop lzo library path; resolve before the upgrade

-- 2.4 yarn-site.xml
-- Done: yarn.nodemanager.local-dirs /hadoop-disk[1-12]/yarn/local -- created on all nodemanager (datanode) hosts, done
-- Done: yarn.nodemanager.log-dirs   /hadoop-disk[1-12]/yarn/log   -- only affects the datanode nodes
-- Done: yarn.nodemanager.remote-app-log-dir /tmp/logs             --
-- Done: yarn.app.mapreduce.am.staging-dir          /data/hadoop/logs/yarn_userstag   -- this is an HDFS directory

-- Note: if the yarn.nodemanager.log-dirs parameter is set, do not also set YARN_LOG_DIR in yarn-env.sh, because the YARN_LOG_DIR path would override this parameter.

-- Note: all pid files go in the /home/hadoop/pids directory;
--       all daemon log files go in /home/hadoop/logs (on the namenode they go in /hadoop-disk6/hadoop/logs instead; remember to change this before the upgrade)

----------------------------------------------------------------------------------
-- 3. Back up the old .bash_profile and upload the new .bash_profile
-- 3.1 Create the following file on 61 (funshion-hadoop01)
-- Done: /home/users/hadoop/luoyoumou/hadoop_update_scripts/bash_profile_bak.sh

-- 3.2 Edit bash_profile_bak.sh
-- Done

-- 3.3 Run bash_profile_bak.sh
-- Done: sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/bash_profile_bak.sh
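
-- A minimal sketch of what a backup script like bash_profile_bak.sh might look like (hypothetical; the host list and ssh port are assumptions, adjust to the real cluster):
#!/bin/bash
# Hypothetical sketch: back up ~/.bash_profile on every node before it gets replaced.
HOSTS="funshion-hadoop150 funshion-hadoop151"   # placeholder node list, replace with the real one
for host in $HOSTS; do
    ssh -p5044 hadoop@${host} "cp -p ~/.bash_profile ~/.bash_profile.old"
done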

-- 3.4 On 61 (funshion-hadoop01), create the file ~/.bash_profile.new and add the new environment variables, along the lines of the sketch below:
-- Done: vi ~/.bash_profile.new
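
-- A minimal sketch of the kind of entries ~/.bash_profile.new would carry, assuming the /usr/local symlinks used later in this post (illustrative only; each client keeps just the variables for the software it actually runs):
export JAVA_HOME=/usr/java/latest
export HADOOP_HOME=/usr/local/hadoop
export HIVE_HOME=/usr/local/hive
export HBASE_HOME=/usr/local/hbase
export SQOOP_HOME=/usr/local/sqoop
export ZOOKEEPER_HOME=/usr/local/zookeeper
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$HIVE_HOME/bin:$HBASE_HOME/bin:$SQOOP_HOME/bin:$ZOOKEEPER_HOME/bin:$PATH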

-- 3.5 Upload the file created in step 3.4 to every node of the hadoop cluster
-- Done: vi /home/users/hadoop/luoyoumou/hadoop_update_scripts/bash_profile_upload.sh

-- 3.6 On each client, back up the environment, upload .bash_profile.new, and customize the variables:
-- Done: 3.6.1 Back up and upload .bash_profile.new
-- 192.168.117.147/61/210/211/30/33/201
cp ~/.bash_profile ~/.bash_profile.old
ls -al ~/ |grep profile

cd ~/
rz -b

ls -al ~/ |grep profile

-- 3.6.2 Customize the environment variables (all clients plus 180-185):
-- 192.168.117.147/61/210/211/30/33/201, 180-185
vi ~/.bash_profile.new

-- Done: customized according to the software installed on each host
147, 61:  hadoop OK, hive OK, java OK, sqoop OK
210, 211: hadoop OK, hbase OK, hive OK, java OK
30:       hadoop OK, java OK
201, 33:  hadoop OK, hive OK, java OK

-- 3.6.3 Replace .bash_profile with .bash_profile.new (only do this after the upgrade)
-- 3.6.3.1 Add the following script on 61, used to replace ~/.bash_profile on every cluster node
-- Done:  vi /home/users/hadoop/luoyoumou/hadoop_update_scripts/bash_profile_update.sh
-- To do: sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/bash_profile_update.sh

----------------------------------------------------------------------------------
-- 4. Upload the JDBC drivers (oracle, mysql) and the lzo jar to each client (210, 211 and 33 had no oracle ojdbc jar in the old installation, so they can be skipped)
-- Done: 4.1 Upload the JDBC drivers (oracle, mysql) to each client
cp /usr/java/jdk1.6.0_20/jre/lib/ext/ojdbc* /usr/java/jdk1.7.0_65/jre/lib/ext/
cp /usr/java/jdk1.6.0_20/jre/lib/ext/dms.jar /usr/java/jdk1.7.0_65/jre/lib/ext/
ls -l /usr/java/jdk1.7.0_65/jre/lib/ext/ |grep ojdbc
ls -l /usr/java/jdk1.7.0_65/jre/lib/ext/ |grep dms.jar

cp ./mysql-connector-java-5.1.17-bin.jar /usr/local/hadoop-2.4.1/share/hadoop/common/
chown -R hadoop.hadoop /usr/local/hadoop-2.4.1/
ls -l /usr/local/hadoop-2.4.1/share/hadoop/common/ |grep mysql

cp ./mysql-connector-java-5.1.17-bin.jar /usr/local/sqoop-1.4.4.bin__hadoop-2.4.1/lib
chown -R hadoop.hadoop /usr/local/sqoop-1.4.4.bin__hadoop-2.4.1
ls -l /usr/local/sqoop-1.4.4.bin__hadoop-2.4.1/lib |grep mysql

cp ./mysql-connector-java-5.1.17-bin.jar /usr/local/apache-hive-0.14.0-SNAPSHOT-bin/lib
chown -R hadoop.hadoop /usr/local/apache-hive-0.14.0-SNAPSHOT-bin
ls -l /usr/local/apache-hive-0.14.0-SNAPSHOT-bin/lib |grep mysql

-- Done: 4.2 Upload the lzo jar
-- Done: vi /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_lzo_upload.sh
-- Done: sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_lzo_upload.sh

-- 4.3 Upload the lzo jar to each client
-- Done: cp /home/hadoop/software/hadoop-lzo*.jar /usr/local/hadoop-2.4.1/share/hadoop/common/
mv ./hadoop-lzo*.jar /usr/local/hadoop-2.4.1/share/hadoop/common/
chown -R hadoop.hadoop /usr/local/hadoop-2.4.1
ls -l /usr/local/hadoop-2.4.1/share/hadoop/common/ |grep lzo

-- 4.4 Upload the lib, lzo, and lzop directories to all cluster nodes:
-- Done: vi /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_lib_upload.sh
-- Done: sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_lib_upload.sh
-- Done: vi /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_lzo_update.sh
-- Done: sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_lzo_update.sh

-- 4.5 Upload the lib, lzo, and lzop directories to each client
192.168.117.147/61/210/211/30/33/201

mv ./*.tar.gz /usr/local/hadoop-2.4.1/
cd /usr/local/hadoop-2.4.1/
tar -xvf lib.tar.gz
tar -xvf lzop.tar.gz
tar -xvf lzo.tar.gz
rm -rf /usr/local/hadoop-2.4.1/*.tar.gz
chown -R hadoop.hadoop ./*
ls -l

----------------------------------------------------------------------------------
-- 5. Update each client's software configuration files (211 is the hive server; its metastore lives in MySQL on 210)
-- Done: customized according to the software installed on each host
147, 61:  hadoop OK, hive OK, java OK, sqoop OK
210, 211: hadoop OK, hbase OK, hive OK, java OK
30:       hadoop OK, java OK
201, 33:  hadoop OK, hive OK, java OK

-- Done: sync the hadoop configuration files to all cluster nodes (run on 61)

cd /usr/local/hadoop-2.4.1/etc/
scp -P5044 /usr/local/hadoop-2.4.1/etc/hadoop_online_namenode/* hadoop@funshion-hadoop0:/usr/local/hadoop-2.4.1/etc/hadoop/ -- sync the namenode config
-- Done: sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/hadoop_conf_update.sh -- sync the datanode config
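
-- A minimal sketch of what a sync script like hadoop_conf_update.sh might look like (hypothetical; assumes the datanode config lives in etc/hadoop_online_allnode and the node list comes from the slaves file):
#!/bin/bash
# Hypothetical sketch: push the datanode configuration to every node listed in slaves.
for host in $(cat /usr/local/hadoop-2.4.1/etc/hadoop/slaves); do
    scp -P5044 /usr/local/hadoop-2.4.1/etc/hadoop_online_allnode/* hadoop@${host}:/usr/local/hadoop-2.4.1/etc/hadoop/
done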

-- Partially done: sync the hadoop configuration files on the clients
-- Done:     cd /usr/local/hadoop-2.4.1/etc/
-- Done:     tar -xvf hadoop_online_allnode.tar.gz
-- Not done: cp /usr/local/hadoop-2.4.1/etc/hadoop_online_allnode/* /usr/local/hadoop-2.4.1/etc/hadoop

-- ############################################################################ --
-- Part 2: Hadoop cluster upgrade steps:

-- 1. Shut down dependent services and stop the hadoop monitoring software and scripts:
-- including hive (211), hbase (180-185), zookeeper (180-185)


-- Stop hive (211):
netstat -anlp|grep 10000
kill -9 4065
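
-- Rather than hard-coding the PID, the HiveServer2 process listening on port 10000 can be located first (a small sketch; assumes it is the only listener on 10000):
HS2_PID=$(netstat -anlp 2>/dev/null | grep ':10000 ' | grep LISTEN | awk '{print $7}' | cut -d/ -f1 | head -1)
[ -n "$HS2_PID" ] && kill -9 "$HS2_PID"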

-- Stop hbase (180-185):
ssh -p5044 hadoop@funshion-hadoop183
cd /usr/local/hbase/bin
./hbase-daemon.sh stop rest

$HBASE_HOME/bin/stop-hbase.sh

-- Stop zookeeper (180-185):
/usr/local/zookeeper/bin/zkServer.sh stop

-- Stop the hadoop monitoring software and scripts, including Yongjian's monitoring scripts


---------------------------------------------------
-- 2. Check the version information of the existing cluster (check it both before and after the upgrade so the two can be compared):
-- namenode:
[hadoop@funshion-hadoop0 ~]$ more /hadoop-disk1/hdfs/name/current/VERSION
#Thu Aug 07 18:36:20 CST 2014
namespaceID=1439833971
cTime=1346037715113
storageType=NAME_NODE
layoutVersion=-32

-- Datanode:
[hadoop@funshion-hadoop150 ~]$ more /hadoop-disk1/hdfs/data/current/VERSION
#Fri Jun 13 16:26:29 CST 2014
namespaceID=1439833971
storageID=DS-1851930298-192.168.117.150-50010-1388475602372
cTime=1346037715113
storageType=DATA_NODE
layoutVersion=-32


---------------------------------------------------
-- 3. Stop all production jobs. With the current version, enter safe mode and check the files and blocks:
-- 3.1 Enter safe mode
hadoop dfsadmin -safemode enter

-- 3.2 Check the files and blocks (filtering out every line that starts with a dot):
hadoop fsck / -files -blocks -locations |grep -v -E '^\.' > /hadoop-disk4/dfs-v-old-fsck-1.log  

-- Keep the log produced by the step above on local disk.
tail -n 400 /hadoop-disk4/dfs-v-old-fsck-1.log

-- Post-upgrade verification (after a successful upgrade, enter safe mode and run):
hdfs dfsadmin -safemode enter
hdfs fsck / -files -blocks -locations > /hadoop-disk4/dfs-v-new-fsck-1.log
tail -n 400 /hadoop-disk4/dfs-v-new-fsck-1.log
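
-- A quick way to compare the two fsck reports (sketch; pulls only the summary counters so the old and new runs can be diffed by eye):
grep -E 'Total size|Total dirs|Total files|Total blocks|Corrupt blocks|Missing replicas' /hadoop-disk4/dfs-v-old-fsck-1.log
grep -E 'Total size|Total dirs|Total files|Total blocks|Corrupt blocks|Missing replicas' /hadoop-disk4/dfs-v-new-fsck-1.log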


---------------------------------------------------
-- 4. Stop the existing cluster (before the upgrade, it is best to back up a copy of the namenode metadata to another machine),
--    update the environment variables (the new JAVA_HOME, HADOOP_HOME, etc., and repoint the related symlinks),
--    and create the directories required by the new version's configuration files (already created)
-- 4.1 Stop the cluster (remember to stop the secondary namenode as well)
-- 4.1.1 Stop the namenode and datanode processes (run on funshion-hadoop0):
$HADOOP_HOME/bin/stop-all.sh

-- 4.1.2 Stop the secondary namenode (run on 117.61, funshion-hadoop01)
$HADOOP_HOME/bin/stop-secondarynamenode.sh

-- 4.2 Back up the metadata (while this step runs, the datanode symlinks can already be updated in parallel)
mkdir /hadoop-disk5/hdfs/name.bak.20140809
cp -r /hadoop-disk1/hdfs/name /hadoop-disk5/hdfs/name.bak.20140809
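
-- To also keep a copy on another machine (a sketch; the target host and directory are assumptions):
tar -czf /hadoop-disk5/hdfs/name.bak.20140809.tar.gz -C /hadoop-disk1/hdfs name
scp -P5044 /hadoop-disk5/hdfs/name.bak.20140809.tar.gz hadoop@funshion-hadoop01:/home/hadoop/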

-- 4.3 Update the symlinks on the namenode, datanodes, and secondary namenode (61):

rm -rf /usr/local/hadoop
ln -s /usr/local/hadoop-2.4.1 /usr/local/hadoop
ls -l /usr/local |grep hadoop
rm -rf /usr/java/latest
ln -s /usr/java/jdk1.7.0_65 /usr/java/latest
ls -l /usr/java/
source ~/.bash_profile


-- Reference: shell history from the upgrade run (history numbers stripped):
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode -upgrade
jps
tail -n 400 /home/hadoop/logs/hadoop-hadoop-namenode-funshion-hadoop193.log
hdfs fsck / -files -blocks -locations > /home/hadoop/dfs-v-new-fsck-1.log
tail -n 400 /home/hadoop/dfs-v-new-fsck-1.log
jps
cd $HADOOP_HOME/sbin
./start-all.sh
jps
hdfs dfsadmin -report
hdfs dfsadmin -safemode enter
hdfs fsck / -files -blocks -locations > /home/hadoop/dfs-v-new-fsck-1.log
tail -n 400 /home/hadoop/dfs-v-new-fsck-1.log
hdfs dfsadmin -finalizeUpgrade
jps
hdfs dfs -mkdir -p /tmp/hbase-hadoop/hbase
hdfs dfsadmin -safemode leave
hdfs dfs -mkdir -p /tmp/hbase-hadoop/hbase

---------------------------------------------------
-- 5. On 61, update the environment variables on every cluster node (the old ones have already been backed up):
cd /home/users/hadoop/luoyoumou/hadoop_update_scripts/

-- Before running the script below, double-check that its contents are correct:
sh /home/users/hadoop/luoyoumou/hadoop_update_scripts/bash_profile_update.sh

-- stop second namenode
cd /usr/local/hadoop/bin
./stop-secondnamenode.sh


---------------------------------------------------
-- 6. Final checks before the upgrade:
-- Confirm that the $HADOOP_HOME/etc/hadoop/slaves file correctly lists all datanode nodes
-- (if it is changed, remember to sync it to all the other nodes)
-- It is also worth confirming the java version currently in use on each node, and that the symlinks point at the correct versions (java, hadoop); see the check loop below
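
-- A small check loop over the slaves file (sketch; uses the same ssh port as elsewhere in this post):
for host in $(cat $HADOOP_HOME/etc/hadoop/slaves); do
    echo "== ${host} =="
    ssh -p5044 hadoop@${host} "ls -ld /usr/java/latest /usr/local/hadoop; /usr/java/latest/bin/java -version"
done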

---------------------------------------------------
-- Before the upgrade, it is safest to first modify 201 and 211 (edit the config files of both the old and the new hadoop versions there so that they cannot reach the cluster)
-- 7. Once everything is ready, start the upgrade
-- 7.1 (Before upgrading, reload the hadoop user's environment variables: source ~/.bash_profile):
source ~/.bash_profile
cd $HADOOP_HOME/
sbin/start-dfs.sh -upgrade

-- To be safe, you can also upgrade just the namenode metadata first, with the following command:
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode -upgrade

-- Rollback command (if something goes wrong and a rollback is needed, run):
$HADOOP_HOME/sbin/hadoop-daemon.sh start namenode -rollback

-- 7.2 Post-upgrade verification (after a successful upgrade, enter safe mode and run):
hdfs dfsadmin -safemode enter
hdfs fsck / -files -blocks -locations > /hadoop-disk4/dfs-v-new-fsck-1.log
tail -n 400 /hadoop-disk4/dfs-v-new-fsck-1.log

-- 7.3 Finalize the upgrade:
hdfs dfsadmin -finalizeUpgrade

-- 7.4 Start the cluster:
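
-- The start commands themselves (the command history above used start-all.sh; start-dfs.sh plus start-yarn.sh is the equivalent split form):
cd $HADOOP_HOME/sbin
./start-dfs.sh
./start-yarn.sh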

-- Remember to start the historyserver on the namenode node, with:
./mr-jobhistory-daemon.sh  start historyserver

-- Run separately on 61:
hdfs secondarynamenode -checkpoint force

-- ############################################################################ --
-- Part 3: Update the related symlinks on each client:

-- Done: per the software installed on each host
147, 61:  hadoop, java, hive, sqoop
210, 211: hadoop, java, hive, hbase
30:       hadoop, java
201, 33:  hadoop, java, hive
180-185:  hadoop, java, zookeeper, hbase

-- hadoop and java: 147, 61, 210, 211, 30, 201, 33, 180-185
rm -rf /usr/local/hadoop
ln -s /usr/local/hadoop-2.4.1 /usr/local/hadoop
ls -l /usr/local |grep hadoop
rm -rf /usr/java/latest
ln -s /usr/java/jdk1.7.0_65 /usr/java/latest
ls -l /usr/java/
source ~/.bash_profile

-- hive: 147, 61, 201, 33
rm -rf /usr/local/hive
ln -s /usr/local/apache-hive-0.14.0-SNAPSHOT-bin /usr/local/hive
ls -l /usr/local |grep hive
source ~/.bash_profile

-- hbase 180-185
rm -rf /usr/local/hbase
ln -s /usr/local/hbase-0.98.3-hadoop2 /usr/local/hbase
ls -l /usr/local |grep hbase
source ~/.bash_profile

-- zookeeper 180-185
rm -rf /usr/local/zookeeper
ln -s /usr/local/zookeeper-3.4.6 /usr/local/zookeeper
ls -l /usr/local |grep zookeeper
source ~/.bash_profile

-- sqoop
rm -rf /usr/local/sqoop
ln -s /usr/local/sqoop-1.4.4.bin__hadoop-2.4.1 /usr/local/sqoop
ls -l /usr/local |grep sqoop
source ~/.bash_profile

-- ############################################################################ --
-- Part 4: Hive upgrade on server 211:
-- 1.1 Check that the /usr/local/hive symlink points at the new version (new: apache-hive-0.14.0-SNAPSHOT-bin, old: hive-0.12.0-bin)
ls -l /usr/local/ |grep hive

-- 1.2 Log in to 211 as the hadoop user and back up the hive metastore:
mv /data1/db_bak/bak/hive.sql-2014-08-09 /data1/db_bak/bak/hive.sql-2014-08-09.bak
cd /data1/db_bak && nohup sh mysql-bak.sh 1>/data1/db_bak/logs/bakup.log 2>/data1/db_bak/logs/bakup.err &

-- Wait about three minutes
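
-- A minimal sketch of an equivalent backup done directly with mysqldump (assumptions: the metastore MySQL lives on 210 and the hive/hive account from the grant statements below has access; the real mysql-bak.sh may do more):
MYSQL_HOST=192.168.115.210   # assumed metastore MySQL host (210)
mysqldump -h ${MYSQL_HOST} -uhive -phive hive > /data1/db_bak/bak/hive.sql-2014-08-09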

-- 1.3 Log in to server 210 and run the new version's MySQL metastore upgrade scripts:
-- Before upgrading (before entering the mysql client), it is best to cd into this directory first, because some upgrade scripts have a bug: they depend on the current directory.
-- If you are upgrading from 0.12 to 0.13, only the upgrade-0.12.0-to-0.13.0.mysql.sql script needs to be run.
-- If you are upgrading from 0.12 to 0.14, run upgrade-0.12.0-to-0.13.0.mysql.sql first and then upgrade-0.13.0-to-0.14.0.mysql.sql, and so on.

cd /usr/local/apache-hive-0.14.0-SNAPSHOT-bin/scripts/metastore/upgrade/mysql   
mysql -uroot -proot
use hive;
source /usr/local/apache-hive-0.14.0-SNAPSHOT-bin/scripts/metastore/upgrade/mysql/upgrade-0.12.0-to-0.13.0.mysql.sql  
source /usr/local/apache-hive-0.14.0-SNAPSHOT-bin/scripts/metastore/upgrade/mysql/upgrade-0.13.0-to-0.14.0.mysql.sql
source /usr/local/apache-hive-0.14.0-SNAPSHOT-bin/scripts/metastore/upgrade/mysql/hive-schema-0.14.0.mysql.sql

-- Finally, refresh the MySQL privileges for the hive user:
mysql -uroot -proot
use mysql;
select Host, User, Password from mysql.user;

GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'%' Identified by 'hive';
GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'localhost' Identified by 'hive';
GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'127.0.0.1' Identified by 'hive';  
GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'192.168.115.210' Identified by 'hive';
GRANT ALL PRIVILEGES ON hive.* TO 'hive'@'192.168.117.147' Identified by 'hive';
flush privileges;

GRANT ALL PRIVILEGES ON hive_tmp.* TO 'hive'@'%' Identified by 'hive';
GRANT ALL PRIVILEGES ON hive_tmp.* TO 'hive'@'localhost' Identified by 'hive';
GRANT ALL PRIVILEGES ON hive_tmp.* TO 'hive'@'127.0.0.1' Identified by 'hive';  
GRANT ALL PRIVILEGES ON hive_tmp.* TO 'hive'@'192.168.115.210' Identified by 'hive';
GRANT ALL PRIVILEGES ON hive_tmp.* TO 'hive'@'192.168.117.147' Identified by 'hive';
flush privileges;


-- That completes the upgrade; it can now be tested.

-- ############################################################################ --
-- Part 5: ZooKeeper and HBase upgrade on 183-185:
-- 1.1.0
-- First check whether the symlink still points at the old version (if it does not, run the following steps):
rm -rf /usr/local/zookeeper
ln -s /usr/local/zookeeper-3.3.5 /usr/local/zookeeper
ls -l /usr/local |grep zookeeper
source ~/.bash_profile

-- 1.1.1 Shut down the existing ZooKeeper ensemble:
-- Run the following command on all three nodes 183, 184, and 185:
/usr/local/zookeeper-3.3.5/bin/zkServer.sh stop

-- 1.1.2 Copy the old dataDir contents into the new version:
-- Previously: dataDir=/usr/local/zookeeper/data
-- Note: /usr/local/zookeeper is a symlink pointing at /usr/local/zookeeper-3.3.5,
-- so run the following on each node (the /usr/local/zookeeper-3.4.6/data directory has already been created):
cp -r /usr/local/zookeeper-3.3.5/data/* /usr/local/zookeeper-3.4.6/data

-- 1.1.3 On each node, as root, repoint the /usr/local/zookeeper symlink at the new version:
su - root
cd /usr/local
rm -rf /usr/local/zookeeper
ln -s zookeeper-3.4.6 zookeeper
ls -l |grep zookeeper

-- 1.1.4 Configure the new zoo.cfg
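
-- A minimal sketch of a 3.4.6 zoo.cfg matching the layout above (the server lines are assumptions based on the quorum that appears in the hbase logs further below; copy the real values from the old 3.3.5 zoo.cfg):
cat > /usr/local/zookeeper-3.4.6/conf/zoo.cfg <<'EOF'
tickTime=2000
initLimit=10
syncLimit=5
dataDir=/usr/local/zookeeper/data
clientPort=2181
server.1=funshion-hadoop194:2888:3888
server.2=funshion-hadoop195:2888:3888
server.3=funshion-hadoop196:2888:3888
EOF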

-- 1.1.5 Upgrade:
/usr/local/zookeeper-3.4.6/bin/zkServer.sh upgrade

JMX enabled by default
Using config: /usr/local/zookeeper-3.4.6/bin/../conf/zoo.cfg
upgrading the servers to 3.*
2014-08-08 20:02:33,734 [myid:] - ERROR [main:UpgradeMain@170] - Usage: UpgradeMain dataDir snapShotDir
Upgrading ...


/usr/local/zookeeper-3.4.6/bin/zkServer.sh start

-- 1.1.6 Post-upgrade check (a restart may be needed):
/usr/local/zookeeper-3.4.6/bin/zkServer.sh status

-- #################################################################################### --
-- Part 6: HBase upgrade (run this only if the ZooKeeper upgrade succeeded)

-- Reference URL: http://blog.sunhs.me/?p=431

-- 1.1 Check that the old HBase has been stopped
jps

-- 1.2 Point the /usr/local/hbase symlink at the new version
rm -rf /usr/local/hbase
ln -s /usr/local/hbase-0.98.3-hadoop2 /usr/local/hbase
ls -l /usr/local |grep hbase
source ~/.bash_profile

-- 1.3 Run the upgrade check (remember to update the configuration files before this step)
cd $HBASE_HOME
bin/hbase upgrade -check

-- The output looks roughly as follows:
[hadoop@funshion-hadoop194 hbase]$ bin/hbase upgrade -check
SLF4J: Class path contains multiple SLF4J bindings.
SLF4J: Found binding in [jar:file:/usr/local/hbase-0.98.3-hadoop2/lib/slf4j-log4j12-1.6.4.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: Found binding in [jar:file:/usr/local/hadoop-2.4.1/share/hadoop/common/lib/slf4j-log4j12-1.7.5.jar!/org/slf4j/impl/StaticLoggerBinder.class]
SLF4J: See http://www.slf4j.org/codes.html#multiple_bindings for an explanation.
2014-08-08 20:17:18,804 WARN  [main] util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2014-08-08 20:17:20,445 INFO  [main] Configuration.deprecation: fs.default.name is deprecated. Instead, use fs.defaultFS
2014-08-08 20:17:20,447 INFO  [main] util.HFileV1Detector: Target dir is: hdfs://funshion-hadoop193:8020/hbase/XosRS
2014-08-08 20:17:20,723 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/-ROOT-
2014-08-08 20:17:20,736 DEBUG [main] util.HFileV1Detector: processing table: hdfs://funshion-hadoop193:8020/hbase/XosRS/.META.
2014-08-08 20:17:20,749 DEBUG [main] util.HFileV1Detector: processing region: hdfs://funshion-hadoop193:8020/hbase/XosRS/.META./1028785192
2014-08-08 20:17:21,004 INFO  [pool-4-thread-1] Configuration.deprecation: hadoop.native.lib is deprecated. Instead, use io.native.lib.available
2014-08-08 20:17:22,061 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/.corrupt
2014-08-08 20:17:22,069 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/.logs
2014-08-08 20:17:22,087 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/.oldlogs
2014-08-08 20:17:22,096 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/.tmp
2014-08-08 20:17:22,099 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/hbase.id
2014-08-08 20:17:22,102 INFO  [main] util.HFileV1Detector: Ignoring path: hdfs://funshion-hadoop193:8020/hbase/XosRS/hbase.version
2014-08-08 20:17:22,110 DEBUG [main] util.HFileV1Detector: processing table: hdfs://funshion-hadoop193:8020/hbase/XosRS/testtable
2014-08-08 20:17:22,130 DEBUG [main] util.HFileV1Detector: processing region: hdfs://funshion-hadoop193:8020/hbase/XosRS/testtable/4695964d9c79ae1bc08f5133ed0f1031
2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: Result:

2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: Tables Processed:
2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: hdfs://funshion-hadoop193:8020/hbase/XosRS/.META.
2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: hdfs://funshion-hadoop193:8020/hbase/XosRS/testtable
2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: Count of HFileV1: 0
2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: Count of corrupted files: 0
2014-08-08 20:17:22,155 INFO  [main] util.HFileV1Detector: Count of Regions with HFileV1: 0
2014-08-08 20:17:22,264 INFO  [main] migration.UpgradeTo96: No HFileV1 found.
[hadoop@funshion-hadoop194 hbase]$

-- 1.4 Start ZooKeeper (it was already started during the upgrade above)

-- 1.5 Upgrade
bin/hbase upgrade -execute

-- The output looks roughly as follows:
console.jar:/usr/java/latest/lib/sa-jdi.jar:/usr/java/latest/lib/tools.jar:/usr/java/latest/lib/ant-javafx.jar:/usr/java/latest/lib/dt.jar:/usr/java/latest/lib/javafx-doclet.jar:/usr/java/latest/lib/javafx-mx.jar:/usr/java/latest/lib/jconsole.jar:/usr/java/latest/lib/sa-jdi.jar:/usr/java/latest/lib/tools.jar::/usr/java/latest/lib/ant-javafx.jar:/usr/java/latest/lib/dt.jar:/usr/java/latest/lib/javafx-doclet.jar:/usr/java/latest/lib/javafx-mx.jar:/usr/java/latest/lib/jconsole.jar:/usr/java/latest/lib/sa-jdi.jar:/usr/java/latest/lib/tools.jar:/usr/java/latest/lib/ant-javafx.jar:/usr/java/latest/lib/dt.jar:/usr/java/latest/lib/javafx-doclet.jar:/usr/java/latest/lib/javafx-mx.jar:/usr/java/latest/lib/jconsole.jar:/usr/java/latest/lib/sa-jdi.jar:/usr/java/latest/lib/tools.jar
2014-08-08 20:18:44,221 INFO  [main] zookeeper.ZooKeeper: Client environment:java.io.tmpdir=/tmp
2014-08-08 20:18:44,221 INFO  [main] zookeeper.ZooKeeper: Client environment:java.compiler=<NA>
2014-08-08 20:18:44,221 INFO  [main] zookeeper.ZooKeeper: Client environment:os.name=Linux
2014-08-08 20:18:44,221 INFO  [main] zookeeper.ZooKeeper: Client environment:os.arch=amd64
2014-08-08 20:18:44,221 INFO  [main] zookeeper.ZooKeeper: Client environment:os.version=2.6.32-358.el6.x86_64
2014-08-08 20:18:44,222 INFO  [main] zookeeper.ZooKeeper: Client environment:user.name=hadoop
2014-08-08 20:18:44,222 INFO  [main] zookeeper.ZooKeeper: Client environment:user.home=/home/hadoop
2014-08-08 20:18:44,222 INFO  [main] zookeeper.ZooKeeper: Client environment:user.dir=/usr/local/hbase-0.98.3-hadoop2
2014-08-08 20:18:44,224 INFO  [main] zookeeper.ZooKeeper: Initiating client connection, connectString=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181 sessionTimeout=90000 watcher=Check Live Processes., quorum=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181, baseZNode=/hbase
2014-08-08 20:18:44,300 INFO  [main] zookeeper.RecoverableZooKeeper: Process identifier=Check Live Processes. connecting to ZooKeeper ensemble=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181
2014-08-08 20:18:44,306 INFO  [main-SendThread(funshion-hadoop195:2181)] zookeeper.ClientCnxn: Opening socket connection to server funshion-hadoop195/192.168.117.195:2181. Will not attempt to authenticate using SASL (unknown error)
2014-08-08 20:18:44,343 WARN  [main-SendThread(funshion-hadoop195:2181)] zookeeper.ClientCnxn: Session 0x0 for server null, unexpected error, closing socket connection and attempting reconnect
java.net.ConnectException: Connection refused
        at sun.nio.ch.SocketChannelImpl.checkConnect(Native Method)
        at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:739)
        at org.apache.zookeeper.ClientCnxnSocketNIO.doTransport(ClientCnxnSocketNIO.java:361)
        at org.apache.zookeeper.ClientCnxn$SendThread.run(ClientCnxn.java:1081)
2014-08-08 20:18:44,467 INFO  [main-SendThread(funshion-hadoop194:2181)] zookeeper.ClientCnxn: Opening socket connection to server funshion-hadoop194/192.168.117.194:2181. Will not attempt to authenticate using SASL (unknown error)
2014-08-08 20:18:44,469 INFO  [main-SendThread(funshion-hadoop194:2181)] zookeeper.ClientCnxn: Socket connection established to funshion-hadoop194/192.168.117.194:2181, initiating session
2014-08-08 20:18:44,499 WARN  [main] zookeeper.RecoverableZooKeeper: Possibly transient ZooKeeper, quorum=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181, exception=org.apache.zookeeper.KeeperException$ConnectionLossException: KeeperErrorCode = ConnectionLoss for /hbase
2014-08-08 20:18:44,500 INFO  [main] util.RetryCounter: Sleeping 1000ms before retry #0...
2014-08-08 20:18:44,502 INFO  [main-SendThread(funshion-hadoop194:2181)] zookeeper.ClientCnxn: Session establishment complete on server funshion-hadoop194/192.168.117.194:2181, sessionid = 0x147b581ed320002, negotiated timeout = 40000
2014-08-08 20:18:45,548 INFO  [main] zookeeper.ZooKeeper: Session: 0x147b581ed320002 closed
2014-08-08 20:18:45,549 INFO  [main-EventThread] zookeeper.ClientCnxn: EventThread shut down
2014-08-08 20:18:45,563 INFO  [main] migration.UpgradeTo96: Starting Namespace upgrade
2014-08-08 20:18:46,149 WARN  [main] util.NativeCodeLoader: Unable to load native-hadoop library for your platform... using builtin-java classes where applicable
2014-08-08 20:18:47,720 INFO  [main] Configuration.deprecation: fs.default.name is deprecated. Instead, use fs.defaultFS
2014-08-08 20:18:48,461 INFO  [main] util.FSUtils: Updating the hbase.version file format with version=7
2014-08-08 20:18:49,311 DEBUG [main] util.FSUtils: Created version file at hdfs://funshion-hadoop193:8020/hbase/XosRS with version=7
2014-08-08 20:18:49,445 INFO  [main] migration.NamespaceUpgrade: Migrating table testtable to hdfs://funshion-hadoop193:8020/hbase/XosRS/.data/default/testtable
2014-08-08 20:18:49,567 INFO  [main] migration.NamespaceUpgrade: Does not exist: hdfs://funshion-hadoop193:8020/hbase/XosRS/.lib
2014-08-08 20:18:49,589 INFO  [main] migration.NamespaceUpgrade: Migrating meta table .META. to hdfs://funshion-hadoop193:8020/hbase/XosRS/data/hbase/meta
2014-08-08 20:18:49,676 INFO  [main] migration.NamespaceUpgrade: Migrating meta region hdfs://funshion-hadoop193:8020/hbase/XosRS/data/hbase/meta/1028785192 to hdfs://funshion-hadoop193:8020/hbase/XosRS/data/hbase/meta/1588230740
2014-08-08 20:18:50,154 DEBUG [main] util.FSUtils: Created version file at hdfs://funshion-hadoop193:8020/hbase/XosRS with version=8
2014-08-08 20:18:50,154 INFO  [main] migration.UpgradeTo96: Successfully completed Namespace upgrade
2014-08-08 20:18:50,158 INFO  [main] migration.UpgradeTo96: Starting Znode upgrade
2014-08-08 20:18:50,171 INFO  [main] zookeeper.ZooKeeper: Initiating client connection, connectString=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181 sessionTimeout=90000 watcher=Migrate ZK data to PB., quorum=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181, baseZNode=/hbase
2014-08-08 20:18:50,174 INFO  [main-SendThread(funshion-hadoop194:2181)] zookeeper.ClientCnxn: Opening socket connection to server funshion-hadoop194/192.168.117.194:2181. Will not attempt to authenticate using SASL (unknown error)
2014-08-08 20:18:50,175 INFO  [main] zookeeper.RecoverableZooKeeper: Process identifier=Migrate ZK data to PB. connecting to ZooKeeper ensemble=funshion-hadoop195:2181,funshion-hadoop194:2181,funshion-hadoop196:2181
2014-08-08 20:18:50,177 INFO  [main-SendThread(funshion-hadoop194:2181)] zookeeper.ClientCnxn: Socket connection established to funshion-hadoop194/192.168.117.194:2181, initiating session
2014-08-08 20:18:50,190 INFO  [main-SendThread(funshion-hadoop194:2181)] zookeeper.ClientCnxn: Session establishment complete on server funshion-hadoop194/192.168.117.194:2181, sessionid = 0x147b581ed320003, negotiated timeout = 40000
2014-08-08 20:18:50,422 INFO  [main-EventThread] zookeeper.ClientCnxn: EventThread shut down
2014-08-08 20:18:50,424 INFO  [main] zookeeper.ZooKeeper: Session: 0x147b581ed320003 closed
2014-08-08 20:18:50,424 INFO  [main] migration.UpgradeTo96: Successfully completed Znode upgrade
2014-08-08 20:18:50,424 INFO  [main] migration.UpgradeTo96: Starting Log splitting
2014-08-08 20:18:50,432 INFO  [main] migration.UpgradeTo96: No log directories to split, returning


-- 1.6 Finally, try starting hbase:

cd $HBASE_HOME/bin
./start-hbase.sh




-- Note: because my hbase-0.98.3-hadoop2 build was compiled against hadoop 2.4.1, the matching hadoop jars are already present under hbase-0.98.3-hadoop2/lib,
-- similar to the following:
[hadoop@funshion-hadoop183 lib]$ pwd
/usr/local/hbase-0.98.3-hadoop2/lib
[hadoop@funshion-hadoop183 lib]$ ls -l |grep hadoop-
-rw-r--r-- 1 hadoop hadoop    17037 Aug  5 08:57 hadoop-annotations-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop    50525 Aug  5 08:57 hadoop-auth-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop     2560 Aug  6 10:02 hadoop-client-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  2908722 Aug  5 08:57 hadoop-common-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  6829695 Aug  6 10:02 hadoop-hdfs-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  2563520 Aug  6 10:02 hadoop-hdfs-2.4.1-tests.jar
-rw-r--r-- 1 hadoop hadoop   487973 Aug  6 10:02 hadoop-mapreduce-client-app-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop   662822 Aug  6 10:02 hadoop-mapreduce-client-common-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  1493531 Aug  5 08:57 hadoop-mapreduce-client-core-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop    35726 Aug  6 10:02 hadoop-mapreduce-client-jobclient-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  1486492 Aug  6 10:02 hadoop-mapreduce-client-jobclient-2.4.1-tests.jar
-rw-r--r-- 1 hadoop hadoop    25680 Aug  6 10:02 hadoop-mapreduce-client-shuffle-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  1638528 Aug  5 08:57 hadoop-yarn-api-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop   107565 Aug  6 10:02 hadoop-yarn-client-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop  1407445 Aug  5 08:57 hadoop-yarn-common-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop   213670 Aug  6 10:02 hadoop-yarn-server-common-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop   478367 Aug  6 10:02 hadoop-yarn-server-nodemanager-2.4.1.jar
-rw-r--r-- 1 hadoop hadoop    32460 Aug  6 10:15 hbase-hadoop-compat-0.98.3-hadoop2.jar

cd /usr/local/hadoop-2.4.1
cp ./share/hadoop/mapreduce/lib/hadoop-annotations-2.4.1.jar /usr/local/hbase-0.98.3-hadoop2/lib
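
-- A quick check that no mismatched hadoop jars remain in the hbase lib directory (sketch; no output means every bundled hadoop jar is the 2.4.1 build):
ls /usr/local/hbase-0.98.3-hadoop2/lib/hadoop-*.jar | grep -v 2.4.1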


-- ./mr-jobhistory-daemon.sh  stop historyserver
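
-- (Reference) hive-site.xml snippets for the new hive setup: the metastore JDBC connection URL and the HiveServer2 thrift port/bind host are shown below.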


<property>
  <name>javax.jdo.option.ConnectionURL</name>
  <value>jdbc:mysql://funshion-hadoop148:3306/hive?createDatabaseIfNotExist=true&amp;useUnicode=true&amp;characterEncoding=UTF-8</value>
  <description>JDBC connect string for a JDBC metastore</description>
</property>


<property>
  <name>hive.server2.thrift.port</name>
  <value>10000</value>
  <description>Port number of HiveServer2 Thrift interface.
  Can be overridden by setting $HIVE_SERVER2_THRIFT_PORT</description>
</property>

<property>
  <name>hive.server2.thrift.bind.host</name>
  <value>funshion-hadoop148</value>
  <description>Bind host on which to run the HiveServer2 Thrift interface.
  Can be overridden by setting $HIVE_SERVER2_THRIFT_BIND_HOST</description>
</property>




