Hadoop HA cluster setup (5 nodes)

yum install ntpdate lrzsz -y

systemctl stop firewalld
systemctl disable firewalld
systemctl stop NetworkManager
systemctl disable NetworkManager
setenforce 0    # disables SELinux until reboot; edit /etc/selinux/config to make it permanent

mkdir /home/myapps && cd /home/myapps

Configure the hosts file:
cat >> /etc/hosts << EOF
192.168.163.129 node1
192.168.163.131 node2
192.168.163.132 node3
192.168.163.133 node4
192.168.163.128 node5
EOF

Configure time synchronization:
/usr/sbin/ntpdate ntp1.aliyun.com
crontab -e
*/30 * * * * /usr/sbin/ntpdate ntp1.aliyun.com

Configure passwordless SSH between the nodes:
ssh-keygen
for ip in 132 129 131 133 128;do ssh-copy-id -i ~/.ssh/id_rsa.pub root@192.168.163.$ip ;done
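Once the keys are distributed, passwordless login can be spot-checked from the same shell; each command should print the remote hostname without asking for a password:
for ip in 132 129 131 133 128;do ssh root@192.168.163.$ip hostname ;done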

Configure the Java environment variables:
java -version
vim /etc/profile
export JAVA_HOME=/home/myapp/java
export PATH=$JAVA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

source /etc/profile

ZooKeeper cluster setup:
vim /etc/profile

export ZOOKEEPER_HOME=/home/myapps/zookeeper-3.4.11
export PATH=$PATH:$ZOOKEEPER_HOME/bin

cd /home/myapps/zookeeper-3.4.11/
mkdir data
mkdir log
vim conf/zoo.cfg

    dataDir=/home/myapps/zookeeper-3.4.11/data
    dataLogDir=/home/myapps/zookeeper-3.4.11/log
    server.1=node1:2888:3888
    server.2=node2:2888:3888
    server.3=node3:2888:3888
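Note: ZooKeeper 3.4.11 only ships a zoo_sample.cfg template, so zoo.cfg also needs the basic timing and client-port settings. A minimal complete zoo.cfg, assuming the stock defaults and the 2181 client port that ha.zookeeper.quorum relies on later:

    tickTime=2000
    initLimit=10
    syncLimit=5
    clientPort=2181
    dataDir=/home/myapps/zookeeper-3.4.11/data
    dataLogDir=/home/myapps/zookeeper-3.4.11/log
    server.1=node1:2888:3888
    server.2=node2:2888:3888
    server.3=node3:2888:3888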
Push the modified ZooKeeper directory to the other nodes:
scp -r zookeeper-3.4.11 root@node2:/home/myapps/
scp -r zookeeper-3.4.11 root@node3:/home/myapps/

Set the myid:
In the dataDir configured above, create a file named myid containing a single number that identifies the current host:
[root@node1 zookeeper-3.4.11]# echo "1" > /home/myapps/zookeeper-3.4.11/data/myid
[root@node2 zookeeper-3.4.11]# echo "2" > /home/myapps/zookeeper-3.4.11/data/myid
[root@node3 zookeeper-3.4.11]# echo "3" > /home/myapps/zookeeper-3.4.11/data/myid
Start the cluster:
[root@node1 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh start
[root@node2 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh start
[root@node3 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh start
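After all three nodes are up, check each node's role; the ensemble should elect one leader and two followers:
[root@node1 zookeeper-3.4.11]# /home/myapps/zookeeper-3.4.11/bin/zkServer.sh status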

Hadoop cluster setup:
cd /home/myapps/hadoop/hadoop-2.7.5/etc/hadoop

vim hadoop-env.sh

export JAVA_HOME=/home/myapp/java    # replace the existing JAVA_HOME line (around line 25)

vim yarn-env.sh

export JAVA_HOME=/home/myapp/java    # replace the existing JAVA_HOME line (around line 23)

vim hdfs-site.xml

<configuration>
<property>
  <name>dfs.nameservices</name>
  <value>mycluster</value>
</property>
<property>
  <name>dfs.ha.namenodes.mycluster</name>
  <value>nn1,nn2</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.nn1</name>
  <value>node1:8020</value>
</property>
<property>
  <name>dfs.namenode.rpc-address.mycluster.nn2</name>
  <value>node2:8020</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.nn1</name>
  <value>node1:50070</value>
</property>
<property>
  <name>dfs.namenode.http-address.mycluster.nn2</name>
  <value>node2:50070</value>
</property>
<property>
  <name>dfs.namenode.shared.edits.dir</name>
 <value>qjournal://node3:8485;node4:8485;node5:8485/mycluster</value>
</property>
<property>
  <name>dfs.client.failover.proxy.provider.mycluster</name>
  <value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<property>
  <name>dfs.ha.fencing.methods</name>
  <value>sshfence</value>
</property>
<property>
  <name>dfs.ha.fencing.ssh.private-key-files</name>
  <value>/root/.ssh/id_rsa</value>
</property>
<property>
  <name>dfs.journalnode.edits.dir</name>
  <value>/home/myapps/hadoop/node/local/data</value>
</property>
<property>
   <name>dfs.ha.automatic-failover.enabled</name>
   <value>true</value>
 </property>
 </configuration>
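
With hdfs-site.xml in place, the HA settings can be sanity-checked with the stock hdfs getconf tool (the first command should print nn1,nn2):

hdfs getconf -confKey dfs.ha.namenodes.mycluster
hdfs getconf -confKey dfs.namenode.shared.edits.dir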

vim core-site.xml

<configuration>
<property>
  <name>fs.defaultFS</name>
  <value>hdfs://mycluster</value>
</property>
<property>
  <name>hadoop.tmp.dir</name>
  <value>/home/myapps/hadoop/data/hadoop/temp</value>
  <description>A base for other temporary directories.</description>
</property>
<property>
  <name>ha.zookeeper.quorum</name>
  <value>node1:2181,node2:2181,node3:2181</value>
</property>
</configuration>
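
Automatic failover only works if the quorum listed in ha.zookeeper.quorum is reachable. A quick connectivity check with the zkCli.sh client that ships with ZooKeeper (it should list the root znode and exit):

/home/myapps/zookeeper-3.4.11/bin/zkCli.sh -server node1:2181,node2:2181,node3:2181 ls /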

vim yarn-site.xml

<configuration>
<!-- Site specific YARN configuration properties -->
<property>
  <name>yarn.resourcemanager.hostname</name>
  <value>node1</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services</name>
  <value>mapreduce_shuffle</value>
</property>
<property>
  <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
  <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
</configuration>

vim mapred-site.xml

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
        <description>Execution framework set to Hadoop YARN.</description>
    </property>
</configuration>

vim slaves
node3
node4
node5

Copy the configured Hadoop directory to every other node:
for ip in 2 3 4 5; do scp -r /home/myapps/hadoop root@node${ip}:/home/myapps/; done

First start ZooKeeper (node1-node3).
Verify: jps on each node should show a QuorumPeerMain process.
Start the Hadoop cluster:

1. On node3, node4, and node5, start the JournalNodes:
    cd /home/myapps/hadoop/hadoop-2.7.5/sbin/
    ./hadoop-daemon.sh start journalnode
    Verify: if jps shows a JournalNode process, this step succeeded.
2. Format HDFS on one of the NameNodes:
    cd /home/myapps/hadoop/hadoop-2.7.5/bin
    ./hdfs namenode -format
    Copy the metadata directory created by the format to the other NameNode (a built-in alternative is sketched below):
    scp -r /home/myapps/hadoop/data root@node2:/home/myapps/hadoop/
    Then, on node1, initialize the HA state in ZooKeeper:    hdfs zkfc -formatZK
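    As an alternative to the scp above, HDFS ships a bootstrap command for the standby NameNode; a minimal sketch, assuming the freshly formatted NameNode on node1 is brought up first:
        # on node1, after the format:
        ./hadoop-daemon.sh start namenode
        # on node2:
        hdfs namenode -bootstrapStandby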

3. Start everything:
    cd /home/myapps/hadoop/hadoop-2.7.5/sbin/
    ./start-all.sh
    Verify the processes with jps on each node.
    You can also check in a browser:
        http://node1:50070
        NameNode 'node1:8020' (active)
        http://node2:50070
        NameNode 'node2:8020' (standby)
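    The active/standby states can also be queried from the command line with the stock haadmin tool (it should report active and standby respectively):
        hdfs haadmin -getServiceState nn1
        hdfs haadmin -getServiceState nn2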

ZooKeeper start/stop script:

#!/bin/bash

# chkconfig: 2345 20 80
# description: start/stop the ZooKeeper cluster

# /root/myapps/onekey/zk/slave holds one ZooKeeper hostname per line
ZK_HOSTS=/root/myapps/onekey/zk/slave

case $1 in
"start")
    # start every node in the background, then wait for all of them
    while read line; do
        echo $line
        ssh $line "source /etc/profile; nohup zkServer.sh start >/dev/null 2>&1" &
    done < $ZK_HOSTS
    wait
;;
"stop")
    # kill the QuorumPeerMain process on every node in parallel
    while read line; do
        echo $line
        ssh $line "source /etc/profile; jps | grep QuorumPeerMain | awk '{print \$1}' | xargs kill -9" &
    done < $ZK_HOSTS
    wait
;;
"status")
    jps
;;
*)
    echo "Usage: /etc/init.d/zkd {start|stop|status}"
;;
esac
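
A sketch of installing the script as a SysV service, assuming it is saved as /etc/init.d/zkd (the name its usage message suggests); the Hadoop script below installs the same way as hadoopd:

cp zkd /etc/init.d/zkd
chmod +x /etc/init.d/zkd
chkconfig --add zkd    # registers the runlevels from the chkconfig header
service zkd start
service zkd status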

Hadoop start/stop script:

#!/bin/bash

# chkconfig: 2345 20 80
# description: start/stop the Hadoop cluster

case $1 in
"start")
    /home/myapps/hadoop/hadoop-2.7.5/sbin/start-all.sh
;;
"stop")
    /home/myapps/hadoop/hadoop-2.7.5/sbin/stop-all.sh
;;
"status")
    jps
;;
*)
    echo "Usage: /etc/init.d/hadoopd {start|stop|status}"
;;
esac
