首页 > 网络 > 云计算 >

CentOS7下Hadoop2.7.3+Spark2.1.0集群环境搭建(1NN+2DN)

2017-04-06

CentOS7下Hadoop2.7.3+Spark2.1.0集群环境搭建(1NN+2DN)。

环境

主机名 ip 进程
nn.hadoop.data.example.net 172.16.156.220 NameNode、Master、ResourceManager、SecondaryNameNode、JobHistoryServer
dn1.hadoop.data.example.net 172.16.156.221 NodeManager、DataNode、Worker
dn2.hadoop.data.example.net 172.16.156.222 NodeManager、DataNode、Worker

yum安装如下包 (可能有部分包用不到)

# Install build tools and common admin utilities (some packages may be unused).
yum install pcre-devel openssl openssl-devel openssh-clients htop gcc zlib lrzsz zip unzip vim telnet-server ncurses wget net-tools

关闭防火墙

# Stop firewalld now and keep it from starting again on boot,
# so the Hadoop/Spark daemon ports are reachable between nodes.
systemctl stop firewalld.service
systemctl disable firewalld.service

配置hosts文件

vi /etc/hosts
127.0.0.1   localhost localhost.localdomain localhost4 localhost4.localdomain4
::1         localhost localhost.localdomain localhost6 localhost6.localdomain6

172.16.156.220  nn.hadoop.data.example.net
172.16.156.221  dn1.hadoop.data.example.net
172.16.156.222  dn2.hadoop.data.example.net

安装JDK和Scala

0.创建文件夹

# Create the JDK and Scala install directories.
# -p creates the missing /app parent and is idempotent on re-runs
# (the original plain mkdir fails if /app does not exist yet).
mkdir -p /app/java /app/scala

1.下载

下载JDK

# Oracle's otn-pub download requires accepting the license agreement;
# without the cookie header the server returns an HTML page, not the tarball.
wget --no-cookies --no-check-certificate \
  --header "Cookie: oraclelicense=accept-securebackup-cookie" \
  http://download.oracle.com/otn-pub/java/jdk/8u121-b13/e9e7ea248e2c4826b92b3f075a80e441/jdk-8u121-linux-x64.tar.gz

如果链接失效，请到Oracle官网手动下载对应的JDK安装包并上传至服务器
下载Scala

# Download Scala 2.12.1.
# NOTE(review): Spark 2.1.0 pre-built binaries target Scala 2.11 — confirm
# 2.12.1 is only used for standalone tooling, not for compiling Spark apps.
wget http://downloads.lightbend.com/scala/2.12.1/scala-2.12.1.tgz

2.移动&解压

# Move each archive into its install directory, then extract it there.
# Bug fix: the original ran `tar` in the current directory *after* moving the
# archive away, so the extraction could not find the file. Extract via the
# absolute path and use -C to unpack inside the target directory.
mv jdk-8u121-linux-x64.tar.gz /app/java
tar -zxvf /app/java/jdk-8u121-linux-x64.tar.gz -C /app/java
mv scala-2.12.1.tgz /app/scala
tar -zxvf /app/scala/scala-2.12.1.tgz -C /app/scala

3.授权

# Open group write access on /app and hand ownership to the hadoop user.
# NOTE(review): the hadoop user is only created in the next section —
# run `useradd hadoop` before this chown or it will fail. TODO confirm ordering.
chmod -R 775 /app/
chown -R hadoop /app/

创建hadoop用户

# Create the hadoop service user and set its password (prompts interactively).
useradd hadoop
passwd hadoop

如无特殊说明 以后均为hadoop用户操作

SSH免密码登录

生成密钥:~/.ssh/id_rsa和~/.ssh/id_rsa.pub

# Generate an RSA key pair at ~/.ssh/id_rsa and ~/.ssh/id_rsa.pub.
ssh-keygen -t rsa 

拷贝公钥到其他机器上

# Append this host's public key to authorized_keys on every node
# so the hadoop user can log in to each machine without a password.
ssh-copy-id -i nn.hadoop.data.example.net
ssh-copy-id -i dn1.hadoop.data.example.net
ssh-copy-id -i dn2.hadoop.data.example.net

安装Hadoop

0.创建文件夹

# Create the HDFS data, name and tmp directories in one idempotent call;
# -p also creates the missing /app/hadoop parent, which the original
# plain mkdir commands relied on implicitly.
mkdir -p /app/hadoop/data /app/hadoop/name /app/hadoop/tmp

1.下载hadoop

# Bug fix: closer.cgi is a mirror-selection HTML page, not the archive itself —
# wget would save an HTML file named hadoop-2.7.3.tar.gz. Fetch the tarball
# directly from the Apache release archive instead.
wget http://archive.apache.org/dist/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz

2.移动&解压

# Move the archive into place, then extract it inside /app/hadoop.
# Bug fix: the original ran `tar` in the current directory after the archive
# had already been moved away, so extraction would fail.
mv hadoop-2.7.3.tar.gz /app/hadoop
tar -zxvf /app/hadoop/hadoop-2.7.3.tar.gz -C /app/hadoop

3.修改配置文件

/etc/profile (root权限)

# Hadoop environment (append to /etc/profile; requires root).
# Bug fix: HADOOP_HOME itself must be exported — the Hadoop start scripts and
# daemons read it from the environment; the original only assigned it.
export HADOOP_HOME=/app/hadoop/hadoop-2.7.3
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

slaves

dn1.hadoop.data.example.net
dn2.hadoop.data.example.net

hadoop-env.sh

# export JAVA_HOME=${JAVA_HOME}
改为
export JAVA_HOME=/app/java/jdk1.8.0_121/

core-site.xml

<configuration>
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://nn.hadoop.data.example.net:9000</value>
    </property>
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/app/hadoop/tmp</value>
    </property>
</configuration>
hdfs-site.xml

<configuration>
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>nn.hadoop.data.example.net:50090</value>
    </property>
    <property>
        <name>dfs.namenode.name.dir</name>
        <value>/app/hadoop/name</value>
    </property>
    <property>
        <name>dfs.replication</name>
        <value>1</value>
    </property>
    <property>
        <name>dfs.datanode.data.dir</name>
        <value>/app/hadoop/data</value>
    </property>
</configuration>
mapred-site.xml

cp mapred-site.xml.template mapred-site.xml

<configuration>
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>nn.hadoop.data.example.net:10020</value>
    </property>
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>nn.hadoop.data.example.net:19888</value>
    </property>
</configuration>

yarn-site.xml

<configuration>
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>nn.hadoop.data.example.net</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <property>
        <name>yarn.nodemanager.aux-services.mapreduce.shuffle.class</name>
        <value>org.apache.hadoop.mapred.ShuffleHandler</value>
    </property>
</configuration>
4.格式化namenode

# Format the NameNode metadata directory (run once, before the first start).
# `hadoop namenode` is deprecated in Hadoop 2.x; use the hdfs command instead.
hdfs namenode -format

5.复制文件到其他机器

将/app/hadoop(包括data、name、tmp和配置好的hadoop)复制到其他机器。

6.启动dfs

# Bring up the HDFS daemons (script is on PATH via $HADOOP_HOME/sbin).
start-dfs.sh

7.启动yarn

# Bring up the YARN daemons.
start-yarn.sh

8.启动jobhistory

# Start the MapReduce JobHistory server (web UI port configured in mapred-site.xml).
mr-jobhistory-daemon.sh start historyserver

安装Spark2

0.创建文件夹

# Create the Spark install directory; -p is idempotent on re-runs.
mkdir -p /app/spark

1.下载Spark2

# Bug fix: closer.lua returns a mirror-selection HTML page, not the tgz —
# download the release directly from the Apache archive.
wget http://archive.apache.org/dist/spark/spark-2.1.0/spark-2.1.0-bin-hadoop2.7.tgz

2.移动&解压

# Move the archive into place, then extract it inside /app/spark.
# Bug fix: the original ran `tar` in the current directory after the archive
# had already been moved away, so extraction would fail.
mv spark-2.1.0-bin-hadoop2.7.tgz /app/spark
tar -zxvf /app/spark/spark-2.1.0-bin-hadoop2.7.tgz -C /app/spark

3.修改配置文件

/etc/profile (root权限)

# Spark environment (append to /etc/profile; requires root).
export SPARK_HOME=/app/spark/spark-2.1.0-bin-hadoop2.7
export PATH="$SPARK_HOME/bin:$PATH"

spark-env.sh

# Create spark-env.sh from the bundled template, then set the cluster environment.
cp spark-env.sh.template spark-env.sh
export SCALA_HOME=/app/scala/scala-2.12.1
export JAVA_HOME=/app/java/jdk1.8.0_121
# Bug fix: the master hostname must be in the cluster's domain (example.net);
# the original pointed at the unrelated host nn.hadoop.data.easydebug.net.
export SPARK_MASTER_IP=nn.hadoop.data.example.net
export SPARK_WORKER_MEMORY=1g
export HADOOP_CONF_DIR=/app/hadoop/hadoop-2.7.3/etc/hadoop

slaves

dn1.hadoop.data.example.net
dn2.hadoop.data.example.net

4.复制文件到其他机器

将/app/spark复制到其他机器。

5.启动Spark

# Start the Spark master and the workers listed in conf/slaves.
/app/spark/spark-2.1.0-bin-hadoop2.7/sbin/start-all.sh

安装完成 ^_^


# Consolidated /etc/profile additions for all three nodes.

export JAVA_HOME=/app/java/jdk1.8.0_121
export SCALA_HOME=/app/scala/scala-2.12.1
export PATH=$JAVA_HOME/bin:$SCALA_HOME/bin:$PATH
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar

# Bug fix: HADOOP_HOME must be exported — Hadoop's scripts and daemons read it
# from the environment; the original only assigned it in the current shell.
export HADOOP_HOME=/app/hadoop/hadoop-2.7.3
export HADOOP_INSTALL=$HADOOP_HOME
export HADOOP_MAPRED_HOME=$HADOOP_HOME
export HADOOP_COMMON_HOME=$HADOOP_HOME
export HADOOP_HDFS_HOME=$HADOOP_HOME
export YARN_HOME=$HADOOP_HOME
export HADOOP_COMMON_LIB_NATIVE_DIR=$HADOOP_HOME/lib/native
export PATH=$PATH:$HADOOP_HOME/sbin:$HADOOP_HOME/bin

export SPARK_HOME=/app/spark/spark-2.1.0-bin-hadoop2.7
export PATH="$SPARK_HOME/bin:$PATH"
相关文章
最新文章
热点推荐