- 解压
- tar -xvf hadoop-2.7.6.tar.gz
- 配置环境变量
- vim /etc/profile
- 增加hadoop环境变量,将bin和sbin都配置到PATH中
export JAVA_HOME=/usr/local/soft/jdk1.8.0_171
export HADOOP_HOME=/usr/local/soft/hadoop-2.7.6
export PATH=.:$PATH:$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
- source /etc/profile
- 修改配置文件
- hadoop 配置文件在/usr/local/soft/hadoop-2.7.6/etc/hadoop/
- cd /usr/local/soft/hadoop-2.7.6/etc/hadoop/
- hadoop-env.sh: 配置JAVA_HOME
export JAVA_HOME=/usr/local/soft/jdk1.8.0_171
- core-site.xml: 核心配置文件
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://master:9000</value>
</property>
<property>
    <name>hadoop.tmp.dir</name>
    <value>/usr/local/soft/hadoop-2.7.6/tmp</value>
</property>
<property>
    <name>fs.trash.interval</name>
    <value>1440</value>
</property>
- hdfs-site.xml: hdfs配置文件
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<property>
    <name>dfs.permissions</name>
    <value>false</value>
</property>
- yarn-site.xml: yarn配置文件
<property>
    <name>yarn.resourcemanager.hostname</name>
    <value>master</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
</property>
<property>
    <name>yarn.log-aggregation.retain-seconds</name>
    <value>604800</value>
</property>
<property>
    <name>yarn.nodemanager.resource.memory-mb</name>
    <value>20480</value>
</property>
<property>
    <name>yarn.scheduler.minimum-allocation-mb</name>
    <value>2048</value>
</property>
<property>
    <name>yarn.nodemanager.vmem-pmem-ratio</name>
    <value>2.1</value>
</property>
- mapred-site.xml: mapreduce配置文件
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
<property>
    <name>mapreduce.jobhistory.address</name>
    <value>master:10020</value>
</property>
<property>
    <name>mapreduce.jobhistory.webapp.address</name>
    <value>master:19888</value>
</property>
- 将hadoop安装文件同步到子节点
- scp -r /usr/local/soft/hadoop-2.7.6/ node1:/usr/local/soft/
- scp -r /usr/local/soft/hadoop-2.7.6/ node2:/usr/local/soft/
- scp -r /etc/profile node1:/etc/profile
- scp -r /etc/profile node2:/etc/profile
- 格式化namenode
- cd /usr/local/soft/hadoop-2.7.6/bin
- ./hdfs namenode -format
- 启动hadoop
- start-all.sh
- 访问hdfs页面验证是否安装成功
- http://master:50070
- 如果安装失败
- stop-all.sh
- 再次重启的时候
- 需要先删除所有节点(master、node1、node2)上的临时数据目录 /usr/local/soft/hadoop-2.7.6/tmp,否则重新格式化后 DataNode 的 clusterID 会与 NameNode 不一致,导致 DataNode 无法启动
- 在主节点执行命令:
- hdfs namenode -format
- 启动hadoop
- start-all.sh
版权声明:本文内容由互联网用户自发贡献,该文观点与技术仅代表作者本人。本站仅提供信息存储空间服务,不拥有所有权,不承担相关法律责任。如发现本站有涉嫌侵权/违法违规的内容, 请发送邮件至 [email protected] 举报,一经查实,本站将立刻删除。