@awsekfozc
2015-11-20T18:22:08.000000Z
Distributed Deployment
$ sudo hostname hadoop02.zc.com
$ sudo vi /etc/sysconfig/network
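After the change, /etc/sysconfig/network would typically contain the following (a sketch; the hostname value is taken from the command above):
NETWORKING=yes
HOSTNAME=hadoop02.zc.com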
Remove the stale NIC rule:
$ vi /etc/udev/rules.d/70-persistent-net.rules
//copy the MAC address (the part highlighted in red in the figure below)
Set the MAC address
$ sudo vi /etc/sysconfig/network-scripts/ifcfg-eth0
//set the MAC address copied above into the field highlighted in red in the figure below
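A sketch of what ifcfg-eth0 might look like after the edit; the MAC and IP values below are placeholders, not from the original:
DEVICE=eth0
TYPE=Ethernet
ONBOOT=yes
BOOTPROTO=static
HWADDR=00:0C:29:XX:XX:XX    # MAC copied from 70-persistent-net.rules (placeholder)
IPADDR=192.168.1.112        # hypothetical address
NETMASK=255.255.255.0
GATEWAY=192.168.1.1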
$ sudo vi /etc/hosts
Host mapping inside the VM
Host mapping on Windows
C:\Windows\System32\drivers\etc\hosts
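The same three hostname-to-IP mappings go into both hosts files; a sketch with hypothetical addresses:
192.168.1.110   hadoop.zc.com
192.168.1.111   hadoop01.zc.com
192.168.1.112   hadoop02.zc.com
A quick way to confirm the mappings resolve from each node:
$ ping -c 1 hadoop01.zc.com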
$ cd /tmp/
$ sudo rm -rf ./*
$ cd /opt/modules/
$ rm -rf hadoop-2.5.0/
core-site.xml:
<configuration>
    <!--NameNode address-->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop.zc.com:8020</value>
    </property>
    <!--local temp directory-->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/tmp/hadoop-${user.name}</value>
    </property>
    <!--default static web user-->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>zc</value>
    </property>
</configuration>
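A quick sanity check that the NameNode address is picked up (run from the Hadoop install directory; a sketch):
$ bin/hdfs getconf -confKey fs.defaultFS
# should print hdfs://hadoop.zc.com:8020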
hdfs-site.xml:
<configuration>
    <!--disable permission checking-->
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    <!--replication factor-->
    <property>
        <name>dfs.replication</name>
        <value>2</value>
    </property>
    <!--SecondaryNameNode address-->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop02.zc.com:50090</value>
    </property>
</configuration>
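The NameNode/SecondaryNameNode placement can likewise be read back from the config (a sketch):
$ bin/hdfs getconf -namenodes
# should print hadoop.zc.com
$ bin/hdfs getconf -secondaryNameNodes
# should print hadoop02.zc.com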
slaves (etc/hadoop/slaves, the DataNode/NodeManager hosts):
hadoop.zc.com
hadoop01.zc.com
hadoop02.zc.com
yarn-site.xml:
<configuration>
    <!--shuffle service for MapReduce-->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!--ResourceManager host-->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop01.zc.com</value>
    </property>
    <!--enable log aggregation-->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!--log aggregation retention time, in seconds-->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>100800</value>
    </property>
</configuration>
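With log aggregation enabled, container logs of finished jobs can be pulled back through the yarn CLI; the application id below is a placeholder:
$ bin/yarn logs -applicationId application_1448000000000_0001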
mapred-site.xml:
<configuration>
    <!--run MapReduce on YARN-->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!--JobHistory server internal (RPC) address-->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoop.zc.com:10020</value>
    </property>
    <!--JobHistory server web UI address-->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop.zc.com:19888</value>
    </property>
</configuration>
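The steps below do not start the JobHistory server; with the addresses above it would normally be started on hadoop.zc.com using the standard Hadoop 2.x script:
$ sbin/mr-jobhistory-daemon.sh start historyserver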
$ scp -r hadoop-2.5.0/ zc@hadoop02.zc.com:/opt/app/
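If scp (and later the start scripts) prompt for passwords, passwordless SSH between the nodes is usually set up first; a sketch for the zc user, using the hostnames above:
$ ssh-keygen -t rsa
$ ssh-copy-id zc@hadoop01.zc.com
$ ssh-copy-id zc@hadoop02.zc.com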
$ bin/hdfs namenode -format
$ sbin/hadoop-daemon.sh start namenode
$ sbin/hadoop-daemon.sh start datanode
$ sbin/yarn-daemon.sh start resourcemanager
$ sbin/yarn-daemon.sh start nodemanager
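jps confirms which daemons are up on each node; with the configuration above, expect NameNode on hadoop.zc.com, ResourceManager on hadoop01.zc.com, SecondaryNameNode on hadoop02.zc.com, and a DataNode plus NodeManager on every node:
$ jps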
$ bin/hdfs dfs -put /opt/datas/wc.input data
$ bin/yarn jar share/hadoop/mapreduce/hadoop-mapreduce-examples-2.5.0.jar wordcount data output
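Once the job finishes, the result can be read back from HDFS; output is relative to the user's HDFS home directory, and part-r-00000 is the usual reducer output file name:
$ bin/hdfs dfs -cat output/part-r-00000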