Q:

hadoop slave node storage configuration

<!-- master node storage configuration -->

<!-- /etc/hadoop - configuration directory -->


<!-- /etc/hadoop/hdfs-site.xml - HDFS storage configuration file -->

<!-- Create a /nn directory to hold the namenode metadata -->
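<!-- e.g., on the master (assuming a root shell): -->
mkdir /nn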


<configuration>

<property>
<!-- dfs.name.dir is the Hadoop 1.x name; Hadoop 2+ renames it dfs.namenode.name.dir -->
<name>dfs.name.dir</name>
<value>/nn</value>
</property>

</configuration>


<!-- /etc/hadoop/core-site.xml - configuration file for the namenode IP and port -->

<configuration>

<property>
<!-- fs.default.name is the Hadoop 1.x name; Hadoop 2+ renames it fs.defaultFS -->
<name>fs.default.name</name>
<!-- On a cloud instance, use 0.0.0.0 in place of IP_OF_MASTER so the namenode binds on all interfaces -->
<value>hdfs://IP_OF_MASTER:9001</value>
</property>

</configuration>

<!-- Run the below command to format the namenode metadata directory (/nn); this erases any existing HDFS metadata -->
hadoop namenode -format

<!-- Run the below command to start the namenode daemon -->
hadoop-daemon.sh start namenode

<!-- Run the below command to check whether the daemon has started -->
jps 
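<!-- jps prints one line per running JVM (pid plus main class); a NameNode entry confirms the daemon is up -->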

<!-- Run the below command to get a report on the datanodes (slaves) that have registered -->
hadoop dfsadmin -report

<!-- Additional things -->

<!-- 1) Start the namenode on boot (rc.local runs without your login PATH, so use the absolute path to hadoop-daemon.sh if needed) -->
echo 'hadoop-daemon.sh start namenode' >> /etc/rc.d/rc.local
chmod +x /etc/rc.d/rc.local

<!-- 2) Free the OS page cache (run sync first to flush dirty pages) -->
sync; echo 1 > /proc/sys/vm/drop_caches

<!-- 3) The namenode starts read-only in safe mode; to leave it manually -->
hadoop dfsadmin -safemode leave
<!-- slave node storage configuration -->

<!-- /etc/hadoop - configuration directory -->
<!-- /etc/hadoop/hdfs-site.xml - HDFS storage configuration file -->

<!-- Create a /dn1 directory to hold the datanode block data -->
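<!-- e.g., on the slave (assuming a root shell): -->
mkdir /dn1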


<configuration>

<property>
<!-- dfs.data.dir is the Hadoop 1.x name; Hadoop 2+ renames it dfs.datanode.data.dir -->
<name>dfs.data.dir</name>
<value>/dn1</value>
</property>

</configuration>



<!-- /etc/hadoop/core-site.xml - configuration file for the namenode IP and port -->

<configuration>

<property>
<name>fs.default.name</name>
<!-- must match the value configured on the master -->
<value>hdfs://IP_OF_MASTER:9001</value>
</property>

</configuration>

<!-- Run the below command to start the datanode daemon -->
hadoop-daemon.sh start datanode
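<!-- As on the master, run jps to verify; a DataNode process should be listed -->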

<!-- If the datanode cannot connect to the namenode (often a stale namespaceID after the namenode was re-formatted), remove and re-create /dn1 -->
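<!-- e.g. (a sketch; stop the daemon first so nothing holds the directory): -->
hadoop-daemon.sh stop datanode
rm -rf /dn1
mkdir /dn1
hadoop-daemon.sh start datanode
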
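<!-- If the hadoop commands are not found, add Hadoop to the PATH (the path below assumes Hadoop is unpacked at /home/hadoop/hadoop; add these lines to ~/.bashrc to persist them) -->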
export HADOOP_HOME=/home/hadoop/hadoop
export PATH=${PATH}:${HADOOP_HOME}/bin:${HADOOP_HOME}/sbin
