192.168.201.100 k8s-master01
192.168.201.101 k8s-node01
192.168.201.102 k8s-node02
java -version
# Disable the firewall so cluster ports are reachable between nodes
systemctl stop firewalld && systemctl disable firewalld
# Disable SELinux permanently (takes effect after the next reboot)
sed -i 's/^SELINUX=enforcing/SELINUX=disabled/' /etc/selinux/config
# Disable SELinux for the current session (no reboot needed)
setenforce 0
# Reboot the server (optional; guarantees the config is in effect)
reboot
# Install ntp
yum install -y ntp
# Sync the clock once against Aliyun's NTP server
ntpdate ntp.aliyun.com
# Schedule a resync every 10 minutes. Guard with grep so re-running this
# setup script does not append duplicate cron entries (idempotent append).
grep -q 'ntpdate ntp.aliyun.com' /var/spool/cron/root 2>/dev/null || \
  echo "*/10 * * * * /usr/sbin/ntpdate ntp.aliyun.com >/dev/null 2>&1" >> /var/spool/cron/root
在 k8s-master01 执行:
# Generate an RSA key pair without a passphrase (-P '' and -f suppress all prompts)
ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
# Distribute the public key to every node (including this master itself)
ssh-copy-id root@k8s-master01
ssh-copy-id root@k8s-node01
ssh-copy-id root@k8s-node02
# Verify passwordless login (logging straight in to node01 means success)
ssh root@k8s-node01 # success if no password is requested
# Distribute the key to k8s-node01 (the first run asks for node01's root password)
ssh-copy-id root@k8s-node01
# Distribute the key to k8s-node02 (the first run asks for node02's root password)
ssh-copy-id root@k8s-node02
# Verify login to k8s-node01 (success if it logs in without a password)
ssh root@k8s-node01
# After logging in, run exit to return to the master
exit
# Verify login to k8s-node02 (success if it logs in without a password)
ssh root@k8s-node02
# After logging in, run exit to return to the master
exit

直接用scp命令把环境变量文件拷贝过去也行
vi /opt/hadoop/hadoop-3.3.6/etc/hadoop/core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<!-- Default HDFS filesystem URI (the IP is k8s-master01, the NameNode host) -->
<name>fs.defaultFS</name>
<value>hdfs://192.168.201.100:9820</value>
</property>
<property>
<!-- Hadoop temp/data directory (root must have read/write access to it) -->
<name>hadoop.tmp.dir</name>
<value>/opt/hadoop/hadoop-3.3.6/tmp</value>
</property>
</configuration>
vi /opt/hadoop/hadoop-3.3.6/etc/hadoop/hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<property>
<!-- Block replication factor (3 copies, one per node in this cluster) -->
<name>dfs.replication</name>
<value>3</value>
</property>
<property>
<!-- SecondaryNameNode HTTP address (runs on k8s-node01) -->
<name>dfs.namenode.secondary.http-address</name>
<value>192.168.201.101:9868</value>
</property>
<property>
<!-- NameNode web UI address (runs on k8s-master01) -->
<name>dfs.namenode.http-address</name>
<value>192.168.201.100:9870</value>
</property>
</configuration>
vi /opt/hadoop/hadoop-3.3.6/etc/hadoop/hadoop-env.sh
# 1. Unified JAVA_HOME — must be identical on all three machines.
#    NOTE(review): the original comment says to point at a symlink, but the
#    value below is a versioned JDK path — confirm what /usr/lib/jvm contains.
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-1.8.0.262.b10-1.el7.x86_64
# 2. Keep the existing run-HDFS-daemons-as-root user settings
export HDFS_NAMENODE_USER="root"
export HDFS_DATANODE_USER="root"
export HDFS_SECONDARYNAMENODE_USER="root"
# 3. New for fully-distributed mode: YARN daemons run as root (required)
export YARN_RESOURCEMANAGER_USER="root"
export YARN_NODEMANAGER_USER="root"
vi /opt/hadoop/hadoop-3.3.6/etc/hadoop/workers
覆盖修改
k8s-master01
k8s-node01
k8s-node02
# Copy the configured Hadoop tree to both worker nodes
scp -r /opt/hadoop/hadoop-3.3.6/ root@k8s-node01:/opt/hadoop
scp -r /opt/hadoop/hadoop-3.3.6/ root@k8s-node02:/opt/hadoop
# Then, on EVERY machine, delete stale temp and log files.
# Guard the cd: if it fails, the rm -rf below must not run in the wrong directory.
cd /opt/hadoop/hadoop-3.3.6/ || exit 1
rm -rf tmp/
rm -rf logs/
hdfs namenode -format
start-dfs.sh
验证完成

本文作者:松轩(^U^)
本文链接:
版权声明:本博客所有文章除特别声明外,均采用 BY-NC-SA 许可协议。转载请注明出处!