# shell-install
# Shell installer scripts: Elasticsearch, Grafana, node_exporter, Prometheus,
# Kibana, MongoDB (standalone + replica set), Zookeeper/Kafka.
# ===== install_elasticsearch =====
#!/usr/bin/bash
# Build an Elasticsearch cluster across the hosts listed below.
# NOTE: each host must be rebooted after installation, otherwise the raised
# file-handle (ulimit) settings do not take effect for the service.
#--------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------#
# Cluster hosts and their root passwords, one "IP password" pair per line.
ip_addr="
192.168.98.173 123456
192.168.98.174 123456
192.168.98.175 123456
"
# Hostname prefix used when rewriting /etc/hostname and /etc/hosts.
name="elastic"
# Elasticsearch tarball expected in the current working directory.
elasticsearch_package_name="elasticsearch-7.14.0-linux-x86_64.tar.gz"
# Dedicated system user that will own and run Elasticsearch.
user="elastic"
# Password assigned to that user.
pass="1234567"
# Data and log directories for Elasticsearch.
path_data="/opt/data"
path_log="/opt/log"
# The parent directory must also be chown'ed, otherwise the service cannot start.
path_ll="/opt"
#--------------------------------------------------------------------------------------#
#--------------------------------------------------------------------------------------#
# Counter used to generate node-1 / node-2 / node-3 names.
node_id="1"
# Split the ip_addr list on newlines only (each line is one host entry).
IFS='
'
# Push the local SSH public key to every cluster host.
# Abort with a message and non-zero status if the install package is missing
# (the original exited silently with status 0, hiding the failure).
if [ ! -e "$elasticsearch_package_name" ];then
    echo "package $elasticsearch_package_name not found in current directory, abort" >&2
    exit 1
fi
for line in $ip_addr
do
    ip=$(echo "$line" | awk '{print $1}')
    passwd=$(echo "$line" | awk '{print $2}')
    # Only try hosts that answer a single ping; record unreachable ones.
    if ping -c1 -W1 "$ip" &>/dev/null; then
/usr/bin/expect <<EOF
spawn ssh-copy-id $ip
expect {
"yes/no" { send "yes\r" ; exp_continue }
"password" { send "$passwd\r" ; exp_continue }
"*#" { send "\r"}
}
EOF
    else
        echo "$ip" | tee -a fail_ip.txt
    fi
done
wait
echo "1、ssh-copy-id is ok!"
IFS='
'
# Rewrite each host's hostname to "<prefix>-<ip>" and register it in /etc/hosts.
for e_host in $ip_addr
do
ip=`echo "$e_host" |awk '{print $1}'`
# First non-loopback inet address reported by the remote host; the \\$ escapes
# keep awk's field variables from being expanded by the local shell.
net_addr=`ssh root@$ip " ip addr |egrep '\<inet\>'|grep -v '127.0.0.1'|awk '{print \\$2}'|awk -F'/' '{print \\$1}' "`
ssh root@$ip " echo "$name-$net_addr" > /etc/hostname "
# Read the hostname back and append an "ip hostname" line to /etc/hosts.
gg=`ssh root@$ip " cat /etc/hostname "`
ssh root@$ip " echo "$net_addr $gg" >> /etc/hosts "
done
wait
# Copy the tarball to every host and record each host IP in el_ip[] (1-based).
IFS='
'
for a_copy in $ip_addr
do
    ip=$(echo "$a_copy" | awk '{print $1}')
    el_ip[++j]=$ip
    scp $elasticsearch_package_name root@$ip:/tmp/
    # Verify the copy on the remote side; 'exit 1' aborts the remote shell.
    # (The original placed a bare word 'exit' inside the echo arguments, so a
    # failed copy printed "... failed exit" and was never signalled.)
    ssh root@$ip "if [ -e /tmp/$elasticsearch_package_name ]; then echo $ip File copy complete; else echo $ip File copy failed; exit 1; fi"
done
wait
# Show the collected cluster IPs (the original echoed the literal string "ipaddr",
# a leftover from debugging; the commented-out line showed the intent).
for hq_ip in ${!el_ip[@]}
do
    echo "${el_ip[hq_ip]}"
done
# Configure Elasticsearch on every host: unpack, create the service user, raise
# kernel/ulimit settings, write elasticsearch.yml plus a systemd unit, then
# reboot the host so the new limits take effect.
# The whole remote script below is double-quoted, so $user/$pass/$path_*/
# $node_id and the heredoc bodies are expanded LOCALLY before being sent.
IFS='
'
for c_con in $ip_addr
do
ip=`echo "$c_con" |awk '{print $1}'`
ssh root@$ip " cd /tmp
tar -zxvf $elasticsearch_package_name
mv elasticsearch-7.14.0 /usr/local/elasticsearch
useradd $user ; echo "$pass" |passwd --stdin $user &>/dev/null
chown -R $user:$user /usr/local/elasticsearch
mkdir $path_data
mkdir $path_log
chown -R $user:$user $path_ll
chown -R $user:$user $path_data
chown -R $user:$user $path_log
# raise open-file and process limits for the service user
cat <<EOF >>/etc/security/limits.conf
$user soft nofile 65536
$user hard nofile 65536
* soft nproc 65536
* hard nproc 65536
EOF
cat <<EOF >>/etc/security/limits.d/20-nproc.conf
$user soft nofile 65536
$user hard nofile 65536
EOF
cat <<EOF >>/etc/sysctl.conf
vm.max_map_count=655360
EOF
# per-node elasticsearch.yml; node name and seed hosts were expanded locally
cat <<EOF >>/usr/local/elasticsearch/config/elasticsearch.yml
cluster.name: elastic-application
node.name: node-$node_id
node.master: true
node.data: true
node.max_local_storage_nodes: 3
network.host: 0.0.0.0
http.port: 9200
transport.tcp.port: 9700
discovery.seed_hosts: ["${el_ip[1]}", "${el_ip[2]}","${el_ip[3]}"]
cluster.initial_master_nodes: ["node-1", "node-2","node-3"]
path.data: $path_data
path.logs: $path_log
EOF
# apply sysctl settings and drop the firewall so cluster ports are reachable
sysctl -p
systemctl stop firewalld
systemctl disable firewalld
# systemd unit for elasticsearch
cat <<EOF >>/usr/lib/systemd/system/elasticsearch.service
[Unit]
Description=elasticsearch
After=network.target
[Service]
Type=forking
User=elastic
ExecStart=/usr/local/elasticsearch/bin/elasticsearch -d
PrivateTmp=true
# 指定此进程可以打开的最大文件数
LimitNOFILE=65535
# 指定此进程可以打开的最大进程数
LimitNPROC=65535
# 最大虚拟内存
LimitAS=infinity
# 最大文件大小
LimitFSIZE=infinity
# 超时设置 0-永不超时
TimeoutStopSec=0
# SIGTERM是停止java进程的信号
KillSignal=SIGTERM
# 信号只发送给给JVM
KillMode=process
# java进程不会被杀掉
SendSIGKILL=no
# 正常退出状态
SuccessExitStatus=143
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable elasticsearch.service
systemctl start elasticsearch.service
# the reboot is required for the new limits to apply
reboot
"
# each host gets the next node number (node-1 / node-2 / node-3)
let ++node_id
done
sleep 1m
# After the reboot, verify the process limit and the service state per host.
IFS='
'
for d_res in $ip_addr
do
    # Assign ip BEFORE printing it: the original echoed $ip first, so each line
    # showed the previous iteration's address (empty on the first pass).
    ip=$(echo "$d_res" | awk '{print $1}')
    echo -n "$ip 系统句柄为"
    ssh root@$ip "ulimit -Hu"
    # The original printed undefined $i here; $ip is what was intended.
    echo -n "$ip 状态为"
    ssh root@$ip " systemctl status elasticsearch.service|grep Active"
done
wait
echo "请通过以下链接查看服务集群是否搭建完成"
echo "http://${el_ip[1]}:9200/_cat/health?v"
# ===== install_grafana =====
#!/usr/bin/bash
# Install Grafana 8.0.6 from a local RPM (downloading it first when absent),
# enable and start the service, then print access information.
download_url=
ip=$(ip addr | grep inet | grep brd | awk '{print $2}' | awk -F '/' '{print $1}')
# Fetch the RPM only when it is not already in the current directory; the two
# original branches were identical apart from this wget, so they are merged.
if [ ! -e "grafana-8.0.6-1.x86_64.rpm" ];then
    wget $download_url
fi
yum install -y grafana-8.0.6-1.x86_64.rpm
/sbin/chkconfig --add grafana-server
systemctl enable grafana-server.service
systemctl start grafana-server
# Read the service state AFTER starting it; the original sampled it at the top
# of the script, so the check always reflected the pre-install state.
service_status=$(systemctl status grafana-server | grep Active | awk '{print $3}')
if [[ $service_status == "(running)" ]];then
    echo "服务运行成功,请通过 http://$ip:3000 进行访问,账号:admin 密码 admin"
    echo "grafana 日志文件位置:/var/log/grafana"
    echo "grafana 环境文件位置: /etc/sysconfig/grafana-server"
    echo "grafana 配置文件位置: /etc/grafana/grafana.ini"
    echo "grafana 的图表通过以下地址进行下载https://grafana.com/grafana/dashboards"
else
    echo "服务安装失败"
fi
# ===== install_node_exporter =====
#!/usr/bin/bash
# Install node_exporter 1.2.0 (downloading the tarball when absent), register
# it as a systemd service, start it and report the scrape URL.
download_url=
ip=$(ip addr | grep inet | grep brd | awk '{print $2}' | awk -F '/' '{print $1}')
# Download only when the tarball is missing; the original duplicated the whole
# install sequence in both branches.
if [ ! -e "node_exporter-1.2.0.linux-amd64.tar.gz" ];then
    wget $download_url
fi
tar -zvxf node_exporter-1.2.0.linux-amd64.tar.gz
mv node_exporter-1.2.0.linux-amd64 /usr/local/node_exporter
cat > /etc/systemd/system/node_exporter.service << EOF
[Unit]
Description=node_exporter
Documentation=https://prometheus.io/
After=network.target
[Service]
Type=simple
User=root
ExecStart=/usr/local/node_exporter/node_exporter
Restart=on-failure
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable node_exporter
systemctl start node_exporter
# Sample the state AFTER the start attempt (the original read it before
# installing anything, so it always showed the pre-install state).
node_exporter_status=$(systemctl status node_exporter | grep Active | awk '{print $3}')
if [[ $node_exporter_status == "(running)" ]];then
    echo "服务安装成功,请通过 http://$ip:9100 进行访问"
else
    echo "服务安装失败"
fi
# ===== install_prometheus =====
#!/usr/bin/bash
# Install Prometheus 2.28.1 (downloading the tarball when absent), point its
# config at this host, register a systemd unit, start it and verify the state.
url=""
ip=$(ip addr | grep inet | grep brd | awk '{print $2}' | awk -F '/' '{print $1}')
old_status="(running)"
# Download only when the tarball is missing; the original duplicated the whole
# install sequence in both branches.
if [ ! -e prometheus-2.28.1.linux-amd64.tar.gz ];then
    echo "当前目录不存在安装包,将要开始下载安装包"
    wget $url
fi
tar -zxvf prometheus-2.28.1.linux-amd64.tar.gz
if [ -d /usr/local/prometheus ];then
    echo "目录已存在,程序退出"
    exit
fi
echo "移动文件"
mv prometheus-2.28.1.linux-amd64 /usr/local/prometheus
echo "修改配置文件的localhost为本机$ip"
sed -ir "s/localhost/$ip/" /usr/local/prometheus/prometheus.yml
# The heredoc delimiter is unquoted, so \$MAINPID keeps the literal $MAINPID in
# the unit file (the original expanded it to an empty string), and \\ keeps the
# trailing backslash for the systemd line continuation.
cat <<EOF >/usr/lib/systemd/system/prometheus.service
[Unit]
Description=prometheus
Documentation=https://prometheus.io/
After=network-online.target
[Service]
User=root
Restart=on-failure
ExecStart=/usr/local/prometheus/prometheus \\
--config.file=/usr/local/prometheus/prometheus.yml
ExecReload=/bin/kill -HUP \$MAINPID
[Install]
WantedBy=multi-user.target
EOF
echo "创建系统服务"
echo "每次更改prometheus.service都需要执行一次,重新加载一下"
systemctl daemon-reload
echo "设置成开机自启"
systemctl enable prometheus
echo "启动prometheus服务"
systemctl start prometheus
# Check the state AFTER the start attempt; '==' (string compare) replaces the
# original '-eq', which tried to evaluate "(running)" arithmetically.
prometheus_status=$(systemctl status prometheus | grep Active | awk '{print $3}')
if [[ $prometheus_status == $old_status ]];then
    echo "服务install成功,请通过http://$ip:9090进行访问,程序退出。。。"
    exit
else
    echo "服务安装失败,请检查。。"
    exit 1
fi
# ===== install_kibana =====
#!/usr/bin/bash
# Install Kibana 7.14.0 from a local tarball, point it at the Elasticsearch
# cluster, register a systemd unit and start it.
package_name="kibana-7.14.0-linux-x86_64.tar.gz"
# Pre-escaped so the YAML line below renders "url","url","url".
elastic_path="http://192.168.98.173:9200\",\"http://192.168.98.174:9200\",\"http://192.168.98.175:9200"
ip_addr=$(ip addr | grep inet | grep brd | awk '{print $2}' | awk -F '/' '{print $1}')
if [ -e "$package_name" ];then
    tar -zxvf "$package_name"
    # Strip the .tar.gz suffix to get the extracted directory name (the original
    # hard-coded a 26-character slice, which silently breaks on other versions).
    mv "${package_name%.tar.gz}" /usr/local/kibana
    cat <<EOF >>/usr/local/kibana/config/kibana.yml
server.port: 5601
server.host: "0.0.0.0"
server.name: "kibana-elastic"
elasticsearch.hosts:
["$elastic_path"]
elasticsearch.requestTimeout: 30000
EOF
    cat <<EOF >>/usr/lib/systemd/system/kibana.service
[Unit]
Description=kibana
After=network.target
[Service]
Type=simple
User=root
ExecStart=/usr/local/kibana/bin/kibana --allow-root
PrivateTmp=true
[Install]
WantedBy=multi-user.target
EOF
    systemctl daemon-reload
    systemctl enable kibana.service
    systemctl restart kibana.service
    # Fixed URL: the original printed "http//" without the colon.
    echo "访问地址http://$ip_addr:5601"
    echo "请通过页面中的Stack Monitoring查看集群状态"
else
    echo "file is not existence"
fi
# ===== install_MongoDB =====
#!/usr/bin/bash
# Install standalone MongoDB 3.0.0: clean old copies, download, unpack to
# /usr/local/mongodb and start mongod in the background.
rm -rf /root/mongodb-linux-x86_64-3.0.0 mongodb-linux-x86_64-3.0.0.tgz
rm -rf /usr/local/mongodb
# $HOME (uppercase): the original 'cd $home' referenced an unset variable and
# never changed directory.
cd "$HOME" || exit 1
echo "开始下载安装包"
wget https://fastdl.mongodb.org/linux/mongodb-linux-x86_64-3.0.0.tgz
# 'tar tf' only lists the archive — a cheap integrity check before extracting.
tar tf mongodb-linux-x86_64-3.0.0.tgz
if [ $? -eq 0 ];then
    tar -zxvf mongodb-linux-x86_64-3.0.0.tgz
    if [ ! -d "/usr/local/mongodb/" ];then
        echo "移动文件"
        mv /root/mongodb-linux-x86_64-3.0.0 /usr/local/mongodb
        mkdir -p /usr/local/mongodb/data /usr/local/mongodb/log
        if [ $? -eq 0 ];then
            # --fork daemonizes mongod; logs append to mongodb.log
            /usr/local/mongodb/bin/mongod --dbpath=/usr/local/mongodb/data --logpath=/usr/local/mongodb/log/mongodb.log --logappend --port=27017 --fork
            echo "服务已创建"
            echo "请通过/usr/local/mongodb/bin/mongo 进行访问"
        fi
    else
        echo "文件已存在"
        exit
    fi
else
    echo "文件损坏"
    exit 1
fi
#!/usr/bin/bash
# MongoDB replica-set installer. It:
#   1. pushes this host's SSH public key to every node;
#   2. copies the tarball, writes each node's mongod.conf and starts mongod;
#   3. initialises the replica set (primary + secondary + arbiter) remotely.
# Notes:
# - to make a secondary readable, run rs.slaveOk() on that node afterwards;
#   verify with 'show dbs;' — no error means success;
# - the generated mongod.conf is YAML: four-space first-level indent,
#   eight-space second-level indent, otherwise mongod fails with a YAML error;
# - ports are hard-coded per node (27017/27018/27019), not yet parameterised;
# - '"' and ':' inside expect send strings must be escaped or expect errors out.
#version v1.0 by gyx
# All cluster nodes, one per line.
ip='
192.168.98.157
192.168.98.158
192.168.98.159
'
# Primary node
ip_1=192.168.98.157
# Secondary (replica) node
ip_2=192.168.98.158
# Arbiter node
ip_3=192.168.98.159
# Common root password for all nodes
password=123456
# MongoDB tarball expected in the current directory.
mongodb_file_name='mongodb-linux-x86_64-4.0.10.tgz'
# Flag unreachable hosts before doing any work.
for a in $ip
do
    # '-c1 -w1': one probe, one-second deadline. (The original ran 'ping c1',
    # which treated "c1" as the target hostname, so every check failed.)
    ping -c1 -w1 $a &>/dev/null
    if [ $? -ne 0 ];then
        # Append (>>) so earlier unreachable hosts are not overwritten.
        echo "$a 主机不通" >> ip.txt
    fi
done
# Push the local SSH public key to every node.
for i in $ip
do
/usr/bin/expect <<-EOF
spawn ssh-copy-id $i
expect {
"yes/no" { send "yes\r"; exp_continue }
"password" { send "$password\r"}
}
EOF
done
# NOTE: the original sent "$password/r" — a literal slash-r instead of the
# carriage return \r, so the password was never submitted correctly.
# Copy the MongoDB tarball to every node and unpack it to /usr/local/mongodb.
for j in $ip
do
    # Fixed message: the original printed "copy fle $i" (typo + wrong variable).
    echo "copy file $j "
    # Remove any previous copy before transferring.
    ssh root@$j "
    rm -rf /root/$mongodb_file_name
    rm -rf /usr/local/mongodb"
    scp $mongodb_file_name root@$j:/root/
    ssh root@$j "tar -zxvf /root/$mongodb_file_name
    mv /root/mongodb-linux-x86_64-4.0.10 /usr/local/mongodb
    "
done
# Primary node (port 27017): create data/log dirs, write mongod.conf, start
# mongod and drop a restart helper script. The heredoc below is expanded
# LOCALLY, so $ip_1 is baked into bindIp before the text reaches the remote
# host; the generated YAML (including its comments) is written verbatim.
ssh root@$ip_1 "
mkdir -p /mongodb/replica_sets/myrs_27017/log
mkdir -p /mongodb/replica_sets/myrs_27017/data/db
cat <<EOF >/mongodb/replica_sets/myrs_27017/mongod.conf
systemLog:
#MongoDB发送所有日志输出的目标指定为文件
destination: file
#mongod或mongos应向其发送所有诊断日志记录信息的日志文件的路径
path: /mongodb/replica_sets/myrs_27017/log/mongod.log
#当mongos或mongod实例重新启动时,mongos或mongod会将新条目附加到现有日志文件的末尾。
logAppend: true
storage:
#mongod实例存储其数据的目录。storage.dbPath设置仅适用于mongod。
dbPath: /mongodb/replica_sets/myrs_27017/data/db
journal:
#启用或禁用持久性日志以确保数据文件保持有效和可恢复。
enabled: true
processManagement:
#启用在后台运行mongos或mongod进程的守护进程模式。
fork: true
#指定用于保存mongos或mongod进程的进程ID的文件位置,其中mongos或mongod将写入其PID
pidFilePath: /mongodb/replica_sets/myrs_27017/log/mongod.pid
net:
#服务实例绑定所有IP,有副作用,副本集初始化的时候,节点名字会自动设置为本地域名,而不是ip
#bindIpAll: true
#服务实例绑定的IP
bindIp: localhost,$ip_1
#bindIp
#绑定的端口
port: 27017
replication:
#副本集的名称
replSetName: myrs
EOF
/usr/local/mongodb/bin/mongod -f /mongodb/replica_sets/myrs_27017/mongod.conf >/root/log.txt
cat <<EOF >/root/mongo_start.sh
#!/usr/bin/bash
/usr/local/mongodb/bin/mongod -f /mongodb/replica_sets/myrs_27017/mongod.conf
EOF
"
#创建副本节点
ssh root@$ip_2 "
mkdir -p /mongodb/replica_sets/myrs_27018/log
mkdir -p /mongodb/replica_sets/myrs_27018/data/db
cat <<EOF >/mongodb/replica_sets/myrs_27018/mongod.conf
systemLog:
#MongoDB发送所有日志输出的目标指定为文件
destination: file
#mongod或mongos应向其发送所有诊断日志记录信息的日志文件的路径
path: /mongodb/replica_sets/myrs_27018/log/mongod.log
#当mongos或mongod实例重新启动时,mongos或mongod会将新条目附加到现有日志文件的末尾。
logAppend: true
storage:
#mongod实例存储其数据的目录。storage.dbPath设置仅适用于mongod。
dbPath: /mongodb/replica_sets/myrs_27018/data/db
journal:
#启用或禁用持久性日志以确保数据文件保持有效和可恢复。
enabled: true
processManagement:
#启用在后台运行mongos或mongod进程的守护进程模式。
fork: true
#指定用于保存mongos或mongod进程的进程ID的文件位置,其中mongos或mongod将写入其PID
pidFilePath: /mongodb/replica_sets/myrs_27018/log/mongod.pid
net:
#服务实例绑定所有IP,有副作用,副本集初始化的时候,节点名字会自动设置为本地域名,而不是ip
#bindIpAll: true
#服务实例绑定的IP
bindIp: localhost,$ip_2
#bindIp
#绑定的端口
port: 27018
replication:
#副本集的名称
replSetName: myrs
EOF
/usr/local/mongodb/bin/mongod -f /mongodb/replica_sets/myrs_27018/mongod.conf >/root/log.txt
cat <<EOF >/root/mongo_start.sh
#!/usr/bin/bash
/usr/local/mongodb/bin/mongod -f /mongodb/replica_sets/myrs_27018/mongod.conf
EOF
"
#创建仲裁节点
ssh root@$ip_3 "
mkdir -p /mongodb/replica_sets/myrs_27019/log
mkdir -p /mongodb/replica_sets/myrs_27019/data/db
cat <<EOF >/mongodb/replica_sets/myrs_27019/mongod.conf
systemLog:
#MongoDB发送所有日志输出的目标指定为文件
destination: file
#mongod或mongos应向其发送所有诊断日志记录信息的日志文件的路径
path: /mongodb/replica_sets/myrs_27019/log/mongod.log
#当mongos或mongod实例重新启动时,mongos或mongod会将新条目附加到现有日志文件的末尾。
logAppend: true
storage:
#mongod实例存储其数据的目录。storage.dbPath设置仅适用于mongod。
dbPath: /mongodb/replica_sets/myrs_27019/data/db
journal:
#启用或禁用持久性日志以确保数据文件保持有效和可恢复。
enabled: true
processManagement:
#启用在后台运行mongos或mongod进程的守护进程模式。
fork: true
#指定用于保存mongos或mongod进程的进程ID的文件位置,其中mongos或mongod将写入其PID
pidFilePath: /mongodb/replica_sets/myrs_27019/log/mongod.pid
net:
#服务实例绑定所有IP,有副作用,副本集初始化的时候,节点名字会自动设置为本地域名,而不是ip
#bindIpAll: true
#服务实例绑定的IP
bindIp: localhost,$ip_3
#bindIp
#绑定的端口
port: 27019
replication:
#副本集的名称
replSetName: myrs
EOF
/usr/local/mongodb/bin/mongod -f /mongodb/replica_sets/myrs_27019/mongod.conf >/root/log.txt
cat <<EOF >/root/mongo_start.sh
#!/usr/bin/bash
/usr/local/mongodb/bin/mongod -f /mongodb/replica_sets/myrs_27019/mongod.conf
EOF
"
# Verify mongod is running on every node before configuring the replica set.
for g in $ip
do
    # Run the whole check remotely; '[m]ongo' keeps grep from matching its own
    # command line. (The original piped the ssh output into a LOCAL grep, so
    # $? tested the local grep, not anything on the node.)
    ssh root@$g "ps -ef | grep -q '[m]ongo'"
    if [ $? -eq 0 ];then
        echo " $g install success"
    else
        echo "$g install fail"
        echo "服务未启动成功脚本将无法配置mongo副本模式。程序退出。。。"
        exit 1
    fi
done
# Automatically configure the replica set from the primary:
#   rs.initiate()        — initialise this node
#   rs.add("ip:27018")   — add the secondary
#   rs.addArb("ip:27019") — add the arbiter
# NOTE(review): the "#*" prompts assume a root shell prompt ending in '#'; the
# mongo shell prompt is different, so these expects likely fall back to the
# default timeout between sends — fragile but functional. Confirm on target.
/usr/bin/expect <<-EOF
spawn ssh root@$ip_1
expect "#*"
send "/usr/local/mongodb/bin/mongo --host=$ip_1 --port=27017 \n"
expect "#*"
send "rs.initiate() \n"
expect "#*"
send "rs.add(\"$ip_2\:27018\") \n"
expect "#*"
send "rs.addArb(\"$ip_3\:27019\") \n"
expect "#*"
send "exit \n"
expect eof
EOF
echo "进入mongo主节点输入rs.status() 在配置有刚才配置的三个ip及副本创建成功"
echo "登录命令参照 /usr/local/mongodb/bin/mongo --host=$ip_1 --port=27017"
# ===== install_zookeeper_kafka =====
#!/usr/bin/bash
# Zookeeper + Kafka cluster installer.
# 1. The myid file must live in the configured dataDir (/tmp/zookeeper),
#    otherwise zookeeper will not start.
# 2. server.1-3 entries use client port 2181 and election ports 2888:3888.
# Prerequisites: hostname and /etc/hosts configured on every node; firewalld
# stopped/disabled (or the ports opened):
#   systemctl stop firewalld ; systemctl disable firewalld
# Kafka smoke tests (run on any node):
#   create:   bin/kafka-topics.sh --create --topic quickstart-events --bootstrap-server localhost:9092
#   describe: bin/kafka-topics.sh --describe --topic quickstart-events --bootstrap-server localhost:9092
#   produce:  bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:9092
#   consume:  bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092
# Cluster nodes, one per line.
ip="
192.168.98.170
192.168.98.171
192.168.98.172
"
# myid / broker.id counter (server.1, server.2, ...)
g=1
p=0
# Common root password for all nodes
passwd=123456
# Replacement line for zookeeper.connect in kafka's server.properties
# (backslash-escaped for use as a sed 'c' replacement).
config="zookeeper.connect\=192.168.98.170:2181,192.168.98.171:2181,192.168.98.172:2181"
# Zookeeper cluster settings appended to zookeeper.properties on each node.
cat <<EOF >tmp.txt
tickTime=2000
initLimit=10
syncLimit=5
server.1=192.168.98.170:2888:3888
server.2=192.168.98.171:2888:3888
server.3=192.168.98.172:2888:3888
EOF
# Helper snippet to start zookeeper in the background (written but not used
# later in this script — kept for manual use).
cat <<EOF >jc.txt
nohup /usr/local/kafka/bin/zookeeper-server-start.sh /usr/local/kafka/config/zookeeper.properties &>/tmp/zookeeper.log
EOF
# JDK environment lines (sed-escaped) inserted before 'unset i' in /etc/profile.
exp1="export JAVA_HOME=\/usr\/lib\/jvm\/java-1.8.0-openjdk-1.8.0.292.b10-1.el7_9.x86_64"
exp2="export CLASSPATH=.:\$JAVA_HOME\/jre\/lib\/rt.jar:\$JAVA_HOME\/lib\/dt.jar:\$JAVA_HOME\/lib\/tools.jar"
exp3="export PATH=\$PATH:\$JAVA_HOME\/bin"
l1="unset i"
echo "############################1、配置服务器密钥############################"
# Push this host's SSH key to every node. The original expect body contained a
# dangling '{ send "\n"}' entry with no pattern; expect treats it as a pattern
# that never matches, so it has been removed.
for a in $ip
do
/usr/bin/expect <<EOF
spawn ssh-copy-id $a
expect {
"yes/no" { send "yes\r"; exp_continue }
"*password" { send "$passwd\n"; exp_continue }
}
EOF
done
echo "############################2、拷贝kafka安装包############################"
# Ship the Kafka tarball plus the zookeeper config snippet to every node.
for host in $ip
do
    scp kafka_2.12-2.8.0.tgz tmp.txt "root@${host}:/tmp/"
done
wait
echo "sed 替换的zookeeper.connect 未设置为变量请自行设置czookeeper.connect\=192.168.98.170:2181,192.168.98.171:2181,192.168.98.172:2181"
echo "############################3、配置jdk、及服务变量############################"
# On each node: unpack kafka, seed zookeeper dirs and myid, patch configs,
# install the JDK and append JAVA_HOME exports to /etc/profile.
# The remote command is double-quoted, so $g, $config and $exp1..3 expand
# LOCALLY before being sent — each node gets its own broker.id/myid value.
for j in $ip
do
ssh root@$j "cd /tmp/
tar -zvxf kafka_2.12-2.8.0.tgz
rm -rf /usr/local/kafka/
cp -rp kafka_2.12-2.8.0 /usr/local/kafka/
mkdir -p /usr/local/kafka/zookeeper
mkdir -p /tmp/zookeeper/
mkdir -p /tmp/zookeeper/log
cat <<EOF >/tmp/zookeeper/myid
$g
EOF
cat /tmp/tmp.txt >> /usr/local/kafka/config/zookeeper.properties
sed -ir "/^broker.id\=0/cbroker.id\=$g" /usr/local/kafka/config/server.properties
sed -ir "/^zookeeper.connect\=/c$config" /usr/local/kafka/config/server.properties
yum install -y java-1.8.0-openjdk.x86_64
yum install -y java-1.8.0-openjdk-devel.x86_64
sed -ri '/unset i/i$exp1' /etc/profile
sed -ri '/unset i/i$exp2' /etc/profile
sed -ri '/unset i/i$exp3' /etc/profile
source /etc/profile
"
let ++p
let ++g
done
wait
echo "启动任务并设置为后台进程"
echo "############################4、配置服务开机自启############################"
# Install the zookeeper and kafka systemd units on every node in parallel.
# \\\$MAINPID survives two expansions (local double quotes, then the remote
# unquoted heredoc) so the literal $MAINPID lands in the unit file; the
# original wrote an empty string there, breaking ExecReload.
for kk in $ip
do
{
ssh root@$kk "
cat <<EOF >/etc/systemd/system/zookeeper.service
[Unit]
Description=Zookeeper
After=network.target remote-fs.target nss-lookup.target
[Service]
ExecStart=/usr/local/kafka/bin/zookeeper-server-start.sh /usr/local/kafka/config/zookeeper.properties
ExecReload=/bin/kill -s HUP \\\$MAINPID
ExecStop=/usr/local/kafka/bin/zookeeper-server-stop.sh
[Install]
WantedBy=multi-user.target
EOF
cat <<EOF >/etc/systemd/system/kafka.service
[Unit]
Description=kafka
After=network.target remote-fs.target nss-lookup.target zookeeper.service
[Service]
ExecStart=/usr/local/kafka/bin/kafka-server-start.sh /usr/local/kafka/config/server.properties
ExecReload=/bin/kill -s HUP \\\$MAINPID
ExecStop=/usr/local/kafka/bin/kafka-server-stop.sh
#PrivateTmp=true
[Install]
WantedBy=multi-user.target
EOF
systemctl daemon-reload
systemctl enable zookeeper.service
systemctl start zookeeper
systemctl enable kafka.service
systemctl start kafka
"
}&
done
# Barrier: the original never waited, so the summary printed before the
# background ssh jobs finished.
wait
# Post-install hints: how to verify the services and smoke-test Kafka.
printf '%s\n' \
    "服务启动完毕,请自检 systemctl status zookeeper" \
    "服务启动完毕,请自检 systemctl status kafka" \
    "创建topic,查看是否可以创建成功" \
    "/usr/local/kafka/bin/kafka-topics.sh --create --topic quickstart-events --bootstrap-server localhost:9092" \
    "显示topic,显示Leader,显示Replicas,显示Isr" \
    "/usr/local/kafka/bin/kafka-topics.sh --describe --topic quickstart-events --bootstrap-server localhost:9092" \
    "创建生产者,生产数据" \
    "/usr/local/kafka/bin/kafka-console-producer.sh --topic quickstart-events --bootstrap-server localhost:9092" \
    "创建消费者,读取生产者数据" \
    "/usr/local/kafka/bin/kafka-console-consumer.sh --topic quickstart-events --from-beginning --bootstrap-server localhost:9092"