@saltyang
2022-03-29T16:16:54.000000Z
字数 9400
阅读 1538
mbk
docker
deploy
- Centos 6.x
- Docker Engine Version: 1.7.1
- Docker Compose Version: 1.5.2
- Docker Redis Image Version: 3.2
- Docker Rabbitmq Image Version: 3.6.2
- Docker Cassandra Image Version: 3.5
- Docker Mysql Image Version: 5.6
- Docker Centos Image Version: 6.7
# Install docker engine
yum install docker-io
# or: curl -fsSL https://get.docker.com/ | sh
# start docker service
service docker start
# start docker daemon automatically at boot
chkconfig docker on
Note: Centos7装docker
# copy compose-mbk.tar.gz to your machine (compose-mbk.tar.gz 放在百度云盘
我的应用数据
>bypy
>compose-mbk.tar.gz
)
tar -xzvf compose-mbk.tar.gz
# load images
cd compose-mbk
docker load -i mbkcentos6.7.tar
docker load -i cassandra.tar
docker load -i mysql.tar
docker load -i rabbitmq.tar
docker load -i redis.tar
# Check whether the images loaded successfully or not. There are five images: mbkcentos6.7, cassandra, mysql, rabbitmq, redis
docker images
# Install docker-compose (Note: docker-compose version is 1.5.2)
# (Note: this is ONE command — the URL must not be split across lines)
curl -L https://get.daocloud.io/docker/compose/releases/download/1.5.2/docker-compose-`uname -s`-`uname -m` > /usr/local/bin/docker-compose
chmod +x /usr/local/bin/docker-compose
# Check the docker-compose version
docker-compose -v
# Run docker-compose (Note: please stop the mysqld/rabbitmq-server/redis/cassandra services on the host machine to make sure the needed ports are not already in use.)
docker-compose up -d
# Check whether docker container has started successfully or not. (There are five running containers: mbk_server, mbk_worker,mbk_cassandra, mbk_mysql, mbk_redis, the state of containers are "Up")
docker-compose ps
Mysql:
# If you don't have a mysql client, you can run this command to connect to the mbk_mysql container
# (Note: this is ONE command, wrapped here for readability):
docker run -it --link mbk_mysql:mysql --rm mysql \
  sh -c 'exec mysql -h"$MYSQL_PORT_3306_TCP_ADDR" -P"$MYSQL_PORT_3306_TCP_PORT" -uroot -p"$MYSQL_ENV_MYSQL_ROOT_PASSWORD"'
# If you have installed a mysql client, use it to connect to mbk_mysql; you can get the password from docker-compose.yml in the compose-mbk folder
mysql -h your_host_ip -uroot -p
# after connect mbk_mysql, change default charset:
alter database mbackup character set utf8;
Cassandra:
# connect cassandra with cqlsh
docker run -it --link mbk_cassandra:cassandra --rm cassandra cqlsh cassandra
# create mbk keyspace
create keyspace mbackup with replication={'class': 'SimpleStrategy', 'replication_factor': 1};
Redis:
# connect redis with redis-cli
- Port Hint:
docker run -it --link mbk_redis:redis --rm redis redis-cli -h redis -p 6379 -a passwd
- Port Expose:
redis-cli -h host -a passwd
1> Clone code (*If you have cloned, please pass this step*)
<code>git clone git@www.cloudraid.com.cn:puya/mbackup.git</code>
2> Enter into `mbackup/deploy` folder and Modify `deployConfig_local.json`
Note : The port of sshd is mbkserver: 50001 and mbkworker: 50002. The address of mysql/redis/rabbitmq/cassandra use service name in `docker-compose.yml`. For Example: mysql address should be `mbk_mysql` which is mapping to a ip address via host file in container.
Example File Content:
server
"192.168.1.191:50001"
worker
"192.168.1.191:50002"
portal
"192.168.1.191:50001"
mysql database address
"mbk_mysql"
mysql password:
"puyacn#1.."
redis address
"mbk_redis"
cassandra address
"mbk_cassandra"
rabbitmq-server
"mbk_rabbitmq"
3> Deploy Step:
a> Deploy Mbkserver
python deployV2.py mbkserver -c deploy -a -l local
b> Deploy Mbkworker
python deployV2.py mbkworker -c deploy -a -l local
c> Deploy Mbkportal
python deployV2.py mbkportal -c deploy -a -l local
4> Finish
Client Silent Install Command Line:
C:\Users\Salt\Desktop\wbksetup.exe /S /server_host=192.168.1.155 /ar=1
- Need to modify the CD-ROM ResourceSubType to vmware.cdrom.remotepassthrough
- re-compute the sha1sum of the ovf file and change the sha1 value in the .mf file
制作OVA注意事项
Note:
Volume写入不了,需要disable SELinux:
- check Selinux的状态:/usr/sbin/sestatus -v
- disable Selinux :
vi /etc/selinux/config
将SELINUX=enforcing
改成SELINUX=disabled
- temporarily fix :
su -c "setenforce 0"
- set selinux rule to fix this:
chcon -Rt svirt_sandbox_file_t /path/to/volume
Iptables 重启后,docker的规则被清除掉:
iptables -t nat --list
查看iptables中的NAT转换规则
- 修改iptables 配置文件:
vim /etc/sysconfig/iptables-config
将 IPTABLES_SAVE_ON_STOP 和 IPTABLES_SAVE_ON_RESTART 的 no 改为 yes
copy文件到container中:
- 在container中安装scp:
yum install openssh-clients
- Example:
scp -P 50002 -r storageManager.py hosted.py root@192.168.1.142:/opt/mbk/mbkworker/utils/
添加存储功能:
- docker-compose.yml:
./mbk/storage
-->/mnt/storage
- 渠道用户登录后,添加本地存储: bucket0, bucket1, ...
- 备份的数据存储在 /mnt/storage
- 客户端支持http模式:mbk_storage_proto http;
- PTBUS 配置:
{
"access_key":"WLG36LHZ1HXERPTRH4H9",
"access_secret":"qZXfctq9tMZfIJhcULiW4CnuqBqCejsVlEz2X2Du",
"bucket":"bucket4",
"url":"116.62.61.218"
}
- 网卡修改 UUID
- 设置上海时区: cp /usr/share/zoneinfo/Asia/Shanghai /etc/localtime
- 同步时间: yum install ntp; ntpdate 1.cn.pool.ntp.org; systemctl enable ntpd
私密云添加存储配置:
- 私密云是标准的S3接口,经测试,只能通过http的方式进行上传和下载。所以需要在mbkclient.conf文件中添加一项:
mbk_storage_proto http;
例如:
location ~ ^/(task|user|sysinfo|device)/ {
mbk_storage_proto http;
mbk_backend_process;
mbk_backend_debug on;
mbk_backend_upstream https://192.168.1.189:443;
}
Note: 在客户端build的时候需要加入此项
- Install mysql
wget http://repo.mysql.com/mysql-community-release-el7-5.noarch.rpm
rpm -ivh mysql-community-release-el7-5.noarch.rpm
yum install mysql-server
# 重置密码
$ mysql -u root
mysql> use mysql
mysql> update user set password=password('puyacn#1..') where user='root';
mysql> flush privileges;
mysql> exit;
// create database mbackup and set charset
$ mysql -uroot -p
mysql> alter database mbackup character set utf8;
Config mysql master-slave
主数据库:192.168.1.177
从数据库:192.168.1.176
1 修改主数据库master
a> 创建用于同步的用户账户:
mysql> CREATE USER 'backup'@'192.168.1.176' IDENTIFIED BY 'backupcn1..';#创建用户
mysql> GRANT REPLICATION SLAVE ON *.* TO 'backup'@'192.168.1.176';#分配权限
mysql> flush privileges; #刷新权限
b> 在配置文件:/etc/my.conf中插入如下两行:
[mysqld]
log-bin=mysql-bin
server-id=1
c> 重启mysql 查看master状态,并记录二进制文件名和位置。
mysql > SHOW MASTER STATUS;
+------------------+----------+--------------+------------------+
| File | Position | Binlog_Do_DB | Binlog_Ignore_DB |
+------------------+----------+--------------+------------------+
| mysql-bin.000003 | 73 | test | manual,mysql |
+------------------+----------+--------------+------------------+
2 修改从数据库slave
a> 导入主数据库的数据
mysql> create database mbackup default charset utf8;
mysql> use mbackup;
mysql> source /root/backup.sql;
b> 修改mysql配置:/etc/my.cnf
[mysqld]
server-id=2 #设置server-id,必须唯一
c> 重启mysql,打开mysql会话,执行同步SQL语句
mysql> CHANGE MASTER TO
-> MASTER_HOST='192.168.1.177',
-> MASTER_USER='backup',
-> MASTER_PASSWORD='backupcn1..',
-> MASTER_LOG_FILE='mysql-bin.000003',
-> MASTER_LOG_POS=73;
d> 启动slave同步进程
mysql> start slave;
e> 查看slave状态
show slave status\G
*************************** 1. row ***************************
Slave_IO_State: Waiting for master to send event
Master_Host: 192.168.1.177
Master_User: backup
Master_Port: 3306
Connect_Retry: 60
Master_Log_File: mysql-bin.000013
Read_Master_Log_Pos: 11662
Relay_Log_File: mysqld-relay-bin.000022
Relay_Log_Pos: 11765
Relay_Master_Log_File: mysql-bin.000013
Slave_IO_Running: Yes # 两者都为Yes是正常,如果Slave_IO_Running为Connecting
Slave_SQL_Running: Yes # 则检查网络,查看主机的3306端口是否开放
Replicate_Do_DB:
Replicate_Ignore_DB:
Install redis
yum install epel-release
yum install redis
//change redis conf file
vim /etc/redis.conf
将bind 127.0.0.1注释掉
//启动服务
service redis start
//修改密码 (在 /etc/redis.conf 中设置)
requirepass 111111
//重启服务
service redis restart
Install rabbitmq
wget http://www.rabbitmq.com/releases/rabbitmq-server/v3.6.6/rabbitmq-server-3.6.6-1.el6.noarch.rpm
yum -y install rabbitmq-server-3.6.6-1.el6.noarch.rpm
service rabbitmq-server start
rabbitmqctl add_user mbk mbkpwd
rabbitmqctl add_vhost vhost_mbk
rabbitmqctl set_permissions -p vhost_mbk mbk ".*" ".*" ".*"
Install cassandra
pass
Install ElasticSearch
# download elasticsearch and install
wget https://artifacts.elastic.co/downloads/elasticsearch/elasticsearch-6.4.1.rpm
rpm -ivh elasticsearch-6.4.1.rpm
service elasticsearch start
# (Optional) you can download kibana to post api with dev-tool
wget https://artifacts.elastic.co/downloads/kibana/kibana-6.4.1-x86_64.rpm
rpm -ivh kibana-6.4.1-x86_64.rpm
service kibana start
# config elasticsearch yml
vim /etc/elasticsearch/elasticsearch.yml
'''
cluster.name: mbk
http.port: 9200
network.host: 0.0.0.0
'''
service elasticsearch restart
# config kibana yml
vim /etc/kibana/kibana.yml
'''
server.host: "0.0.0.0"
server.port: 5601
elasticsearch.url: "http://localhost:9200"
'''
service kibana restart
# open firewall to enable port
firewall-cmd --zone=public --add-port=9200/tcp --permanent
firewall-cmd --zone=public --add-port=5601/tcp --permanent
firewall-cmd --reload
# add elasticsearch to pip
# test
curl http://192.168.1.144:9200/
curl http://192.168.1.144:5601/app/kibana
Deploy code
copy env and 3rdParty to /opt/mbk
python deployV2.py mbkserver -a -c deploy -l vpc
Note: libmysqlclient_r.so.16: cannot open shared object file: No such file or directory
cp libmysqlclient_r.so.16 to /usr/lib64
Note: nginx start -> libpcre.so.0: cannot open shared object file: No such file or directory
ln -s /lib64/libpcre.so.1 /lib64/libpcre.so.0
开放443端口
yum install firewalld firewalld-config
firewall-cmd --zone=public --add-port=443/tcp --permanent
service firewalld restart
存储配置
系统盘和数据盘分离(至少需要添加两块磁盘)
fdisk -l #查看磁盘是否已分配
fdisk /dev/sdb #发现有磁盘,路径为/dev/sdb。然后使用fdisk命令进行建立分区
输入命令依次是:n -> p -> 1 -> 默认 -> 默认 -> w
fdisk -l #查看一下,应该已经有了分区
mkfs.xfs -f /dev/sdb1 #建好分区后要格式化分区,建立文件系统,最好和其他几个分区保持一致
mount /dev/sdb1 /opt/mbk/storage # 文件系统建好后,选择挂载到/opt/mbk/storage 下
chown -R mbk:mbk /opt/mbk/storage #修改目录所属权限
\# 系统安装好后,发现home分区过大,想从home分区中拿出100G给/分区
\# 针对XFS系统
umount /home/
lvreduce -L -100G /dev/mapper/centos-home
mkfs.xfs /dev/mapper/centos-home -f
mount /dev/mapper/centos-home /home/
df -hT # 再次查看分区,发现home分区已经减小了100G,只不过这个分区里之前的数据都没有了
vgdisplay # 然后将上面从home分区拿出的100G放到/分区下
lvextend -L +100G /dev/mapper/centos-root
xfs_growfs /dev/mapper/centos-root
df -hT
\# 针对ext2、ext3、ext4文件系统
umount /home/
resize2fs -p /dev/mapper/vg_weidianserver2-lv_home 20G
mount /home
df -h
lvextend -L +812G /dev/mapper/vg_weidianserver2-lv_root
resize2fs -p /dev/mapper/vg_weidianserver2-lv_root
df -h
[解决linux系统CentOS下调整home和根分区大小的方法][3]
mount cifs 开机自启动
//192.168.1.33/webackup_share /opt/mbk/test_storage/bucket0/1 cifs user=Everyone,password=,vers=2.0,gid=994,uid=996 0 0