# Preparation

# Environment

# Golang

Recommended version: 1.18 or above

$ go version
go version go1.18 linux/amd64

Enable Go modules, and do not set GOPATH to the project directory:

export GO111MODULE=on
export GOPROXY=https://goproxy.cn,direct
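If you want to confirm the module settings took effect, you can print them back:

go env GO111MODULE GOPROXY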

# Node

No specific version requirement, but don't use anything too old.

# Middleware Clusters

Only single-machine, multi-node pseudo-cluster setups are shown here. For production, deploy the nodes on separate hosts, or use Docker Swarm to manage the cluster.

# RabbitMQ Cluster

Start three RabbitMQ containers:

sudo docker run -d --restart=always --hostname rabbit01 --name mq01 -p 5671:5672 -p 15671:15672 -e RABBITMQ_ERLANG_COOKIE="rabbitmq_cluster_cookie" rabbitmq:3-management

sudo docker run -d --restart=always  --hostname rabbit02 --name mq02 --link mq01:mylink01 -p 5672:5672 -p 15672:15672 -e RABBITMQ_ERLANG_COOKIE="rabbitmq_cluster_cookie" rabbitmq:3-management

sudo docker run -d --restart=always --hostname rabbit03 --name mq03 --link mq01:mylink02 --link mq02:mylink03 -p 5673:5672 -p 15673:15672 -e RABBITMQ_ERLANG_COOKIE="rabbitmq_cluster_cookie" rabbitmq:3-management

Enter container 2:

sudo docker exec -it mq02 /bin/bash

Join container 2 to the cluster:

rabbitmqctl stop_app
rabbitmqctl join_cluster rabbit@rabbit01
rabbitmqctl start_app

Enter container 3:

sudo docker exec -it mq03 /bin/bash

Join container 3 to the cluster:

rabbitmqctl stop_app
rabbitmqctl join_cluster rabbit@rabbit01
rabbitmqctl start_app
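At this point the cluster should contain all three nodes. You can verify the membership from any node, e.g. while still inside the container:

rabbitmqctl cluster_status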

Configure the queues as mirrored queues.

(Screenshot: mirrored-queue policy configuration)
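The original shows this step in the management UI; equivalently, a classic mirrored-queue policy can be applied from the CLI on any cluster node. A minimal sketch (the policy name ha-all is arbitrary):

rabbitmqctl set_policy ha-all "^" '{"ha-mode":"all","ha-sync-mode":"automatic"}'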

# Redis Cluster

Three masters and three replicas.

Create a dedicated network for the Redis cluster (a subnet is declared so that the fixed container IPs below can be assigned):

docker network create redis --subnet 172.38.0.0/16

Generate six Redis configuration files:

rm -rf /mydata/redis
for port in $(seq 1 6);
do
mkdir -p /mydata/redis/node-${port}/conf
touch /mydata/redis/node-${port}/conf/redis.conf
cat <<EOF>>/mydata/redis/node-${port}/conf/redis.conf
port 6379
requirepass <Password>
masterauth <Password>
protected-mode no
cluster-enabled yes
cluster-config-file nodes.conf
cluster-node-timeout 5000
cluster-announce-ip 172.38.0.1${port}
cluster-announce-port 6379
cluster-announce-bus-port 16379
appendonly yes
EOF
done

Start six Redis containers:

for port in $(seq 1 6);
do
docker run -p 637${port}:6379 -p 1637${port}:16379 --name redis-${port} -v /mydata/redis/node-${port}/data:/data -v /mydata/redis/node-${port}/conf/redis.conf:/etc/redis/redis.conf -d --net redis --ip 172.38.0.1${port} redis:6.2.6-alpine redis-server /etc/redis/redis.conf
done
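Before creating the cluster, you can check that all six containers are running and were assigned the expected addresses:

docker ps --filter name=redis-
docker network inspect redis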

Enter one of the nodes and create the cluster:

docker exec -it redis-1 /bin/sh
redis-cli -a <Password> --cluster create ip:6371 ip:6372 ip:6373 ip:6374 ip:6375 ip:6376 --cluster-replicas 1
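After the create command finishes, you can sanity-check the cluster from the same shell:

redis-cli -c -a <Password> cluster info
redis-cli -c -a <Password> cluster nodes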

# Elasticsearch Cluster

Three Elasticsearch nodes + es-head + Kibana.

docker-compose.yml

version: '3'
services:
  es-master:
    image: elasticsearch:7.8.0
    container_name: es-master
    privileged: true
    environment:
      - cluster.name=elasticsearch-cluster
      - node.name=es-master
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      #- discovery.seed_hosts=es-node1,es-node2
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - cluster.initial_master_nodes=es-master,es-node1,es-node2
      - "ES_JAVA_OPTS=-Xms64m -Xmx512m"
      - "discovery.zen.ping.unicast.hosts=es-master,es-node1,es-node2"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es/master/data:/usr/share/elasticsearch/data
      - ./es/master/logs:/usr/share/elasticsearch/logs
    ports:
      - 9200:9200
    restart: always
    networks:
      - elastic

  es-node1:
    image: elasticsearch:7.8.0
    container_name: es-node1
    privileged: true
    environment:
      - cluster.name=elasticsearch-cluster
      - node.name=es-node1
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      #- discovery.seed_hosts=es-node1,es-node2
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - cluster.initial_master_nodes=es-master,es-node1,es-node2
      - "ES_JAVA_OPTS=-Xms64m -Xmx512m"
      - "discovery.zen.ping.unicast.hosts=es-master,es-node1,es-node2"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es/node1/data:/usr/share/elasticsearch/data
      - ./es/node1/logs:/usr/share/elasticsearch/logs
    ports:
      - 9201:9200
    restart: always
    networks:
      - elastic
      
  es-node2:
    image: elasticsearch:7.8.0
    container_name: es-node2
    privileged: true
    environment:
      - cluster.name=elasticsearch-cluster
      - node.name=es-node2
      - node.master=true
      - node.data=true
      - bootstrap.memory_lock=true
      #- discovery.seed_hosts=es-node1,es-node2
      - http.cors.enabled=true
      - http.cors.allow-origin=*
      - cluster.initial_master_nodes=es-master,es-node1,es-node2
      - "ES_JAVA_OPTS=-Xms64m -Xmx512m"
      - "discovery.zen.ping.unicast.hosts=es-master,es-node1,es-node2"
      - "discovery.zen.minimum_master_nodes=2"
    ulimits:
      memlock:
        soft: -1
        hard: -1
    volumes:
      - ./es/node2/data:/usr/share/elasticsearch/data
      - ./es/node2/logs:/usr/share/elasticsearch/logs
    ports:
      - 9202:9200
    restart: always
    networks:
      - elastic

  kibana:
    image: kibana:7.8.0
    container_name: kibana
    environment:
      - SERVER_NAME=kibana
      - ELASTICSEARCH_HOSTS=http://es-master:9200
      - XPACK_MONITORING_ENABLED=true
    ports:
      - 5601:5601
    depends_on:
      - es-master
    restart: always
    networks:
      - elastic
      
  es-head:
    image: tobias74/elasticsearch-head:6
    container_name: es-head
    restart: always
    ports:
      - 9100:9100
    networks:
      - elastic
      
networks:
  elastic:
    driver: bridge
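Elasticsearch commonly refuses to bootstrap if the host's vm.max_map_count is too low; if the nodes exit on startup, raise it first:

sudo sysctl -w vm.max_map_count=262144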

Bring the services up:

sudo docker-compose up -d es-master es-node1 es-node2 kibana es-head
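Once the nodes are up (the first start may take a minute or two), you can check that all three joined the cluster:

curl "http://127.0.0.1:9200/_cat/nodes?v"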

# MongoDB Replica Set

One primary, one secondary, and one arbiter.

Create data directories for the three MongoDB nodes:

mkdir -p /mongodb-cluster/data/master
mkdir -p /mongodb-cluster/data/secondary
mkdir -p /mongodb-cluster/data/arbiter

docker-compose.yml

version: '3'
services:
  master:
    image: mongo:5.0.5
    container_name: master
    restart: always
    ports:
      - 27017:27017
    volumes:
      - ./data/master:/data/db
    command: mongod --dbpath /data/db --replSet ossSet --oplogSize 128
  secondary:
    image: mongo:5.0.5
    container_name: secondary
    restart: always
    ports:
      - 27018:27017
    volumes:
      - ./data/secondary:/data/db
    command: mongod --dbpath /data/db --replSet ossSet --oplogSize 128
  arbiter:
    image: mongo:5.0.5
    container_name: arbiter
    restart: always
    ports:
      - 27019:27017
    volumes:
      - ./data/arbiter:/data/db
    command: mongod --replSet ossSet --oplogSize 128

Bring the services up:

docker-compose up -d

Enter the master container:

docker exec -it master mongo

Initialize the replica set:

rs.initiate({
    "_id" : "ossSet",
    "members" : [
        {
            "_id" : 0,
            "host" : "ip:27017"
        },
        {
            "_id" : 1,
            "host" : "ip:27018"
        },
        {
            "_id" : 2,
            "host" : "ip:27019",
            "arbiterOnly" : true
        }
    ]
})
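You can then confirm the member roles (PRIMARY / SECONDARY / ARBITER), either with rs.status() in the shell you already have open, or from the host:

docker exec -it master mongo --eval "rs.status()"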

# Setting Environment Variables

Export each of the following in the format shown below: the RabbitMQ cluster connection URLs, the Elasticsearch cluster connection URLs, the Redis cluster connection URLs, the Redis password, the MongoDB cluster connection URL, and the log output directory.

The log directory must be created on the system beforehand (see the example after the export block below).

# RabbitMQ
export RABBITMQ_SERVER=amqp://test:test@127.0.0.1:5671,amqp://test:test@127.0.0.1:5672,amqp://test:test@127.0.0.1:5673

# Elasticsearch
export ES_SERVER=127.0.0.1:9200,127.0.0.1:9201,127.0.0.1:9202

# Redis
export REDIS_CLUSTER=127.0.0.1:6371,127.0.0.1:6372,127.0.0.1:6373,127.0.0.1:6374,127.0.0.1:6375,127.0.0.1:6376
export REDIS_PASSWORD=XXXXXX

# MongoDB
export MONGO_SERVER=mongodb://127.0.0.1:27017,127.0.0.1:27018,127.0.0.1:27019/?replicaSet=ossSet

# Log output directory
export LOG_DIRECTORY=/tmp/log/
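For the LOG_DIRECTORY value above, creating it beforehand just means, for example:

mkdir -p /tmp/log/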

In addition, the project integrates a DingTalk group chat bot, so you also need to set the bot's access token and signing secret as environment variables, like this:

export DINGTALK_TOKEN=60fb83c9adf34f9fb440bfc7*************
export DINGTALK_SECRET=SECf97815***************************
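If you want to sanity-check the token and secret before running the project, a rough sketch of sending a signed test message is shown below, based on DingTalk's documented signing scheme (HMAC-SHA256 over timestamp + "\n" + secret, Base64-encoded, then URL-encoded). It assumes GNU date, openssl, jq and curl are available, and that the bot uses the "sign" security setting:

# millisecond timestamp required by the signing scheme
timestamp=$(date +%s%3N)
# HMAC-SHA256 of "timestamp\nsecret" keyed with the secret, Base64-encoded
sign=$(printf '%s\n%s' "${timestamp}" "${DINGTALK_SECRET}" | openssl dgst -sha256 -hmac "${DINGTALK_SECRET}" -binary | base64)
# percent-encode the signature for use in the query string
sign=$(printf '%s' "${sign}" | jq -sRr @uri)
curl -s "https://oapi.dingtalk.com/robot/send?access_token=${DINGTALK_TOKEN}&timestamp=${timestamp}&sign=${sign}" \
  -H 'Content-Type: application/json' \
  -d '{"msgtype":"text","text":{"content":"setup check"}}'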