redis-shake tool
# this is the configuration of redis-shake.
# if you have any problem, please visit https://github.com/alibaba/RedisShake/wiki/FAQ
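# this file is typically passed to the redis-shake binary via its -conf flag, with the
# running mode chosen via -type, e.g. (the binary name depends on your platform/build):
#   ./redis-shake.linux -conf=redis-shake.conf -type=sync
# where -type is one of the modes referenced below: sync, dump, restore, decode or rump.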
# id
id = redis-shake
# log file. if not set, the log is printed to stdout (e.g. /var/log/redis-shake.log).
log.file =
# log level: "none", "error", "warn", "info", "all". default is "info".
log.level = info
# pid path, the directory that stores the pid file (e.g. /var/run/). if not set, it
# defaults to the directory the binary runs in.
# note this is a directory; the actual pid file is `{pid_path}/{id}.pid`.
pid_path =
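# e.g., with pid_path = /var/run/ and the id configured above, the pid file will be
# /var/run/redis-shake.pid.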
# pprof port
system_profile = 9310
# restful port for viewing metrics.
http_profile = 9320
# runtime.GOMAXPROCS, 0 means use cpu core number: runtime.NumCPU()
ncpu = 0
# number of parallel routines used in RDB file syncing. default is 64.
parallel = 32
# source redis configuration.
# used in `dump`, `sync` and `rump`.
# ip:port
# the source address can be a single redis db node or a cluster with several db nodes
# separated by semicolon(;). if the type is `rump`, the source can also be a proxy address.
# the source supports the open source cluster, as well as proxy mode where the db nodes
# are exposed and accessible, e.g., ip1:port1;ip2:port2;ip3:port3.
source.address = 127.0.0.1:20441
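# e.g., a three-node source (addresses are illustrative):
#source.address = 10.0.0.1:6379;10.0.0.2:6379;10.0.0.3:6379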
# password.
source.password_raw = 123456
# auth type, don't modify it
source.auth_type = auth
# target redis configuration.
# used in `restore`, `sync` and `rump`.
# ip:port
# the target address can be a single node or a cluster with several db nodes separated by
# semicolon(;). the target supports the open source cluster architecture and proxy mode,
# e.g., ip1:port1;ip2:port2;ip3:port3.
target.address = 127.0.0.1:20551
# password.
target.password_raw = 123456
# auth type, don't modify it
target.auth_type = auth
# the type of target redis can be `standalone`, `proxy` or `cluster`.
# `standalone`: a single db node (including master-slave mode).
# `proxy`: a proxy layer in front of redis. data will be inserted in a round-robin way;
#          each proxy gets one long-lived connection, and the data of one source db is
#          written to only one proxy.
# `cluster`: open source cluster (not supported currently).
target.type = standalone
# all the data will be written into this db. < 0 means disable.
target.db = -1
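# e.g., target.db = 0 writes all data into db0 on the target regardless of the source db;
# the default -1 disables this mapping, so keys stay in their source db.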
# input RDB file(s). if not set, the default is to read from stdin ('/dev/stdin').
# used in `decode` and `restore`.
# if the input is a list separated by semicolon(;), e.g., rdb.0;rdb.1;rdb.2, redis-shake
# will restore the files one by one.
rdb.input = local
# output RDB file prefix.
# used in `decode` and `dump`.
# e.g., if the input has 3 dbs, the dump files will be:
# ${output_rdb}.0, ${output_rdb}.1, ${output_rdb}.2
rdb.output = local_dump
# the concurrency of data fetching. in `dump` and `sync` mode the default is the number of
# dbs in source.address; in `restore` mode the default is len(rdb.input).
# used in `dump`, `sync` and `restore`.
# e.g., if there are 5 db nodes / input rdb files but rdb.parallel = 3, only 3 dbs are
# pulled in full concurrently; the 4th rdb is pulled only after one of them finishes, and
# so on.
rdb.parallel =
# used for expiring keys: sets the time gap to apply when the source and target
# timestamps are not the same; the target end needs to add this value.
fake_time =
# whether to force overwrite when the key already exists on the target.
# used in `restore`, `sync` and `rump`.
rewrite = true
# filter by db, key or slot.
# choose the given dbs; e.g., 5 lets only db5 through. default is all.
# used in `restore` and `sync`.
filter.db =
# filter keys by prefix string; only keys with the given prefixes pass. multiple prefixes
# are separated by ';', e.g., a;b;c. default is all.
# used in `restore` and `sync`.
filter.key =
# filter the given slots; only keys in these slots pass. multiple slots are separated by
# ';', e.g., 1;2;3.
# used in `sync`.
filter.slot =
# big key threshold, default is 500 * 1024 * 1024 bytes. normally a key is written to the
# target directly with the restore command; if the value of a key is bigger than this
# threshold, its fields will be split and written to the target in order, batch by batch.
big_key_threshold = 524288000
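# note: 524288000 bytes = 500 * 1024 * 1024, i.e. 500 MiB.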
# use the psync command.
# used in `sync`.
# the sync command is used by default; enable this to use psync instead.
psync = false
# whether to enable metrics.
# used in `sync`.
metric = true
# whether to print metrics to the log.
metric.print_log = false
# heartbeat
# the url that redis-shake sends heartbeats to.
# used in `sync`.
#heartbeat.url = http://127.0.0.1:8000
heartbeat.url =
# heartbeat interval in seconds.
heartbeat.interval = 3
# extra information to include in the heartbeat data.
heartbeat.external = test external
# the local network interface used to obtain the ip address, e.g., "lo", "eth0", "en0".
heartbeat.network_interface =
# sender settings.
# sender flush buffer size in bytes: once the buffer exceeds this threshold, it is
# force-flushed and sent.
# used in `sync`.
sender.size = 104857600
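# note: 104857600 bytes = 100 * 1024 * 1024, i.e. 100 MiB.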
# sender flush threshold in number of oplogs: once more oplogs than this are buffered,
# the buffer is force-flushed and sent.
# used in `sync`.
sender.count = 5000
# delay channel size. once an oplog is sent to the target redis, its id and timestamp are
# also stored in this delay queue; the timestamp is used to calculate the delay when the
# ack is received from the target redis. this queue is used for the delay metric.
# used in `sync`.
sender.delay_channel_size = 65535
# enable the TCP keep_alive option when connecting to redis.
# the unit is second. 0 means disable.
keep_alive = 0
# used in `rump`.
# number of keys captured by each scan. default is 100 when not set.
scan.key_number = 50
# used in `rump`.
# some special redis deployments, such as alibaba cloud and tencent cloud, don't use the
# default `scan` command; redis-shake adapts to their variants. currently the tencent
# cloud cluster edition ("tencent_cluster") and the alibaba cloud cluster edition
# ("aliyun_cluster") are supported.
scan.special_cloud =
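# e.g., for an alibaba cloud cluster edition source:
#scan.special_cloud = aliyun_cluster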
# used in `rump`.
# some cloud versions support neither sync/psync nor scan; for these, redis-shake can read
# the full key list from a given file and fetch those keys, one key per line.
scan.key_file =
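# the key file is plain text with one key per line, e.g. (path and keys are illustrative):
#   user:1000
#   user:1001
#scan.key_file = /path/to/keys.txt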
# ----------------splitter----------------
# the variables below are unused in the current open source version, so don't set them.
# replace hash tag.
# used in `sync`.
replace_hash_tag = false
# used in `restore` and `dump`.
extra = false