redis-shake tool

# this is the configuration of redis-shake.
# id
id = redis-shake
# log file. if left empty, logs are printed to stdout
log_file =
# pprof port
system_profile = 9310
# restful port for viewing metrics
http_profile = 9320
# runtime.GOMAXPROCS, 0 means use cpu core number: runtime.NumCPU()
ncpu = 0
# parallel routines number used in RDB file syncing.
parallel = 4
# input RDB file. default is stdin ('/dev/stdin').
# used in `decode` and `restore`.
# in `decode` or `restore` mode, this is the RDB file to read from.
input_rdb = local
# output RDB file. default is stdout ('/dev/stdout').
# used in `decode` and `dump`.
# in `decode` or `dump` mode, this is the RDB file to write to.
output_rdb = local_dump
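# a minimal sketch of the RDB paths for a one-off `decode` run; the file names below
# are hypothetical examples, not shipped defaults:
# input_rdb = /data/backup/dump.rdb
# output_rdb = /data/backup/dump.decoded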
# source redis configuration.
# used in `dump` and `sync`.
# ip:port
# source redis address
source.address = 127.0.0.1:20441
# password.
source.password_raw = 123456
# auth type, don't modify it
source.auth_type = auth
# version number, default is 6 (6 for Redis Version <= 3.0.7, 7 for >=3.2.0, 9 for >= 5.0)
source.version = 9
# target redis configuration.
# used in `restore` and `sync`.
# ip:port
# target redis address
target.address = 127.0.0.1:20551
# password.
target.password_raw = 123456
# auth type, don't modify it
target.auth_type = auth
# version number, default is 6 (6 for Redis Version <= 3.0.7, 7 for >=3.2.0, 9 for >= 5.0)
target.version = 6
# all data will be written into this db on the target. < 0 means disabled.
# used in `restore` and `sync`.
target.db = -1
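# for example, target.db = 0 would route every migrated key into db 0 on the target
# (a hypothetical value; the shipped default of -1 keeps the feature disabled).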
# used for expired keys: when the source and target timestamps are not the same, the target side adds this offset.
fake_time =
# force overwrite when the key already exists on the destination.
# used in `restore` and `sync`.
# whether to overwrite when the source and destination hold the same key.
rewrite = true
# filter by db, key, or slot
# choose which dbs to let through, e.g., 5 selects only db5. default is all.
# used in `restore` and `sync`.
# db filtering only lets the specified db pass through.
filter.db =
# filter key with prefix string. multiple keys are separated by ';'.
# e.g., a;b;c
# default is all.
# used in `restore` and `sync`.
# key filtering only lets keys with the given prefixes through, separated by ';'.
filter.key =
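# a hedged example with made-up prefixes: filter.key = user:;session: would only let
# keys whose names start with "user:" or "session:" pass through.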
# filter given slot, multiple slots are separated by ';'.
# e.g., 1;2;3
# used in `sync`.
# slot filtering only lets the specified slots through.
filter.slot =
# big key threshold, the default is 500 * 1024 * 1024. the fields of a big key are split up during processing.
# big keys get special handling; this sets the size threshold above which a key counts as big.
big_key_threshold = 524288000
# use psync command.
# used in `sync`.
# the sync command is used by default; enabling this switches to psync.
psync = false
# enable metric
# used in `sync`.
# whether to enable metrics
metric = true
# print in log
# whether metrics are printed into the log
metric.print_log = false
# heartbeat
# send heartbeat to this url
# used in `sync`.
# heartbeat url; redis-shake will send heartbeats to this address
#heartbeat.url = http://127.0.0.1:8000
heartbeat.url =
# interval in seconds
# heartbeat keep-alive period
heartbeat.interval = 3
# external info which will be included in heartbeat data.
# extra information added to the heartbeat payload
heartbeat.external = test external
# local network card to get ip address, e.g., "lo", "eth0", "en0"
# network interface used to obtain the local ip address
heartbeat.network_interface =
# sender information.
# sender flush buffer size in bytes.
# used in `sync`.
# if the buffered bytes exceed this threshold, the buffer is flushed and sent immediately.
sender.size = 104857600
# sender flush buffer size in number of oplogs.
# used in `sync`.
# if the buffered oplog count exceeds this threshold, the buffer is flushed and sent immediately.
sender.count = 5000
# delay channel size. once an oplog is sent to the target redis, its id and timestamp are also stored in this delay queue.
# the timestamp is used to calculate the delay when the ack is received from the target redis.
# used in `sync`.
# this queue feeds the latency metric.
sender.delay_channel_size = 65535
# enable the TCP keep_alive option when connecting to redis.
# the unit is seconds.
# 0 means disabled.
keep_alive = 0
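# for example, keep_alive = 60 would enable TCP keep-alive on the redis connections
# with a 60-second period (the exact probe semantics are assumed, not documented here).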
# ----------------splitter----------------
# the variables below are not used in the current open-source version, so leave them unset.
# replace hash tag.
# used in `sync`.
replace_hash_tag = false
# used in `restore` and `dump`.
extra = false
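# a minimal sketch of launching redis-shake against this file; the binary name and
# flags are assumed from the 2.x command line, where -type selects the mode and
# -conf points at this configuration:
# ./redis-shake.linux -type=sync -conf=redis-shake.conf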