# this is the configuration of redis-shake.
# if you have any problem, please visit https://github.com/alibaba/RedisShake/wiki/FAQ

# id
id = redis-shake

# log file. if not set, logs are printed to stdout (e.g. /var/log/redis-shake.log).
log.file =
# log level: "none", "error", "warn", "info", "all". default is "info".
log.level = info
# pid path: the directory where the process file is stored (e.g. /var/run/). if not
# set, it defaults to the working directory. note that this is a directory; the
# actual pid file is `{pid_path}/{id}.pid`.
pid_path =

# pprof port.
system_profile = 9310
# restful port, used to view metrics.
http_profile = 9320

# runtime.GOMAXPROCS, 0 means use cpu core number: runtime.NumCPU()
ncpu = 0

# number of parallel routines used in RDB file syncing. default is 64.
parallel = 32

# input RDB file. default is stdin ('/dev/stdin').
# used in `decode` and `restore`.
# if the input is a list separated by semicolons (;), e.g., rdb.0;rdb.1;rdb.2,
# redis-shake will restore the files one by one.
input_rdb = local

# output RDB file prefix.
# used in `decode` and `dump`.
# e.g., if the input has 3 dbs, the dump files are:
# ${output_rdb}.0, ${output_rdb}.1, ${output_rdb}.2
output_rdb = local_dump

# source redis configuration.
# used in `dump`, `sync` and `rump`.
# ip:port
# the source address can be a single redis db or an open source cluster with several
# db nodes separated by semicolons (;), e.g., ip1:port1;ip2:port2;ip3:port3.
# if the type is `rump`, the source can also be the address of a proxy cluster whose
# db nodes are exposed and accessible.
source.address = 127.0.0.1:20441
# password.
source.password_raw = 123456
# auth type, don't modify it.
source.auth_type = auth

# the concurrency of fetching data. default is the number of dbs in source.address.
# used in `dump` and `sync`.
# e.g., with 5 db nodes and source.parallel = 3, full data is pulled from only 3 dbs
# at a time; the 4th node's rdb is pulled only after one of the first 3 finishes,
# and so on.
source.parallel =

# target redis configuration.
# used in `restore`, `sync` and `rump`.
# ip:port
# the target address can be a cluster with several db nodes separated by semicolons
# (;), e.g., ip1:port1;ip2:port2;ip3:port3. both the open source cluster architecture
# and proxy mode are supported.
target.address = 127.0.0.1:20551
# password.
target.password_raw = 123456
# auth type, don't modify it.
target.auth_type = auth
# all the data will be written into this db. < 0 means disable.
target.db = -1

# the type of target redis: `opensource` or `proxy`. `opensource` means an open
# source cluster or a plain redis db node, and redis-shake uses the redis-go-cluster
# driver to write data. `proxy` means a proxy, and the redigo driver is used to
# insert data in a round-robin way.
# target.type = opensource

# used for expired keys: when the source and target timestamps differ, the target
# adds this time gap when setting expiry.
fake_time =

# whether to overwrite a key that already exists on the target.
# used in `restore`, `sync` and `rump`.
rewrite = true

# filter db, key or slot.
# let only the chosen db pass, e.g., 5 means only db5. default is all.
# used in `restore` and `sync`.
filter.db =

# filter keys by prefix string. multiple prefixes are separated by ';',
# e.g., a;b;c. default is all.
# used in `restore` and `sync`.
filter.key =

# let only the given slots pass. multiple slots are separated by ';', e.g., 1;2;3.
# used in `sync`.
filter.slot =
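# a hedged example combining the filter options documented above (the db number and
# key prefixes are hypothetical, not shipped defaults): keep only db0, and within it
# only keys prefixed with user_ or session_:
# filter.db = 0
# filter.key = user_;session_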
# big key threshold. default is 500 * 1024 * 1024 bytes. a normal key is written to
# the target with a single restore command; if the key's value is bigger than this
# threshold, all its fields are split and written to the target in order, one batch
# at a time.
big_key_threshold = 524288000

# use the psync command.
# used in `sync`.
# the sync command is used by default; enable this to use psync instead.
psync = false

# enable metric.
# used in `sync`.
metric = true
# whether to print metrics in the log.
metric.print_log = false

# heartbeat: redis-shake will send heartbeats to this url.
# used in `sync`.
#heartbeat.url = http://127.0.0.1:8000
heartbeat.url =
# heartbeat keep-alive interval in seconds.
heartbeat.interval = 3
# external info which will be included in the heartbeat data.
heartbeat.external = test external
# local network card used to get the ip address, e.g., "lo", "eth0", "en0".
heartbeat.network_interface =

# sender information.
# sender flush buffer size in bytes. once the buffer exceeds this threshold, it is
# flushed and sent.
# used in `sync`.
sender.size = 104857600
# sender flush buffer size in number of oplogs. once the buffered count exceeds this
# threshold, the buffer is flushed and sent.
# used in `sync`.
sender.count = 5000
# delay channel size. once an oplog is sent to the target redis, its id and
# timestamp are stored in this delay queue; the timestamp is used to calculate the
# time delay when the ack is received from the target redis. used for metric delay
# statistics.
# used in `sync`.
sender.delay_channel_size = 65535

# enable the keep_alive option in TCP when connecting to redis.
# the unit is seconds. 0 means disable.
keep_alive = 0

# used in `rump`.
# number of keys captured in each scan. default is 100.
scan.key_number = 50

# used in `rump`.
# some cloud redis versions, such as Alibaba Cloud and Tencent Cloud, have a special
# format that differs from the default `scan` command, and we adapt to them
# specially. currently supported: "tencent_cluster" for the Tencent Cloud cluster
# version and "aliyun_cluster" for the Alibaba Cloud cluster version.
scan.special_cloud =

# used in `rump`.
# some cloud versions support neither sync/psync nor scan. for these we support
# reading the key list from a given file, one key per line, and fetching those keys.
scan.key_file =

# ----------------splitter----------------
# the variables below are useless for the current open source version, so don't set them.

# replace hash tag.
# used in `sync`.
replace_hash_tag = false
# used in `restore` and `dump`.
extra = false
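# a minimal `rump` sketch for a cloud cluster, built from the scan options documented
# above. the addresses are hypothetical, and the invocation assumes the usual
# `./redis-shake -type=rump -conf=redis-shake.conf` launch form:
# source.address = proxy.example.com:6379
# target.address = 10.0.0.1:6379
# scan.key_number = 100
# scan.special_cloud = aliyun_cluster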