# this is the configuration of redis-shake.
# if you have any problem, please visit https://github.com/alibaba/RedisShake/wiki/FAQ

# id
id = redis-shake

# log file; if not set, logs are printed to stdout (e.g. /var/log/redis-shake.log)
log.file =
# log level: "none", "error", "warn", "info", "all". default is "info".
log.level = info
# pid path, the directory where the process file is stored (e.g. /var/run/).
# if not set, it defaults to the directory the binary is executed from.
# note this is a directory; the actual pid file is `{pid_path}/{id}.pid`
pid_path =
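# for instance, with the id above and pid_path = /var/run/, the pid file would
# be /var/run/redis-shake.pid.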

# pprof port
system_profile = 9310
# restful port for viewing metrics
http_profile = 9320
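# with these defaults, metrics can be fetched over HTTP from the restful port,
# e.g. `curl http://127.0.0.1:9320/metric` (the /metric path is an assumption;
# check the FAQ link above if it does not respond).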

# runtime.GOMAXPROCS, 0 means use cpu core number: runtime.NumCPU()
ncpu = 0

# parallel routines number used in RDB file syncing. default is 64.
parallel = 32

# input RDB file. default is stdin ('/dev/stdin').
# used in `decode` and `restore`: this parameter is the RDB file to read.
input_rdb = local

# output RDB file. default is stdout ('/dev/stdout').
# used in `decode` and `dump`: this parameter is the RDB file to write.
output_rdb = local_dump

# source redis configuration.
# used in `dump`, `sync` and `rump`.
# ip:port of the source redis.
source.address = 127.0.0.1:20441
# password.
source.password_raw = 123456
# auth type, don't modify it
source.auth_type = auth

# target redis configuration.
# used in `restore`, `sync` and `rump`.
# ip:port of the target redis.
target.address = 127.0.0.1:20551
# password.
target.password_raw = 123456
# auth type, don't modify it
target.auth_type = auth
# all the data will be written into this db. < 0 means disable.
# used in `restore` and `sync`.
target.db = -1
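# e.g., target.db = 0 would funnel every migrated key into db 0 on the target,
# no matter which db it lived in on the source.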

# used for expired keys: the time gap to add on the target when the source and
# target timestamps are not the same.
fake_time =

# force rewrite when the destination already has the key during restore,
# i.e., whether to overwrite when source and destination share a key.
# used in `restore`, `sync` and `rump`.
rewrite = true

# filter db or key or slot
# choose these dbs, e.g., 5 means only db5 passes. default is all.
# used in `restore` and `sync`.
filter.db =
# filter keys by prefix string. multiple prefixes are separated by ';'.
# e.g., a;b;c
# default is all.
# used in `restore` and `sync`.
filter.key =
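# e.g., filter.key = user:;session: would let through only keys starting with
# "user:" or "session:" (hypothetical prefixes).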
# filter the given slots. multiple slots are separated by ';'.
# e.g., 1;2;3
# used in `sync`.
filter.slot =

# big key threshold, the default is 500 * 1024 * 1024 bytes. normally a key is
# written to the target directly with a restore call; if its value is bigger
# than this threshold, its fields are split and written to the target one by
# one, in order.
big_key_threshold = 524288000
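# sanity check on the default above: 500 * 1024 * 1024 = 524288000. a 100 MiB
# threshold, for example, would be 100 * 1024 * 1024 = 104857600.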

# use psync command.
# used in `sync`.
# the sync command is used by default; enabling this switches to psync.
psync = false

# enable metric
# used in `sync`.
metric = true
# whether to print metrics to the log
metric.print_log = false

# heartbeat
# the url to which redis-shake sends heartbeats.
# used in `sync`.
#heartbeat.url = http://127.0.0.1:8000
heartbeat.url =
# heartbeat keep-alive interval in seconds
heartbeat.interval = 3
# external info which will be included in heartbeat data.
heartbeat.external = test external
# local network card to get the ip address from, e.g., "lo", "eth0", "en0"
heartbeat.network_interface =

# sender information.
# sender flush buffer size in bytes; once the buffer exceeds this threshold it
# is forcibly flushed and sent.
# used in `sync`.
sender.size = 104857600
# sender flush buffer size in number of oplogs; once the buffer exceeds this
# threshold it is forcibly flushed and sent.
# used in `sync`.
sender.count = 5000
# delay channel size. once an oplog is sent to the target redis, its id and
# timestamp are also stored in this delay queue; the timestamp is used to
# calculate the time delay when the ack is received from the target redis
# (this queue backs the delay metric).
# used in `sync`.
sender.delay_channel_size = 65535

# enable the keep_alive option in TCP when connecting to redis.
# the unit is seconds. 0 means disable.
keep_alive = 0
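# e.g., keep_alive = 60 would enable TCP keep-alive, presumably with a
# 60-second period (the exact semantics of the value are an assumption here).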

# used in `rump`.
# number of keys captured per scan. default is 100 if not set.
scan.key_number = 50

# used in `rump`.
# some versions, such as the alibaba cloud and tencent cloud cluster editions,
# have special formats that differ from the normal `scan` command, and we adapt
# to them specially. currently "tencent_cluster" (tencent cloud cluster
# edition) and "aliyun_cluster" (alibaba cloud cluster edition) are supported.
scan.special_cloud =
# if the source is a tencent cloud cluster edition, pass the ids of its child
# nodes (obtained via the `cluster nodes` command) as a ';'-separated list.
# shake traverses and captures them serially, e.g.:
# "25b21f1836026bd49c52b2d10e09fbf8c6aa1fdc;da6041781b5d7fe21404811d430cdffea2bf84de"
# see the "custom commands" section of
# https://cloud.tencent.com/document/product/239/18336 for details.
scan.special_cloud.tencent.urls =
# if the source is an alibaba cloud cluster edition, pass the number of child
# nodes, e.g., 16
scan.special_cloud.aliyun.node_number =
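# putting the above together (values taken from the examples in the comments),
# a tencent cloud cluster source might look like:
#   scan.special_cloud = tencent_cluster
#   scan.special_cloud.tencent.urls = 25b21f1836026bd49c52b2d10e09fbf8c6aa1fdc;da6041781b5d7fe21404811d430cdffea2bf84de
# and an alibaba cloud cluster source with 16 child nodes:
#   scan.special_cloud = aliyun_cluster
#   scan.special_cloud.aliyun.node_number = 16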

# ----------------splitter----------------
# the variables below are unused in the current open source version, so don't set them.

# replace hash tag.
# used in `sync`.
replace_hash_tag = false

# used in `restore` and `dump`.
extra = false