feature: Add support for GitHub Pages

v4
suxb201 1 year ago committed by suxb201
parent 4262d728bd
commit acd3e2eed0
  1. .github/release-drafter.yml (1)
  2. .github/workflows/ci.yml (6)
  3. .github/workflows/pages.yml (74)
  4. README.md (111)
  5. cmd/redis-shake/main.go (28)
  6. docs/.vitepress/config.ts (68)
  7. docs/package-lock.json (783)
  8. docs/src/en/api-examples.md (0)
  9. docs/src/en/index.md (0)
  10. docs/src/index.md (25)
  11. docs/src/zh/guide/config.md (155)
  12. docs/src/zh/guide/getting-started.md (48)
  13. docs/src/zh/guide/getting-started.png (BIN)
  14. docs/src/zh/markdown-examples.md (0)
  15. docs/src/zh/transform/examples.md (65)
  16. docs/src/zh/transform/getting-started.md (6)
  17. shake.toml (0)
  18. tests/cases/auth_acl.py (8)
  19. tests/helpers/shake.py (22)

@ -1,3 +1,4 @@
# https://github.com/release-drafter/release-drafter#example
name-template: 'redis-shake-v$RESOLVED_VERSION'
tag-template: 'v$RESOLVED_VERSION'
template: |

@ -8,6 +8,7 @@ jobs:
strategy:
matrix:
redis-version: [ "2.8", "3.0", "4.0", "5.0", "6.0", "7.0" ]
container: ubuntu:latest
steps:
- name: Git checkout
uses: actions/checkout@v2
@ -19,7 +20,8 @@ jobs:
- name: clone and make redis
run: |
sudo apt-get install git
apt-get update
apt-get install -y --no-install-recommends git build-essential ca-certificates
git clone https://github.com/redis/redis
cd redis
git checkout ${{ matrix.redis-version }}
@ -39,5 +41,5 @@ jobs:
- name: test
run: |
pip3 install -r tests/requirements.txt
python -m pip install -r tests/requirements.txt
sh test.sh

@ -1,33 +1,63 @@
# Sample workflow for building and deploying a VitePress site to GitHub Pages
#
name: Pages
on:
workflow_dispatch: { }
# Runs on pushes targeting the `main` branch. Change this to `master` if you're
# using the `master` branch as the default branch.
push:
branches:
- main
branches: [ v4 ]
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
# Sets permissions of the GITHUB_TOKEN to allow deployment to GitHub Pages
permissions:
contents: read
pages: write
id-token: write
# Allow only one concurrent deployment, skipping runs queued between the run in-progress and latest queued.
# However, do NOT cancel in-progress runs as we want to allow these production deployments to complete.
concurrency:
group: pages
cancel-in-progress: false
jobs:
deploy:
# Build job
build:
runs-on: ubuntu-latest
permissions:
pages: write
id-token: write
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
steps:
- uses: actions/checkout@v3
with:
fetch-depth: 0
- uses: actions/setup-node@v3
- name: Checkout
uses: actions/checkout@v3
- name: Setup Node
uses: actions/setup-node@v3
with:
node-version: 16
node-version: 18
cache: npm
- run: npm ci
- name: Build
run: npm run docs:build
- uses: actions/configure-pages@v2
- uses: actions/upload-pages-artifact@v1
cache-dependency-path: docs/package-lock.json
- name: Setup Pages
uses: actions/configure-pages@v3
- name: Install dependencies
run: npm ci # or pnpm install / yarn install
working-directory: docs
- name: Build with VitePress
run: npm run docs:build # or pnpm docs:build / yarn docs:build
working-directory: docs
- name: Upload artifact
uses: actions/upload-pages-artifact@v2
with:
path: docs/.vitepress/dist
- name: Deploy
# Deployment job
deploy:
environment:
name: github-pages
url: ${{ steps.deployment.outputs.page_url }}
needs: build
runs-on: ubuntu-latest
name: Deploy
steps:
- name: Deploy to GitHub Pages
id: deployment
uses: actions/deploy-pages@v1
uses: actions/deploy-pages@v2

@ -2,122 +2,17 @@
[![CI](https://github.com/tair-opensource/RedisShake/actions/workflows/ci.yml/badge.svg?branch=v3)](https://github.com/tair-opensource/RedisShake/actions/workflows/ci.yml)
- [Chinese Document](https://github.com/tair-opensource/RedisShake/wiki)
- [Chinese Document](https://tair-opensource.github.io/RedisShake/)
- [English Document](https://tair-opensource.github.io/RedisShake/en/)
redis-shake is a tool for Redis data migration and data filtering.
## Feature
* 🚄 High performance
* ✅ Tested on Redis 5.0, Redis 6.0 and Redis 7.0
* ✅ Tested on Redis 2.8, Redis 3.0, Redis 4.0, Redis 5.0, Redis 6.0 and Redis 7.0
* 🤗 Support custom filtering rules
* 💪 Support large instance migration
* 💖 Support `restore` mode, `sync` mode and `scan` mode
* ☁ Support Aliyun Redis and ElastiCache
For older versions of redis-shake (with codis and twemproxy support), please
visit [here](https://github.com/tair-opensource/RedisShake/tree/develop).
![redis-shake2.PNG](https://s2.loli.net/2022/07/10/OZrSGutknlI8XNp.png)
![image.png](https://s2.loli.net/2022/06/30/vU346lVBrNofKzu.png)
# Document
## Install
### Binary package
Download from Release: [https://github.com/tair-opensource/RedisShake/releases](https://github.com/tair-opensource/RedisShake/releases)
### Compile from source
After downloading the source code, run the `sh build.sh` command to compile.
```shell
git clone https://github.com/tair-opensource/RedisShake
cd RedisShake
sh build.sh
```
## Usage
1. Edit `sync.toml` or `restore.toml`.
2. Start redis-shake.
```shell
./bin/redis-shake sync.toml
# or
./bin/redis-shake restore.toml
```
3. Check data synchronization status.
## Configure
For the redis-shake configuration file, refer to `sync.toml` or `restore.toml`.
## Data filtering
redis-shake supports custom filtering rules using lua scripts. redis-shake can be started with
the following command:
```shell
./bin/redis-shake sync.toml filter/xxx.lua
```
The following filter templates are provided in the `filter` directory:
1. `filter/print.lua`: print all commands
2. `filter/swap_db.lua`: swap the data of db0 and db1
### Custom filter rules
Refer to `filter/print.lua` to create a new lua script and implement the `filter` function in it (a minimal
sketch follows the lists below). The function's arguments are:
- id: command id
- is_base: whether the command was read from the dump.rdb file
- group: command group; see the description files under [redis/src/commands](https://github.com/redis/redis/tree/unstable/src/commands)
- cmd_name: command name
- keys: keys in the command
- slots: slots in the command
- db_id: database id
- timestamp_ms: timestamp of the command in milliseconds (not supported in the current version)
The return values are:
- code
  - 0: allow this command to pass
  - 1: do not allow this command to pass
  - 2: this command should not appear; redis-shake will exit with an error
- db_id: redirected db_id
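A minimal sketch of such a filter, assuming the argument and return conventions listed above (the `KEYS` check is purely illustrative):
```lua
-- minimal sketch: drop every KEYS command, pass everything else through unchanged
function filter(id, is_base, group, cmd_name, keys, slots, db_id, timestamp_ms)
    if cmd_name == "KEYS" then
        return 1, db_id -- disallow
    end
    return 0, db_id -- allow, keep the original db
end
```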
# Contribution
## Lua script
Contributions of more creative lua scripts are welcome.
1. Add lua scripts under `filters/`.
2. Add a description to `README.md`.
3. Submit a pull request.
## Redis Module support
1. Add code under `internal/rdb/types`.
2. Add a command file under `scripts/commands`, and use the script to generate a `table.go` file and move it to
the `internal/commands` directory.
3. Add test cases under `test/cases`.
4. Submit a pull request.
# Acknowledgements
The legacy version of redis-shake was developed by Alibaba Cloud on top of Wandoujia's open-source redis-port, and supports real-time synchronization between heterogeneous Redis clusters.
redis-shake v3 reorganizes the legacy code base to make it more maintainable.
redis-shake v3 drew on the following projects:
- https://github.com/HDT3213/rdb
- https://github.com/sripathikrishnan/redis-rdb-tools

@ -24,46 +24,46 @@ func main() {
// create reader
var theReader reader.Reader
if v.IsSet("SyncStandaloneReader") {
if v.IsSet("sync_standalone_reader") {
opts := new(reader.SyncStandaloneReaderOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("SyncStandaloneReader", opts)
err := v.UnmarshalKey("sync_standalone_reader", opts)
if err != nil {
log.Panicf("failed to read the SyncReader config entry. err: %v", err)
}
theReader = reader.NewSyncStandaloneReader(opts)
log.Infof("create SyncStandaloneReader: %v", opts.Address)
} else if v.IsSet("SyncClusterReader") {
} else if v.IsSet("sync_cluster_reader") {
opts := new(reader.SyncClusterReaderOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("SyncClusterReader", opts)
err := v.UnmarshalKey("sync_cluster_reader", opts)
if err != nil {
log.Panicf("failed to read the SyncReader config entry. err: %v", err)
}
theReader = reader.NewSyncClusterReader(opts)
log.Infof("create SyncClusterReader: %v", opts.Address)
} else if v.IsSet("ScanStandaloneReader") {
} else if v.IsSet("scan_standalone_reader") {
opts := new(reader.ScanStandaloneReaderOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("ScanStandaloneReader", opts)
err := v.UnmarshalKey("scan_standalone_reader", opts)
if err != nil {
log.Panicf("failed to read the ScanReader config entry. err: %v", err)
}
theReader = reader.NewScanStandaloneReader(opts)
log.Infof("create ScanStandaloneReader: %v", opts.Address)
} else if v.IsSet("ScanClusterReader") {
} else if v.IsSet("scan_cluster_reader") {
opts := new(reader.ScanClusterReaderOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("ScanClusterReader", opts)
err := v.UnmarshalKey("scan_cluster_reader", opts)
if err != nil {
log.Panicf("failed to read the ScanReader config entry. err: %v", err)
}
theReader = reader.NewScanClusterReader(opts)
log.Infof("create ScanClusterReader: %v", opts.Address)
} else if v.IsSet("RdbReader") {
} else if v.IsSet("rdb_reader") {
opts := new(reader.RdbReaderOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("RdbReader", opts)
err := v.UnmarshalKey("rdb_reader", opts)
if err != nil {
log.Panicf("failed to read the RdbReader config entry. err: %v", err)
}
@ -75,19 +75,19 @@ func main() {
// create writer
var theWriter writer.Writer
if v.IsSet("RedisStandaloneWriter") {
if v.IsSet("redis_standalone_writer") {
opts := new(writer.RedisStandaloneWriterOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("RedisStandaloneWriter", opts)
err := v.UnmarshalKey("redis_standalone_writer", opts)
if err != nil {
log.Panicf("failed to read the RedisStandaloneWriter config entry. err: %v", err)
}
theWriter = writer.NewRedisStandaloneWriter(opts)
log.Infof("create RedisStandaloneWriter: %v", opts.Address)
} else if v.IsSet("RedisClusterWriter") {
} else if v.IsSet("redis_cluster_writer") {
opts := new(writer.RedisClusterWriterOptions)
defaults.SetDefaults(opts)
err := v.UnmarshalKey("RedisClusterWriter", opts)
err := v.UnmarshalKey("redis_cluster_writer", opts)
if err != nil {
log.Panicf("failed to read the RedisClusterWriter config entry. err: %v", err)
}

@ -2,28 +2,48 @@ import { defineConfig } from 'vitepress'
// https://vitepress.dev/reference/site-config
export default defineConfig({
title: "RedisShake",
description: "RedisShake is a tool for processing and migrating Redis data.",
themeConfig: {
// https://vitepress.dev/reference/default-theme-config
nav: [
{ text: 'Home', link: '/' },
{ text: 'Examples', link: '/markdown-examples' }
],
sidebar: [
{
text: 'Examples',
items: [
{ text: 'Markdown Examples', link: '/markdown-examples' },
{ text: 'Runtime API Examples', link: '/api-examples' }
]
}
],
socialLinks: [
{ icon: 'github', link: 'https://github.com/tair-opensource/RedisShake' }
]
}
base: "/RedisShake/",
title: "RedisShake",
description: "RedisShake is a tool for processing and migrating Redis data.",
srcDir: './src',
locales: {
root: {
label: '中文',
lang: 'zh', // optional, will be added as `lang` attribute on `html` tag
themeConfig: {
// https://vitepress.dev/reference/default-theme-config
nav: [
{ text: '主页', link: '/' },
{ text: '使用文档', link: '/zh/guide/getting-started' },
{ text: '云原生内存数据库 Tair', link: 'https://www.aliyun.com/product/apsaradb/kvstore/tair' }
],
sidebar: [
{
text: '基础教程',
items: [
{ text: 'RedisShake 简介', link: '/zh/guide/getting-started' },
{ text: '快速上手', link: '/zh/guide/getting-started' },
{ text: '配置文件', link: '/zh/guide/config' }
]
},
{
text: '变换/过滤',
items: [
{ text: '上手使用', link: '/zh/transform/getting-started' },
{ text: '样例', link: '/zh/transform/examples' }
]
}
],
}
},
en: {
label: 'English',
lang: 'en',
},
},
themeConfig: {
socialLinks: [
{ icon: 'github', link: 'https://github.com/tair-opensource/RedisShake' }
],
}
})

File diff suppressed because it is too large

@ -0,0 +1,25 @@
---
# https://vitepress.dev/reference/default-theme-home-page
layout: home
hero:
name: "RedisShake"
# text: "用于 Redis-like 数据库的数据迁移与处理服务"
tagline: 用于 Redis-like 数据库的数据迁移与处理服务
actions:
- theme: brand
text: 快速上手
link: /zh/guide/getting-started
# - theme: alt
# text: 云原生内存数据库Tair
# link: https://www.aliyun.com/product/apsaradb/kvstore/tair
features:
- title: 数据迁移
details: 支持 sync、scan 和 restore 三种数据迁移模式
- title: 数据处理
details: 支持使用 lua 脚本对数据进行过滤与修改
- title: 云数据库支持
details: 兼容主流云厂商的多种架构:主从、集群等
---

@ -0,0 +1,155 @@
---
outline: deep
---
# Configuration File
RedisShake configuration files are written in [TOML](https://toml.io/cn/); every configuration parameter is documented in all.toml.
A configuration file is structured as follows:
```toml
transform = "..."
[xxx_reader]
...
[xxx_writer]
...
[advanced]
...
```
For typical usage, only the `xxx_reader` and `xxx_writer` sections need to be written; the `transform` and `advanced` sections are for advanced usage and can be configured as needed.
## reader configuration
RedisShake provides different Reader configurations for different kinds of sources.
* For sources that support the [Redis Sync/Psync protocol](https://redis.io/docs/management/replication/), `sync_xxx_reader` is recommended.
* For sources that do not support the [Redis Sync/Psync protocol](https://redis.io/docs/management/replication/), `scan_xxx_reader` can be used.
* To restore data from a dump.rdb file, use `rdb_reader`.
### sync_xxx_reader
Use `sync_standalone_reader` when the source is a standalone Redis-like database, and `sync_cluster_reader` when the source is a Redis Cluster.
#### sync_standalone_reader
```toml
[sync_standalone_reader]
address = "127.0.0.1:6379"
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
```
#### sync_cluster_reader
```toml
[sync_cluster_reader]
address = "127.0.0.1:6379"
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
```
### scan_xxx_reader
Use `scan_standalone_reader` when the source is a standalone Redis-like database, and `scan_cluster_reader` when the source is a Redis Cluster.
#### scan_standalone_reader
```toml
[scan_standalone_reader]
address = "127.0.0.1:6379"
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
```
#### scan_cluster_reader
```toml
[scan_cluster_reader]
address = "127.0.0.1:6379"
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
```
### rdb_reader
```toml
[rdb_reader]
filepath = "/path/to/dump.rdb"
```
`filepath` is the path to the dump.rdb file; an absolute path is recommended.
## writer configuration
RedisShake provides different Writer configurations for different kinds of targets.
Currently supported targets:
* Standalone Redis-like database: redis_standalone_writer
* Redis Cluster: redis_cluster_writer
### redis_standalone_writer
```toml
[redis_standalone_writer]
address = "127.0.0.1:6380"
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
```
### redis_cluster_writer
```toml
[redis_cluster_writer]
address = "127.0.0.1:6380"
username = "" # keep empty if not using ACL
password = "" # keep empty if no authentication is required
tls = false
```
## advanced configuration
```toml
[advanced]
dir = "data"
ncpu = 3 # runtime.GOMAXPROCS, 0 means use runtime.NumCPU() cpu cores
pprof_port = 0 # pprof port, 0 means disable
status_port = 0 # status port, 0 means disable
# log
log_file = "shake.log"
log_level = "info" # debug, info or warn
log_interval = 5 # in seconds
# redis-shake reads keys and values from the rdb file and uses the RESTORE command to
# create each key in the target redis. Redis RESTORE returns a "Target key name
# is busy" error when the key already exists. You can use this configuration item
# to change the default behavior of restore:
# panic: redis-shake stops when it meets the "Target key name is busy" error.
# rewrite: redis-shake replaces the key with the new value.
# skip: redis-shake skips restoring the key when it meets the "Target key name is busy" error.
rdb_restore_command_behavior = "rewrite" # panic, rewrite or skip
# redis-shake uses pipeline to improve sending performance.
# This item limits the maximum number of commands in a pipeline.
pipeline_count_limit = 1024
# Client query buffers accumulate new commands. They are limited to a fixed
# amount by default. This amount is normally 1gb.
target_redis_client_max_querybuf_len = 1024_000_000
# In the Redis protocol, bulk requests, that is, elements representing single
# strings, are normally limited to 512 mb.
target_redis_proto_max_bulk_len = 512_000_000
# If the source is ElastiCache or MemoryDB, you can set this item.
aws_psync = ""
```

@ -0,0 +1,48 @@
---
outline: deep
---
# Getting Started
## Installation
### Download the binary
Download directly from: https://github.com/tair-opensource/RedisShake/releases
### Build from source
Make sure a Golang environment is available locally:
```shell
git clone https://github.com/alibaba/RedisShake
cd RedisShake
sh build.sh
```
## Run
Suppose there are two Redis instances:
* Instance A: 127.0.0.1:6379
* Instance B: 127.0.0.1:6380
Create a new file `shake.toml`:
```toml
[sync_standalone_reader]
address = "127.0.0.1:6379"
[redis_standalone_writer]
address = "127.0.0.1:6380"
```
Start redis-shake:
```shell
./redis-shake shake.toml
```
The run log looks like this:
![img](./getting-started.png)

Binary file not shown. (Size: 1.5 MiB)

@ -0,0 +1,65 @@
---
outline: deep
---
# transform examples
## Aliyun compatibility
```lua
-- Aliyun Redis 4.0: skip OPINFO command
function transform(id, is_base, group, cmd_name, keys, slots, db_id, timestamp_ms)
if cmd_name == "OPINFO" then
return 1, db_id -- disallow
else
return 0, db_id -- allow
end
end
```
## AWS compatibility
```lua
-- ElastiCache: skip REPLCONF command
function transform(id, is_base, group, cmd_name, keys, slots, db_id, timestamp_ms)
if cmd_name == "REPLCONF" then
return 1, db_id -- disallow
else
return 0, db_id -- allow
end
end
```
## Filtering commands
### Filter out all lua scripts
```lua
-- skip all scripts, including LUA scripts and Redis Functions.
function filter(id, is_base, group, cmd_name, keys, slots, db_id, timestamp_ms)
if group == "SCRIPTING" then
return 1, db_id -- disallow
else
return 0, db_id -- allow
end
end
```
## Key operations
### Filter keys by prefix
```lua
-- skip keys prefixed with ABC
function filter(id, is_base, group, cmd_name, keys, slots, db_id, timestamp_ms)
if #keys ~= 1 then
return 0, db_id -- allow
end
if string.sub(keys[1], 0, 3) == "ABC" then
return 1, db_id -- disallow
end
return 0, db_id -- allow
end
```
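## Redirect db (sketch)
A minimal sketch, assuming the same `filter` signature and return values as the examples above: every command is allowed, and the second return value redirects it to db 0 (the target db number 0 is an assumption for illustration only).
```lua
-- sketch: allow every command but redirect it to db 0 via the returned db_id
function filter(id, is_base, group, cmd_name, keys, slots, db_id, timestamp_ms)
    return 0, 0 -- allow; the second value is the redirected db_id (0 assumed here)
end
```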

@ -0,0 +1,6 @@
---
outline: deep
---
# Getting Started
TODO

@ -20,10 +20,10 @@ def acl():
inserter.add_data(src, cross_slots_cmd=True)
opts = h.ShakeOpts.create_sync_opts(src, dst)
opts["SyncStandaloneReader"]["username"] = "user0"
opts["SyncStandaloneReader"]["password"] = "password0"
opts["RedisStandaloneWriter"]["username"] = "user1"
opts["RedisStandaloneWriter"]["password"] = "password1"
opts["sync_standalone_reader"]["username"] = "user0"
opts["sync_standalone_reader"]["password"] = "password0"
opts["redis_standalone_writer"]["username"] = "user1"
opts["redis_standalone_writer"]["password"] = "password1"
p.log(f"opts: {opts}")
shake = h.Shake(opts)

@ -29,35 +29,35 @@ class ShakeOpts:
def create_sync_opts(src: Redis, dst: Redis) -> typing.Dict:
d = {}
if src.is_cluster():
d["SyncClusterReader"] = {"address": src.get_address()}
d["sync_cluster_reader"] = {"address": src.get_address()}
else:
d["SyncStandaloneReader"] = {"address": src.get_address()}
d["sync_standalone_reader"] = {"address": src.get_address()}
if dst.is_cluster():
d["RedisClusterWriter"] = {"address": dst.get_address()}
d["redis_cluster_writer"] = {"address": dst.get_address()}
else:
d["RedisStandaloneWriter"] = {"address": dst.get_address()}
d["redis_standalone_writer"] = {"address": dst.get_address()}
return d
@staticmethod
def create_scan_opts(src: Redis, dst: Redis) -> typing.Dict:
d = {}
if src.is_cluster():
d["ScanClusterReader"] = {"address": src.get_address()}
d["scan_cluster_reader"] = {"address": src.get_address()}
else:
d["ScanStandaloneReader"] = {"address": src.get_address()}
d["scan_standalone_reader"] = {"address": src.get_address()}
if dst.is_cluster():
d["RedisClusterWriter"] = {"address": dst.get_address()}
d["redis_cluster_writer"] = {"address": dst.get_address()}
else:
d["RedisStandaloneWriter"] = {"address": dst.get_address()}
d["redis_standalone_writer"] = {"address": dst.get_address()}
return d
@staticmethod
def create_rdb_opts(rdb_path: str, dts: Redis) -> typing.Dict:
d = {"RdbReader": {"filepath": rdb_path}}
d = {"rdb_reader": {"filepath": rdb_path}}
if dts.is_cluster():
d["RedisClusterWriter"] = {"address": dts.get_address()}
d["redis_cluster_writer"] = {"address": dts.get_address()}
else:
d["RedisStandaloneWriter"] = {"address": dts.get_address()}
d["redis_standalone_writer"] = {"address": dts.get_address()}
return d
