diff --git a/Dockerfile.test.gravity b/Dockerfile.test.gravity
index 26ac50ea..93a36d10 100644
--- a/Dockerfile.test.gravity
+++ b/Dockerfile.test.gravity
@@ -1,4 +1,4 @@
-FROM golang:1.11.4
+FROM golang:1.13.3
 
 WORKDIR /gravity
diff --git a/Makefile b/Makefile
index 63d8b87f..d583f8f5 100644
--- a/Makefile
+++ b/Makefile
@@ -27,6 +27,7 @@ dev-down:
 
 go-test:
 	go test -failfast -race ./integration_test
+	cd pkg/registry/test_data && make build
 	go test -timeout 10m -coverprofile=cover.out $(TEST_DIRS) && go tool cover -func=cover.out | tail -n 1
 
 test-local:
@@ -49,8 +50,6 @@ run-dev:
 
 build:
 	$(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/gravity cmd/gravity/main.go
-	#$(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/padder cmd/padder/main.go
-
 build-linux:
 	GOARCH=amd64 GOOS=linux $(GOBUILD) -ldflags '$(LDFLAGS)' -o bin/gravity-linux-amd64 cmd/gravity/main.go
 
@@ -74,7 +73,7 @@ proto:
 	@ which protoc >/dev/null || brew install protobuf
 	@ which protoc-gen-gofast >/dev/null || go get github.com/gogo/protobuf/protoc-gen-gofast
 
-	protoc -I=protocol/msgpb -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf --gofast_out=\
+	protoc -I=protocol/msgpb -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf --gofast_out=.\
 plugins=grpc,\
 Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\
 Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types,\
@@ -82,6 +81,13 @@ proto:
 Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:./pkg/protocol/msgpb \
 protocol/msgpb/message.proto
 
+	protoc -I=protocol/tidb -I=${GOPATH}/src -I=${GOPATH}/src/github.com/gogo/protobuf/protobuf --gofast_out=.\
+Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\
+Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:./pkg/protocol/tidb \
+protocol/tidb/tidb.proto
+
 mock:
 	mockgen -destination ./mock/binlog_checker/mock.go github.com/moiot/gravity/pkg/inputs/helper/binlog_checker BinlogChecker
diff --git a/docker-compose-gravity-test.yml b/docker-compose-gravity-test.yml
index 8f98ab0f..933c8a3c 100644
--- a/docker-compose-gravity-test.yml
+++ b/docker-compose-gravity-test.yml
@@ -1,7 +1,7 @@
 version: '3.2'
 services:
   source-db:
-    image: mysql:5.7.18
+    image: mysql:5.7
     container_name: source-db-test
     environment:
       - MYSQL_ALLOW_EMPTY_PASSWORD=yes
@@ -14,7 +14,7 @@ services:
       - ./mycnf:/etc/mysql/conf.d
 
   target-db:
-    image: mysql:5.7.18
+    image: mysql:5.7
     container_name: target-db-test
     environment:
       - MYSQL_ALLOW_EMPTY_PASSWORD=yes
@@ -62,6 +62,91 @@ services:
       - "ES_JAVA_OPTS=-Xms750m -Xmx750m"
     logging:
       driver: none
+
+  pd:
+    image: pingcap/pd:latest
+    ports:
+      - "2379"
+    volumes:
+      - ./integration_test/config/pd.toml:/pd.toml:ro
+    command:
+      - --name=pd
+      - --client-urls=http://0.0.0.0:2379
+      - --peer-urls=http://0.0.0.0:2380
+      - --advertise-client-urls=http://pd:2379
+      - --advertise-peer-urls=http://pd:2380
+      - --initial-cluster=pd=http://pd:2380
+      - --data-dir=/data/pd
+      - --config=/pd.toml
+    restart: on-failure
+
+  tikv:
+    image: pingcap/tikv:latest
+    volumes:
+      - ./integration_test/config/tikv.toml:/tikv.toml:ro
+    command:
+      - --addr=0.0.0.0:20160
+      - --advertise-addr=tikv:20160
+      - --data-dir=/data/tikv
+      - --pd=pd:2379
+      - --config=/tikv.toml
+    depends_on:
+      - "pd"
+    restart: on-failure
+
+  pump:
+    image: pingcap/tidb-binlog:latest
+    logging:
+      driver: none
+    volumes:
+      - ./integration_test/config/pump.toml:/pump.toml:ro
+    command:
+      - /pump
+      - 
--addr=0.0.0.0:8250 + - --advertise-addr=pump:8250 + - --data-dir=/data/pump + - --node-id=pump + - --pd-urls=http://pd:2379 + - --config=/pump.toml + depends_on: + - "pd" + restart: on-failure + + drainer: + image: pingcap/tidb-binlog:latest + logging: + driver: none + volumes: + - ./integration_test/config/drainer.toml:/drainer.toml:ro + command: + - /drainer + - --addr=drainer:8249 + - --data-dir=/data/data.drainer + - --pd-urls=http://pd:2379 + - --config=/drainer.toml + - --initial-commit-ts=0 + - --dest-db-type=kafka + depends_on: + - "pd" + - "kafka" + restart: on-failure + + tidb: + image: pingcap/tidb:latest + ports: + - "4000:4000" + - "10080:10080" + volumes: + - ./integration_test/config/tidb.toml:/tidb.toml:ro + command: + - --store=tikv + - --path=pd:2379 + - --config=/tidb.toml + - --enable-binlog=true + depends_on: + - "tikv" + - "pump" + restart: on-failure gravity-test: build: @@ -69,6 +154,7 @@ services: dockerfile: Dockerfile.test.gravity depends_on: - mongo + - tidb environment: - MONGO_HOST=mongo - KAFKA_BROKER=kafka:9092 @@ -78,4 +164,5 @@ services: "--", "./wait-for-it.sh", "mongo:27017", "-t", "0", "--", "./wait-for-it.sh", "kafka:9092", "-t", "0", "--", "./wait-for-it.sh", "elasticsearch:9200", "-t", "0", + "--", "./wait-for-it.sh", "tidb:4000", "-t", "0", "--", "make", "go-test"] diff --git a/docs/2.0/example-tidb2tidb.toml b/docs/2.0/example-tidb2tidb.toml new file mode 100644 index 00000000..ead56de7 --- /dev/null +++ b/docs/2.0/example-tidb2tidb.toml @@ -0,0 +1,58 @@ +name = "tidb2tidbDemo" +version = "1.0" + +[input] +type = "tidbkafka" +mode = "stream" + +[input.config] +ignore-bidirectional-data = true + +[input.config.position-repo] +type = "mysql-repo" +[input.config.position-repo.config.source] +host = "127.0.0.1" +username = "root" +password = "" +port = 4000 + +[input.config.source-db] +host = "127.0.0.1" +username = "root" +password = "" +port = 4000 + +[input.config.source-kafka] +topics = ["obinlog"] +consume-from = "oldest" +group-id = "tidb2tidbDemo" +[input.config.source-kafka.brokers] +broker-addrs = ["localhost:9092", "localhost:9093", "localhost:9094"] + +[output] +type = "mysql" + +[output.config] +enable-ddl = true + +[output.config.target] +host = "127.0.0.1" +username = "root" +password = "" +port = 4000 +max-open = 30 # optional, max connections +max-idle = 30 # optional, suggest to be the same as max-open + +# The definition of the routing rule +[[output.config.routes]] +match-schema = "test" +match-table = "t" +target-table = "t2" + +[scheduler] +type = "batch-table-scheduler" +[scheduler.config] +nr-worker = 30 +batch-size = 1000 +queue-size = 1024 +sliding-window-size = 16384 \ No newline at end of file diff --git a/integration_test/config/drainer.toml b/integration_test/config/drainer.toml new file mode 100644 index 00000000..87114974 --- /dev/null +++ b/integration_test/config/drainer.toml @@ -0,0 +1,101 @@ +# drainer Configuration. + + # addr (i.e. 'host:port') to listen on for drainer connections +# will register this addr into etcd +# addr = "127.0.0.1:8249" + + # the interval time (in seconds) of detect pumps' status +detect-interval = 10 + + # drainer meta data directory path +data-dir = "data.drainer" + + # a comma separated list of PD endpoints +pd-urls = "http://127.0.0.1:2379" + + # Use the specified compressor to compress payload between pump and drainer +compressor = "" + + #[security] +# Path of file that contains list of trusted SSL CAs for connection with cluster components. 
+# ssl-ca = "/path/to/ca.pem" +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +# ssl-cert = "/path/to/pump.pem" +# Path of file that contains X509 key in PEM format for connection with cluster components. +# ssl-key = "/path/to/pump-key.pem" + + # syncer Configuration. +[syncer] + + # Assume the upstream sql-mode. +# If this is setted , will use the same sql-mode to parse DDL statment, and set the same sql-mode at downstream when db-type is mysql. +# If this is not setted, it will not set any sql-mode. +# sql-mode = "STRICT_TRANS_TABLES,NO_ENGINE_SUBSTITUTION" + + # number of binlog events in a transaction batch +txn-batch = 20 + + # work count to execute binlogs +# if the latency between drainer and downstream(mysql or tidb) are too high, you might want to increase this +# to get higher throughput by higher concurrent write to the downstream +worker-count = 16 + +#disable-dispatch = false + + # safe mode will split update to delete and insert +safe-mode = false + + # downstream storage, equal to --dest-db-type +# valid values are "mysql", "file", "tidb", "flash", "kafka" +db-type = "kafka" + + # disable sync these schema +ignore-schemas = "INFORMATION_SCHEMA,PERFORMANCE_SCHEMA,mysql" + + ##replicate-do-db priority over replicate-do-table if have same db name +##and we support regex expression , start with '~' declare use regex expression. +# +#replicate-do-db = ["~^b.*","s1"] + + #[[syncer.replicate-do-table]] +#db-name ="test" +#tbl-name = "log" + + #[[syncer.replicate-do-table]] +#db-name ="test" +#tbl-name = "~^a.*" + + # disable sync these table +#[[syncer.ignore-table]] +#db-name = "test" +#tbl-name = "log" + + # the downstream mysql protocol database +#[syncer.to] +#host = "127.0.0.1" +#user = "root" +#password = "" +#port = 3306 + + [syncer.to.checkpoint] +# you can uncomment this to change the database to save checkpoint when the downstream is mysql or tidb +#schema = "tidb_binlog" + + # Uncomment this if you want to use file as db-type. +#[syncer.to] +# directory to save binlog file, default same as data-dir(save checkpoint file) if this is not configured. +# dir = "data.drainer" + + + # when db-type is kafka, you can uncomment this to config the down stream kafka, it will be the globle config kafka default +[syncer.to] +# only need config one of zookeeper-addrs and kafka-addrs, will get kafka address if zookeeper-addrs is configed. +# zookeeper-addrs = "127.0.0.1:2181" + kafka-addrs = "kafka:9092" + kafka-version = "5.1.0" + kafka-max-messages = 1024 +# +# +# the topic name drainer will push msg, the default name is _obinlog +# be careful don't use the same name if run multi drainer instances +topic-name = "obinlog" diff --git a/integration_test/config/pd.toml b/integration_test/config/pd.toml new file mode 100644 index 00000000..35da9d4d --- /dev/null +++ b/integration_test/config/pd.toml @@ -0,0 +1,87 @@ +# PD Configuration. + +name = "pd" +data-dir = "default.pd" + +client-urls = "http://127.0.0.1:2379" +# if not set, use ${client-urls} +advertise-client-urls = "" + +peer-urls = "http://127.0.0.1:2380" +# if not set, use ${peer-urls} +advertise-peer-urls = "" + +initial-cluster = "pd=http://127.0.0.1:2380" +initial-cluster-state = "new" + +lease = 3 +tso-save-interval = "3s" + +[security] +# Path of file that contains list of trusted SSL CAs. if set, following four settings shouldn't be empty +cacert-path = "" +# Path of file that contains X509 certificate in PEM format. 
+cert-path = "" +# Path of file that contains X509 key in PEM format. +key-path = "" + +[log] +level = "error" + +# log format, one of json, text, console +#format = "text" + +# disable automatic timestamps in output +#disable-timestamp = false + +# file logging +[log.file] +#filename = "" +# max log file size in MB +#max-size = 300 +# max log file keep days +#max-days = 28 +# maximum number of old log files to retain +#max-backups = 7 +# rotate log by day +#log-rotate = true + +[metric] +# prometheus client push interval, set "0s" to disable prometheus. +interval = "15s" +# prometheus pushgateway address, leaves it empty will disable prometheus. +# address = "pushgateway:9091" +address = "" + +[schedule] +max-merge-region-size = 0 +split-merge-interval = "1h" +max-snapshot-count = 3 +max-pending-peer-count = 16 +max-store-down-time = "30m" +leader-schedule-limit = 4 +region-schedule-limit = 4 +replica-schedule-limit = 8 +merge-schedule-limit = 8 +tolerant-size-ratio = 5.0 + +# customized schedulers, the format is as below +# if empty, it will use balance-leader, balance-region, hot-region as default +# [[schedule.schedulers]] +# type = "evict-leader" +# args = ["1"] + +[replication] +# The number of replicas for each region. +max-replicas = 3 +# The label keys specified the location of a store. +# The placement priorities is implied by the order of label keys. +# For example, ["zone", "rack"] means that we should place replicas to +# different zones first, then to different racks if we don't have enough zones. +location-labels = [] + +[label-property] +# Do not assign region leaders to stores that have these tags. +# [[label-property.reject-leader]] +# key = "zone" +# value = "cn1 diff --git a/integration_test/config/pump.toml b/integration_test/config/pump.toml new file mode 100644 index 00000000..30871719 --- /dev/null +++ b/integration_test/config/pump.toml @@ -0,0 +1,46 @@ +# pump Configuration. + + # addr(i.e. 'host:port') to listen on for client traffic +addr = "127.0.0.1:8250" + + # addr(i.e. 'host:port') to advertise to the public +advertise-addr = "" + + # a integer value to control expiry date of the binlog data, indicates for how long (in days) the binlog data would be stored. +# must bigger than 0 +gc = 7 + + # path to the data directory of pump's data +data-dir = "data.pump" + + # number of seconds between heartbeat ticks (in 2 seconds) +heartbeat-interval = 2 + + # a comma separated list of PD endpoints +pd-urls = "http://127.0.0.1:2379" + + #[security] +# Path of file that contains list of trusted SSL CAs for connection with cluster components. +# ssl-ca = "/path/to/ca.pem" +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +# ssl-cert = "/path/to/drainer.pem" +# Path of file that contains X509 key in PEM format for connection with cluster components. +# ssl-key = "/path/to/drainer-key.pem" +# +[storage] +stop-write-at-available-space = 100 +# Set to `true` (default) for best reliability, which prevents data loss when there is a power failure. 
+# sync-log = true +# +# we suggest using the default config of the embedded LSM DB now, do not change it useless you know what you are doing +#[storage.kv] +# block-cache-capacity = 8388608 +# block-restart-interval = 16 +# block-size = 4096 +# compaction-L0-trigger = 8 +# compaction-table-size = 67108864 +# compaction-total-size = 536870912 +# compaction-total-size-multiplier = 8.0 +# write-buffer = 67108864 +# write-L0-pause-trigger = 24 +# write-L0-slowdown-trigger = 17 diff --git a/integration_test/config/tidb.toml b/integration_test/config/tidb.toml new file mode 100644 index 00000000..6f64b8d7 --- /dev/null +++ b/integration_test/config/tidb.toml @@ -0,0 +1,241 @@ +# TiDB Configuration. + +# TiDB server host. +host = "0.0.0.0" + +# TiDB server port. +port = 4000 + +# Registered store name, [tikv, mocktikv] +store = "tikv" + +# TiDB storage path. +path = "/tmp/tidb" + +# The socket file to use for connection. +socket = "" + +# Run ddl worker on this tidb-server. +run-ddl = true + +# Schema lease duration, very dangerous to change only if you know what you do. +lease = "0" + +# When create table, split a separated region for it. It is recommended to +# turn off this option if there will be a large number of tables created. +split-table = true + +# The limit of concurrent executed sessions. +token-limit = 1000 + +# Only print a log when out of memory quota. +# Valid options: ["log", "cancel"] +oom-action = "log" + +# Set the memory quota for a query in bytes. Default: 32GB +mem-quota-query = 34359738368 + +# Enable coprocessor streaming. +enable-streaming = false + +# Set system variable 'lower_case_table_names' +lower-case-table-names = 2 + +[log] +# Log level: debug, info, warn, error, fatal. +level = "error" + +# Log format, one of json, text, console. +format = "text" + +# Disable automatic timestamp in output +disable-timestamp = false + +# Stores slow query log into separated files. +slow-query-file = "" + +# Queries with execution time greater than this value will be logged. (Milliseconds) +slow-threshold = 300 + +# Queries with internal result greater than this value will be logged. +expensive-threshold = 10000 + +# Maximum query length recorded in log. +query-log-max-len = 2048 + +# File logging. +[log.file] +# Log file name. +filename = "" + +# Max log file size in MB (upper limit to 4096MB). +max-size = 300 + +# Max log file keep days. No clean up by default. +max-days = 0 + +# Maximum number of old log files to retain. No clean up by default. +max-backups = 0 + +# Rotate log by day +log-rotate = true + +[security] +# Path of file that contains list of trusted SSL CAs for connection with mysql client. +ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with mysql client. +ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with mysql client. +ssl-key = "" + +# Path of file that contains list of trusted SSL CAs for connection with cluster components. +cluster-ssl-ca = "" + +# Path of file that contains X509 certificate in PEM format for connection with cluster components. +cluster-ssl-cert = "" + +# Path of file that contains X509 key in PEM format for connection with cluster components. +cluster-ssl-key = "" + +[status] +# If enable status report HTTP service. +report-status = true + +# TiDB status port. +status-port = 10080 + +# Prometheus pushgateway address, leaves it empty will disable prometheus push. 
+# metrics-addr = "pushgateway:9091" +metrics-addr = "" + +# Prometheus client push interval in second, set \"0\" to disable prometheus push. +metrics-interval = 15 + +[performance] +txn-total-size-limit = 104857599 +# Max CPUs to use, 0 use number of CPUs in the machine. +max-procs = 0 +# StmtCountLimit limits the max count of statement inside a transaction. +stmt-count-limit = 5000 + +# Set keep alive option for tcp connection. +tcp-keep-alive = true + +# The maximum number of retries when commit a transaction. +retry-limit = 10 + +# Whether support cartesian product. +cross-join = true + +# Stats lease duration, which influences the time of analyze and stats load. +stats-lease = "3s" + +# Run auto analyze worker on this tidb-server. +run-auto-analyze = true + +# Probability to use the query feedback to update stats, 0 or 1 for always false/true. +feedback-probability = 0.0 + +# The max number of query feedback that cache in memory. +query-feedback-limit = 1024 + +# Pseudo stats will be used if the ratio between the modify count and +# row count in statistics of a table is greater than it. +pseudo-estimate-ratio = 0.7 + +[proxy-protocol] +# PROXY protocol acceptable client networks. +# Empty string means disable PROXY protocol, * means all networks. +networks = "" + +# PROXY protocol header read timeout, unit is second +header-timeout = 5 + +[plan-cache] +enabled = false +capacity = 2560 +shards = 256 + +[prepared-plan-cache] +enabled = false +capacity = 100 + +[opentracing] +# Enable opentracing. +enable = false + +# Whether to enable the rpc metrics. +rpc-metrics = false + +[opentracing.sampler] +# Type specifies the type of the sampler: const, probabilistic, rateLimiting, or remote +type = "const" + +# Param is a value passed to the sampler. +# Valid values for Param field are: +# - for "const" sampler, 0 or 1 for always false/true respectively +# - for "probabilistic" sampler, a probability between 0 and 1 +# - for "rateLimiting" sampler, the number of spans per second +# - for "remote" sampler, param is the same as for "probabilistic" +# and indicates the initial sampling rate before the actual one +# is received from the mothership +param = 1.0 + +# SamplingServerURL is the address of jaeger-agent's HTTP sampling server +sampling-server-url = "" + +# MaxOperations is the maximum number of operations that the sampler +# will keep track of. If an operation is not tracked, a default probabilistic +# sampler will be used rather than the per operation specific sampler. +max-operations = 0 + +# SamplingRefreshInterval controls how often the remotely controlled sampler will poll +# jaeger-agent for the appropriate sampling strategy. +sampling-refresh-interval = 0 + +[opentracing.reporter] +# QueueSize controls how many spans the reporter can keep in memory before it starts dropping +# new spans. The queue is continuously drained by a background go-routine, as fast as spans +# can be sent out of process. +queue-size = 0 + +# BufferFlushInterval controls how often the buffer is force-flushed, even if it's not full. +# It is generally not useful, as it only matters for very low traffic services. +buffer-flush-interval = 0 + +# LogSpans, when true, enables LoggingReporter that runs in parallel with the main reporter +# and logs all submitted spans. Main Configuration.Logger must be initialized in the code +# for this option to have any effect. 
+log-spans = false + +# LocalAgentHostPort instructs reporter to send spans to jaeger-agent at this address +local-agent-host-port = "" + +[tikv-client] +# Max gRPC connections that will be established with each tikv-server. +grpc-connection-count = 16 + +# After a duration of this time in seconds if the client doesn't see any activity it pings +# the server to see if the transport is still alive. +grpc-keepalive-time = 10 + +# After having pinged for keepalive check, the client waits for a duration of Timeout in seconds +# and if no activity is seen even after that the connection is closed. +grpc-keepalive-timeout = 3 + +# max time for commit command, must be twice bigger than raft election timeout. +commit-timeout = "41s" + +[binlog] + +# Socket file to write binlog. +binlog-socket = "" + +# WriteTimeout specifies how long it will wait for writing binlog to pump. +write-timeout = "15s" + +# If IgnoreError is true, when writting binlog meets error, TiDB would stop writting binlog, +# but still provide service. +ignore-error = false diff --git a/integration_test/config/tikv.toml b/integration_test/config/tikv.toml new file mode 100644 index 00000000..63e792dd --- /dev/null +++ b/integration_test/config/tikv.toml @@ -0,0 +1,497 @@ +# TiKV config template +# Human-readable big numbers: +# File size(based on byte): KB, MB, GB, TB, PB +# e.g.: 1_048_576 = "1MB" +# Time(based on ms): ms, s, m, h +# e.g.: 78_000 = "1.3m" + +# log level: trace, debug, info, warn, error, off. +log-level = "error" +# file to store log, write to stderr if it's empty. +# log-file = "" + +[readpool.storage] +# size of thread pool for high-priority operations +# high-concurrency = 4 +# size of thread pool for normal-priority operations +# normal-concurrency = 4 +# size of thread pool for low-priority operations +# low-concurrency = 4 +# max running high-priority operations, reject if exceed +# max-tasks-high = 8000 +# max running normal-priority operations, reject if exceed +# max-tasks-normal = 8000 +# max running low-priority operations, reject if exceed +# max-tasks-low = 8000 +# size of stack size for each thread pool +# stack-size = "10MB" + +[readpool.coprocessor] +# Notice: if CPU_NUM > 8, default thread pool size for coprocessors +# will be set to CPU_NUM * 0.8. + +# high-concurrency = 8 +# normal-concurrency = 8 +# low-concurrency = 8 +# max-tasks-high = 16000 +# max-tasks-normal = 16000 +# max-tasks-low = 16000 +# stack-size = "10MB" + +[server] +# set listening address. +# addr = "127.0.0.1:20160" +# set advertise listening address for client communication, if not set, use addr instead. +# advertise-addr = "" +# notify capacity, 40960 is suitable for about 7000 regions. +# notify-capacity = 40960 +# maximum number of messages can be processed in one tick. +# messages-per-tick = 4096 + +# compression type for grpc channel, available values are no, deflate and gzip. +# grpc-compression-type = "no" +# size of thread pool for grpc server. +# grpc-concurrency = 4 +# The number of max concurrent streams/requests on a client connection. +# grpc-concurrent-stream = 1024 +# The number of connections with each tikv server to send raft messages. +# grpc-raft-conn-num = 10 +# Amount to read ahead on individual grpc streams. +# grpc-stream-initial-window-size = "2MB" + +# How many snapshots can be sent concurrently. +# concurrent-send-snap-limit = 32 +# How many snapshots can be recv concurrently. +# concurrent-recv-snap-limit = 32 + +# max count of tasks being handled, new tasks will be rejected. 
+# end-point-max-tasks = 2000 + +# max recursion level allowed when decoding dag expression +# end-point-recursion-limit = 1000 + +# max time to handle coprocessor request before timeout +# end-point-request-max-handle-duration = "60s" + +# the max bytes that snapshot can be written to disk in one second, +# should be set based on your disk performance +# snap-max-write-bytes-per-sec = "100MB" + +# set attributes about this server, e.g. { zone = "us-west-1", disk = "ssd" }. +# labels = {} + +[storage] +# set the path to rocksdb directory. +# data-dir = "/tmp/tikv/store" + +# notify capacity of scheduler's channel +# scheduler-notify-capacity = 10240 + +# maximum number of messages can be processed in one tick +# scheduler-messages-per-tick = 1024 + +# the number of slots in scheduler latches, concurrency control for write. +# scheduler-concurrency = 2048000 + +# scheduler's worker pool size, should increase it in heavy write cases, +# also should less than total cpu cores. +# scheduler-worker-pool-size = 4 + +# When the pending write bytes exceeds this threshold, +# the "scheduler too busy" error is displayed. +# scheduler-pending-write-threshold = "100MB" + +[pd] +# pd endpoints +# endpoints = [] + +[metric] +# the Prometheus client push interval. Setting the value to 0s stops Prometheus client from pushing. +# interval = "15s" +# the Prometheus pushgateway address. Leaving it empty stops Prometheus client from pushing. +# address = "pushgateway:9091" +# the Prometheus client push job name. Note: A node id will automatically append, e.g., "tikv_1". +# job = "tikv" + +[raftstore] +# true (default value) for high reliability, this can prevent data loss when power failure. +# sync-log = true + +# set the path to raftdb directory, default value is data-dir/raft +# raftdb-path = "" + +# set store capacity, if no set, use disk capacity. +# capacity = 0 + +# notify capacity, 40960 is suitable for about 7000 regions. +# notify-capacity = 40960 + +# maximum number of messages can be processed in one tick. +# messages-per-tick = 4096 + +# Region heartbeat tick interval for reporting to pd. +# pd-heartbeat-tick-interval = "60s" +# Store heartbeat tick interval for reporting to pd. +# pd-store-heartbeat-tick-interval = "10s" + +# When region size changes exceeds region-split-check-diff, we should check +# whether the region should be split or not. +# region-split-check-diff = "6MB" + +# Interval to check region whether need to be split or not. +# split-region-check-tick-interval = "10s" + +# When raft entry exceed the max size, reject to propose the entry. +# raft-entry-max-size = "8MB" + +# Interval to gc unnecessary raft log. +# raft-log-gc-tick-interval = "10s" +# A threshold to gc stale raft log, must >= 1. +# raft-log-gc-threshold = 50 +# When entry count exceed this value, gc will be forced trigger. +# raft-log-gc-count-limit = 72000 +# When the approximate size of raft log entries exceed this value, gc will be forced trigger. +# It's recommanded to set it to 3/4 of region-split-size. +# raft-log-gc-size-limit = "72MB" + +# When a peer hasn't been active for max-peer-down-duration, +# we will consider this peer to be down and report it to pd. +# max-peer-down-duration = "5m" + +# Interval to check whether start manual compaction for a region, +# region-compact-check-interval = "5m" +# Number of regions for each time to check. +# region-compact-check-step = 100 +# The minimum number of delete tombstones to trigger manual compaction. 
+# region-compact-min-tombstones = 10000 +# Interval to check whether should start a manual compaction for lock column family, +# if written bytes reach lock-cf-compact-threshold for lock column family, will fire +# a manual compaction for lock column family. +# lock-cf-compact-interval = "10m" +# lock-cf-compact-bytes-threshold = "256MB" + +# Interval (s) to check region whether the data are consistent. +# consistency-check-interval = 0 + +# Use delete range to drop a large number of continuous keys. +# use-delete-range = false + +# delay time before deleting a stale peer +# clean-stale-peer-delay = "10m" + +# Interval to cleanup import sst files. +# cleanup-import-sst-interval = "10m" + +[coprocessor] +# When it is true, it will try to split a region with table prefix if +# that region crosses tables. It is recommended to turn off this option +# if there will be a large number of tables created. +# split-region-on-table = true +# When the region's size exceeds region-max-size, we will split the region +# into two which the left region's size will be region-split-size or a little +# bit smaller. +# region-max-size = "144MB" +# region-split-size = "96MB" + +[rocksdb] +# Maximum number of concurrent background jobs (compactions and flushes) +# max-background-jobs = 8 + +# This value represents the maximum number of threads that will concurrently perform a +# compaction job by breaking it into multiple, smaller ones that are run simultaneously. +# Default: 1 (i.e. no subcompactions) +# max-sub-compactions = 1 + +# Number of open files that can be used by the DB. You may need to +# increase this if your database has a large working set. Value -1 means +# files opened are always kept open. You can estimate number of files based +# on target_file_size_base and target_file_size_multiplier for level-based +# compaction. +# If max-open-files = -1, RocksDB will prefetch index and filter blocks into +# block cache at startup, so if your database has a large working set, it will +# take several minutes to open the db. +max-open-files = 1024 + +# Max size of rocksdb's MANIFEST file. +# For detailed explanation please refer to https://github.com/facebook/rocksdb/wiki/MANIFEST +# max-manifest-file-size = "20MB" + +# If true, the database will be created if it is missing. +# create-if-missing = true + +# rocksdb wal recovery mode +# 0 : TolerateCorruptedTailRecords, tolerate incomplete record in trailing data on all logs; +# 1 : AbsoluteConsistency, We don't expect to find any corruption in the WAL; +# 2 : PointInTimeRecovery, Recover to point-in-time consistency; +# 3 : SkipAnyCorruptedRecords, Recovery after a disaster; +# wal-recovery-mode = 2 + +# rocksdb write-ahead logs dir path +# This specifies the absolute dir path for write-ahead logs (WAL). +# If it is empty, the log files will be in the same dir as data. +# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set +# wal-dir to a directory on a persistent storage. +# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database +# wal-dir = "/tmp/tikv/store" + +# The following two fields affect how archived write-ahead logs will be deleted. +# 1. If both set to 0, logs will be deleted asap and will not get into the archive. +# 2. If wal-ttl-seconds is 0 and wal-size-limit is not 0, +# WAL files will be checked every 10 min and if total size is greater +# then wal-size-limit, they will be deleted starting with the +# earliest until size_limit is met. All empty files will be deleted. 
+# 3. If wal-ttl-seconds is not 0 and wal-size-limit is 0, then +# WAL files will be checked every wal-ttl-seconds / 2 and those that +# are older than wal-ttl-seconds will be deleted. +# 4. If both are not 0, WAL files will be checked every 10 min and both +# checks will be performed with ttl being first. +# When you set the path to rocksdb directory in memory like in /dev/shm, you may want to set +# wal-ttl-seconds to a value greater than 0 (like 86400) and backup your db on a regular basis. +# See https://github.com/facebook/rocksdb/wiki/How-to-persist-in-memory-RocksDB-database +# wal-ttl-seconds = 0 +# wal-size-limit = 0 + +# rocksdb max total wal size +# max-total-wal-size = "4GB" + +# Rocksdb Statistics provides cumulative stats over time. +# Turn statistics on will introduce about 5%-10% overhead for RocksDB, +# but it is worthy to know the internal status of RocksDB. +# enable-statistics = true + +# Dump statistics periodically in information logs. +# Same as rocksdb's default value (10 min). +# stats-dump-period = "10m" + +# Due to Rocksdb FAQ: https://github.com/facebook/rocksdb/wiki/RocksDB-FAQ, +# If you want to use rocksdb on multi disks or spinning disks, you should set value at +# least 2MB; +# compaction-readahead-size = 0 + +# This is the maximum buffer size that is used by WritableFileWrite +# writable-file-max-buffer-size = "1MB" + +# Use O_DIRECT for both reads and writes in background flush and compactions +# use-direct-io-for-flush-and-compaction = false + +# Limit the disk IO of compaction and flush. Compaction and flush can cause +# terrible spikes if they exceed a certain threshold. Consider setting this to +# 50% ~ 80% of the disk throughput for a more stable result. However, in heavy +# write workload, limiting compaction and flush speed can cause write stalls too. +# rate-bytes-per-sec = 0 + +# Enable or disable the pipelined write +# enable-pipelined-write = true + +# Allows OS to incrementally sync files to disk while they are being +# written, asynchronously, in the background. +# bytes-per-sync = "0MB" + +# Allows OS to incrementally sync WAL to disk while it is being written. +# wal-bytes-per-sync = "0KB" + +# Specify the maximal size of the Rocksdb info log file. If the log file +# is larger than `max_log_file_size`, a new info log file will be created. +# If max_log_file_size == 0, all logs will be written to one log file. +# Default: 1GB +# info-log-max-size = "1GB" + +# Time for the Rocksdb info log file to roll (in seconds). +# If specified with non-zero value, log file will be rolled +# if it has been active longer than `log_file_time_to_roll`. +# Default: 0 (disabled) +# info-log-roll-time = "0" + +# Maximal Rocksdb info log files to be kept. +# Default: 10 +# info-log-keep-log-file-num = 10 + +# This specifies the Rocksdb info LOG dir. +# If it is empty, the log files will be in the same dir as data. +# If it is non empty, the log files will be in the specified dir, +# and the db data dir's absolute path will be used as the log file +# name's prefix. +# Default: empty +# info-log-dir = "" + +# Column Family default used to store actual data of the database. +[rocksdb.defaultcf] +# compression method (if any) is used to compress a block. 
+# no: kNoCompression +# snappy: kSnappyCompression +# zlib: kZlibCompression +# bzip2: kBZip2Compression +# lz4: kLZ4Compression +# lz4hc: kLZ4HCCompression +# zstd: kZSTD + +# per level compression +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] + +# Approximate size of user data packed per block. Note that the +# block size specified here corresponds to uncompressed data. +# block-size = "64KB" + +# If you're doing point lookups you definitely want to turn bloom filters on, We use +# bloom filters to avoid unnecessary disk reads. Default bits_per_key is 10, which +# yields ~1% false positive rate. Larger bits_per_key values will reduce false positive +# rate, but increase memory usage and space amplification. +# bloom-filter-bits-per-key = 10 + +# false means one sst file one bloom filter, true means evry block has a corresponding bloom filter +# block-based-bloom-filter = false + +# level0-file-num-compaction-trigger = 4 + +# Soft limit on number of level-0 files. We start slowing down writes at this point. +# level0-slowdown-writes-trigger = 20 + +# Maximum number of level-0 files. We stop writes at this point. +# level0-stop-writes-trigger = 36 + +# Amount of data to build up in memory (backed by an unsorted log +# on disk) before converting to a sorted on-disk file. +# write-buffer-size = "128MB" + +# The maximum number of write buffers that are built up in memory. +# max-write-buffer-number = 5 + +# The minimum number of write buffers that will be merged together +# before writing to storage. +# min-write-buffer-number-to-merge = 1 + +# Control maximum total data size for base level (level 1). +# max-bytes-for-level-base = "512MB" + +# Target file size for compaction. +# target-file-size-base = "8MB" + +# Max bytes for compaction.max_compaction_bytes +# max-compaction-bytes = "2GB" + +# There are four different algorithms to pick files to compact. +# 0 : ByCompensatedSize +# 1 : OldestLargestSeqFirst +# 2 : OldestSmallestSeqFirst +# 3 : MinOverlappingRatio +# compaction-pri = 3 + +# block-cache used to cache uncompressed blocks, big block-cache can speed up read. +# in normal cases should tune to 30%-50% system's total memory. +# block-cache-size = "1GB" + +# Indicating if we'd put index/filter blocks to the block cache. +# If not specified, each "table reader" object will pre-load index/filter block +# during table initialization. +# cache-index-and-filter-blocks = true + +# Pin level0 filter and index blocks in cache. +# pin-l0-filter-and-index-blocks = true + +# Enable read amplication statistics. +# value => memory usage (percentage of loaded blocks memory) +# 1 => 12.50 % +# 2 => 06.25 % +# 4 => 03.12 % +# 8 => 01.56 % +# 16 => 00.78 % +# read-amp-bytes-per-bit = 0 + +# Pick target size of each level dynamically. +# dynamic-level-bytes = true + +# Options for Column Family write +# Column Family write used to store commit informations in MVCC model +[rocksdb.writecf] +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# in normal cases should tune to 10%-30% system's total memory. 
+# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 3 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[rocksdb.lockcf] +# compression-per-level = ["no", "no", "no", "no", "no", "no", "no"] +# block-size = "16KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "128MB" +# target-file-size-base = "8MB" +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 1 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[raftdb] +# max-sub-compactions = 1 +max-open-files = 1024 +# max-manifest-file-size = "20MB" +# create-if-missing = true + +# enable-statistics = true +# stats-dump-period = "10m" + +# compaction-readahead-size = 0 +# writable-file-max-buffer-size = "1MB" +# use-direct-io-for-flush-and-compaction = false +# enable-pipelined-write = true +# allow-concurrent-memtable-write = false +# bytes-per-sync = "0MB" +# wal-bytes-per-sync = "0KB" + +# info-log-max-size = "1GB" +# info-log-roll-time = "0" +# info-log-keep-log-file-num = 10 +# info-log-dir = "" + +[raftdb.defaultcf] +# compression-per-level = ["no", "no", "lz4", "lz4", "lz4", "zstd", "zstd"] +# block-size = "64KB" +# write-buffer-size = "128MB" +# max-write-buffer-number = 5 +# min-write-buffer-number-to-merge = 1 +# max-bytes-for-level-base = "512MB" +# target-file-size-base = "8MB" + +# should tune to 256MB~2GB. +# block-cache-size = "256MB" +# level0-file-num-compaction-trigger = 4 +# level0-slowdown-writes-trigger = 20 +# level0-stop-writes-trigger = 36 +# cache-index-and-filter-blocks = true +# pin-l0-filter-and-index-blocks = true +# compaction-pri = 0 +# read-amp-bytes-per-bit = 0 +# dynamic-level-bytes = true + +[security] +# set the path for certificates. Empty string means disabling secure connectoins. +# ca-path = "" +# cert-path = "" +# key-path = "" + +[import] +# the directory to store importing kv data. +# import-dir = "/tmp/tikv/import" +# number of threads to handle RPC requests. +# num-threads = 8 +# stream channel window size, stream will be blocked on channel full. 
+# stream-channel-window = 128 diff --git a/integration_test/tidb_tidb_test.go b/integration_test/tidb_tidb_test.go new file mode 100644 index 00000000..f5a0d330 --- /dev/null +++ b/integration_test/tidb_tidb_test.go @@ -0,0 +1,152 @@ +package integration + +import ( + "fmt" + "strings" + "testing" + "time" + + "github.com/moiot/gravity/pkg/inputs" + "github.com/moiot/gravity/pkg/outputs/mysql" + "github.com/moiot/gravity/pkg/sql_execution_engine" + "github.com/moiot/gravity/pkg/utils" + + "github.com/stretchr/testify/require" + + "github.com/moiot/gravity/pkg/app" + "github.com/moiot/gravity/pkg/config" + "github.com/moiot/gravity/pkg/mysql_test" +) + +func TestBidirection(t *testing.T) { + r := require.New(t) + + sourceDBName := strings.ToLower(t.Name()) + "_source" + targetDBName := strings.ToLower(t.Name()) + "_target" + + sourceDBConfig := config.DBConfig{ + Host: "tidb", + Username: "root", + Port: 4000, + } + targetDBConfig := config.DBConfig{ + Host: "tidb", + Username: "root", + Port: 4000, + } + + sourceDB, err := utils.CreateDBConnection(&sourceDBConfig) + r.NoError(err) + defer sourceDB.Close() + + r.NoError(mysql_test.SetupTestDB(sourceDB, sourceDBName)) + + err = utils.InitInternalTxnTags(sourceDB) + r.NoError(err) + + targetDB, err := utils.CreateDBConnection(&targetDBConfig) + r.NoError(err) + defer targetDB.Close() + + r.NoError(mysql_test.SetupTestDB(targetDB, targetDBName)) + + pipelineConfig := config.PipelineConfigV3{ + PipelineName: t.Name(), + Version: config.PipelineConfigV3Version, + InputPlugin: config.InputConfig{ + Type: inputs.TiDB, + Mode: config.Stream, + Config: utils.MustAny2Map(config.SourceTiDBConfig{ + SourceDB: &sourceDBConfig, + SourceKafka: &config.SourceKafkaConfig{ + BrokerConfig: config.KafkaGlobalConfig{ + BrokerAddrs: []string{"kafka:9092"}, + }, + Topics: []string{"obinlog"}, + ConsumeFrom: "oldest", + GroupID: t.Name(), + }, + PositionRepo: &config.GenericPluginConfig{ + Type: "mysql-repo", + Config: map[string]interface{}{ + "source": utils.MustAny2Map(sourceDBConfig), + }, + }, + IgnoreBiDirectionalData: true, + }), + }, + OutputPlugin: config.GenericPluginConfig{ + Type: "mysql", + Config: utils.MustAny2Map(mysql.MySQLPluginConfig{ + DBConfig: &targetDBConfig, + EnableDDL: true, + Routes: []map[string]interface{}{ + { + "match-schema": sourceDBName, + "match-table": "*", + "target-schema": targetDBName, + }, + }, + EngineConfig: &config.GenericPluginConfig{ + Type: sql_execution_engine.MySQLReplaceEngine, + Config: map[string]interface{}{ + "tag-internal-txn": true, + }, + }, + }), + }, + } + // start the server + server, err := app.NewServer(pipelineConfig) + r.NoError(err) + + r.NoError(server.Start()) + + _, err = sourceDB.Exec(fmt.Sprintf("create table `%s`.t(id int(11), primary key(id)) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4 ", sourceDBName)) + r.NoError(err) + + tx, err := sourceDB.Begin() + r.NoError(err) + _, err = tx.Exec(utils.GenerateTxnTagSQL(sourceDBName)) + r.NoError(err) + _, err = tx.Exec(fmt.Sprintf("insert into `%s`.t(id) values (1)", sourceDBName)) + r.NoError(err) + err = tx.Commit() + r.NoError(err) + + _, err = sourceDB.Exec(fmt.Sprintf("insert into `%s`.t(id) values (2)", sourceDBName)) + r.NoError(err) + + err = mysql_test.SendDeadSignal(sourceDB, pipelineConfig.PipelineName) + r.NoError(err) + + server.Input.Wait() + server.Close() + + success := false + + for retry := 0; retry < 30; retry++ { + rows, err := targetDB.Query(fmt.Sprintf("select id from `%s`.t", targetDBName)) + r.NoError(err) + + ids := make([]int, 0, 
1) + for rows.Next() { + var id int + err = rows.Scan(&id) + r.NoError(err) + ids = append(ids, id) + } + r.NoError(rows.Err()) + _ = rows.Close() + + if len(ids) == 1 && ids[0] == 2 { + success = true + break + } else { + fmt.Printf("wait for syncing %d time(s)...\n", retry) + time.Sleep(time.Second) + } + } + + r.True(success) +} diff --git a/pkg/config/config.go b/pkg/config/config.go index c128cc6a..dce64f83 100644 --- a/pkg/config/config.go +++ b/pkg/config/config.go @@ -121,6 +121,7 @@ type SourceTiDBConfig struct { // OffsetStoreConfig *SourceProbeCfg `mapstructure:"offset-store" toml:"offset-store" json:"offset-store"` PositionRepo *GenericPluginConfig `mapstructure:"position-repo" toml:"position-repo" json:"position-repo"` IgnoreBiDirectionalData bool `mapstructure:"ignore-bidirectional-data" toml:"ignore-bidirectional-data" json:"ignore-bidirectional-data"` + FailOnTxnTags []string `mapstructure:"fail-on-txn-tags" toml:"fail-on-txn-tags" json:"fail-on-txn-tags"` } type GtmConfig struct { diff --git a/pkg/core/msg.go b/pkg/core/msg.go index 2dc4773c..e573ee18 100644 --- a/pkg/core/msg.go +++ b/pkg/core/msg.go @@ -225,7 +225,7 @@ func IsInternalTrafficV2(db string, tbl string) bool { } func MatchTxnTagPipelineName(patterns []string, msg *Msg) (string, bool) { - if IsInternalTrafficV2(msg.Database, msg.Table) { + if len(patterns) > 0 && IsInternalTrafficV2(msg.Database, msg.Table) { pipelineName := msg.DmlMsg.Data["pipeline_name"].(string) for _, pattern := range patterns { if utils.Glob(pattern, pipelineName) { diff --git a/pkg/inputs/plugins.go b/pkg/inputs/plugins.go index b9a14c62..c74f6dee 100644 --- a/pkg/inputs/plugins.go +++ b/pkg/inputs/plugins.go @@ -7,10 +7,12 @@ import ( "github.com/moiot/gravity/pkg/inputs/mysql" _ "github.com/moiot/gravity/pkg/inputs/mysqlbatch" _ "github.com/moiot/gravity/pkg/inputs/mysqlstream" + "github.com/moiot/gravity/pkg/inputs/tidb_kafka" _ "github.com/moiot/gravity/pkg/inputs/tidb_kafka" ) const ( Mongo = mongo.Name Mysql = mysql.Name + TiDB = tidb_kafka.TiDBKafka ) diff --git a/pkg/inputs/tidb_kafka/binlog_tailer.go b/pkg/inputs/tidb_kafka/binlog_tailer.go index 25311cd7..c55c39ae 100644 --- a/pkg/inputs/tidb_kafka/binlog_tailer.go +++ b/pkg/inputs/tidb_kafka/binlog_tailer.go @@ -1,14 +1,18 @@ package tidb_kafka import ( + "fmt" + "reflect" "strings" "sync" "time" "github.com/OneOfOne/xxhash" + "github.com/mitchellh/hashstructure" "github.com/Shopify/sarama" "github.com/juju/errors" + "github.com/pingcap/parser" log "github.com/sirupsen/logrus" gCfg "github.com/moiot/gravity/pkg/config" @@ -19,6 +23,7 @@ import ( "github.com/moiot/gravity/pkg/metrics" "github.com/moiot/gravity/pkg/mysql_test" "github.com/moiot/gravity/pkg/position_cache" + "github.com/moiot/gravity/pkg/position_repos" pb "github.com/moiot/gravity/pkg/protocol/tidb" "github.com/moiot/gravity/pkg/sarama_cluster" "github.com/moiot/gravity/pkg/utils" @@ -35,6 +40,7 @@ type BinlogTailer struct { router core.Router binlogChecker binlog_checker.BinlogChecker mapLock sync.Mutex + parser *parser.Parser wg sync.WaitGroup } @@ -46,42 +52,29 @@ func (t *BinlogTailer) Start() error { go func() { defer t.wg.Done() - ListenPartitionLoop: - for { - select { - case partitionConsumer, ok := <-t.consumer.Partitions(): - if !ok { - log.Info("cannot fetch partitionConsumers, the channel may be closed") - break ListenPartitionLoop - } - - log.Infof("[mq_consumer] partition consumer topic: %v, partition: %v", partitionConsumer.Topic(), partitionConsumer.Partition()) - - t.wg.Add(1) - go 
func(partitionConsumer sarama_cluster.PartitionConsumer) { - defer t.wg.Done() - - for msg := range partitionConsumer.Messages() { - log.Debugf("[tidb_binlog_tailer]: topic: %v, partition: %v, offset: %v", msg.Topic, msg.Partition, msg.Offset) - binlog := pb.Binlog{} - if err := binlog.Unmarshal(msg.Value); err != nil { - log.Fatalf("[binlog_tailer] failed to parse tidb binlog msg: %v", errors.ErrorStack(err)) - } - jobs, err := t.createMsgs(binlog, msg) - if err != nil { - log.Fatalf("[tidb_binlog_tailer] failed to convert tidb binlog to gravity jobs. offset: %v.%v.%v, err: %v", msg.Topic, msg.Partition, msg.Offset, err) - } - for _, job := range jobs { - if err := t.dispatchMsg(job); err != nil { - log.Fatalf("[tidb_binlog_tailer] failed to dispatch job. offset: %v.%v.%v. err: %v", msg.Topic, msg.Partition, msg.Offset, err) - } - } - } - }(partitionConsumer) + for msg := range t.consumer.Messages() { + log.Debugf("[tidb_binlog_tailer]: topic: %v, partition: %v, offset: %v", msg.Topic, msg.Partition, msg.Offset) + binlog := pb.Binlog{} + if err := binlog.Unmarshal(msg.Value); err != nil { + log.Fatalf("[binlog_tailer] failed to parse tidb binlog msg: %v", errors.ErrorStack(err)) + } + jobs, err := t.createMsgs(binlog, msg) + if err != nil { + log.Fatalf("[tidb_binlog_tailer] failed to convert tidb binlog to gravity jobs. offset: %v.%v.%v, err: %v", msg.Topic, msg.Partition, msg.Offset, err) } + t.dispatchMsg(jobs) } - log.Info("Get out of ListenPartitionLoop") }() + + t.wg.Add(1) + go func() { + defer t.wg.Done() + + for err := range t.consumer.Errors() { + log.Fatalf("[tidb_binlog_tailer] received error: %s", err) + } + }() + return nil } @@ -94,24 +87,6 @@ func (t *BinlogTailer) Close() { t.Wait() } -func buildPKColumnList(colInfoList []*pb.ColumnInfo) []*pb.ColumnInfo { - var pkCols []*pb.ColumnInfo - for _, colInfo := range colInfoList { - if colInfo.IsPrimaryKey { - pkCols = append(pkCols, colInfo) - } - } - return pkCols -} - -func buildPKNameList(pkColList []*pb.ColumnInfo) []string { - pkNames := make([]string, len(pkColList)) - for i, colInfo := range pkColList { - pkNames[i] = colInfo.Name - } - return pkNames -} - func buildPKValueMap(columnInfos []*pb.ColumnInfo, row *pb.Row) map[string]interface{} { pkValues := make(map[string]interface{}) for i, columnInfo := range columnInfos { @@ -122,56 +97,63 @@ func buildPKValueMap(columnInfos []*pb.ColumnInfo, row *pb.Row) map[string]inter return pkValues } -func (t *BinlogTailer) createMsgs( - binlog pb.Binlog, - kMsg *sarama.ConsumerMessage, -) ([]*core.Msg, error) { - +func (t *BinlogTailer) createMsgs(binlog pb.Binlog, kMsg *sarama.ConsumerMessage) ([]*core.Msg, error) { var msgList []*core.Msg + if binlog.Type == pb.BinlogType_DDL { - metrics.InputCounter.WithLabelValues(t.name, "", "", string(core.MsgDDL), "").Add(1) - ddlStmt := string(binlog.DdlData.DdlQuery) - if t.config.IgnoreBiDirectionalData && strings.Contains(ddlStmt, consts.DDLTag) { - log.Info("ignore internal ddl: ", ddlStmt) - return msgList, nil - } else { - //TODO support ddl for tidb - log.Infof("skip ddl %s", ddlStmt) - return msgList, nil - } + return msgList, t.handleDDL(binlog, kMsg) } - received := time.Now() + + processTime := time.Now() + eventTime := time.Unix(int64(ParseTimeStamp(uint64(binlog.CommitTs))), 0) + for _, table := range binlog.DmlData.Tables { schemaName := *table.SchemaName tableName := *table.TableName for _, mutation := range table.Mutations { msg := core.Msg{ Phase: core.Phase{ - Start: received, + Start: processTime, }, Type: 
core.MsgDML, Database: schemaName, Table: tableName, - Timestamp: time.Unix(int64(ParseTimeStamp(uint64(binlog.CommitTs))), 0), + Timestamp: eventTime, Done: make(chan struct{}), } if binlog_checker.IsBinlogCheckerMsg(schemaName, tableName) { msg.Type = core.MsgCtl - } - if binlog_checker.IsBinlogCheckerMsg(schemaName, tableName) && *mutation.Type == pb.MutationType_Update { - row := *mutation.ChangeRow - checkerRow, err := binlog_checker.ParseTiDBRow(row) - if err != nil { - return msgList, errors.Trace(err) - } - if !t.binlogChecker.IsEventBelongsToMySelf(checkerRow) { - log.Debugf("skip other binlog checker row. row: %v", row) - continue + if *mutation.Type == pb.MutationType_Update { + row := *mutation.ChangeRow + checkerRow, err := binlog_checker.ParseTiDBRow(row) + if err != nil { + return msgList, errors.Trace(err) + } + if !t.binlogChecker.IsEventBelongsToMySelf(checkerRow) { + log.Debugf("skip other binlog checker row. row: %v", row) + continue + } + t.binlogChecker.MarkActive(checkerRow) } - t.binlogChecker.MarkActive(checkerRow) } + + // skip binlog position event + if position_repos.IsPositionStoreEvent(schemaName, tableName) { + log.Debugf("[binlogTailer] skip position event") + continue + } + + // do not send messages without router to the system + if !consts.IsInternalDBTraffic(schemaName) && + t.router != nil && !t.router.Exists(&core.Msg{ + Database: schemaName, + Table: tableName, + }) { + continue + } + dmlMsg := &core.DMLMsg{} data := make(map[string]interface{}) colInfoList := table.ColumnInfo @@ -197,7 +179,7 @@ func (t *BinlogTailer) createMsgs( data[colInfoList[index].Name] = deserialize(value, colInfoList[index].MysqlType) } default: - log.Warnf("unexpected MutationType: %v", *mutation.Type) + log.Fatalf("unexpected MutationType: %v", *mutation.Type) continue } metrics.InputCounter.WithLabelValues(t.name, msg.Database, msg.Table, string(msg.Type), string(dmlMsg.Operation)).Add(1) @@ -210,9 +192,12 @@ func (t *BinlogTailer) createMsgs( dmlMsg.Data = data dmlMsg.Pks = buildPKValueMap(table.ColumnInfo, mutation.Row) msg.DmlMsg = dmlMsg + msg.InputStreamKey = utils.NewStringPtr(inputStreamKey) + msg.OutputDepHashes = calculateOutputDep(table.UniqueKeys, msg) msgList = append(msgList, &msg) } } + if len(msgList) > 0 { lastMsg := msgList[len(msgList)-1] lastMsg.InputContext = kMsg @@ -221,6 +206,141 @@ func (t *BinlogTailer) createMsgs( return msgList, nil } +func (t *BinlogTailer) handleDDL(binlog pb.Binlog, kMsg *sarama.ConsumerMessage) error { + metrics.InputCounter.WithLabelValues(t.name, "", "", string(core.MsgDDL), "").Add(1) + processTime := time.Now() + eventTime := time.Unix(int64(ParseTimeStamp(uint64(binlog.CommitTs))), 0) + ddlStmt := string(binlog.DdlData.DdlQuery) + + if t.config.IgnoreBiDirectionalData && strings.Contains(ddlStmt, consts.DDLTag) { + log.Info("ignore internal ddl: ", ddlStmt) + return nil + } + + dbNames, tables, asts := parseDDL(t.parser, binlog) + + // emit barrier msg + barrierMsg := NewBarrierMsg() + if err := t.emitter.Emit(barrierMsg); err != nil { + log.Fatalf("failed to emit barrier msg: %v", errors.ErrorStack(err)) + } + <-barrierMsg.Done + + sent := 0 + + for i := range dbNames { + dbName := dbNames[i] + table := tables[i] + ast := asts[i] + + if dbName == consts.MySQLInternalDBName { + continue + } + + if dbName == consts.GravityDBName || dbName == consts.OldDrcDBName { + continue + } + + log.Infof("QueryEvent: database: %s, sql: %s", dbName, ddlStmt) + + // emit ddl msg + ddlMsg := &core.Msg{ + Phase: core.Phase{ + Start: 
processTime, + }, + Type: core.MsgDDL, + Timestamp: eventTime, + Database: dbName, + Table: table, + DdlMsg: &core.DDLMsg{Statement: ddlStmt, AST: ast}, + Done: make(chan struct{}), + InputStreamKey: utils.NewStringPtr(inputStreamKey), + InputContext: kMsg, + AfterCommitCallback: t.AfterMsgCommit, + } + + // do not send messages without router to the system + if consts.IsInternalDBTraffic(dbName) || (t.router != nil && !t.router.Exists(ddlMsg)) { + continue + } + + if err := t.emitter.Emit(ddlMsg); err != nil { + log.Fatalf("failed to emit ddl msg: %v", errors.ErrorStack(err)) + } + sent++ + } + + if sent > 0 { + // emit barrier msg + barrierMsg = NewBarrierMsg() + if err := t.emitter.Emit(barrierMsg); err != nil { + log.Fatalf("failed to emit barrier msg: %v", errors.ErrorStack(err)) + } + <-barrierMsg.Done + log.Infof("[binlogTailer] ddl done with commit ts: %d, offset: %d, stmt: %s", binlog.CommitTs, kMsg.Offset, ddlStmt) + } + + return nil +} + +var hasher = xxhash.New64() +var hashOptions = hashstructure.HashOptions{ + Hasher: hasher, +} + +func calculateOutputDep(uniqueKeys []*pb.Key, msg core.Msg) (hashes []core.OutputHash) { + for _, uk := range uniqueKeys { + var isUKUpdate bool + isUKUpdate = ukUpdated(uk.ColumnNames, msg.DmlMsg.Data, msg.DmlMsg.Old) + + // add hash based on new data + keyName, h := dataHash(msg.Database, msg.Table, uk.GetName(), uk.ColumnNames, msg.DmlMsg.Data) + if keyName != "" { + hashes = append(hashes, core.OutputHash{Name: keyName, H: h}) + } + + // add hash if unique key changed + if isUKUpdate { + keyName, h := dataHash(msg.Database, msg.Table, uk.GetName(), uk.ColumnNames, msg.DmlMsg.Old) + if keyName != "" { + hashes = append(hashes, core.OutputHash{Name: keyName, H: h}) + } + } + } + + return +} + +func dataHash(schema string, table string, idxName string, idxColumns []string, data map[string]interface{}) (string, uint64) { + key := []interface{}{schema, table, idxName} + var nonNull bool + for _, columnName := range idxColumns { + if data[columnName] != nil { + key = append(key, columnName, data[columnName]) + nonNull = true + } + } + if !nonNull { + return "", 0 + } + + h, err := hashstructure.Hash(key, &hashOptions) + if err != nil { + log.Fatalf("error hash: %v, uk: %v", err, idxName) + } + return fmt.Sprint(key), h +} + +func ukUpdated(ukColumns []string, newData map[string]interface{}, oldData map[string]interface{}) bool { + for _, column := range ukColumns { + // if oldData[column] == nil, we consider this is a insert + if oldData[column] != nil && !reflect.DeepEqual(newData[column], oldData[column]) { + return true + } + } + return false +} + func (t *BinlogTailer) AfterMsgCommit(msg *core.Msg) error { kMsg, ok := msg.InputContext.(*sarama.ConsumerMessage) if !ok { @@ -257,17 +377,53 @@ func deserialize(raw *pb.Column, colType string) interface{} { case "json": return raw.GetBytesValue() default: - log.Warnf("un-recognized mysql type: %v", raw) + log.Fatalf("un-recognized mysql type: %v", raw) return raw } } -func (t *BinlogTailer) dispatchMsg(msg *core.Msg) error { - msg.InputStreamKey = utils.NewStringPtr("tidbbinlog") - pkSign := msg.GetPkSign() - msg.OutputDepHashes = []core.OutputHash{{pkSign, xxhash.ChecksumString64(pkSign)}} +func (t *BinlogTailer) dispatchMsg(msgs []*core.Msg) { + // ignore internal txn data + hasInternalTxnTag := false + for _, msg := range msgs { + if utils.IsCircularTrafficTag(msg.Database, msg.Table) { + hasInternalTxnTag = true + log.Debugf("[binlogTailer] internal traffic found") + break + } + } + + if 
hasInternalTxnTag && t.config.IgnoreBiDirectionalData { + last := msgs[len(msgs)-1] + msgs = []*core.Msg{ + { + Phase: last.Phase, + Type: core.MsgCtl, + Timestamp: last.Timestamp, + Done: last.Done, + InputContext: last.InputContext, + InputStreamKey: last.InputStreamKey, + AfterCommitCallback: last.AfterAckCallback, + }, + } + } else { + log.Debugf("[binlogTailer] do not ignore traffic: hasInternalTxnTag %v, cfg.Ignore %v, msgTxnBufferLen: %v", hasInternalTxnTag, t.config.IgnoreBiDirectionalData, len(msgs)) + } + + for i, m := range msgs { + if binlog_checker.IsBinlogCheckerMsg(m.Database, m.Table) || m.Database == consts.GravityDBName { + m.Type = core.MsgCtl + } - return errors.Trace(t.emitter.Emit(msg)) + // check circular traffic again before emitter emit the message + if pipelineName, circular := core.MatchTxnTagPipelineName(t.config.FailOnTxnTags, m); circular { + log.Fatalf("[binlog_tailer] detected internal circular traffic, txn tag: %v", pipelineName) + } + + if err := t.emitter.Emit(m); err != nil { + log.Fatalf("failed to emit, idx: %d, schema: %v, table: %v, msgType: %v, err: %v", i, m.Database, m.Table, m.Type, errors.ErrorStack(err)) + } + } } func NewBinlogTailer( @@ -302,47 +458,54 @@ func NewBinlogTailer( kafkaConfig.Net.SASL.Password = kafkaGlobalConfig.Net.SASL.Password } - kafkaConfig.Group.Mode = sarama_cluster.ConsumerModePartitions - // // common settings // - kafkaConfig.ClientID = srcKafkaCfg.Common.ClientID - kafkaConfig.ChannelBufferSize = srcKafkaCfg.Common.ChannelBufferSize + if srcKafkaCfg.Common.ClientID != "" { + kafkaConfig.ClientID = srcKafkaCfg.Common.ClientID + } else { + kafkaConfig.ClientID = "_gravity" + } + if srcKafkaCfg.Common.ChannelBufferSize > 0 { + kafkaConfig.ChannelBufferSize = srcKafkaCfg.Common.ChannelBufferSize + } // // consumer related performance tuning // - if srcKafkaCfg.Consumer == nil { - return nil, errors.Errorf("empty consumer config") - } + if srcKafkaCfg.Consumer != nil { + d, err := time.ParseDuration(srcKafkaCfg.Consumer.Offsets.CommitInterval) + if err != nil { + return nil, errors.Errorf("invalid commit interval: %v", srcKafkaCfg.Consumer.Offsets.CommitInterval) + } + kafkaConfig.Consumer.Offsets.CommitInterval = d - d, err := time.ParseDuration(srcKafkaCfg.Consumer.Offsets.CommitInterval) - if err != nil { - return nil, errors.Errorf("invalid commit interval: %v", srcKafkaCfg.Consumer.Offsets.CommitInterval) - } - kafkaConfig.Consumer.Offsets.CommitInterval = d + if srcKafkaCfg.Consumer.Fetch.Default != 0 { + kafkaConfig.Consumer.Fetch.Default = srcKafkaCfg.Consumer.Fetch.Default + } - if srcKafkaCfg.Consumer.Fetch.Default != 0 { - kafkaConfig.Consumer.Fetch.Default = srcKafkaCfg.Consumer.Fetch.Default - } + if srcKafkaCfg.Consumer.Fetch.Max != 0 { + kafkaConfig.Consumer.Fetch.Max = srcKafkaCfg.Consumer.Fetch.Max + } - if srcKafkaCfg.Consumer.Fetch.Max != 0 { - kafkaConfig.Consumer.Fetch.Max = srcKafkaCfg.Consumer.Fetch.Max - } + if srcKafkaCfg.Consumer.Fetch.Min != 0 { + kafkaConfig.Consumer.Fetch.Min = srcKafkaCfg.Consumer.Fetch.Min + } - if srcKafkaCfg.Consumer.Fetch.Min != 0 { - kafkaConfig.Consumer.Fetch.Min = srcKafkaCfg.Consumer.Fetch.Min - } + maxWaitDuration, err := time.ParseDuration(srcKafkaCfg.Consumer.MaxWaitTime) + if err != nil { + return nil, errors.Errorf("invalid max wait time") + } - maxWaitDuration, err := time.ParseDuration(srcKafkaCfg.Consumer.MaxWaitTime) - if err != nil { - return nil, errors.Errorf("invalid max wait time") + kafkaConfig.Consumer.MaxWaitTime = maxWaitDuration } + 
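+	// Note: the consumer tuning block above is now optional. When srcKafkaCfg.Consumer
+	// is nil, sarama's defaults are kept; when it is set, only the non-zero overrides
+	// are applied (CommitInterval and MaxWaitTime must still parse in that case).
+	// Error reporting is always enabled and the assembled config is validated below
+	// before the cluster consumer is created.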
kafkaConfig.Consumer.Return.Errors = true - kafkaConfig.Consumer.MaxWaitTime = maxWaitDuration + if err := kafkaConfig.Validate(); err != nil { + log.Fatal(err) + } - log.Infof("[tidb_binlog_tailer] consumer config: sarama config: %v, pipeline config: %+v", kafkaConfig, srcKafkaCfg) + log.Infof("[tidb_binlog_tailer] consumer config: sarama config: %#v", kafkaConfig) consumer, err := sarama_cluster.NewConsumer( srcKafkaCfg.BrokerConfig.BrokerAddrs, @@ -364,6 +527,7 @@ func NewBinlogTailer( emitter: emitter, router: router, binlogChecker: binlogChecker, + parser: parser.New(), } return tailer, nil } diff --git a/pkg/inputs/tidb_kafka/input.go b/pkg/inputs/tidb_kafka/input.go index a76b5cc1..3831c1c7 100644 --- a/pkg/inputs/tidb_kafka/input.go +++ b/pkg/inputs/tidb_kafka/input.go @@ -27,6 +27,8 @@ var ( BinlogCheckInterval = time.Second ) +const TiDBKafka = "tidbkafka" + type tidbKafkaStreamInputPlugin struct { pipelineName string diff --git a/pkg/inputs/tidb_kafka/position_value.go b/pkg/inputs/tidb_kafka/position_value.go index ec9a623a..74cb7c55 100644 --- a/pkg/inputs/tidb_kafka/position_value.go +++ b/pkg/inputs/tidb_kafka/position_value.go @@ -4,6 +4,7 @@ import ( "time" jsoniter "github.com/json-iterator/go" + "github.com/moiot/gravity/pkg/config" "github.com/moiot/gravity/pkg/position_cache" "github.com/moiot/gravity/pkg/position_repos" @@ -76,6 +77,10 @@ func (store *OffsetStore) CommitOffset(req *offsets.OffsetCommitRequest) (*offse return nil, errors.Errorf("invalid position type") } + if positionValue.Offsets == nil { + positionValue.Offsets = make(map[string]ConsumerGroupOffset) + } + if _, ok := positionValue.Offsets[req.ConsumerGroup]; !ok { positionValue.Offsets[req.ConsumerGroup] = make(map[string]TopicOffset) } diff --git a/pkg/inputs/tidb_kafka/utils.go b/pkg/inputs/tidb_kafka/utils.go index e6a1a092..7a67857c 100644 --- a/pkg/inputs/tidb_kafka/utils.go +++ b/pkg/inputs/tidb_kafka/utils.go @@ -1,7 +1,86 @@ package tidb_kafka +import ( + "time" + + "github.com/pingcap/parser" + "github.com/pingcap/parser/ast" + _ "github.com/pingcap/tidb/types/parser_driver" + log "github.com/sirupsen/logrus" + + "github.com/moiot/gravity/pkg/core" + "github.com/moiot/gravity/pkg/protocol/tidb" + "github.com/moiot/gravity/pkg/utils" +) + +const inputStreamKey = "tidbbinlog" + func ParseTimeStamp(tso uint64) uint64 { // https://github.com/pingcap/pd/blob/master/tools/pd-ctl/pdctl/command/tso_command.go#L49 // timstamp in seconds format return (tso >> 18) / 1000 } + +func parseDDL(p *parser.Parser, binlog tidb.Binlog) (db, table []string, node []ast.StmtNode) { + stmt, err := p.ParseOneStmt(string(binlog.DdlData.DdlQuery), "", "") + if err != nil { + log.Errorf("sql parser: %s. 
error: %v", string(binlog.DdlData.DdlQuery), err.Error()) + return []string{""}, []string{""}, []ast.StmtNode{nil} + } + + switch v := stmt.(type) { + case *ast.CreateDatabaseStmt: + db = append(db, v.Name) + table = append(table, "") + node = append(node, stmt) + case *ast.DropDatabaseStmt: + db = append(db, v.Name) + table = append(table, "") + node = append(node, stmt) + case *ast.CreateTableStmt: + db = append(db, v.Table.Schema.String()) + table = append(table, v.Table.Name.String()) + node = append(node, stmt) + case *ast.DropTableStmt: + for i := range v.Tables { + db = append(db, v.Tables[i].Schema.String()) + table = append(table, v.Tables[i].Name.String()) + dropTableStmt := *v + dropTableStmt.Tables = nil + dropTableStmt.Tables = append(dropTableStmt.Tables, v.Tables[i]) + node = append(node, &dropTableStmt) + } + case *ast.AlterTableStmt: + db = append(db, v.Table.Schema.String()) + table = append(table, v.Table.Name.String()) + node = append(node, stmt) + case *ast.TruncateTableStmt: + db = append(db, v.Table.Schema.String()) + table = append(table, v.Table.Name.String()) + node = append(node, stmt) + case *ast.RenameTableStmt: + db = append(db, v.OldTable.Schema.String()) + table = append(table, v.OldTable.Name.String()) + node = append(node, stmt) + default: + db = append(db, "") + table = append(table, "") + node = append(node, stmt) + } + if len(db) == 1 && db[0] == "" && binlog.DdlData.SchemaName != nil { + db[0] = *binlog.DdlData.SchemaName + } + return +} + +func NewBarrierMsg() *core.Msg { + return &core.Msg{ + Type: core.MsgCtl, + Timestamp: time.Now(), + Done: make(chan struct{}), + InputStreamKey: utils.NewStringPtr(inputStreamKey), + Phase: core.Phase{ + Start: time.Now(), + }, + } +} diff --git a/pkg/mysql_test/test.go b/pkg/mysql_test/test.go index 8fff5d31..81debc0e 100644 --- a/pkg/mysql_test/test.go +++ b/pkg/mysql_test/test.go @@ -432,7 +432,7 @@ func SeedCompositePrimaryKeyInt(db *sql.DB, dbName string) { } -func setupTestDB(db *sql.DB, dbName string) error { +func SetupTestDB(db *sql.DB, dbName string) error { // setup test tableNames if _, err := db.Exec(dropDBStatement(dbName)); err != nil { @@ -502,7 +502,7 @@ func MustCreateSourceDBConn() *sql.DB { // MustSetupSourceDB setup a test db, so that we can use different db in different test cases func MustSetupSourceDB(dbName string) *sql.DB { db := MustCreateSourceDBConn() - err := setupTestDB(db, dbName) + err := SetupTestDB(db, dbName) if err != nil { log.Fatalf("failed to setup source db err: %v", errors.ErrorStack(err)) } @@ -538,7 +538,7 @@ func MustCreateTargetDBConn() *sql.DB { func MustSetupTargetDB(dbName string) *sql.DB { db := MustCreateTargetDBConn() - err := setupTestDB(db, dbName) + err := SetupTestDB(db, dbName) if err != nil { log.Fatalf("failed to setup source db1 err: %v", errors.ErrorStack(err)) } diff --git a/pkg/position_repos/mysql_repo.go b/pkg/position_repos/mysql_repo.go index 7019a9e8..b19628be 100644 --- a/pkg/position_repos/mysql_repo.go +++ b/pkg/position_repos/mysql_repo.go @@ -94,7 +94,7 @@ func (repo *mysqlPositionRepo) Configure(pipelineName string, data map[string]in func (repo *mysqlPositionRepo) Init() error { db, err := utils.CreateDBConnection(&repo.dbCfg) if err != nil { - return errors.Trace(err) + return errors.Annotatef(err, "%#v", repo.dbCfg) } _, err = db.Exec(fmt.Sprintf("%sCREATE DATABASE IF NOT EXISTS %s", repo.annotation, consts.GravityDBName)) diff --git a/pkg/protocol/tidb/tidb.pb.go b/pkg/protocol/tidb/tidb.pb.go index b1109d8c..e2f2fa46 100644 --- 
a/pkg/protocol/tidb/tidb.pb.go +++ b/pkg/protocol/tidb/tidb.pb.go @@ -1,17 +1,18 @@ // Code generated by protoc-gen-gogo. DO NOT EDIT. -// source: binlog.proto +// source: tidb.proto /* - Package slave_binlog is a generated protocol buffer package. + Package tidb is a generated protocol buffer package. It is generated from these files: - binlog.proto + tidb.proto It has these top-level messages: Column ColumnInfo Row Table + Key TableMutation DMLData DDLData @@ -19,19 +20,14 @@ */ package tidb -import ( - "fmt" +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" - proto "github.com/golang/protobuf/proto" +import binary "encoding/binary" - math "math" - - github_com_golang_protobuf_proto "github.com/golang/protobuf/proto" - - encoding_binary "encoding/binary" - - io "io" -) +import io "io" // Reference imports to suppress errors if they are not otherwise used. var _ = proto.Marshal @@ -79,7 +75,7 @@ func (x *MutationType) UnmarshalJSON(data []byte) error { *x = MutationType(value) return nil } -func (MutationType) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{0} } +func (MutationType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTidb, []int{0} } type BinlogType int32 @@ -89,12 +85,12 @@ const ( ) var BinlogType_name = map[int32]string{ - 0: "DmlMsg", - 1: "DdlMsg", + 0: "DML", + 1: "DDL", } var BinlogType_value = map[string]int32{ - "DmlMsg": 0, - "DdlMsg": 1, + "DML": 0, + "DDL": 1, } func (x BinlogType) Enum() *BinlogType { @@ -113,7 +109,7 @@ func (x *BinlogType) UnmarshalJSON(data []byte) error { *x = BinlogType(value) return nil } -func (BinlogType) EnumDescriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{1} } +func (BinlogType) EnumDescriptor() ([]byte, []int) { return fileDescriptorTidb, []int{1} } // for text and char type, string_value is set // for blob and binary type, bytes_value is set @@ -132,7 +128,7 @@ type Column struct { func (m *Column) Reset() { *m = Column{} } func (m *Column) String() string { return proto.CompactTextString(m) } func (*Column) ProtoMessage() {} -func (*Column) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{0} } +func (*Column) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{0} } const Default_Column_IsNull bool = false @@ -184,7 +180,7 @@ type ColumnInfo struct { // https://dev.mysql.com/doc/refman/8.0/en/data-types.html // for numeric type: int bigint smallint tinyint float double decimal bit // for string type: text longtext mediumtext char tinytext varchar - // blob longblog mediumblog binary tinyblob varbinary + // blob longblob mediumblob binary tinyblob varbinary // enum set // for json type: json MysqlType string `protobuf:"bytes,2,opt,name=mysql_type,json=mysqlType" json:"mysql_type"` @@ -195,7 +191,7 @@ type ColumnInfo struct { func (m *ColumnInfo) Reset() { *m = ColumnInfo{} } func (m *ColumnInfo) String() string { return proto.CompactTextString(m) } func (*ColumnInfo) ProtoMessage() {} -func (*ColumnInfo) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{1} } +func (*ColumnInfo) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{1} } func (m *ColumnInfo) GetName() string { if m != nil { @@ -226,7 +222,7 @@ type Row struct { func (m *Row) Reset() { *m = Row{} } func (m *Row) String() string { return proto.CompactTextString(m) } func (*Row) ProtoMessage() {} -func (*Row) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{2} } +func (*Row) 
Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{2} } func (m *Row) GetColumns() []*Column { if m != nil { @@ -237,17 +233,19 @@ func (m *Row) GetColumns() []*Column { // Table contains mutations in a table. type Table struct { - SchemaName *string `protobuf:"bytes,1,opt,name=schema_name,json=schemaName" json:"schema_name,omitempty"` - TableName *string `protobuf:"bytes,2,opt,name=table_name,json=tableName" json:"table_name,omitempty"` - ColumnInfo []*ColumnInfo `protobuf:"bytes,3,rep,name=column_info,json=columnInfo" json:"column_info,omitempty"` - Mutations []*TableMutation `protobuf:"bytes,4,rep,name=mutations" json:"mutations,omitempty"` - XXX_unrecognized []byte `json:"-"` + SchemaName *string `protobuf:"bytes,1,opt,name=schema_name,json=schemaName" json:"schema_name,omitempty"` + TableName *string `protobuf:"bytes,2,opt,name=table_name,json=tableName" json:"table_name,omitempty"` + ColumnInfo []*ColumnInfo `protobuf:"bytes,3,rep,name=column_info,json=columnInfo" json:"column_info,omitempty"` + Mutations []*TableMutation `protobuf:"bytes,4,rep,name=mutations" json:"mutations,omitempty"` + // will only be set with version >= 3.0.9 + UniqueKeys []*Key `protobuf:"bytes,5,rep,name=unique_keys,json=uniqueKeys" json:"unique_keys,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *Table) Reset() { *m = Table{} } func (m *Table) String() string { return proto.CompactTextString(m) } func (*Table) ProtoMessage() {} -func (*Table) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{3} } +func (*Table) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{3} } func (m *Table) GetSchemaName() string { if m != nil && m.SchemaName != nil { @@ -277,8 +275,42 @@ func (m *Table) GetMutations() []*TableMutation { return nil } +func (m *Table) GetUniqueKeys() []*Key { + if m != nil { + return m.UniqueKeys + } + return nil +} + +// Key contains Key info. +type Key struct { + // name will be PRIMARY if it's the primary key. 
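+	// For example, for an assumed table defined with PRIMARY KEY (id) and
+	// UNIQUE KEY uk_name (name), the entries would look like
+	//   {Name: "PRIMARY", ColumnNames: ["id"]} and
+	//   {Name: "uk_name", ColumnNames: ["name"]}
+	// (illustrative values only; these keys are what calculateOutputDep hashes).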
+ Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + ColumnNames []string `protobuf:"bytes,2,rep,name=column_names,json=columnNames" json:"column_names,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Key) Reset() { *m = Key{} } +func (m *Key) String() string { return proto.CompactTextString(m) } +func (*Key) ProtoMessage() {} +func (*Key) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{4} } + +func (m *Key) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *Key) GetColumnNames() []string { + if m != nil { + return m.ColumnNames + } + return nil +} + type TableMutation struct { - Type *MutationType `protobuf:"varint,1,req,name=type,enum=slave.binlog.MutationType" json:"type,omitempty"` + Type *MutationType `protobuf:"varint,1,req,name=type,enum=tidb.MutationType" json:"type,omitempty"` Row *Row `protobuf:"bytes,2,req,name=row" json:"row,omitempty"` // for Update MutationType only ChangeRow *Row `protobuf:"bytes,3,opt,name=change_row,json=changeRow" json:"change_row,omitempty"` @@ -288,7 +320,7 @@ type TableMutation struct { func (m *TableMutation) Reset() { *m = TableMutation{} } func (m *TableMutation) String() string { return proto.CompactTextString(m) } func (*TableMutation) ProtoMessage() {} -func (*TableMutation) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{4} } +func (*TableMutation) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{5} } func (m *TableMutation) GetType() MutationType { if m != nil && m.Type != nil { @@ -320,7 +352,7 @@ type DMLData struct { func (m *DMLData) Reset() { *m = DMLData{} } func (m *DMLData) String() string { return proto.CompactTextString(m) } func (*DMLData) ProtoMessage() {} -func (*DMLData) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{5} } +func (*DMLData) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{6} } func (m *DMLData) GetTables() []*Table { if m != nil { @@ -342,7 +374,7 @@ type DDLData struct { func (m *DDLData) Reset() { *m = DDLData{} } func (m *DDLData) String() string { return proto.CompactTextString(m) } func (*DDLData) ProtoMessage() {} -func (*DDLData) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{6} } +func (*DDLData) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{7} } func (m *DDLData) GetSchemaName() string { if m != nil && m.SchemaName != nil { @@ -367,9 +399,9 @@ func (m *DDLData) GetDdlQuery() []byte { // Binlog contains all the changes in a transaction. 
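// A Binlog message carries either one DML transaction (dml_data) or a single
// DDL statement (ddl_data), as indicated by type. commit_ts is the TiDB commit
// TSO: the low 18 bits are a logical counter and the remaining bits hold the
// physical time in milliseconds, which is why ParseTimeStamp in
// pkg/inputs/tidb_kafka/utils.go recovers Unix seconds as (commit_ts >> 18) / 1000.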
type Binlog struct { - Type BinlogType `protobuf:"varint,1,opt,name=type,enum=slave.binlog.BinlogType" json:"type"` + Type BinlogType `protobuf:"varint,1,opt,name=type,enum=tidb.BinlogType" json:"type"` CommitTs int64 `protobuf:"varint,2,opt,name=commit_ts,json=commitTs" json:"commit_ts"` - // dml_data is marshalled from DmlMsg type + // dml_data is marshalled from DML type DmlData *DMLData `protobuf:"bytes,3,opt,name=dml_data,json=dmlData" json:"dml_data,omitempty"` DdlData *DDLData `protobuf:"bytes,4,opt,name=ddl_data,json=ddlData" json:"ddl_data,omitempty"` XXX_unrecognized []byte `json:"-"` @@ -378,7 +410,7 @@ type Binlog struct { func (m *Binlog) Reset() { *m = Binlog{} } func (m *Binlog) String() string { return proto.CompactTextString(m) } func (*Binlog) ProtoMessage() {} -func (*Binlog) Descriptor() ([]byte, []int) { return fileDescriptorBinlog, []int{7} } +func (*Binlog) Descriptor() ([]byte, []int) { return fileDescriptorTidb, []int{8} } func (m *Binlog) GetType() BinlogType { if m != nil { @@ -409,16 +441,17 @@ func (m *Binlog) GetDdlData() *DDLData { } func init() { - proto.RegisterType((*Column)(nil), "slave.binlog.Column") - proto.RegisterType((*ColumnInfo)(nil), "slave.binlog.ColumnInfo") - proto.RegisterType((*Row)(nil), "slave.binlog.Row") - proto.RegisterType((*Table)(nil), "slave.binlog.Table") - proto.RegisterType((*TableMutation)(nil), "slave.binlog.TableMutation") - proto.RegisterType((*DMLData)(nil), "slave.binlog.DMLData") - proto.RegisterType((*DDLData)(nil), "slave.binlog.DDLData") - proto.RegisterType((*Binlog)(nil), "slave.binlog.Binlog") - proto.RegisterEnum("slave.binlog.MutationType", MutationType_name, MutationType_value) - proto.RegisterEnum("slave.binlog.BinlogType", BinlogType_name, BinlogType_value) + proto.RegisterType((*Column)(nil), "tidb.Column") + proto.RegisterType((*ColumnInfo)(nil), "tidb.ColumnInfo") + proto.RegisterType((*Row)(nil), "tidb.Row") + proto.RegisterType((*Table)(nil), "tidb.Table") + proto.RegisterType((*Key)(nil), "tidb.Key") + proto.RegisterType((*TableMutation)(nil), "tidb.TableMutation") + proto.RegisterType((*DMLData)(nil), "tidb.DMLData") + proto.RegisterType((*DDLData)(nil), "tidb.DDLData") + proto.RegisterType((*Binlog)(nil), "tidb.Binlog") + proto.RegisterEnum("tidb.MutationType", MutationType_name, MutationType_value) + proto.RegisterEnum("tidb.BinlogType", BinlogType_name, BinlogType_value) } func (m *Column) Marshal() (dAtA []byte, err error) { size := m.Size() @@ -448,29 +481,29 @@ func (m *Column) MarshalTo(dAtA []byte) (int, error) { if m.Int64Value != nil { dAtA[i] = 0x10 i++ - i = encodeVarintBinlog(dAtA, i, uint64(*m.Int64Value)) + i = encodeVarintTidb(dAtA, i, uint64(*m.Int64Value)) } if m.Uint64Value != nil { dAtA[i] = 0x18 i++ - i = encodeVarintBinlog(dAtA, i, uint64(*m.Uint64Value)) + i = encodeVarintTidb(dAtA, i, uint64(*m.Uint64Value)) } if m.DoubleValue != nil { dAtA[i] = 0x21 i++ - encoding_binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.DoubleValue)))) + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(*m.DoubleValue)))) i += 8 } if m.BytesValue != nil { dAtA[i] = 0x2a i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(m.BytesValue))) + i = encodeVarintTidb(dAtA, i, uint64(len(m.BytesValue))) i += copy(dAtA[i:], m.BytesValue) } if m.StringValue != nil { dAtA[i] = 0x32 i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(*m.StringValue))) + i = encodeVarintTidb(dAtA, i, uint64(len(*m.StringValue))) i += copy(dAtA[i:], *m.StringValue) } if m.XXX_unrecognized != nil 
{ @@ -496,11 +529,11 @@ func (m *ColumnInfo) MarshalTo(dAtA []byte) (int, error) { _ = l dAtA[i] = 0xa i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(m.Name))) + i = encodeVarintTidb(dAtA, i, uint64(len(m.Name))) i += copy(dAtA[i:], m.Name) dAtA[i] = 0x12 i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(m.MysqlType))) + i = encodeVarintTidb(dAtA, i, uint64(len(m.MysqlType))) i += copy(dAtA[i:], m.MysqlType) dAtA[i] = 0x18 i++ @@ -535,7 +568,7 @@ func (m *Row) MarshalTo(dAtA []byte) (int, error) { for _, msg := range m.Columns { dAtA[i] = 0xa i++ - i = encodeVarintBinlog(dAtA, i, uint64(msg.Size())) + i = encodeVarintTidb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -567,20 +600,20 @@ func (m *Table) MarshalTo(dAtA []byte) (int, error) { if m.SchemaName != nil { dAtA[i] = 0xa i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(*m.SchemaName))) + i = encodeVarintTidb(dAtA, i, uint64(len(*m.SchemaName))) i += copy(dAtA[i:], *m.SchemaName) } if m.TableName != nil { dAtA[i] = 0x12 i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(*m.TableName))) + i = encodeVarintTidb(dAtA, i, uint64(len(*m.TableName))) i += copy(dAtA[i:], *m.TableName) } if len(m.ColumnInfo) > 0 { for _, msg := range m.ColumnInfo { dAtA[i] = 0x1a i++ - i = encodeVarintBinlog(dAtA, i, uint64(msg.Size())) + i = encodeVarintTidb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -592,7 +625,19 @@ func (m *Table) MarshalTo(dAtA []byte) (int, error) { for _, msg := range m.Mutations { dAtA[i] = 0x22 i++ - i = encodeVarintBinlog(dAtA, i, uint64(msg.Size())) + i = encodeVarintTidb(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.UniqueKeys) > 0 { + for _, msg := range m.UniqueKeys { + dAtA[i] = 0x2a + i++ + i = encodeVarintTidb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -606,6 +651,48 @@ func (m *Table) MarshalTo(dAtA []byte) (int, error) { return i, nil } +func (m *Key) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Key) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Name != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintTidb(dAtA, i, uint64(len(*m.Name))) + i += copy(dAtA[i:], *m.Name) + } + if len(m.ColumnNames) > 0 { + for _, s := range m.ColumnNames { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.XXX_unrecognized != nil { + i += copy(dAtA[i:], m.XXX_unrecognized) + } + return i, nil +} + func (m *TableMutation) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -622,18 +709,18 @@ func (m *TableMutation) MarshalTo(dAtA []byte) (int, error) { var l int _ = l if m.Type == nil { - return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) + return 0, new(proto.RequiredNotSetError) } else { dAtA[i] = 0x8 i++ - i = encodeVarintBinlog(dAtA, i, uint64(*m.Type)) + i = encodeVarintTidb(dAtA, i, uint64(*m.Type)) } if m.Row == nil { - return 0, new(github_com_golang_protobuf_proto.RequiredNotSetError) + return 0, new(proto.RequiredNotSetError) } else { dAtA[i] = 0x12 i++ - i = encodeVarintBinlog(dAtA, i, uint64(m.Row.Size())) + i = encodeVarintTidb(dAtA, i, 
uint64(m.Row.Size())) n1, err := m.Row.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -643,7 +730,7 @@ func (m *TableMutation) MarshalTo(dAtA []byte) (int, error) { if m.ChangeRow != nil { dAtA[i] = 0x1a i++ - i = encodeVarintBinlog(dAtA, i, uint64(m.ChangeRow.Size())) + i = encodeVarintTidb(dAtA, i, uint64(m.ChangeRow.Size())) n2, err := m.ChangeRow.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -675,7 +762,7 @@ func (m *DMLData) MarshalTo(dAtA []byte) (int, error) { for _, msg := range m.Tables { dAtA[i] = 0xa i++ - i = encodeVarintBinlog(dAtA, i, uint64(msg.Size())) + i = encodeVarintTidb(dAtA, i, uint64(msg.Size())) n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -707,19 +794,19 @@ func (m *DDLData) MarshalTo(dAtA []byte) (int, error) { if m.SchemaName != nil { dAtA[i] = 0xa i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(*m.SchemaName))) + i = encodeVarintTidb(dAtA, i, uint64(len(*m.SchemaName))) i += copy(dAtA[i:], *m.SchemaName) } if m.TableName != nil { dAtA[i] = 0x12 i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(*m.TableName))) + i = encodeVarintTidb(dAtA, i, uint64(len(*m.TableName))) i += copy(dAtA[i:], *m.TableName) } if m.DdlQuery != nil { dAtA[i] = 0x1a i++ - i = encodeVarintBinlog(dAtA, i, uint64(len(m.DdlQuery))) + i = encodeVarintTidb(dAtA, i, uint64(len(m.DdlQuery))) i += copy(dAtA[i:], m.DdlQuery) } if m.XXX_unrecognized != nil { @@ -745,14 +832,14 @@ func (m *Binlog) MarshalTo(dAtA []byte) (int, error) { _ = l dAtA[i] = 0x8 i++ - i = encodeVarintBinlog(dAtA, i, uint64(m.Type)) + i = encodeVarintTidb(dAtA, i, uint64(m.Type)) dAtA[i] = 0x10 i++ - i = encodeVarintBinlog(dAtA, i, uint64(m.CommitTs)) + i = encodeVarintTidb(dAtA, i, uint64(m.CommitTs)) if m.DmlData != nil { dAtA[i] = 0x1a i++ - i = encodeVarintBinlog(dAtA, i, uint64(m.DmlData.Size())) + i = encodeVarintTidb(dAtA, i, uint64(m.DmlData.Size())) n3, err := m.DmlData.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -762,7 +849,7 @@ func (m *Binlog) MarshalTo(dAtA []byte) (int, error) { if m.DdlData != nil { dAtA[i] = 0x22 i++ - i = encodeVarintBinlog(dAtA, i, uint64(m.DdlData.Size())) + i = encodeVarintTidb(dAtA, i, uint64(m.DdlData.Size())) n4, err := m.DdlData.MarshalTo(dAtA[i:]) if err != nil { return 0, err @@ -775,7 +862,7 @@ func (m *Binlog) MarshalTo(dAtA []byte) (int, error) { return i, nil } -func encodeVarintBinlog(dAtA []byte, offset int, v uint64) int { +func encodeVarintTidb(dAtA []byte, offset int, v uint64) int { for v >= 1<<7 { dAtA[offset] = uint8(v&0x7f | 0x80) v >>= 7 @@ -791,21 +878,21 @@ func (m *Column) Size() (n int) { n += 2 } if m.Int64Value != nil { - n += 1 + sovBinlog(uint64(*m.Int64Value)) + n += 1 + sovTidb(uint64(*m.Int64Value)) } if m.Uint64Value != nil { - n += 1 + sovBinlog(uint64(*m.Uint64Value)) + n += 1 + sovTidb(uint64(*m.Uint64Value)) } if m.DoubleValue != nil { n += 9 } if m.BytesValue != nil { l = len(m.BytesValue) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.StringValue != nil { l = len(*m.StringValue) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -817,9 +904,9 @@ func (m *ColumnInfo) Size() (n int) { var l int _ = l l = len(m.Name) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) l = len(m.MysqlType) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) n += 2 if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -833,7 +920,7 @@ func (m *Row) Size() (n int) { if 
len(m.Columns) > 0 { for _, e := range m.Columns { l = e.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } } if m.XXX_unrecognized != nil { @@ -847,22 +934,47 @@ func (m *Table) Size() (n int) { _ = l if m.SchemaName != nil { l = len(*m.SchemaName) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.TableName != nil { l = len(*m.TableName) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if len(m.ColumnInfo) > 0 { for _, e := range m.ColumnInfo { l = e.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } } if len(m.Mutations) > 0 { for _, e := range m.Mutations { l = e.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) + } + } + if len(m.UniqueKeys) > 0 { + for _, e := range m.UniqueKeys { + l = e.Size() + n += 1 + l + sovTidb(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *Key) Size() (n int) { + var l int + _ = l + if m.Name != nil { + l = len(*m.Name) + n += 1 + l + sovTidb(uint64(l)) + } + if len(m.ColumnNames) > 0 { + for _, s := range m.ColumnNames { + l = len(s) + n += 1 + l + sovTidb(uint64(l)) } } if m.XXX_unrecognized != nil { @@ -875,15 +987,15 @@ func (m *TableMutation) Size() (n int) { var l int _ = l if m.Type != nil { - n += 1 + sovBinlog(uint64(*m.Type)) + n += 1 + sovTidb(uint64(*m.Type)) } if m.Row != nil { l = m.Row.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.ChangeRow != nil { l = m.ChangeRow.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -897,7 +1009,7 @@ func (m *DMLData) Size() (n int) { if len(m.Tables) > 0 { for _, e := range m.Tables { l = e.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } } if m.XXX_unrecognized != nil { @@ -911,15 +1023,15 @@ func (m *DDLData) Size() (n int) { _ = l if m.SchemaName != nil { l = len(*m.SchemaName) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.TableName != nil { l = len(*m.TableName) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.DdlQuery != nil { l = len(m.DdlQuery) - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -930,15 +1042,15 @@ func (m *DDLData) Size() (n int) { func (m *Binlog) Size() (n int) { var l int _ = l - n += 1 + sovBinlog(uint64(m.Type)) - n += 1 + sovBinlog(uint64(m.CommitTs)) + n += 1 + sovTidb(uint64(m.Type)) + n += 1 + sovTidb(uint64(m.CommitTs)) if m.DmlData != nil { l = m.DmlData.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.DdlData != nil { l = m.DdlData.Size() - n += 1 + l + sovBinlog(uint64(l)) + n += 1 + l + sovTidb(uint64(l)) } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) @@ -946,7 +1058,7 @@ func (m *Binlog) Size() (n int) { return n } -func sovBinlog(x uint64) (n int) { +func sovTidb(x uint64) (n int) { for { n++ x >>= 7 @@ -956,8 +1068,8 @@ func sovBinlog(x uint64) (n int) { } return n } -func sozBinlog(x uint64) (n int) { - return sovBinlog(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +func sozTidb(x uint64) (n int) { + return sovTidb(uint64((x << 1) ^ uint64((int64(x) >> 63)))) } func (m *Column) Unmarshal(dAtA []byte) error { l := len(dAtA) @@ -967,7 +1079,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; 
shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -995,7 +1107,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1016,7 +1128,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { var v int64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1036,7 +1148,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { var v uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1057,7 +1169,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { if (iNdEx + 8) > l { return io.ErrUnexpectedEOF } - v = uint64(encoding_binary.LittleEndian.Uint64(dAtA[iNdEx:])) + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) iNdEx += 8 v2 := float64(math.Float64frombits(v)) m.DoubleValue = &v2 @@ -1068,7 +1180,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1081,7 +1193,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + byteLen if postIndex > l { @@ -1099,7 +1211,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1113,7 +1225,7 @@ func (m *Column) Unmarshal(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1124,12 +1236,12 @@ func (m *Column) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1152,7 +1264,7 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1180,7 +1292,7 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1194,7 +1306,7 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1209,7 +1321,7 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1223,7 +1335,7 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { } intStringLen := 
int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1238,7 +1350,7 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { var v int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1253,12 +1365,12 @@ func (m *ColumnInfo) Unmarshal(dAtA []byte) error { m.IsPrimaryKey = bool(v != 0) default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1281,7 +1393,7 @@ func (m *Row) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1309,7 +1421,7 @@ func (m *Row) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1322,7 +1434,7 @@ func (m *Row) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -1335,12 +1447,12 @@ func (m *Row) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1363,7 +1475,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1391,7 +1503,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1405,7 +1517,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1421,7 +1533,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1435,7 +1547,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1451,7 +1563,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1464,7 +1576,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -1482,7 +1594,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { var msglen int for 
shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1495,7 +1607,7 @@ func (m *Table) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -1506,14 +1618,155 @@ func (m *Table) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UniqueKeys", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTidb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthTidb + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.UniqueKeys = append(m.UniqueKeys, &Key{}) + if err := m.UniqueKeys[len(m.UniqueKeys)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipTidb(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTidb + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Key) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTidb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Key: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Key: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTidb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTidb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + s := string(dAtA[iNdEx:postIndex]) + m.Name = &s + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ColumnNames", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTidb + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthTidb + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ColumnNames = append(m.ColumnNames, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := 
skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1537,7 +1790,7 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1565,7 +1818,7 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { var v MutationType for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1586,7 +1839,7 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1599,7 +1852,7 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -1620,7 +1873,7 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1633,7 +1886,7 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -1648,12 +1901,12 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1663,10 +1916,10 @@ func (m *TableMutation) Unmarshal(dAtA []byte) error { } } if hasFields[0]&uint64(0x00000001) == 0 { - return new(github_com_golang_protobuf_proto.RequiredNotSetError) + return new(proto.RequiredNotSetError) } if hasFields[0]&uint64(0x00000002) == 0 { - return new(github_com_golang_protobuf_proto.RequiredNotSetError) + return new(proto.RequiredNotSetError) } if iNdEx > l { @@ -1682,7 +1935,7 @@ func (m *DMLData) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1710,7 +1963,7 @@ func (m *DMLData) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1723,7 +1976,7 @@ func (m *DMLData) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -1736,12 +1989,12 @@ func (m *DMLData) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1764,7 +2017,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 
64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1792,7 +2045,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1806,7 +2059,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1822,7 +2075,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { var stringLen uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1836,7 +2089,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { } intStringLen := int(stringLen) if intStringLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + intStringLen if postIndex > l { @@ -1852,7 +2105,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { var byteLen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1865,7 +2118,7 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { } } if byteLen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + byteLen if postIndex > l { @@ -1878,12 +2131,12 @@ func (m *DDLData) Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -1906,7 +2159,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1934,7 +2187,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { m.Type = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1953,7 +2206,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { m.CommitTs = 0 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1972,7 +2225,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -1985,7 +2238,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -2005,7 +2258,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { var msglen int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return ErrIntOverflowBinlog + return ErrIntOverflowTidb } if iNdEx >= l { return io.ErrUnexpectedEOF @@ -2018,7 +2271,7 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { } } if msglen < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } postIndex := iNdEx + msglen if postIndex > l { @@ -2033,12 +2286,12 @@ func (m *Binlog) 
Unmarshal(dAtA []byte) error { iNdEx = postIndex default: iNdEx = preIndex - skippy, err := skipBinlog(dAtA[iNdEx:]) + skippy, err := skipTidb(dAtA[iNdEx:]) if err != nil { return err } if skippy < 0 { - return ErrInvalidLengthBinlog + return ErrInvalidLengthTidb } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF @@ -2053,14 +2306,14 @@ func (m *Binlog) Unmarshal(dAtA []byte) error { } return nil } -func skipBinlog(dAtA []byte) (n int, err error) { +func skipTidb(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowBinlog + return 0, ErrIntOverflowTidb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -2077,7 +2330,7 @@ func skipBinlog(dAtA []byte) (n int, err error) { case 0: for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowBinlog + return 0, ErrIntOverflowTidb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -2095,7 +2348,7 @@ func skipBinlog(dAtA []byte) (n int, err error) { var length int for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowBinlog + return 0, ErrIntOverflowTidb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -2109,7 +2362,7 @@ func skipBinlog(dAtA []byte) (n int, err error) { } iNdEx += length if length < 0 { - return 0, ErrInvalidLengthBinlog + return 0, ErrInvalidLengthTidb } return iNdEx, nil case 3: @@ -2118,7 +2371,7 @@ func skipBinlog(dAtA []byte) (n int, err error) { var start int = iNdEx for shift := uint(0); ; shift += 7 { if shift >= 64 { - return 0, ErrIntOverflowBinlog + return 0, ErrIntOverflowTidb } if iNdEx >= l { return 0, io.ErrUnexpectedEOF @@ -2134,7 +2387,7 @@ func skipBinlog(dAtA []byte) (n int, err error) { if innerWireType == 4 { break } - next, err := skipBinlog(dAtA[start:]) + next, err := skipTidb(dAtA[start:]) if err != nil { return 0, err } @@ -2154,53 +2407,56 @@ func skipBinlog(dAtA []byte) (n int, err error) { } var ( - ErrInvalidLengthBinlog = fmt.Errorf("proto: negative length found during unmarshaling") - ErrIntOverflowBinlog = fmt.Errorf("proto: integer overflow") + ErrInvalidLengthTidb = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTidb = fmt.Errorf("proto: integer overflow") ) -func init() { proto.RegisterFile("binlog.proto", fileDescriptorBinlog) } - -var fileDescriptorBinlog = []byte{ - // 654 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x93, 0xc1, 0x6e, 0xdb, 0x38, - 0x10, 0x86, 0x4d, 0xdb, 0xb1, 0xad, 0x91, 0x37, 0xf0, 0x72, 0xb3, 0x80, 0x90, 0x60, 0x1d, 0x45, - 0xb9, 0x08, 0x59, 0xc0, 0x0d, 0x8c, 0x36, 0x40, 0x7a, 0x4c, 0x75, 0x09, 0x9a, 0x04, 0x2d, 0x91, - 0xf6, 0x2a, 0xd0, 0x16, 0xed, 0x08, 0xa5, 0x44, 0xc7, 0xa4, 0x62, 0xe8, 0x45, 0x8a, 0xbe, 0x4c, - 0x4f, 0xbd, 0xe4, 0x58, 0xa0, 0xf7, 0xa2, 0x48, 0x5f, 0xa4, 0x20, 0x29, 0xc7, 0x71, 0xe1, 0x5b, - 0x6f, 0xf4, 0x3f, 0xdf, 0x90, 0xf3, 0xff, 0x23, 0x43, 0x77, 0x94, 0xe6, 0x5c, 0x4c, 0x07, 0xb3, - 0xb9, 0x50, 0x02, 0x77, 0x25, 0xa7, 0x77, 0x6c, 0x60, 0xb5, 0xdd, 0x9d, 0xa9, 0x98, 0x0a, 0x53, - 0x78, 0xa6, 0x4f, 0x96, 0x09, 0xbe, 0x21, 0x68, 0xbd, 0x12, 0xbc, 0xc8, 0x72, 0xdc, 0x87, 0x76, - 0x2a, 0xe3, 0xbc, 0xe0, 0xdc, 0x43, 0x3e, 0x0a, 0x3b, 0x2f, 0xb7, 0x26, 0x94, 0x4b, 0x46, 0x5a, - 0xa9, 0xbc, 0x2a, 0x38, 0xc7, 0xfb, 0xe0, 0xa6, 0xb9, 0x3a, 0x79, 0x1e, 0xdf, 0x51, 0x5e, 0x30, - 0xaf, 0xee, 0xa3, 0xb0, 0x41, 0xc0, 0x48, 0xef, 0xb5, 0x82, 0x0f, 0xa0, 0x5b, 0x3c, 0x25, 0x1a, - 0x3e, 0x0a, 0x9b, 0xc4, 
0x2d, 0xd6, 0x91, 0x44, 0x14, 0x23, 0xce, 0x2a, 0xa4, 0xe9, 0xa3, 0x10, - 0x11, 0xd7, 0x6a, 0x16, 0xd9, 0x07, 0x77, 0x54, 0x2a, 0x26, 0x2b, 0x62, 0xcb, 0x47, 0x61, 0x97, - 0x80, 0x91, 0x1e, 0xef, 0x90, 0x6a, 0x9e, 0xe6, 0xd3, 0x8a, 0x68, 0xf9, 0x28, 0x74, 0x88, 0x6b, - 0x35, 0x83, 0x04, 0x0b, 0x00, 0x6b, 0xea, 0x3c, 0x9f, 0x08, 0xec, 0x41, 0x33, 0xa7, 0x19, 0x33, - 0xae, 0x9c, 0xb3, 0xe6, 0xfd, 0xf7, 0xfd, 0x1a, 0x31, 0x0a, 0x3e, 0x04, 0xc8, 0x4a, 0x79, 0xcb, - 0x63, 0x55, 0xce, 0xac, 0xa3, 0x65, 0xdd, 0x31, 0xfa, 0x75, 0x39, 0x63, 0xf8, 0x08, 0xb6, 0x53, - 0x19, 0xcf, 0xe6, 0x69, 0x46, 0xe7, 0x65, 0xfc, 0x81, 0x95, 0xc6, 0x58, 0xa7, 0x02, 0xbb, 0xa9, - 0x7c, 0x63, 0x4b, 0xaf, 0x59, 0x19, 0xbc, 0x80, 0x06, 0x11, 0x0b, 0x3c, 0x80, 0xf6, 0xd8, 0xbc, - 0x2f, 0x3d, 0xe4, 0x37, 0x42, 0x77, 0xb8, 0x33, 0x78, 0xba, 0x8b, 0x81, 0x1d, 0x8e, 0x2c, 0xa1, - 0xe0, 0x33, 0x82, 0xad, 0x6b, 0x3a, 0xe2, 0xc6, 0xbd, 0x1c, 0xdf, 0xb0, 0x8c, 0xc6, 0xab, 0x91, - 0x09, 0x58, 0xe9, 0x4a, 0x8f, 0xfc, 0x1f, 0x80, 0xd2, 0xa4, 0xad, 0x9b, 0x91, 0x89, 0x63, 0x14, - 0x53, 0x3e, 0x05, 0xd7, 0x5e, 0x1a, 0xa7, 0xf9, 0x44, 0x78, 0x0d, 0xf3, 0xba, 0xb7, 0xe9, 0x75, - 0x1d, 0x0d, 0x81, 0xf1, 0x2a, 0xa6, 0x53, 0x70, 0xb2, 0x42, 0x51, 0x95, 0x8a, 0x5c, 0x7a, 0x4d, - 0xd3, 0xb8, 0xb7, 0xde, 0x68, 0x46, 0xbc, 0xac, 0x18, 0xb2, 0xa2, 0x83, 0x8f, 0x08, 0xfe, 0x5a, - 0x2b, 0xe2, 0x01, 0x34, 0x4d, 0xa6, 0xc8, 0xaf, 0x87, 0xdb, 0xc3, 0xdd, 0xf5, 0x7b, 0x96, 0x94, - 0x8e, 0x97, 0x18, 0x0e, 0x1f, 0x42, 0x63, 0x2e, 0x16, 0x5e, 0xdd, 0xaf, 0x87, 0xee, 0xf0, 0xef, - 0x75, 0x9c, 0x88, 0x05, 0xd1, 0x55, 0x7c, 0x0c, 0x30, 0xbe, 0xa1, 0xf9, 0x94, 0xc5, 0x9a, 0xd5, - 0x5b, 0xd8, 0xc8, 0x3a, 0x16, 0x22, 0x62, 0x11, 0x9c, 0x40, 0x3b, 0xba, 0xbc, 0x88, 0xa8, 0xa2, - 0xf8, 0x7f, 0x68, 0x99, 0x98, 0x96, 0x2b, 0xf9, 0x67, 0x83, 0x37, 0x52, 0x21, 0xc1, 0x04, 0xda, - 0x51, 0x64, 0xfb, 0xfe, 0x74, 0x23, 0x7b, 0xe0, 0x24, 0x09, 0x8f, 0x6f, 0x0b, 0x36, 0xb7, 0x5f, - 0x4e, 0x97, 0x74, 0x92, 0x84, 0xbf, 0xd5, 0xbf, 0x83, 0x2f, 0x08, 0x5a, 0x67, 0x66, 0x00, 0x3c, - 0x7c, 0x4c, 0x0c, 0x85, 0xdb, 0xbf, 0xaf, 0xcc, 0x32, 0x3a, 0xaf, 0xe5, 0xf7, 0x6b, 0x52, 0x3b, - 0x00, 0x67, 0x2c, 0xb2, 0x2c, 0x55, 0xb1, 0x92, 0xf6, 0x0f, 0x59, 0x95, 0x3b, 0x56, 0xbe, 0x96, - 0xf8, 0x18, 0x3a, 0x49, 0xc6, 0xe3, 0x84, 0x2a, 0x5a, 0x25, 0xf6, 0xef, 0xfa, 0xd5, 0x55, 0x3e, - 0xa4, 0x9d, 0x64, 0xdc, 0x18, 0xd6, 0x1d, 0x49, 0xd5, 0xd1, 0xdc, 0xd8, 0x11, 0x2d, 0x3b, 0x12, - 0xd3, 0x71, 0x34, 0x84, 0xee, 0xd3, 0x95, 0x62, 0x80, 0xd6, 0x79, 0x2e, 0xd9, 0x5c, 0xf5, 0x6a, - 0xfa, 0xfc, 0x6e, 0x96, 0x50, 0xc5, 0x7a, 0x48, 0x9f, 0x23, 0xc6, 0x99, 0x62, 0xbd, 0xfa, 0x51, - 0x1f, 0x60, 0x65, 0x0a, 0xb7, 0xa1, 0x11, 0x5d, 0x5e, 0xf4, 0x6a, 0xe6, 0x10, 0x5d, 0xf4, 0xd0, - 0x59, 0xef, 0xfe, 0xa1, 0x8f, 0xbe, 0x3e, 0xf4, 0xd1, 0x8f, 0x87, 0x3e, 0xfa, 0xf4, 0xb3, 0x5f, - 0xfb, 0x15, 0x00, 0x00, 0xff, 0xff, 0xdd, 0xc4, 0xea, 0xa6, 0xdd, 0x04, 0x00, 0x00, +func init() { proto.RegisterFile("tidb.proto", fileDescriptorTidb) } + +var fileDescriptorTidb = []byte{ + // 702 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x53, 0xcd, 0x6a, 0xdb, 0x4c, + 0x14, 0xf5, 0x58, 0xf2, 0x8f, 0xae, 0x9c, 0x60, 0xe6, 0xdb, 0x88, 0x2f, 0xd4, 0x51, 0x14, 0x08, + 0xc2, 0x10, 0x87, 0x84, 0xd2, 0x45, 0xe9, 0x2a, 0xd5, 0x26, 0x38, 0x09, 0xed, 0x90, 0x76, 0x2b, + 0x64, 0x6b, 0xec, 0x88, 0x8e, 0x34, 0x8e, 0x67, 0x54, 0xa3, 0x4d, 0x9f, 0xa3, 0xef, 0xd0, 0x17, + 0xc9, 0xb2, 0xd0, 0x55, 0x37, 0xa5, 0xa4, 0x2f, 0x52, 0x66, 0x46, 0x8e, 0xed, 0x75, 0x77, 0x77, + 0xce, 
0x39, 0x77, 0xe6, 0xde, 0x73, 0x24, 0x00, 0x99, 0xa5, 0x93, 0xd1, 0x62, 0xc9, 0x25, 0xc7, + 0xb6, 0xaa, 0xff, 0x3f, 0x9d, 0x67, 0xf2, 0xbe, 0x9c, 0x8c, 0xa6, 0x3c, 0x3f, 0x9b, 0xf3, 0x39, + 0x3f, 0xd3, 0xe4, 0xa4, 0x9c, 0xe9, 0x93, 0x3e, 0xe8, 0xca, 0x34, 0x05, 0x3f, 0x10, 0xb4, 0xdf, + 0x72, 0x56, 0xe6, 0x05, 0x1e, 0x40, 0x27, 0x13, 0x71, 0x51, 0x32, 0xe6, 0x21, 0x1f, 0x85, 0xdd, + 0xd7, 0xad, 0x59, 0xc2, 0x04, 0x25, 0xed, 0x4c, 0xdc, 0x96, 0x8c, 0xe1, 0x43, 0x70, 0xb3, 0x42, + 0xbe, 0x7a, 0x19, 0x7f, 0x4e, 0x58, 0x49, 0xbd, 0xa6, 0x8f, 0x42, 0x8b, 0x80, 0x86, 0x3e, 0x2a, + 0x04, 0x1f, 0x41, 0xaf, 0xdc, 0x56, 0x58, 0x3e, 0x0a, 0x6d, 0xe2, 0x96, 0xbb, 0x92, 0x94, 0x97, + 0x13, 0x46, 0x6b, 0x89, 0xed, 0xa3, 0x10, 0x11, 0xd7, 0x60, 0x46, 0x72, 0x08, 0xee, 0xa4, 0x92, + 0x54, 0xd4, 0x8a, 0x96, 0x8f, 0xc2, 0x1e, 0x01, 0x0d, 0x3d, 0xdf, 0x21, 0xe4, 0x32, 0x2b, 0xe6, + 0xb5, 0xa2, 0xed, 0xa3, 0xd0, 0x21, 0xae, 0xc1, 0xb4, 0x24, 0x58, 0x01, 0x98, 0xa5, 0xae, 0x8a, + 0x19, 0xc7, 0x1e, 0xd8, 0x45, 0x92, 0x53, 0xbd, 0x95, 0x73, 0x69, 0x3f, 0xfe, 0x3a, 0x6c, 0x10, + 0x8d, 0xe0, 0x63, 0x80, 0xbc, 0x12, 0x0f, 0x2c, 0x96, 0xd5, 0xc2, 0x6c, 0xb4, 0xe6, 0x1d, 0x8d, + 0xdf, 0x55, 0x0b, 0x8a, 0x87, 0xb0, 0x9f, 0x89, 0x78, 0xb1, 0xcc, 0xf2, 0x64, 0x59, 0xc5, 0x9f, + 0x68, 0xa5, 0x17, 0xeb, 0xd6, 0xc2, 0x5e, 0x26, 0xde, 0x19, 0x6a, 0x4c, 0xab, 0xe0, 0x14, 0x2c, + 0xc2, 0x57, 0xf8, 0x04, 0x3a, 0x53, 0xfd, 0xbe, 0xf0, 0x90, 0x6f, 0x85, 0xee, 0x45, 0x6f, 0xa4, + 0x83, 0x32, 0x43, 0x91, 0x35, 0x19, 0xfc, 0x44, 0xd0, 0xba, 0x4b, 0x26, 0x4c, 0x6f, 0x2d, 0xa6, + 0xf7, 0x34, 0x4f, 0xe2, 0xcd, 0xa8, 0x04, 0x0c, 0x74, 0xab, 0x46, 0x7d, 0x01, 0x20, 0x95, 0xd2, + 0xf0, 0x7a, 0x54, 0xe2, 0x68, 0x44, 0xd3, 0xe7, 0xe0, 0x9a, 0x4b, 0xe3, 0xac, 0x98, 0x71, 0xcf, + 0xd2, 0xaf, 0xf6, 0xb7, 0x5f, 0x55, 0x56, 0x10, 0x98, 0x6e, 0x6c, 0x39, 0x07, 0x27, 0x2f, 0x65, + 0x22, 0x33, 0x5e, 0x08, 0xcf, 0xd6, 0x0d, 0xff, 0x99, 0x06, 0x3d, 0xd2, 0x4d, 0xcd, 0x91, 0x8d, + 0x0a, 0x0f, 0xc1, 0x2d, 0x8b, 0xec, 0xa1, 0xa4, 0xca, 0x06, 0xe1, 0xb5, 0x74, 0x93, 0x63, 0x9a, + 0xc6, 0xb4, 0x22, 0x60, 0xd8, 0x31, 0xad, 0x44, 0xf0, 0x06, 0xac, 0x31, 0xad, 0x30, 0xde, 0x36, + 0xbf, 0xb6, 0xfd, 0x08, 0x7a, 0xf5, 0xb0, 0xea, 0x28, 0xbc, 0xa6, 0x6f, 0xa9, 0x04, 0x0d, 0xa6, + 0xd6, 0x11, 0xc1, 0x17, 0xd8, 0xdb, 0x99, 0x02, 0x9f, 0x80, 0xad, 0x43, 0x42, 0x7e, 0x33, 0xdc, + 0xbf, 0xc0, 0xe6, 0xcd, 0x35, 0xab, 0x72, 0x22, 0x9a, 0xc7, 0x07, 0x60, 0x2d, 0xf9, 0xca, 0x6b, + 0xfa, 0xcd, 0xcd, 0x68, 0x84, 0xaf, 0x88, 0x42, 0x71, 0x08, 0x30, 0xbd, 0x4f, 0x8a, 0x39, 0x8d, + 0x95, 0x46, 0xc5, 0xb8, 0xa3, 0x71, 0x0c, 0x49, 0xf8, 0x2a, 0x18, 0x41, 0x27, 0xba, 0xb9, 0x8e, + 0x12, 0x99, 0xe0, 0x63, 0x68, 0x6b, 0x9f, 0xd7, 0x59, 0xba, 0x5b, 0x26, 0x91, 0x9a, 0x0a, 0x66, + 0xd0, 0x89, 0x22, 0xa3, 0xff, 0xd7, 0x28, 0x0f, 0xc0, 0x49, 0x53, 0x16, 0x3f, 0x94, 0x74, 0x69, + 0x3e, 0xb5, 0x1e, 0xe9, 0xa6, 0x29, 0x7b, 0xaf, 0xce, 0xc1, 0x37, 0x04, 0xed, 0xcb, 0xac, 0x60, + 0x7c, 0x8e, 0x87, 0xcf, 0x8e, 0xa0, 0x70, 0x7f, 0x9d, 0xb5, 0xe1, 0x94, 0x1f, 0xeb, 0x0f, 0x5d, + 0xbb, 0x72, 0x04, 0xce, 0x94, 0xe7, 0x79, 0x26, 0x63, 0x29, 0xcc, 0x9f, 0x5b, 0xd3, 0x5d, 0x03, + 0xdf, 0x09, 0x1c, 0x42, 0x37, 0xcd, 0x59, 0x9c, 0x26, 0x32, 0xa9, 0x9d, 0xd9, 0x33, 0x57, 0xd6, + 0x3e, 0x90, 0x4e, 0x9a, 0x33, 0xbd, 0xa0, 0x52, 0xa6, 0xb5, 0xd2, 0xde, 0x51, 0x46, 0x6b, 0x65, + 0xaa, 0x95, 0xc3, 0x0b, 0xe8, 0x6d, 0x47, 0x84, 0x01, 0xda, 0x57, 0x85, 0xa0, 0x4b, 0xd9, 0x6f, + 0xa8, 0xfa, 0xc3, 0x22, 0x4d, 0x24, 0xed, 0x23, 0x55, 0x47, 0x94, 0x51, 0x49, 0xfb, 0xcd, 0xe1, + 0x00, 0x60, 0xb3, 0x04, 0xee, 
0x80, 0x15, 0xdd, 0x5c, 0xf7, 0x1b, 0xba, 0x88, 0xae, 0xfb, 0xe8, + 0xb2, 0xff, 0xf8, 0x34, 0x40, 0xdf, 0x9f, 0x06, 0xe8, 0xf7, 0xd3, 0x00, 0x7d, 0xfd, 0x33, 0x68, + 0xfc, 0x0d, 0x00, 0x00, 0xff, 0xff, 0xf5, 0x1d, 0x3d, 0xe4, 0x05, 0x05, 0x00, 0x00, } diff --git a/pkg/registry/test_data/Makefile b/pkg/registry/test_data/Makefile index 7f3f912e..cd5ba746 100644 --- a/pkg/registry/test_data/Makefile +++ b/pkg/registry/test_data/Makefile @@ -1,5 +1,8 @@ PWD = $(shell pwd) -default: - docker run --rm -v $(PWD):/usr/src/myapp -w /usr/src/myapp -e GOOS=linux -e GOARCH=amd64 golang:1.11.4 go build -buildmode=plugin -v -o dump_filter_plugin.linux.so dump_filter_plugin.go - GOARCH=amd64 GOOS=darwin go build -buildmode=plugin -o dump_filter_plugin.darwin.so dump_filter_plugin.go \ No newline at end of file +docker: + docker run --rm -v $(PWD):/usr/src/myapp -w /usr/src/myapp -e GOOS=linux -e GOARCH=amd64 golang:1.13.3 go build -buildmode=plugin -v -o dump_filter_plugin.linux.so dump_filter_plugin.go + GOARCH=amd64 GOOS=darwin go build -buildmode=plugin -o dump_filter_plugin.darwin.so dump_filter_plugin.go + +build: + go build -buildmode=plugin -o dump_filter_plugin.linux.so dump_filter_plugin.go \ No newline at end of file diff --git a/pkg/registry/test_data/dump_filter_plugin.darwin.so b/pkg/registry/test_data/dump_filter_plugin.darwin.so deleted file mode 100644 index b4441945..00000000 Binary files a/pkg/registry/test_data/dump_filter_plugin.darwin.so and /dev/null differ diff --git a/pkg/registry/test_data/dump_filter_plugin.linux.so b/pkg/registry/test_data/dump_filter_plugin.linux.so deleted file mode 100644 index ae5e745e..00000000 Binary files a/pkg/registry/test_data/dump_filter_plugin.linux.so and /dev/null differ diff --git a/pkg/sarama_cluster/consumer.go b/pkg/sarama_cluster/consumer.go index e336c453..74149587 100644 --- a/pkg/sarama_cluster/consumer.go +++ b/pkg/sarama_cluster/consumer.go @@ -481,7 +481,7 @@ func (c *Consumer) release() (err error) { if e := c.commitOffsetsWithRetry(c.client.config.Group.Offsets.Retry.Max); e != nil { err = e } - log.Info("consumer commits the offset if necessary") + log.Debug("consumer commits the offset if necessary") return } diff --git a/protocol/tidb/tidb.proto b/protocol/tidb/tidb.proto index 32fbc4ac..c766c213 100644 --- a/protocol/tidb/tidb.proto +++ b/protocol/tidb/tidb.proto @@ -1,6 +1,6 @@ syntax = "proto2"; -package slave.binlog; -import "gogoproto/gogo.proto"; +package tidb; +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; option (gogoproto.marshaler_all) = true; option (gogoproto.sizer_all) = true; @@ -32,7 +32,7 @@ message ColumnInfo { // https://dev.mysql.com/doc/refman/8.0/en/data-types.html // for numeric type: int bigint smallint tinyint float double decimal bit // for string type: text longtext mediumtext char tinytext varchar - // blob longblog mediumblog binary tinyblob varbinary + // blob longblob mediumblob binary tinyblob varbinary // enum set // for json type: json optional string mysql_type = 2 [ (gogoproto.nullable) = false ]; @@ -53,6 +53,16 @@ message Table { optional string table_name = 2; repeated ColumnInfo column_info = 3; repeated TableMutation mutations = 4; + // will only be set with version >= 3.0.9 + repeated Key unique_keys = 5; +} + + +// Key contains Key info. +message Key { + // name will be PRIMARY if it's the primary key. + optional string name = 1; + repeated string column_names = 2; } message TableMutation {
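
Reviewer note on the tidb.proto hunk above: the new Key message and the Table.unique_keys field (only populated by TiDB >= 3.0.9, per the comment in the patch) give downstream code enough information to choose key columns per table. The sketch below is illustrative only and not part of this patch; the import path and the pickKeyColumns helper are assumptions, and only the generated identifiers visible in the hunk (Table, Key, name, column_names, unique_keys) are taken from the change itself.

package main

import (
	"fmt"

	// Assumed location of the generated bindings for protocol/tidb/tidb.proto.
	tidb "github.com/moiot/gravity/pkg/protocol/tidb"
)

// pickKeyColumns returns the primary-key column names when a key named
// "PRIMARY" is present, otherwise the columns of the first unique key,
// otherwise nil. Callers must handle nil, since unique_keys is empty for
// older TiDB versions.
func pickKeyColumns(t *tidb.Table) []string {
	var fallback []string
	for _, k := range t.GetUniqueKeys() {
		if k.GetName() == "PRIMARY" {
			return k.GetColumnNames()
		}
		if fallback == nil {
			fallback = k.GetColumnNames()
		}
	}
	return fallback
}

func main() {
	// proto2 optional fields are pointers in the generated Go structs.
	name := "PRIMARY"
	table := &tidb.Table{
		UniqueKeys: []*tidb.Key{{Name: &name, ColumnNames: []string{"id"}}},
	}
	fmt.Println(pickKeyColumns(table)) // prints: [id]
}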