Open wuwu20048 opened 5 days ago
Hi @wuwu20048 — this works for me on SeaTunnel v2.3.8 with Java 17 (Azul).
CREATE TABLE default.sink_table
(
id int, name String, out String
)
ENGINE = MergeTree ORDER BY id
# Set the basic configuration of the task to be performed
env {
parallelism = 1
job.mode = "BATCH"
}
source {
FakeSource {
row.num = 6
schema = {
fields {
id = "int"
name = "string"
out = "string"
}
}
rows = [
{fields = [1, "Jia Fan", "ffrrr"], kind = INSERT}
{fields = [2, "Hailin Wang", "ddd"], kind = INSERT}
{fields = [3, "Tomas", "vfs"], kind = INSERT}
{fields = [4, "Eric", "kfkd"], kind = INSERT}
{fields = [5, "Guangdong Liu", "pew"], kind = INSERT}
{fields = [6, "Юрий Гаврилов", "szs"], kind = INSERT}
]
}
}
#transform {
#}
sink {
Clickhouse {
host = "some-clickhouse-server:8123"
database = "default"
table = "sink_table"
username = ""
password = ""
bulk_size = 1000
# source_table_name = "gateway_logs"
clickhouse.config = {
max_rows_to_read = "100"
read_overflow_mode = "throw"
}
}
}
@wuwu20048 This is also working for me.
Maybe the problem is in the data types, the schema, or the schema registry (try using one if it exists).
env {
# You can set SeaTunnel environment configuration here
parallelism = 2
job.mode = "STREAMING"
checkpoint.interval = 2000
}
source {
# This is an example source plugin **only for testing and demonstrating the source plugin feature**
FakeSource {
parallelism = 2
result_table_name = "fake"
row.num = 16
schema = {
fields {
id = "int"
name = "string"
out = "string"
}
}
}
# If you would like to get more information about how to configure SeaTunnel and see full list of source plugins,
# please go to https://seatunnel.apache.org/docs/connector-v2/source
}
#transform {
#}
sink {
Clickhouse {
host = "some-clickhouse-server:8123"
database = "default"
table = "sink_table"
username = ""
password = ""
bulk_size = 1000
# source_table_name = "gateway_logs"
clickhouse.config = {
max_rows_to_read = "100"
read_overflow_mode = "throw"
}
}
}
The problem is that when I use a newer ClickHouse locally (version 24.1.1.2048), the above configuration can be imported normally. The ClickHouse version on the server is older — 20.3.19.4 — but the table structure definition is the same as my local one. create table default.logs_local ( beanName String, method String, serverId Nullable(String), serverIp Nullable(String), domain String, logDate DateTime, ceateDate DateTime, ipAddress Nullable(String), urt Nullable(Int32), referer Nullable(String), uri Nullable(String), region Nullable(String), clientId Nullable(Int32), entrance Nullable(String), source Nullable(String), contentMd5 Nullable(String), timeCost Int32, exceptionCode Nullable(String), contentLength Nullable(Int32), resultLength Nullable(Int32), host Nullable(String) ) engine = MergeTree PARTITION BY toYYYYMMDD(logDate) ORDER BY (logDate, beanName, method)
Search before asking
What happened
Importing from Kafka into ClickHouse reported an error. Is the ClickHouse version too old? The version is 20.3.19.4.
SeaTunnel Version
2.3.7
SeaTunnel Config
Running Command
Error Exception
Zeta or Flink or Spark Version
No response
Java or Scala Version
jdk 1.8
Screenshots
Are you willing to submit PR?
Code of Conduct