confluentinc / kafka-connect-jdbc

Kafka Connect connector for JDBC-compatible databases

ERROR [jdbc-sqlserver-sink|task-0] WorkerSinkTask{id=jdbc-sqlserver-sink-0} Task is being killed and will not recover until manually restarted (org.apache.kafka.connect.runtime.WorkerTask:188) #1406

Open: icaroperetti opened this issue 3 months ago

icaroperetti commented 3 months ago

I'm trying to insert data from MySQL into SQL Server, but my connector ends up in the 'degraded' status. I also get this message: "com.microsoft.sqlserver.jdbc.SQLServerException: Database 'mysql-server' does not exist".

I tried multiple configurations, but none of them solved it.

mysql.properties

name=mysql-connector
connector.class=io.debezium.connector.mysql.MySqlConnector
tasks.max=1
database.user=root
database.server.name=mysql-server
database.hostname=mysql
database.password=root
database.history.kafka.bootstrap.servers=kafka:9092
database.history.kafka.topic=history
database.port=3306
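
A note on form: key=value files like this one are consumed by connect-standalone, while the cp-kafka-connect-base image in the compose file below starts a distributed worker, so the same settings would normally be submitted as JSON over the Connect REST API. A minimal sketch, assuming the worker is reachable on localhost:8083 as mapped below:

curl -X PUT http://localhost:8083/connectors/mysql-connector/config \
  -H "Content-Type: application/json" \
  -d '{
        "connector.class": "io.debezium.connector.mysql.MySqlConnector",
        "tasks.max": "1",
        "database.hostname": "mysql",
        "database.port": "3306",
        "database.user": "root",
        "database.password": "root",
        "database.server.name": "mysql-server",
        "database.history.kafka.bootstrap.servers": "kafka:9092",
        "database.history.kafka.topic": "history"
      }'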

jdbc-sink.properties

name=jdbc-sqlserver-sink
connector.class=io.confluent.connect.jdbc.JdbcSinkConnector
tasks.max=1
topics=mysql-server.products.products
connection.url=jdbc:sqlserver://host.docker.internal:1433;databaseName=products
connection.user=sa
connection.password=password
insert.mode=upsert
auto.create=true
auto.evolve=false
transforms=unwrap
transforms.unwrap.type=io.debezium.transforms.ExtractNewRecordState
pk.fields=id
pk.mode=record_value
value.converter.schemas.enable=false
key.converter.schemas.enable=false
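
The SQLServerException above lines up with the JDBC sink's default table.name.format=${topic}: with topics=mysql-server.products.products, SQL Server parses the dotted topic name as database.schema.table and goes looking for a database called mysql-server. A sketch of two possible fixes, not a confirmed one (the target table name products is an assumption based on the topic and the databaseName in connection.url):

# Option 1: write all records from this topic into a fixed table
table.name.format=products

# Option 2: strip the Debezium prefix from the topic name with the RegexRouter SMT
transforms=unwrap,route
transforms.route.type=org.apache.kafka.connect.transforms.RegexRouter
transforms.route.regex=mysql-server.products.(.*)
transforms.route.replacement=$1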

docker-compose.yml

version: "3"

services:
  mysql:
    image: wesleywillians/mysql-kafka-connect:latest
    command: --innodb-use-native-aio=0
    tty: true
    restart: always
    ports:
      - "33600:3306"
    environment:
      - MYSQL_DATABASE=products
      - MYSQL_ROOT_PASSWORD=root
      - MYSQL_USER=root
    extra_hosts:
      - "host.docker.internal:172.17.0.1"
    volumes:
      - ./volumes/mysql/data:/data

  sqlserver:
    image: mcr.microsoft.com/mssql/server:latest
    container_name: sqlserver_1
    user: root
    environment:
      SA_PASSWORD: "Icarolk3345@"
      ACCEPT_EULA: "Y"
    ports:
      - "1433:1433"
    extra_hosts:
      - "host.docker.internal:172.17.0.1"
    volumes:
      - ./volumes/sqlserver/data:/data

  zookeeper:
    image: confluentinc/cp-zookeeper:latest
    environment:
      ZOOKEEPER_CLIENT_PORT: 2181

  kafka:
    image: confluentinc/cp-kafka:latest
    depends_on:
      - zookeeper
    ports:
      - "9092:9092"
      - "9094:9094"
    environment:
      KAFKA_BROKER_ID: 1
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: 1
      KAFKA_ZOOKEEPER_CONNECT: zookeeper:2181
      KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
      KAFKA_LISTENERS: INTERNAL://:9092,OUTSIDE://:9094
      KAFKA_ADVERTISED_LISTENERS: INTERNAL://kafka:9092,OUTSIDE://host.docker.internal:9094
      KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: INTERNAL:PLAINTEXT,OUTSIDE:PLAINTEXT
    extra_hosts:
      - "host.docker.internal:172.17.0.1"

  control-center:
    image: confluentinc/cp-enterprise-control-center:6.0.1
    hostname: control-center
    depends_on:
      - kafka
    ports:
      - "9021:9021"
    environment:
      CONTROL_CENTER_BOOTSTRAP_SERVERS: "kafka:9092"
      CONTROL_CENTER_REPLICATION_FACTOR: 1
      CONTROL_CENTER_CONNECT_CLUSTER: http://kafka-connect:8083
      PORT: 9021
    extra_hosts:
      - "host.docker.internal:172.17.0.1"

  kafka-connect:
    image: confluentinc/cp-kafka-connect-base:6.0.0
    container_name: kafka-connect
    depends_on:
      - zookeeper
      - kafka
    ports:
      - 8083:8083
    environment:
      CONNECT_BOOTSTRAP_SERVERS: "kafka:9092"
      CONNECT_REST_PORT: 8083
      CONNECT_GROUP_ID: kafka-connect
      CONNECT_CONFIG_STORAGE_TOPIC: _connect-configs
      CONNECT_OFFSET_STORAGE_TOPIC: _connect-offsets
      CONNECT_STATUS_STORAGE_TOPIC: _connect-status
      CONNECT_KEY_CONVERTER: org.apache.kafka.connect.storage.StringConverter
      CONNECT_VALUE_CONVERTER: org.apache.kafka.connect.json.JsonConverter
      CONNECT_INTERNAL_KEY_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_INTERNAL_VALUE_CONVERTER: "org.apache.kafka.connect.json.JsonConverter"
      CONNECT_REST_ADVERTISED_HOST_NAME: "kafka-connect"
      CONNECT_LOG4J_ROOT_LOGLEVEL: "INFO"
      CONNECT_LOG4J_LOGGERS: "org.apache.kafka.connect.runtime.rest=WARN,org.reflections=ERROR"
      CONNECT_LOG4J_APPENDER_STDOUT_LAYOUT_CONVERSIONPATTERN: "[%d] %p %X{connector.context}%m (%c:%L)%n"
      CONNECT_CONFIG_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_OFFSET_STORAGE_REPLICATION_FACTOR: "1"
      CONNECT_STATUS_STORAGE_REPLICATION_FACTOR: "1"
      # Optional settings to include to support Confluent Control Center
      # CONNECT_PRODUCER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor"
      # CONNECT_CONSUMER_INTERCEPTOR_CLASSES: "io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor"
      CONNECT_PLUGIN_PATH: /usr/share/java,/usr/share/confluent-hub-components,/data/connect-jars
    # If you want to use the Confluent Hub installer to download components but have them
    # available when running this stack offline, spin up the stack once and then run:
    #   docker cp kafka-connect:/usr/share/confluent-hub-components ./data/connect-jars
    volumes:
      - $PWD/data:/data
    # In the command section, $ is escaped as $$ to avoid the error 'Invalid interpolation format for "command" option'
    command:
      - bash
      - -c
      - |
        echo "Installing Connector"
        confluent-hub install --no-prompt debezium/debezium-connector-mysql:1.2.2
        confluent-hub install --no-prompt confluentinc/kafka-connect-elasticsearch:10.0.1
        confluent-hub install --no-prompt confluentinc/kafka-connect-jdbc:10.7.6
        #
        echo "Launching Kafka Connect worker"
        /etc/confluent/docker/run &
        #
        sleep infinity
    extra_hosts:
      - "host.docker.internal:172.17.0.1"

  # es01:
  #   image: docker.elastic.co/elasticsearch/elasticsearch:7.11.2
  #   container_name: es01
  #   environment:
  #     - node.name=es01
  #     - cluster.name=es-docker-cluster
  #     - cluster.initial_master_nodes=es01
  #     - bootstrap.memory_lock=true
  #     - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
  #   ulimits:
  #     memlock:
  #       soft: -1
  #       hard: -1
  #   volumes:
  #     - ./es01:/usr/share/elasticsearch/data
  #   ports:
  #     - 9200:9200
  #   extra_hosts:
  #     - "host.docker.internal:172.17.0.1"

  # kib01:
  #   image: docker.elastic.co/kibana/kibana:7.11.2
  #   container_name: kib01
  #   ports:
  #     - 5601:5601
  #   environment:
  #     ELASTICSEARCH_URL: http://es01:9200
  #     ELASTICSEARCH_HOSTS: '["http://es01:9200"]'
  #   extra_hosts:
  #     - "host.docker.internal:172.17.0.1"