tronprotocol / java-tron

Java implementation of the Tron whitepaper
GNU Lesser General Public License v3.0

Changing the active, passive and seed.node IPs in all SR node configurations in my private chain? #5937

Closed 0xarmaansidhu closed 3 months ago

0xarmaansidhu commented 3 months ago

Question: I have a stable private chain network running with 27 SR nodes and 7 fullnodes. All 7 fullnodes have their IPs (with port) in each SR's config (in active, passive and seed.node). Now I have to change the IPs of all the fullnodes. How do I go about it?

If I change it on all the SRs at once, reconnecting to the new IPs could cause network instability.

SR CONFIG


net {
   type = mainnet
  # type = testnet
}

storage {
  # Directory for storing persistent data

  db.version = 2,
  db.engine = "ROCKSDB",
  db.directory = "database",
  index.directory = "index",

  # You can customize these 14 databases' configs:

  # account, account-index, asset-issue, block, block-index,
  # block_KDB, peers, properties, recent-block, trans,
  # utxo, votes, witness, witness_schedule.

  # Otherwise, db configs will remain default and data will be stored in
  # the path of "output-directory" or the path set by "-d" ("--output-directory").

  # Attention: name is a required field that must be set !!!
  properties = [
    //    {
    //      name = "account",
    //      path = "storage_directory_test",
    //      createIfMissing = true,
    //      paranoidChecks = true,
    //      verifyChecksums = true,
    //      compressionType = 1,        // compressed with snappy
    //      blockSize = 4096,           // 4  KB =         4 * 1024 B
    //      writeBufferSize = 10485760, // 10 MB = 10 * 1024 * 1024 B
    //      cacheSize = 10485760,       // 10 MB = 10 * 1024 * 1024 B
    //      maxOpenFiles = 100
    //    },
    //    {
    //      name = "account-index",
    //      path = "storage_directory_test",
    //      createIfMissing = true,
    //      paranoidChecks = true,
    //      verifyChecksums = true,
    //      compressionType = 1,        // compressed with snappy
    //      blockSize = 4096,           // 4  KB =         4 * 1024 B
    //      writeBufferSize = 10485760, // 10 MB = 10 * 1024 * 1024 B
    //      cacheSize = 10485760,       // 10 MB = 10 * 1024 * 1024 B
    //      maxOpenFiles = 100
    //    },
  ]

  dbSettings = {
    levelNumber = 7
    blocksize = 64  // n * KB
    maxBytesForLevelBase = 256  // n * MB
    maxBytesForLevelMultiplier = 10
    level0FileNumCompactionTrigger = 4
    targetFileSizeBase = 256  // n * MB
    targetFileSizeMultiplier = 1
  }

}

# This part of the config is used for node discovery.
node.discovery = {
  enable = true  # set to true if you want your node to be discoverable by other nodes
  persist = true  # whether to persist discovered peers in the database
  bind.ip = "0.0.0.0"
  external.ip = "172.232.119.187" # PLACE YOUR MACHINE'S PUBLIC/EXTERNAL IP HERE
}

# This part of the config sets up the backup node for the witness service.
node.backup {
  port = 10001
  priority = 8
  members = [
  ]
}
# Enabling the JSON-RPC service of a node
node.jsonrpc {
  httpFullNodeEnable = true
  httpFullNodePort = 50545
  httpSolidityEnable = true
  httpSolidityPort = 50555
}

node.unsolidifiedBlockCheck = true
node.maxUnsolidifiedBlocks = 1000

node {
  # trust node for solidity node
  # trustNode = "ip:port"
  trustNode = "127.0.0.1:50051"

  # expose extension api to public or not
  walletExtensionApi = true

  listen.port = 16666

  connection.timeout = 2

  tcpNettyWorkThreadNum = 0

  udpNettyWorkThreadNum = 1

  # Number of validate sign thread, default availableProcessors / 2
  # validateSignThreadNum = 16

  maxActiveNodes = 300

  maxActiveNodesWithSameIp = 20

  minParticipationRate = 0

  # check the peer data transfer, disconnect factor
  disconnectNumberFactor = 0.4
  maxConnectNumberFactor = 0.8
  receiveTcpMinDataLength = 2048
  isOpenFullTcpDisconnect = true

  p2p {
    version = 1
  }

  active = [
    # Active establish connection in any case
    # Sample entries:
    # "ip:port",
    # "ip:port"
    <ALL IPS OF FULLNODE>
  ]

  passive = [
    <ALL IPS OF FULLNODE>
    # Passive accept connection in any case
    # Sample entries:
    # "ip:port",
    # "ip:port"
  ]

  http {
    fullNodePort = 16667
    solidityPort = 16668
  }

  rpc {
    port = 16669

    # Number of gRPC thread, default availableProcessors / 2
    # thread = 16

    # The maximum number of concurrent calls permitted for each incoming connection
    # maxConcurrentCallsPerConnection =

    # The HTTP/2 flow control window, default 1MB
    # flowControlWindow =

    # Connection being idle for longer than which will be gracefully terminated
    maxConnectionIdleInMillis = 60000

    # Connection lasting longer than which will be gracefully terminated
    # maxConnectionAgeInMillis =

    # The maximum message size allowed to be received on the server, default 4MB
    # maxMessageSize =

    # The maximum size of header list allowed to be received, default 8192
    # maxHeaderListSize =

    # Transactions can only be broadcast if the number of effective connections is reached.
    minEffectiveConnection = 0
  }

  # Whether to enable the node detection function, default false
  # nodeDetectEnable = false

  # use your ipv6 address for node discovery and tcp connection, default false
  # enableIpv6 = false

  # if your node's highest block num is lower than all your peers', try to acquire a new connection. default false
  # effectiveCheckEnable = false

  # Dynamic loading configuration function, disabled by default
  # dynamicConfig = {
    # enable = false
    # Configuration file change check interval, default is 600 seconds
    # checkInterval = 600
  # }

  dns {
    # dns urls to get nodes, url format tree://{pubkey}@{domain}, default empty
    treeUrls = [
      #"tree://APFGGTFOBVE2ZNAB3CSMNNX6RRK3ODIRLP2AA5U4YFAA6MSYZUYTQ@nodes1.example.org",
    ]
  }
}

rate.limiter = {
  # Every api can be given a specific rate limit strategy. Three strategies are supported: GlobalPreemptibleAdapter, IPQPSRateLimiterAdapter, QpsRateLimiterAdapter.
  # GlobalPreemptibleAdapter: permit is the number of preemptible resources; every client must acquire one resource
  #       before making a request, and the resource is released automatically after the response is received. permit should be an Integer.
  # QpsRateLimiterAdapter: qps is the average request count per second supported by the server; it can be a Double or an Integer.
  # IPQPSRateLimiterAdapter: similar to QpsRateLimiterAdapter, but the qps is counted per IP; it can be a Double or an Integer.
  # If none is set, the "default strategy" is used. The default strategy is based on QpsRateLimiterAdapter, with qps set to 10000.
  #
  # Sample entries:
  #
  http = [
    #  {
    #    component = "GetNowBlockServlet",
    #    strategy = "GlobalPreemptibleAdapter",
    #    paramString = "permit=1"
    #  },

    #  {
    #    component = "GetAccountServlet",
    #    strategy = "IPQPSRateLimiterAdapter",
    #    paramString = "qps=1"
    #  },

    #  {
    #    component = "ListWitnessesServlet",
    #    strategy = "QpsRateLimiterAdapter",
    #    paramString = "qps=1"
    #  }
  ],

  rpc = [
    #  {
    #    component = "protocol.Wallet/GetBlockByLatestNum2",
    #    strategy = "GlobalPreemptibleAdapter",
    #    paramString = "permit=1"
    #  },

    #  {
    #    component = "protocol.Wallet/GetAccount",
    #    strategy = "IPQPSRateLimiterAdapter",
    #    paramString = "qps=1"
    #  },

    #  {
    #    component = "protocol.Wallet/ListWitnesses",
    #    strategy = "QpsRateLimiterAdapter",
    #    paramString = "qps=1"
    #  },
  ]

  # global api qps, default 1000
  global.api.qps = 2147483647
  # global qps, default 50000
  global.qps = 2147483647
  # IP-based global qps, default 10000
  global.ip.qps = 2147483647
}

seed.node = {
  ip.list = [<ALL IPS OF FULLNODE>]
}

genesis.block = {
  # Reserve balance
  assets = [

  ]

  witnesses = [

  ]

  timestamp = "0" #2017-8-26 12:00:00

  parentHash = "957dc2d350daecc7bb6a38f3938ebde0a0c1cedafe15f0edae4256a2907449f6"
}

localwitness = []    # PLACE YOUR PRIVATE KEY HERE in []

#localwitnesskeystore = [
#  "src/main/resources/localwitnesskeystore.json"  # if you do not set the localwitness above, you must set this value. Otherwise, your SuperNode cannot produce blocks.
#]

block = {
  needSyncCheck = true # first node : false, other : true
  maintenanceTimeInterval = 600000 // 1 day: 86400000(ms), 6 hours: 21600000(ms), 15 mins: 900000
  proposalExpireTime = 600000 // 10 mins: 600000
}

vm = {
  supportConstant = true
  minTimeRatio = 0.0
  maxTimeRatio = 5.0
}

committee = {
  allowCreationOfContracts = 1  // mainnet: 0 (reset by committee), test: 1
}

event.subscribe = {
  native = {
    useNativeQueue = true // if true, use native message queue, else use event plugin.
    bindport = 5555 // bind port
    sendqueuelength = 1000 //max length of send queue
  }

  path = "" // absolute path of plugin
  server = "" // target server address to receive event triggers
  dbconfig = "" // dbname|username|password
  contractParse = true,
  topics = [
    {
      triggerName = "block" // block trigger, the value can't be modified
      enable = true
      topic = "block" // plugin topic, the value could be modified
    },
    {
      triggerName = "transaction"
      enable = true
      topic = "transaction"
    },
    {
      triggerName = "contractevent"
      enable = true
      topic = "contractevent"
    },
    {
      triggerName = "contractlog"
      enable = true
      topic = "contractlog"
    },
    {
      triggerName = "solidity" // solidity block event trigger, the value can't be modified
      enable = true            // the default value is true
      topic = "solidity"
    }
  ]

  filter = {
    fromblock = "0" // the value could be "", "earliest" or a specified block number as the beginning of the queried range
    toblock = "latest" // the value could be "", "latest" or a specified block number as end of the queried range
    contractAddress = []

    contractTopic = []
  }
}
energy.limit.block.num = 0
317787106 commented 3 months ago

@0xarmaansidhu After you start the 7 new fullnodes, just place their ip:port entries in each SR's config file under seed.node.ip.list. There is no need to place them in node.active or node.passive if all SRs and fullnodes have node.discovery enabled. Restart one SR and wait until it works, then modify the next SR's config file and restart it...
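For illustration, the relevant part of an updated SR config might look like the sketch below. The addresses are placeholders for the new fullnode IPs, and the port (16666, taken from node.listen.port in the config above) is an assumption that must match what each fullnode actually listens on:

seed.node = {
  # placeholder entries for the 7 new fullnode addresses
  ip.list = [
    "NEW_FULLNODE_IP_1:16666",
    "NEW_FULLNODE_IP_2:16666",
    "NEW_FULLNODE_IP_7:16666"
  ]
}

node.discovery = {
  enable = true  # must stay enabled so SRs can discover peers beyond the seed list
  persist = true
}

node {
  active = []   # can be left empty when discovery is enabled, per the comment above
  passive = []
}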

0xarmaansidhu commented 3 months ago

@317787106 Won't the SRs have different configs initially? How would they connect with each other?

xxo1shine commented 3 months ago

@0xarmaansidhu You can first replace the IP addresses of some fullnodes, and then configure the updated IP addresses in the SR configuration. After the operation is stable, update the IP addresses of the other nodes, and then update the SR configuration again. If an IP address is configured in the active list, it does not need to be configured in the passive list.
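One way to picture this staged rollout: during the transition, each SR's seed list can carry both migrated and not-yet-migrated addresses, so nodes restarted at different times can still bootstrap. A rough sketch with placeholder addresses and the same assumed listen port 16666:

seed.node = {
  ip.list = [
    # fullnodes already moved to their new IPs
    "NEW_FULLNODE_IP_1:16666",
    "NEW_FULLNODE_IP_2:16666",
    # fullnodes still on their old IPs
    "OLD_FULLNODE_IP_3:16666",
    "OLD_FULLNODE_IP_4:16666"
  ]
}

Once every fullnode has moved, the old entries can be dropped in a final pass over the SR configs.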

317787106 commented 3 months ago

@0xarmaansidhu SRs can find other peers' IPs through the seed nodes and then connect with them.

0xarmaansidhu commented 3 months ago

Ok, I'll try it.

endiaoekoe commented 3 months ago

@0xarmaansidhu For the specific mechanism of how nodes find each other, you can refer to this doc, which may explain more details.