The code of buildTraceBlock builds the trace for each transaction one by one, but buildTraceTransaction has to execute every tx from the first one up to the current index, so this does a lot of repetitive execution, as shown below:
for _, tx := range l2Block.Transactions() {
	checkReceipt, err := s.GetTransactionReceipt(ctx, tx.Hash(), dbTx)
	if err != nil {
		return nil, err
	}
	if checkReceipt.TransactionIndex < receipt.TransactionIndex {
		count++
	}
}

// since the executor only stores the state roots by block, we need to
// execute all the txs in the block until the tx we want to trace
var txsToEncode []types.Transaction
var effectivePercentage []uint8
for i := 0; i <= count; i++ {
	txsToEncode = append(txsToEncode, *l2Block.Transactions()[i])
	effectivePercentage = append(effectivePercentage, MaxEffectivePercentage)
	log.Debugf("trace will reprocess tx: %v", l2Block.Transactions()[i].Hash().String())
}
But go-ethereum's traceBlock only needs to execute all the txs of the block once:
func (api *API) traceBlock(ctx context.Context, block *types.Block, config *TraceConfig) ([]*txTraceResult, error) {
	if block.NumberU64() == 0 {
		return nil, errors.New("genesis is not traceable")
	}
	// Prepare base state
	parent, err := api.blockByNumberAndHash(ctx, rpc.BlockNumber(block.NumberU64()-1), block.ParentHash())
	if err != nil {
		return nil, err
	}
	reexec := defaultTraceReexec
	if config != nil && config.Reexec != nil {
		reexec = *config.Reexec
	}
	statedb, release, err := api.backend.StateAtBlock(ctx, parent, reexec, nil, true, false)
	if err != nil {
		return nil, err
	}
	defer release()

	// JS tracers have high overhead. In this case run a parallel
	// process that generates states in one thread and traces txes
	// in separate worker threads.
	if config != nil && config.Tracer != nil && *config.Tracer != "" {
		if isJS := DefaultDirectory.IsJS(*config.Tracer); isJS {
			return api.traceBlockParallel(ctx, block, statedb, config)
		}
	}
	// Native tracers have low overhead
	var (
		txs       = block.Transactions()
		blockHash = block.Hash()
		blockCtx  = core.NewEVMBlockContext(block.Header(), api.chainContext(ctx), nil)
		signer    = types.MakeSigner(api.backend.ChainConfig(), block.Number(), block.Time())
		results   = make([]*txTraceResult, len(txs))
	)
	if beaconRoot := block.BeaconRoot(); beaconRoot != nil {
		vmenv := vm.NewEVM(blockCtx, vm.TxContext{}, statedb, api.backend.ChainConfig(), vm.Config{})
		core.ProcessBeaconBlockRoot(*beaconRoot, vmenv, statedb)
	}
	for i, tx := range txs {
		// Generate the next state snapshot fast without tracing
		msg, _ := core.TransactionToMessage(tx, signer, block.BaseFee())
		txctx := &Context{
			BlockHash:   blockHash,
			BlockNumber: block.Number(),
			TxIndex:     i,
			TxHash:      tx.Hash(),
		}
		res, err := api.traceTx(ctx, tx, msg, txctx, blockCtx, statedb, config)
		if err != nil {
			return nil, err
		}
		results[i] = &txTraceResult{TxHash: tx.Hash(), Result: res}
	}
	return results, nil
}
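The key difference is that geth carries one mutable statedb through the loop and applies each tx on top of the previous one, so every transaction is executed exactly once. Here is a minimal, self-contained sketch of that single-pass shape; the Tx and memState types are hypothetical placeholders, not zkevm-node or go-ethereum APIs:

package main

import "fmt"

// Hypothetical sketch: Tx, memState, and ApplyAndTrace are placeholders.
// The point is the shape of a single-pass block trace: one mutable state is
// carried across the loop, so each tx is executed exactly once and its trace
// is produced as part of that execution.
type Tx struct{ Hash string }

type memState struct{ applied int }

// ApplyAndTrace executes tx on top of the current state, mutating it, and
// returns the trace for that single transaction.
func (s *memState) ApplyAndTrace(tx Tx) (string, error) {
	s.applied++ // stand-in for applying the tx to the state
	return fmt.Sprintf("trace(%s) at state #%d", tx.Hash, s.applied), nil
}

// traceBlockOnce mirrors go-ethereum's traceBlock loop: no prefix replay,
// just one execution per transaction against the shared state.
func traceBlockOnce(state *memState, txs []Tx) ([]string, error) {
	traces := make([]string, 0, len(txs))
	for _, tx := range txs {
		t, err := state.ApplyAndTrace(tx)
		if err != nil {
			return nil, fmt.Errorf("trace %s: %w", tx.Hash, err)
		}
		traces = append(traces, t)
	}
	return traces, nil
}

func main() {
	traces, _ := traceBlockOnce(&memState{}, []Tx{{"0xaa"}, {"0xbb"}, {"0xcc"}})
	for _, t := range traces {
		fmt.Println(t)
	}
}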
Does zkevm-node have an optimization strategy for this issue? Thanks.