16 commits
daafc3c
feat(sync/customrawdb): migrate customrawdb package from coreth
powerslider Oct 3, 2025
f185407
fix: migrate missing code
powerslider Oct 3, 2025
136ba64
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 3, 2025
b79eeab
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 6, 2025
e6f5461
fix: add missing code needed by subnet-evm
powerslider Oct 6, 2025
bf62dfb
fix: add generics to ReadChainConfig and WriteChainConfig to eliminat…
powerslider Oct 6, 2025
fff1df9
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 8, 2025
7a1e70d
test(customrawdb): add table-driven accessor tests and sentinel error…
powerslider Oct 10, 2025
04094ec
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 10, 2025
65d6032
fix: remove unused InspectDatabase function
powerslider Oct 10, 2025
aef0bb6
chore(customrawdb): switch from geth-style to explicit error returns,…
powerslider Oct 13, 2025
02e8079
chore(customrawdb): switch from geth-style to explicit error returns,…
powerslider Oct 13, 2025
e5492d9
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 13, 2025
019b3d3
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 14, 2025
1db8ca0
Merge branch 'master' into powerslider/4386-migrate-customrawdb-coreth
powerslider Oct 14, 2025
d69f871
refactor(customrawdb)!: restructure package and align metadata respon…
powerslider Oct 15, 2025
36 changes: 36 additions & 0 deletions vms/evm/sync/customrawdb/db.go
@@ -0,0 +1,36 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package customrawdb

import (
"errors"

"github.com/ava-labs/libevm/core/rawdb"
"github.com/ava-labs/libevm/ethdb"
)

var (
// ErrEntryNotFound indicates the requested key/value was not present in the DB.
ErrEntryNotFound = errors.New("entry not found")
// errStateSchemeConflict indicates the provided state scheme conflicts with what is on disk.
errStateSchemeConflict = errors.New("state scheme conflict")
// FirewoodScheme is the identifier of the Firewood state storage scheme.
FirewoodScheme = "firewood"
)

// ParseStateScheme parses the provided state scheme and validates it against the scheme already stored in db.
func ParseStateScheme(provided string, db ethdb.Database) (string, error) {
// Check for custom scheme
if provided == FirewoodScheme {
if diskScheme := rawdb.ReadStateScheme(db); diskScheme != "" {
// A different scheme is already recorded on disk and conflicts with Firewood.
return "", errStateSchemeConflict
}
// No conflicting scheme found on disk, so Firewood is valid.
return FirewoodScheme, nil
}

// Check for valid eth scheme
return rawdb.ParseStateScheme(provided, db)
}
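
A minimal usage sketch of ParseStateScheme (not part of the diff), written as if it sat in a _test.go file of the customrawdb package so identifiers are unqualified; the expected output mirrors the behavior exercised in db_test.go below.

package customrawdb

import (
	"fmt"

	"github.com/ava-labs/libevm/core/rawdb"
)

func ExampleParseStateScheme() {
	db := rawdb.NewMemoryDatabase()

	// Firewood is accepted when no other scheme is recorded on disk.
	scheme, err := ParseStateScheme(FirewoodScheme, db)
	if err != nil {
		panic(err)
	}
	fmt.Println(scheme)

	// Any other scheme is delegated to libevm's rawdb.ParseStateScheme.
	scheme, err = ParseStateScheme("hash", db)
	if err != nil {
		panic(err)
	}
	fmt.Println(scheme)

	// Output:
	// firewood
	// hash
}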
31 changes: 31 additions & 0 deletions vms/evm/sync/customrawdb/db_test.go
@@ -0,0 +1,31 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package customrawdb

import (
"testing"

"github.com/ava-labs/libevm/core/rawdb"
"github.com/stretchr/testify/require"
)

func TestParseStateScheme(t *testing.T) {
db := rawdb.NewMemoryDatabase()

// Provided Firewood on empty disk -> allowed.
scheme, err := ParseStateScheme(FirewoodScheme, db)
require.NoError(t, err)
require.Equal(t, FirewoodScheme, scheme)

// Simulate a disk that already uses the path scheme by writing a persistent state id.
rawdb.WritePersistentStateID(db, 1)
scheme2, err := ParseStateScheme(FirewoodScheme, db)
require.ErrorIs(t, err, errStateSchemeConflict)
require.Empty(t, scheme2)

// Pass-through to rawdb for non-Firewood using a fresh empty DB.
db2 := rawdb.NewMemoryDatabase()
scheme, err = ParseStateScheme("hash", db2)
require.NoError(t, err)
require.Equal(t, "hash", scheme)
}
241 changes: 241 additions & 0 deletions vms/evm/sync/customrawdb/markers.go
@@ -0,0 +1,241 @@
// Copyright (C) 2019-2025, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package customrawdb

import (
"encoding/json"
"errors"
"fmt"
"time"

"github.com/ava-labs/libevm/common"
"github.com/ava-labs/libevm/core/rawdb"
"github.com/ava-labs/libevm/ethdb"
"github.com/ava-labs/libevm/log"
"github.com/ava-labs/libevm/params"
"github.com/ava-labs/libevm/rlp"
)

var (
// errInvalidData indicates the stored value exists but is malformed or undecodable.
errInvalidData = errors.New("invalid data")

upgradeConfigPrefix = []byte("upgrade-config-")
// offlinePruningKey tracks runs of offline pruning.
offlinePruningKey = []byte("OfflinePruning")
// populateMissingTriesKey tracks runs of trie backfills.
populateMissingTriesKey = []byte("PopulateMissingTries")
// pruningDisabledKey tracks whether the node has ever run in archival mode
// to ensure that a user does not accidentally corrupt an archival node.
pruningDisabledKey = []byte("PruningDisabled")
// acceptorTipKey tracks the tip of the last accepted block that has been fully processed.
acceptorTipKey = []byte("AcceptorTipKey")
// snapshotBlockHashKey tracks the block hash of the last snapshot.
snapshotBlockHashKey = []byte("SnapshotBlockHash")
)

// WriteOfflinePruning writes a time marker of the last attempt to run offline pruning.
// The marker is written when offline pruning completes and is deleted when the node
// is started successfully with offline pruning disabled. This ensures users must
// disable offline pruning and start their node successfully between runs of offline
// pruning.
func WriteOfflinePruning(db ethdb.KeyValueStore, ts time.Time) error {
return writeTimeMarker(db, offlinePruningKey, ts)
}

// ReadOfflinePruning reads the most recent timestamp of an attempt to run offline
// pruning if present.
func ReadOfflinePruning(db ethdb.KeyValueStore) (time.Time, error) {
return readTimeMarker(db, offlinePruningKey)
}

// DeleteOfflinePruning deletes any marker of the last attempt to run offline pruning.
func DeleteOfflinePruning(db ethdb.KeyValueStore) error {
return db.Delete(offlinePruningKey)
}
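
A sketch of the offline-pruning marker lifecycle described in the doc comment above (not part of the diff); offlinePruningLifecycleSketch is a hypothetical helper written as if inside the customrawdb package.

package customrawdb

import (
	"errors"
	"time"

	"github.com/ava-labs/libevm/ethdb"
)

func offlinePruningLifecycleSketch(db ethdb.KeyValueStore) error {
	// A node that has never run offline pruning sees ErrEntryNotFound.
	if _, err := ReadOfflinePruning(db); err != nil && !errors.Is(err, ErrEntryNotFound) {
		return err
	}

	// Record the completion time once an offline-pruning run finishes.
	if err := WriteOfflinePruning(db, time.Now()); err != nil {
		return err
	}

	// After the node later starts successfully with offline pruning disabled,
	// clear the marker so another pruning run is permitted.
	return DeleteOfflinePruning(db)
}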

// WritePopulateMissingTries writes a marker for the current attempt to populate
// missing tries.
func WritePopulateMissingTries(db ethdb.KeyValueStore, ts time.Time) error {
return writeTimeMarker(db, populateMissingTriesKey, ts)
}

// ReadPopulateMissingTries reads the most recent timestamp of an attempt to
// re-populate missing trie nodes.
func ReadPopulateMissingTries(db ethdb.KeyValueStore) (time.Time, error) {
return readTimeMarker(db, populateMissingTriesKey)
}

// DeletePopulateMissingTries deletes any marker of the last attempt to
// re-populate missing trie nodes.
func DeletePopulateMissingTries(db ethdb.KeyValueStore) error {
return db.Delete(populateMissingTriesKey)
}

// WritePruningDisabled writes a marker to track whether the node has ever run
// with pruning disabled.
func WritePruningDisabled(db ethdb.KeyValueStore) error {
return db.Put(pruningDisabledKey, nil)
}

// HasPruningDisabled returns true if there is a marker present indicating that
// the node has run with pruning disabled at some point.
func HasPruningDisabled(db ethdb.KeyValueStore) (bool, error) {
return db.Has(pruningDisabledKey)
}

// WriteAcceptorTip writes `hash` as the last accepted block that has been fully processed.
func WriteAcceptorTip(db ethdb.KeyValueWriter, hash common.Hash) error {
return db.Put(acceptorTipKey, hash[:])
}

// ReadAcceptorTip reads the hash of the last accepted block that was fully processed.
// If there is no value present (the index is being initialized for the first time),
// ErrEntryNotFound is returned.
func ReadAcceptorTip(db ethdb.KeyValueReader) (common.Hash, error) {
ok, err := db.Has(acceptorTipKey)
if err != nil {
return common.Hash{}, err
}
if !ok {
return common.Hash{}, ErrEntryNotFound
}
h, err := db.Get(acceptorTipKey)
if err != nil {
return common.Hash{}, err
}
if len(h) != common.HashLength {
return common.Hash{}, fmt.Errorf("%w: length %d", errInvalidData, len(h))
}
return common.BytesToHash(h), nil
}
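
A sketch of a caller round-tripping the acceptor tip (not part of the diff), treating ErrEntryNotFound as an uninitialized index; acceptorTipSketch is a hypothetical helper written as if inside the customrawdb package.

package customrawdb

import (
	"errors"

	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/ethdb"
)

func acceptorTipSketch(db ethdb.Database, lastAccepted common.Hash) (common.Hash, error) {
	// Persist the latest fully processed block.
	if err := WriteAcceptorTip(db, lastAccepted); err != nil {
		return common.Hash{}, err
	}

	// Read it back; a missing entry means the index has never been initialized.
	tip, err := ReadAcceptorTip(db)
	if errors.Is(err, ErrEntryNotFound) {
		return common.Hash{}, nil
	}
	return tip, err
}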

// ReadChainConfig retrieves the consensus settings based on the given genesis hash.
// The provided `upgradeConfig` (any type that JSON can be unmarshaled into) is populated if an upgrade config is present on disk.
func ReadChainConfig[T any](db ethdb.KeyValueReader, hash common.Hash, upgradeConfig *T) (*params.ChainConfig, error) {
Contributor comment:

  1. Do we need *T if the caller specifies a pointer type as T?
  2. It seems weird that the comment specifies that T is known to be a JSON-unmarshalable type, but our type constraint is any. Should we use json.Unmarshaler?

config := rawdb.ReadChainConfig(db, hash)
if config == nil {
return nil, ErrEntryNotFound
}

upgrade, _ := db.Get(upgradeConfigKey(hash))
Contributor comment:

Why do we ignore the error value here?

if len(upgrade) == 0 {
return config, nil
}

if err := json.Unmarshal(upgrade, upgradeConfig); err != nil {
return nil, fmt.Errorf("%w: %w", errInvalidData, err)
}

return config, nil
}

// WriteChainConfig writes the chain config settings to the database.
// The provided `upgradeConfig` (any JSON-marshalable type) will be stored alongside the chain config.
func WriteChainConfig[T any](db ethdb.KeyValueWriter, hash common.Hash, config *params.ChainConfig, upgradeConfig T) error {
Contributor comment:

Same comment as on ReadChainConfig

rawdb.WriteChainConfig(db, hash, config)
if config == nil {
return nil
}

data, err := json.Marshal(upgradeConfig)
if err != nil {
return err
}
return db.Put(upgradeConfigKey(hash), data)
}
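
A sketch of the generic chain-config round trip (not part of the diff): the upgradeConfig type and chainConfigSketch helper are illustrative, and params.TestChainConfig merely stands in for a real config; written as if inside the customrawdb package.

package customrawdb

import (
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/core/rawdb"
	"github.com/ava-labs/libevm/params"
)

// upgradeConfig is a caller-defined, JSON-serializable type.
type upgradeConfig struct {
	NetworkUpgrades map[string]uint64 `json:"networkUpgrades"`
}

func chainConfigSketch(genesisHash common.Hash) (*params.ChainConfig, upgradeConfig, error) {
	db := rawdb.NewMemoryDatabase()

	// Store the chain config together with the caller's upgrade config.
	in := upgradeConfig{NetworkUpgrades: map[string]uint64{"exampleUpgrade": 0}}
	if err := WriteChainConfig(db, genesisHash, params.TestChainConfig, in); err != nil {
		return nil, upgradeConfig{}, err
	}

	// Read both back; T is inferred from the *upgradeConfig argument.
	var out upgradeConfig
	cfg, err := ReadChainConfig(db, genesisHash, &out)
	return cfg, out, err
}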

// NewAccountSnapshotsIterator returns an iterator for walking all of the accounts in the snapshot.
func NewAccountSnapshotsIterator(db ethdb.Iteratee) ethdb.Iterator {
it := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
keyLen := len(rawdb.SnapshotAccountPrefix) + common.HashLength
return rawdb.NewKeyLengthIterator(it, keyLen)
}
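
A sketch of walking the persisted account snapshot with the iterator above (not part of the diff); listSnapshotAccounts is a hypothetical helper written as if inside the customrawdb package.

package customrawdb

import (
	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/core/rawdb"
	"github.com/ava-labs/libevm/ethdb"
)

func listSnapshotAccounts(db ethdb.Database) ([]common.Hash, error) {
	it := NewAccountSnapshotsIterator(db)
	defer it.Release()

	var accounts []common.Hash
	for it.Next() {
		// Keys are rawdb.SnapshotAccountPrefix + account hash; strip the prefix.
		accounts = append(accounts, common.BytesToHash(it.Key()[len(rawdb.SnapshotAccountPrefix):]))
	}
	return accounts, it.Error()
}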

// ReadSnapshotBlockHash retrieves the hash of the block whose state is contained in
// the persisted snapshot.
func ReadSnapshotBlockHash(db ethdb.KeyValueReader) (common.Hash, error) {
ok, err := db.Has(snapshotBlockHashKey)
if err != nil {
return common.Hash{}, err
}
if !ok {
return common.Hash{}, ErrEntryNotFound
}

data, err := db.Get(snapshotBlockHashKey)
if err != nil {
return common.Hash{}, err
}
if len(data) != common.HashLength {
return common.Hash{}, fmt.Errorf("%w: length %d", errInvalidData, len(data))
}
return common.BytesToHash(data), nil
}

// WriteSnapshotBlockHash stores the hash of the block whose state is contained in
// the persisted snapshot.
func WriteSnapshotBlockHash(db ethdb.KeyValueWriter, blockHash common.Hash) error {
if err := db.Put(snapshotBlockHashKey, blockHash[:]); err != nil {
log.Error("Failed to store snapshot block hash", "err", err)
return err
}
return nil
}

// DeleteSnapshotBlockHash deletes the hash of the block whose state is contained in
// the persisted snapshot. Since snapshots are not immutable, this method can
// be used during updates, so a crash or failure will mark the entire snapshot
// invalid.
func DeleteSnapshotBlockHash(db ethdb.KeyValueWriter) error {
if err := db.Delete(snapshotBlockHashKey); err != nil {
log.Error("Failed to remove snapshot block hash", "err", err)
return err
}
return nil
}
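
A sketch pairing the snapshot block-hash accessors (not part of the diff); snapshotMatchesSketch is a hypothetical helper written as if inside the customrawdb package.

package customrawdb

import (
	"errors"

	"github.com/ava-labs/libevm/common"
	"github.com/ava-labs/libevm/ethdb"
)

func snapshotMatchesSketch(db ethdb.Database, blockHash common.Hash) (bool, error) {
	// Record which block the persisted snapshot corresponds to.
	if err := WriteSnapshotBlockHash(db, blockHash); err != nil {
		return false, err
	}

	// A later reader can verify the snapshot still matches that block.
	stored, err := ReadSnapshotBlockHash(db)
	if errors.Is(err, ErrEntryNotFound) {
		// The marker was deleted (e.g. mid-update), so the snapshot is invalid.
		return false, nil
	}
	if err != nil {
		return false, err
	}
	return stored == blockHash, nil
}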

// writeTimeMarker writes a marker of the provided time in the db at `key`.
func writeTimeMarker(db ethdb.KeyValueStore, key []byte, ts time.Time) error {
data, err := rlp.EncodeToBytes(uint64(ts.Unix()))
if err != nil {
return err
}
return db.Put(key, data)
}

// readTimeMarker reads the timestamp stored at `key`.
func readTimeMarker(db ethdb.KeyValueStore, key []byte) (time.Time, error) {
// Check existence first to map missing marker to a stable sentinel error.
ok, err := db.Has(key)
if err != nil {
return time.Time{}, err
}
if !ok {
return time.Time{}, ErrEntryNotFound
}

data, err := db.Get(key)
if err != nil {
return time.Time{}, err
}
if len(data) == 0 {
return time.Time{}, ErrEntryNotFound
}

var unix uint64
if err := rlp.DecodeBytes(data, &unix); err != nil {
return time.Time{}, fmt.Errorf("%w: %w", errInvalidData, err)
}

return time.Unix(int64(unix), 0), nil
}

// upgradeConfigKey = upgradeConfigPrefix + hash
Contributor comment:

nit: Some of these in-line comments are documenting self-documenting code - could we remove these? Comments have a maintenance cost associated with them since they need to be kept in-sync w/ code, so if code is reasonably self-documenting we should generally avoid adding comments to them.

func upgradeConfigKey(hash common.Hash) []byte {
return append(upgradeConfigPrefix, hash.Bytes()...)
}