diff --git a/blockdb/blockdb.go b/blockdb/blockdb.go
index 96a379863..ba8e74c69 100644
--- a/blockdb/blockdb.go
+++ b/blockdb/blockdb.go
@@ -4,23 +4,25 @@ import (
"context"
"fmt"
+ "github.com/sirupsen/logrus"
+
"github.com/ethpandaops/dora/blockdb/pebble"
"github.com/ethpandaops/dora/blockdb/s3"
+ "github.com/ethpandaops/dora/blockdb/tiered"
"github.com/ethpandaops/dora/blockdb/types"
dtypes "github.com/ethpandaops/dora/types"
)
-// BlockDb wraps the underlying storage engine for both beacon block data
-// and execution data.
+// BlockDb is the main wrapper for block database operations.
type BlockDb struct {
engine types.BlockDbEngine
execEngine types.ExecDataEngine // nil if engine doesn't support exec data
}
-// GlobalBlockDb is the global singleton BlockDb instance.
+// GlobalBlockDb is the global block database instance.
var GlobalBlockDb *BlockDb
-// InitWithPebble initializes the global BlockDb with a Pebble backend.
+// InitWithPebble initializes the block database with Pebble (local) storage.
func InitWithPebble(config dtypes.PebbleBlockDBConfig) error {
engine, err := pebble.NewPebbleEngine(config)
if err != nil {
@@ -41,7 +43,7 @@ func InitWithPebble(config dtypes.PebbleBlockDBConfig) error {
return nil
}
-// InitWithS3 initializes the global BlockDb with an S3 backend.
+// InitWithS3 initializes the block database with S3 (remote) storage.
func InitWithS3(config dtypes.S3BlockDBConfig) error {
engine, err := s3.NewS3Engine(config)
if err != nil {
@@ -62,6 +64,27 @@ func InitWithS3(config dtypes.S3BlockDBConfig) error {
return nil
}
+// InitWithTiered initializes the block database with tiered storage (Pebble cache + S3 backend).
+func InitWithTiered(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) error {
+ engine, err := tiered.NewTieredEngine(config, logger)
+ if err != nil {
+ return err
+ }
+
+ db := &BlockDb{
+ engine: engine,
+ }
+
+ // Check if tiered engine supports exec data
+ if execEngine, ok := engine.(types.ExecDataEngine); ok {
+ db.execEngine = execEngine
+ }
+
+ GlobalBlockDb = db
+
+ return nil
+}
+
// GetEngine returns the underlying storage engine.
func (db *BlockDb) GetEngine() types.BlockDbEngine {
return db.engine
@@ -71,22 +94,59 @@ func (db *BlockDb) Close() error {
return db.engine.Close()
}
-func (db *BlockDb) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) {
- return db.engine.GetBlock(ctx, slot, root, parseBlock)
-}
-
-func (db *BlockDb) AddBlock(ctx context.Context, slot uint64, root []byte, header_ver uint64, header_data []byte, body_ver uint64, body_data []byte) (bool, error) {
+// GetBlock retrieves block data with selective loading based on flags.
+func (db *BlockDb) GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ return db.engine.GetBlock(ctx, slot, root, flags, parseBlock, parsePayload)
+}
+
+// GetStoredComponents returns which components exist for a block.
+func (db *BlockDb) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) {
+ return db.engine.GetStoredComponents(ctx, slot, root)
+}
+
+// AddBlock stores block data. Returns (added, updated, error).
+func (db *BlockDb) AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ headerVer uint64,
+ headerData []byte,
+ bodyVer uint64,
+ bodyData []byte,
+ payloadVer uint64,
+ payloadData []byte,
+ balVer uint64,
+ balData []byte,
+) (bool, bool, error) {
return db.engine.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
return &types.BlockData{
- HeaderVersion: header_ver,
- HeaderData: header_data,
- BodyVersion: body_ver,
- BodyData: body_data,
+ HeaderVersion: headerVer,
+ HeaderData: headerData,
+ BodyVersion: bodyVer,
+ BodyData: bodyData,
+ PayloadVersion: payloadVer,
+ PayloadData: payloadData,
+ BalVersion: balVer,
+ BalData: balData,
}, nil
})
}
-func (db *BlockDb) AddBlockWithCallback(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) {
+// AddBlockWithCallback stores block data using a callback for deferred data loading.
+// Returns (added, updated, error).
+func (db *BlockDb) AddBlockWithCallback(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
return db.engine.AddBlock(ctx, slot, root, dataCb)
}
diff --git a/blockdb/pebble/cleanup.go b/blockdb/pebble/cleanup.go
new file mode 100644
index 000000000..5a3cf787e
--- /dev/null
+++ b/blockdb/pebble/cleanup.go
@@ -0,0 +1,439 @@
+package pebble
+
+import (
+ "context"
+ "encoding/binary"
+ "sort"
+ "sync"
+ "time"
+
+ "github.com/cockroachdb/pebble"
+ "github.com/sirupsen/logrus"
+
+ "github.com/ethpandaops/dora/blockdb/types"
+ dtypes "github.com/ethpandaops/dora/types"
+)
+
+const (
+ // KeyNamespaceLRU is the namespace for LRU tracking data.
+ KeyNamespaceLRU uint16 = 2
+
+ // LRU value format: [headerAccess (8B)] [bodyAccess (8B)] [payloadAccess (8B)] [balAccess (8B)]
+ // Each access time is a Unix nanosecond timestamp, 0 means never accessed.
+ lruValueSize = 32
+
+ // Maximum number of LRU updates to buffer before forcing a flush.
+ maxLRUBufferSize = 1000
+)
+
+// CacheCleanup manages background cleanup of cached data.
+type CacheCleanup struct {
+ engine *PebbleEngine
+ config dtypes.PebbleBlockDBConfig
+ logger logrus.FieldLogger
+
+ ctx context.Context
+ cancel context.CancelFunc
+ wg sync.WaitGroup
+
+ // LRU update buffer
+ lruMu sync.Mutex
+ lruBuffer map[string]*lruUpdate // root hex -> update
+}
+
+// lruUpdate holds pending LRU timestamp updates for a block.
+type lruUpdate struct {
+ root []byte
+ headerAccess int64 // Unix nano, 0 = no update
+ bodyAccess int64
+ payloadAccess int64
+ balAccess int64
+}
+
+// NewCacheCleanup creates a new cache cleanup manager.
+func NewCacheCleanup(engine *PebbleEngine, logger logrus.FieldLogger) *CacheCleanup {
+ ctx, cancel := context.WithCancel(context.Background())
+
+ return &CacheCleanup{
+ engine: engine,
+ config: engine.GetConfig(),
+ logger: logger.WithField("component", "pebble-cleanup"),
+ ctx: ctx,
+ cancel: cancel,
+ lruBuffer: make(map[string]*lruUpdate, 100),
+ }
+}
+
+// Start begins the background cleanup loop.
+func (c *CacheCleanup) Start() {
+ if c.config.CleanupInterval == 0 {
+ c.logger.Info("cleanup disabled (interval is 0)")
+ return
+ }
+
+ c.wg.Add(1)
+ go c.runCleanupLoop()
+}
+
+// Stop stops the background cleanup loop.
+func (c *CacheCleanup) Stop() {
+ c.cancel()
+ c.wg.Wait()
+
+ // Final flush of LRU buffer
+ c.FlushLRU()
+}
+
+// runCleanupLoop runs the periodic cleanup.
+func (c *CacheCleanup) runCleanupLoop() {
+ defer c.wg.Done()
+
+ ticker := time.NewTicker(c.config.CleanupInterval)
+ defer ticker.Stop()
+
+ for {
+ select {
+ case <-c.ctx.Done():
+ return
+ case <-ticker.C:
+ c.FlushLRU()
+ c.runCleanup()
+ }
+ }
+}
+
+// RecordAccess records an access for LRU tracking. Buffered until flush.
+func (c *CacheCleanup) RecordAccess(root []byte, flags types.BlockDataFlags) {
+ c.lruMu.Lock()
+ defer c.lruMu.Unlock()
+
+ key := string(root)
+ now := time.Now().UnixNano()
+
+ update, exists := c.lruBuffer[key]
+ if !exists {
+ rootCopy := make([]byte, len(root))
+ copy(rootCopy, root)
+ update = &lruUpdate{root: rootCopy}
+ c.lruBuffer[key] = update
+ }
+
+ if flags.Has(types.BlockDataFlagHeader) {
+ update.headerAccess = now
+ }
+ if flags.Has(types.BlockDataFlagBody) {
+ update.bodyAccess = now
+ }
+ if flags.Has(types.BlockDataFlagPayload) {
+ update.payloadAccess = now
+ }
+ if flags.Has(types.BlockDataFlagBal) {
+ update.balAccess = now
+ }
+
+ // Force flush if buffer is too large
+ if len(c.lruBuffer) >= maxLRUBufferSize {
+ c.flushLRULocked()
+ }
+}
+
+// FlushLRU flushes buffered LRU updates to Pebble.
+func (c *CacheCleanup) FlushLRU() {
+ c.lruMu.Lock()
+ defer c.lruMu.Unlock()
+ c.flushLRULocked()
+}
+
+// flushLRULocked flushes LRU buffer (must hold lruMu).
+func (c *CacheCleanup) flushLRULocked() {
+ if len(c.lruBuffer) == 0 {
+ return
+ }
+
+ db := c.engine.GetDB()
+ batch := db.NewBatch()
+
+ for _, update := range c.lruBuffer {
+ key := makeLRUKey(update.root)
+
+ // Read existing LRU data
+ existing := make([]byte, lruValueSize)
+ if res, closer, err := db.Get(key); err == nil {
+ if len(res) >= lruValueSize {
+ copy(existing, res)
+ }
+ closer.Close()
+ }
+
+ // Merge updates (only update non-zero values)
+ value := make([]byte, lruValueSize)
+ copy(value, existing)
+
+ if update.headerAccess > 0 {
+ binary.BigEndian.PutUint64(value[0:8], uint64(update.headerAccess))
+ }
+ if update.bodyAccess > 0 {
+ binary.BigEndian.PutUint64(value[8:16], uint64(update.bodyAccess))
+ }
+ if update.payloadAccess > 0 {
+ binary.BigEndian.PutUint64(value[16:24], uint64(update.payloadAccess))
+ }
+ if update.balAccess > 0 {
+ binary.BigEndian.PutUint64(value[24:32], uint64(update.balAccess))
+ }
+
+ batch.Set(key, value, nil)
+ }
+
+ if err := batch.Commit(nil); err != nil {
+ c.logger.Errorf("failed to flush LRU updates: %v", err)
+ }
+ batch.Close()
+
+ // Clear buffer
+ c.lruBuffer = make(map[string]*lruUpdate, 100)
+}
+
+// makeLRUKey creates the key for LRU data.
+func makeLRUKey(root []byte) []byte {
+ key := make([]byte, 2+len(root))
+ binary.BigEndian.PutUint16(key[:2], KeyNamespaceLRU)
+ copy(key[2:], root)
+ return key
+}
+
+// runCleanup performs cleanup for all configured component types.
+func (c *CacheCleanup) runCleanup() {
+ c.logger.Debug("starting cache cleanup")
+
+ componentConfigs := map[uint16]*dtypes.BlockDbRetentionConfig{
+ BlockTypeHeader: &c.config.HeaderRetention,
+ BlockTypeBody: &c.config.BodyRetention,
+ BlockTypePayload: &c.config.PayloadRetention,
+ BlockTypeBal: &c.config.BalRetention,
+ }
+
+ for blockType, config := range componentConfigs {
+ if config == nil || !config.Enabled {
+ continue
+ }
+
+ switch config.CleanupMode {
+ case "age":
+ c.cleanupByAge(blockType, config.RetentionTime)
+ case "lru":
+ c.cleanupByLRU(blockType, config.MaxSize*1024*1024) // Convert MB to bytes
+ }
+ }
+}
+
+// cleanupByAge removes entries older than the retention time based on storage timestamp.
+func (c *CacheCleanup) cleanupByAge(blockType uint16, retention time.Duration) {
+ if retention == 0 {
+ return
+ }
+
+ cutoff := time.Now().Add(-retention)
+ deleted := 0
+
+ db := c.engine.GetDB()
+ iter, err := db.NewIter(&pebble.IterOptions{})
+ if err != nil {
+ c.logger.Errorf("failed to create iterator: %v", err)
+ return
+ }
+ defer iter.Close()
+
+ batch := db.NewBatch()
+ defer batch.Close()
+
+ for iter.First(); iter.Valid(); iter.Next() {
+ key := iter.Key()
+
+ // Check if this key is in the block namespace
+ if len(key) < 36 { // 2 (namespace) + 32 (root) + 2 (type)
+ continue
+ }
+
+ namespace := binary.BigEndian.Uint16(key[:2])
+ if namespace != KeyNamespaceBlock {
+ continue
+ }
+
+ keyType := binary.BigEndian.Uint16(key[len(key)-2:])
+ if keyType != blockType {
+ continue
+ }
+
+ // Check timestamp from value (stored at offset 8)
+ value := iter.Value()
+ if len(value) < valueHeaderSize {
+ continue
+ }
+
+ timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(value[8:16])))
+ if timestamp.Before(cutoff) {
+ keyCopy := make([]byte, len(key))
+ copy(keyCopy, key)
+ batch.Delete(keyCopy, nil)
+ deleted++
+ }
+ }
+
+ if deleted > 0 {
+ if err := batch.Commit(nil); err != nil {
+ c.logger.Errorf("failed to commit age cleanup batch: %v", err)
+ } else {
+ c.logger.Infof("cleaned up %d entries for block type %d (age-based)", deleted, blockType)
+ }
+ }
+}
+
+// lruEntry represents an entry for LRU cleanup sorting.
+type lruEntry struct {
+ root []byte
+ key []byte
+ size int64
+ lastAccess int64
+}
+
+// cleanupByLRU removes least recently used entries when size exceeds limit.
+func (c *CacheCleanup) cleanupByLRU(blockType uint16, maxSize int64) {
+ if maxSize == 0 {
+ return
+ }
+
+ db := c.engine.GetDB()
+
+ // First pass: collect all entries with their sizes and LRU timestamps
+ entries := make([]*lruEntry, 0, 1000)
+ var totalSize int64
+
+ iter, err := db.NewIter(&pebble.IterOptions{})
+ if err != nil {
+ c.logger.Errorf("failed to create iterator: %v", err)
+ return
+ }
+
+ // Scan block entries
+ for iter.First(); iter.Valid(); iter.Next() {
+ key := iter.Key()
+
+ if len(key) < 36 {
+ continue
+ }
+
+ namespace := binary.BigEndian.Uint16(key[:2])
+ if namespace != KeyNamespaceBlock {
+ continue
+ }
+
+ keyType := binary.BigEndian.Uint16(key[len(key)-2:])
+ if keyType != blockType {
+ continue
+ }
+
+ // Extract root from key
+ root := key[2 : len(key)-2]
+ value := iter.Value()
+ size := int64(len(value))
+ totalSize += size
+
+ // Get LRU timestamp for this entry
+ lastAccess := c.getLRUTimestamp(db, root, blockType)
+
+ keyCopy := make([]byte, len(key))
+ copy(keyCopy, key)
+ rootCopy := make([]byte, len(root))
+ copy(rootCopy, root)
+
+ entries = append(entries, &lruEntry{
+ root: rootCopy,
+ key: keyCopy,
+ size: size,
+ lastAccess: lastAccess,
+ })
+ }
+ iter.Close()
+
+ // Check if we need to clean up
+ if totalSize <= maxSize {
+ return
+ }
+
+ // Sort by last access time (oldest first, 0 = never accessed = oldest)
+ sort.Slice(entries, func(i, j int) bool {
+ return entries[i].lastAccess < entries[j].lastAccess
+ })
+
+ // Delete oldest entries until we're under the limit
+ batch := db.NewBatch()
+ defer batch.Close()
+
+ deleted := 0
+ freedSize := int64(0)
+ targetFree := totalSize - maxSize
+
+ for _, entry := range entries {
+ if freedSize >= targetFree {
+ break
+ }
+
+ batch.Delete(entry.key, nil)
+ freedSize += entry.size
+ deleted++
+ }
+
+ if deleted > 0 {
+ if err := batch.Commit(nil); err != nil {
+ c.logger.Errorf("failed to commit LRU cleanup batch: %v", err)
+ } else {
+ c.logger.Infof("cleaned up %d entries for block type %d (LRU-based, freed %d bytes)",
+ deleted, blockType, freedSize)
+ }
+ }
+}
+
+// getLRUTimestamp retrieves the LRU timestamp for a specific component.
+func (c *CacheCleanup) getLRUTimestamp(db *pebble.DB, root []byte, blockType uint16) int64 {
+ key := makeLRUKey(root)
+
+ res, closer, err := db.Get(key)
+ if err != nil {
+ return 0 // Never accessed
+ }
+ defer closer.Close()
+
+ if len(res) < lruValueSize {
+ return 0
+ }
+
+ // Extract timestamp based on block type
+ var offset int
+ switch blockType {
+ case BlockTypeHeader:
+ offset = 0
+ case BlockTypeBody:
+ offset = 8
+ case BlockTypePayload:
+ offset = 16
+ case BlockTypeBal:
+ offset = 24
+ default:
+ return 0
+ }
+
+ return int64(binary.BigEndian.Uint64(res[offset : offset+8]))
+}
+
+// DeleteLRU removes LRU data for a block (call when deleting block data).
+func (c *CacheCleanup) DeleteLRU(root []byte) {
+ db := c.engine.GetDB()
+ key := makeLRUKey(root)
+ db.Delete(key, nil)
+
+ // Also remove from buffer
+ c.lruMu.Lock()
+ delete(c.lruBuffer, string(root))
+ c.lruMu.Unlock()
+}
diff --git a/blockdb/pebble/pebble.go b/blockdb/pebble/pebble.go
index 3418912a9..985cc9e1f 100644
--- a/blockdb/pebble/pebble.go
+++ b/blockdb/pebble/pebble.go
@@ -3,6 +3,8 @@ package pebble
import (
"context"
"encoding/binary"
+ "fmt"
+ "time"
"github.com/cockroachdb/pebble"
"github.com/ethpandaops/dora/blockdb/types"
@@ -14,12 +16,18 @@ const (
)
const (
- BlockTypeHeader uint16 = 1
- BlockTypeBody uint16 = 2
+ BlockTypeHeader uint16 = 1
+ BlockTypeBody uint16 = 2
+ BlockTypePayload uint16 = 3
+ BlockTypeBal uint16 = 4
)
+// Value format: [version (8 bytes, big-endian)] [timestamp (8 bytes, big-endian Unix nanos)] [data]
+const valueHeaderSize = 16
+
type PebbleEngine struct {
- db *pebble.DB
+ db *pebble.DB
+ config dtypes.PebbleBlockDBConfig
}
func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, error) {
@@ -34,7 +42,8 @@ func NewPebbleEngine(config dtypes.PebbleBlockDBConfig) (types.BlockDbEngine, er
}
return &PebbleEngine{
- db: db,
+ db: db,
+ config: config,
}, nil
}
@@ -44,148 +53,248 @@ func (e *PebbleEngine) GetDB() *pebble.DB {
}
func (e *PebbleEngine) Close() error {
- err := e.db.Close()
- if err != nil {
- return err
- }
-
- return nil
+ return e.db.Close()
}
-func (e *PebbleEngine) getBlockHeader(root []byte) ([]byte, uint64, error) {
+// makeKey creates a key for the given root and block type.
+func makeKey(root []byte, blockType uint16) []byte {
key := make([]byte, 2+len(root)+2)
binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader)
+ binary.BigEndian.PutUint16(key[2+len(root):], blockType)
+ return key
+}
+
+// getComponent retrieves a single component from the database.
+// Returns (data, version, timestamp, error). Returns nil data if not found.
+func (e *PebbleEngine) getComponent(root []byte, blockType uint16) ([]byte, uint64, time.Time, error) {
+ key := makeKey(root, blockType)
res, closer, err := e.db.Get(key)
- if err != nil && err != pebble.ErrNotFound {
- return nil, 0, err
+ if err == pebble.ErrNotFound {
+ return nil, 0, time.Time{}, nil
+ }
+ if err != nil {
+ return nil, 0, time.Time{}, err
}
defer closer.Close()
- if err == pebble.ErrNotFound || len(res) == 0 {
- return nil, 0, nil
+ if len(res) < valueHeaderSize {
+ return nil, 0, time.Time{}, nil
}
version := binary.BigEndian.Uint64(res[:8])
- header := make([]byte, len(res)-8)
- copy(header, res[8:])
+ timestamp := time.Unix(0, int64(binary.BigEndian.Uint64(res[8:16])))
- return header, version, nil
+ data := make([]byte, len(res)-valueHeaderSize)
+ copy(data, res[valueHeaderSize:])
+
+ return data, version, timestamp, nil
}
-func (e *PebbleEngine) getBlockBody(root []byte, parser func(uint64, []byte) (interface{}, error)) (interface{}, uint64, error) {
- key := make([]byte, 2+len(root)+2)
- binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
- copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody)
+// setComponent stores a single component in the database.
+func (e *PebbleEngine) setComponent(root []byte, blockType uint16, version uint64, data []byte) error {
+ key := makeKey(root, blockType)
- res, closer, err := e.db.Get(key)
- if err != nil && err != pebble.ErrNotFound {
- return nil, 0, err
- }
- defer closer.Close()
+ value := make([]byte, valueHeaderSize+len(data))
+ binary.BigEndian.PutUint64(value[:8], version)
+ binary.BigEndian.PutUint64(value[8:16], uint64(time.Now().UnixNano()))
+ copy(value[valueHeaderSize:], data)
- if err == pebble.ErrNotFound || len(res) == 0 {
- return nil, 0, nil
+ return e.db.Set(key, value, nil)
+}
+
+// componentExists checks if a component exists in the database.
+func (e *PebbleEngine) componentExists(root []byte, blockType uint16) bool {
+ key := makeKey(root, blockType)
+
+ res, closer, err := e.db.Get(key)
+ if err == nil && len(res) >= valueHeaderSize {
+ closer.Close()
+ return true
}
+ return false
+}
- version := binary.BigEndian.Uint64(res[:8])
- block := res[8:]
+// GetStoredComponents returns which components exist for a block.
+func (e *PebbleEngine) GetStoredComponents(_ context.Context, _ uint64, root []byte) (types.BlockDataFlags, error) {
+ var flags types.BlockDataFlags
- body, err := parser(version, block)
- if err != nil {
- return nil, 0, err
+ if e.componentExists(root, BlockTypeHeader) {
+ flags |= types.BlockDataFlagHeader
+ }
+ if e.componentExists(root, BlockTypeBody) {
+ flags |= types.BlockDataFlagBody
+ }
+ if e.componentExists(root, BlockTypePayload) {
+ flags |= types.BlockDataFlagPayload
+ }
+ if e.componentExists(root, BlockTypeBal) {
+ flags |= types.BlockDataFlagBal
}
- return body, version, nil
+ return flags, nil
}
-func (e *PebbleEngine) GetBlock(_ context.Context, _ uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) {
- header, header_ver, err := e.getBlockHeader(root)
- if err != nil {
- return nil, err
+// GetBlock retrieves block data with selective loading based on flags.
+// Note: LRU access tracking should be done by the caller via CacheCleanup.RecordAccess()
+// to avoid expensive read-modify-write operations on every access.
+func (e *PebbleEngine) GetBlock(
+ _ context.Context,
+ _ uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ blockData := &types.BlockData{}
+
+ // Load header if requested
+ if flags.Has(types.BlockDataFlagHeader) {
+ data, version, _, err := e.getComponent(root, BlockTypeHeader)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get header: %w", err)
+ }
+ if data != nil {
+ blockData.HeaderVersion = version
+ blockData.HeaderData = data
+ }
}
- blockData := &types.BlockData{
- HeaderVersion: header_ver,
- HeaderData: header,
- }
+ // Load body if requested
+ if flags.Has(types.BlockDataFlagBody) {
+ data, version, _, err := e.getComponent(root, BlockTypeBody)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get body: %w", err)
+ }
- if parseBlock == nil {
- parseBlock = func(version uint64, block []byte) (interface{}, error) {
- blockData.BodyData = make([]byte, len(block))
- copy(blockData.BodyData, block)
- return nil, nil
+ if data != nil {
+ blockData.BodyVersion = version
+ if parseBlock != nil {
+ body, err := parseBlock(version, data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse body: %w", err)
+ }
+ blockData.Body = body
+ } else {
+ blockData.BodyData = data
+ }
}
}
- body, body_ver, err := e.getBlockBody(root, parseBlock)
- if err != nil {
- return nil, err
- }
+ // Load payload if requested
+ if flags.Has(types.BlockDataFlagPayload) {
+ data, version, _, err := e.getComponent(root, BlockTypePayload)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get payload: %w", err)
+ }
- blockData.Body = body
- blockData.BodyVersion = body_ver
+ if data != nil {
+ blockData.PayloadVersion = version
+ if parsePayload != nil {
+ payload, err := parsePayload(version, data)
+ if err != nil {
+ return nil, fmt.Errorf("failed to parse payload: %w", err)
+ }
+ blockData.Payload = payload
+ } else {
+ blockData.PayloadData = data
+ }
+ }
+ }
- return blockData, nil
-}
+ // Load BAL if requested
+ if flags.Has(types.BlockDataFlagBal) {
+ data, version, _, err := e.getComponent(root, BlockTypeBal)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get BAL: %w", err)
+ }
-func (e *PebbleEngine) checkBlock(key []byte) bool {
- res, closer, err := e.db.Get(key)
- if err == nil && len(res) > 0 {
- closer.Close()
- return true
+ if data != nil {
+ blockData.BalVersion = version
+ blockData.BalData = data
+ }
}
- return false
+ return blockData, nil
}
-func (e *PebbleEngine) addBlockHeader(key []byte, version uint64, header []byte) error {
- data := make([]byte, 8+len(header))
- binary.BigEndian.PutUint64(data[:8], version)
+// AddBlock stores block data. Returns (added, updated, error).
+// - added: true if a new block was created
+// - updated: true if an existing block gained new components (already-stored components are never overwritten)
+func (e *PebbleEngine) AddBlock(
+ _ context.Context,
+ _ uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
+ // Check what components already exist
+ existingFlags, err := e.GetStoredComponents(context.Background(), 0, root)
+ if err != nil {
+ return false, false, fmt.Errorf("failed to check existing components: %w", err)
+ }
- return e.db.Set(key, data, nil)
-}
+ // Get the new data
+ blockData, err := dataCb()
+ if err != nil {
+ return false, false, err
+ }
-func (e *PebbleEngine) addBlockBody(root []byte, version uint64, block []byte) error {
- key := make([]byte, 2+len(root)+2)
- binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
- copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeBody)
+ // Determine what new components we have
+ var newFlags types.BlockDataFlags
+ if len(blockData.HeaderData) > 0 {
+ newFlags |= types.BlockDataFlagHeader
+ }
+ if len(blockData.BodyData) > 0 {
+ newFlags |= types.BlockDataFlagBody
+ }
+ if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 {
+ newFlags |= types.BlockDataFlagPayload
+ }
+ if blockData.BalVersion != 0 && len(blockData.BalData) > 0 {
+ newFlags |= types.BlockDataFlagBal
+ }
- data := make([]byte, 8+len(block))
- binary.BigEndian.PutUint64(data[:8], version)
- copy(data[8:], block)
+ // Calculate components to add (new components not in existing)
+ toAdd := newFlags &^ existingFlags
- return e.db.Set(key, data, nil)
-}
+ if toAdd == 0 {
+ // Nothing new to add
+ return false, false, nil
+ }
-func (e *PebbleEngine) AddBlock(_ context.Context, _ uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) {
- key := make([]byte, 2+len(root)+2)
- binary.BigEndian.PutUint16(key[:2], KeyNamespaceBlock)
- copy(key[2:], root)
- binary.BigEndian.PutUint16(key[2+len(root):], BlockTypeHeader)
+ isNew := existingFlags == 0
+ isUpdated := !isNew
- if e.checkBlock(key) {
- return false, nil
+ // Store new components
+ if toAdd.Has(types.BlockDataFlagHeader) {
+ if err := e.setComponent(root, BlockTypeHeader, blockData.HeaderVersion, blockData.HeaderData); err != nil {
+ return false, false, fmt.Errorf("failed to store header: %w", err)
+ }
}
- blockData, err := dataCb()
- if err != nil {
- return false, err
+ if toAdd.Has(types.BlockDataFlagBody) {
+ if err := e.setComponent(root, BlockTypeBody, blockData.BodyVersion, blockData.BodyData); err != nil {
+ return false, false, fmt.Errorf("failed to store body: %w", err)
+ }
}
- err = e.addBlockHeader(key, blockData.HeaderVersion, blockData.HeaderData)
- if err != nil {
- return false, err
+ if toAdd.Has(types.BlockDataFlagPayload) {
+ if err := e.setComponent(root, BlockTypePayload, blockData.PayloadVersion, blockData.PayloadData); err != nil {
+ return false, false, fmt.Errorf("failed to store payload: %w", err)
+ }
}
- err = e.addBlockBody(root, blockData.BodyVersion, blockData.BodyData)
- if err != nil {
- return false, err
+ if toAdd.Has(types.BlockDataFlagBal) {
+ if err := e.setComponent(root, BlockTypeBal, blockData.BalVersion, blockData.BalData); err != nil {
+ return false, false, fmt.Errorf("failed to store BAL: %w", err)
+ }
}
- return true, nil
+ return isNew, isUpdated, nil
+}
+
+// GetConfig returns the engine configuration.
+func (e *PebbleEngine) GetConfig() dtypes.PebbleBlockDBConfig {
+ return e.config
}
diff --git a/blockdb/s3/format.go b/blockdb/s3/format.go
new file mode 100644
index 000000000..79fa2c04a
--- /dev/null
+++ b/blockdb/s3/format.go
@@ -0,0 +1,202 @@
+package s3
+
+import (
+ "encoding/binary"
+ "fmt"
+
+ "github.com/attestantio/go-eth2-client/spec"
+
+ "github.com/ethpandaops/dora/blockdb/types"
+)
+
+// Object format versions:
+// v1: header + body (pre-gloas blocks)
+// v2: header + body + payload + bal (gloas+ blocks, payload/BAL introduced in same fork)
+//
+// Note: Both payload and BAL may be empty (length 0), but body is always required.
+
+// Metadata sizes by version
+const (
+ metadataSizeV1 = 16 // 4 (version) + 4 (headerLen) + 4 (bodyVer) + 4 (bodyLen)
+ metadataSizeV2 = 32 // v1 + 4 (payloadVer) + 4 (payloadLen) + 4 (balVer) + 4 (balLen)
+
+ // Maximum metadata size for initial read
+ maxMetadataSize = 64
+)
+
+// objectMetadata represents the metadata for all format versions.
+type objectMetadata struct {
+ ObjVersion uint32
+
+ // Header (always present)
+ HeaderLength uint32
+
+ // Body (always required)
+ BodyVersion uint32
+ BodyLength uint32
+
+ // Payload (v2+, may be empty)
+ PayloadVersion uint32
+ PayloadLength uint32
+
+ // BAL (v2+, may be empty)
+ BalVersion uint32
+ BalLength uint32
+}
+
+// metadataSize returns the metadata size for this object.
+func (m *objectMetadata) metadataSize() int {
+ switch m.ObjVersion {
+ case 1:
+ return metadataSizeV1
+ case 2:
+ return metadataSizeV2
+ default:
+ return metadataSizeV2
+ }
+}
+
+// headerOffset returns the byte offset of the header data.
+func (m *objectMetadata) headerOffset() int {
+ return m.metadataSize()
+}
+
+// bodyOffset returns the byte offset of the body data.
+func (m *objectMetadata) bodyOffset() int {
+ return m.metadataSize() + int(m.HeaderLength)
+}
+
+// payloadOffset returns the byte offset of the payload data.
+func (m *objectMetadata) payloadOffset() int {
+ return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength)
+}
+
+// balOffset returns the byte offset of the BAL data.
+func (m *objectMetadata) balOffset() int {
+ return m.metadataSize() + int(m.HeaderLength) + int(m.BodyLength) + int(m.PayloadLength)
+}
+
+// storedFlags returns which components are stored in this object.
+func (m *objectMetadata) storedFlags() types.BlockDataFlags {
+ var flags types.BlockDataFlags
+
+ if m.HeaderLength > 0 {
+ flags |= types.BlockDataFlagHeader
+ }
+ if m.BodyLength > 0 {
+ flags |= types.BlockDataFlagBody
+ }
+ if m.PayloadLength > 0 && m.ObjVersion >= 2 {
+ flags |= types.BlockDataFlagPayload
+ }
+ if m.BalLength > 0 && m.ObjVersion >= 2 {
+ flags |= types.BlockDataFlagBal
+ }
+
+ return flags
+}
+
+// readObjectMetadata reads metadata from any format version.
+func readObjectMetadata(data []byte) (*objectMetadata, error) {
+ if len(data) < 4 {
+ return nil, fmt.Errorf("data too short for metadata version")
+ }
+
+ version := binary.BigEndian.Uint32(data[:4])
+ meta := &objectMetadata{ObjVersion: version}
+
+ switch version {
+ case 1:
+ if len(data) < metadataSizeV1 {
+ return nil, fmt.Errorf("data too short for v1 metadata: need %d, got %d", metadataSizeV1, len(data))
+ }
+ meta.HeaderLength = binary.BigEndian.Uint32(data[4:8])
+ meta.BodyVersion = binary.BigEndian.Uint32(data[8:12])
+ meta.BodyLength = binary.BigEndian.Uint32(data[12:16])
+
+ case 2:
+ if len(data) < metadataSizeV2 {
+ return nil, fmt.Errorf("data too short for v2 metadata: need %d, got %d", metadataSizeV2, len(data))
+ }
+ meta.HeaderLength = binary.BigEndian.Uint32(data[4:8])
+ meta.BodyVersion = binary.BigEndian.Uint32(data[8:12])
+ meta.BodyLength = binary.BigEndian.Uint32(data[12:16])
+ meta.PayloadVersion = binary.BigEndian.Uint32(data[16:20])
+ meta.PayloadLength = binary.BigEndian.Uint32(data[20:24])
+ meta.BalVersion = binary.BigEndian.Uint32(data[24:28])
+ meta.BalLength = binary.BigEndian.Uint32(data[28:32])
+
+ default:
+ return nil, fmt.Errorf("unsupported object version: %d", version)
+ }
+
+ return meta, nil
+}
+
+// writeObjectMetadata creates metadata bytes for the given BlockData.
+// Uses v1 format for pre-gloas blocks, v2 for gloas+ blocks.
+func writeObjectMetadata(data *types.BlockData) []byte {
+ // Use v2 format only for gloas+ blocks (which can have payload/BAL)
+ if data.BodyVersion >= uint64(spec.DataVersionGloas) {
+ meta := make([]byte, metadataSizeV2)
+ binary.BigEndian.PutUint32(meta[0:4], 2)
+ binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData)))
+ binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion))
+ binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData)))
+ binary.BigEndian.PutUint32(meta[16:20], uint32(data.PayloadVersion))
+ binary.BigEndian.PutUint32(meta[20:24], uint32(len(data.PayloadData)))
+ binary.BigEndian.PutUint32(meta[24:28], uint32(data.BalVersion))
+ binary.BigEndian.PutUint32(meta[28:32], uint32(len(data.BalData)))
+ return meta
+ }
+
+ // Use v1 format for pre-gloas blocks
+ meta := make([]byte, metadataSizeV1)
+ binary.BigEndian.PutUint32(meta[0:4], 1)
+ binary.BigEndian.PutUint32(meta[4:8], uint32(len(data.HeaderData)))
+ binary.BigEndian.PutUint32(meta[8:12], uint32(data.BodyVersion))
+ binary.BigEndian.PutUint32(meta[12:16], uint32(len(data.BodyData)))
+ return meta
+}
+
+// getDataRange calculates one contiguous byte range covering all requested components;
+// any unrequested data lying between them is included. Returns (start, end) with end inclusive, or (-1, -1) if no data to fetch.
+func (m *objectMetadata) getDataRange(flags types.BlockDataFlags) (int64, int64) {
+ var start int64 = -1
+ var end int64 = -1
+
+ // Check each component in order (they're stored sequentially)
+ if flags.Has(types.BlockDataFlagHeader) && m.HeaderLength > 0 {
+ start = int64(m.headerOffset())
+ end = start + int64(m.HeaderLength) - 1
+ }
+
+ if flags.Has(types.BlockDataFlagBody) && m.BodyLength > 0 {
+ bodyStart := int64(m.bodyOffset())
+ bodyEnd := bodyStart + int64(m.BodyLength) - 1
+ if start < 0 {
+ start = bodyStart
+ }
+ end = bodyEnd
+ }
+
+ if flags.Has(types.BlockDataFlagPayload) && m.PayloadLength > 0 && m.ObjVersion >= 2 {
+ payloadStart := int64(m.payloadOffset())
+ payloadEnd := payloadStart + int64(m.PayloadLength) - 1
+ if start < 0 {
+ start = payloadStart
+ }
+ end = payloadEnd
+ }
+
+ if flags.Has(types.BlockDataFlagBal) && m.BalLength > 0 && m.ObjVersion >= 2 {
+ balStart := int64(m.balOffset())
+ balEnd := balStart + int64(m.BalLength) - 1
+ if start < 0 {
+ start = balStart
+ }
+ end = balEnd
+ }
+
+ return start, end
+}
diff --git a/blockdb/s3/s3store.go b/blockdb/s3/s3store.go
index 69e240c93..a3163a8b9 100644
--- a/blockdb/s3/s3store.go
+++ b/blockdb/s3/s3store.go
@@ -3,7 +3,6 @@ package s3
import (
"bytes"
"context"
- "encoding/binary"
"encoding/hex"
"fmt"
"io"
@@ -11,6 +10,7 @@ import (
"strings"
"sync/atomic"
+ "github.com/attestantio/go-eth2-client/spec"
"github.com/ethpandaops/dora/blockdb/types"
dtypes "github.com/ethpandaops/dora/types"
"github.com/minio/minio-go/v7"
@@ -21,6 +21,10 @@ type S3Engine struct {
client *minio.Client
bucket string
pathPrefix string
+ config dtypes.S3BlockDBConfig
+
+ // Range request support (configured via EnableRangeRequests)
+ rangeRequestsEnabled bool
// Operation counters
getCount atomic.Int64
@@ -74,9 +78,11 @@ func NewS3Engine(config dtypes.S3BlockDBConfig) (types.BlockDbEngine, error) {
}
engine := &S3Engine{
- client: client,
- bucket: config.Bucket,
- pathPrefix: strings.TrimPrefix(config.Path, "/"),
+ client: client,
+ bucket: config.Bucket,
+ pathPrefix: strings.TrimPrefix(config.Path, "/"),
+ config: config,
+ rangeRequestsEnabled: config.EnableRangeRequests,
}
return engine, nil
@@ -91,149 +97,406 @@ func (e *S3Engine) getObjectKey(root []byte, slot uint64) string {
return path.Join(e.pathPrefix, fmt.Sprintf("%06d", slot/10000), fmt.Sprintf("%010d_%s", slot, rootHex))
}
-type objectMetadata struct {
- objVersion uint32
- headerLength uint32
- bodyVersion uint32
- bodyLength uint32
+// GetStoredComponents returns which components exist for a block by reading metadata.
+func (e *S3Engine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) {
+ key := e.getObjectKey(root, slot)
+ e.getCount.Add(1)
+
+ // Read just the metadata
+ meta, err := e.readMetadata(ctx, key)
+ if err != nil {
+ return 0, err
+ }
+ if meta == nil {
+ return 0, nil
+ }
+
+ return meta.storedFlags(), nil
}
-func (e *S3Engine) readObjectMetadata(data []byte) (*objectMetadata, int, error) {
- metadataLength := 4
- metadata := &objectMetadata{
- objVersion: binary.BigEndian.Uint32(data[:4]),
+// readMetadata reads object metadata using range request if enabled, otherwise full read.
+func (e *S3Engine) readMetadata(ctx context.Context, key string) (*objectMetadata, error) {
+ if e.config.EnableRangeRequests {
+ meta, err := e.readMetadataWithRange(ctx, key)
+ if err == nil {
+ return meta, nil
+ }
+ // Fall through to full read on error
}
- switch metadata.objVersion {
- case 1:
- metadata.headerLength = binary.BigEndian.Uint32(data[4:8])
- metadata.bodyVersion = binary.BigEndian.Uint32(data[8:12])
- metadata.bodyLength = binary.BigEndian.Uint32(data[12:16])
- metadataLength += 12
+ // Full read fallback
+ obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{})
+ if err != nil {
+ errResp := minio.ToErrorResponse(err)
+ if errResp.Code == "NoSuchKey" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("failed to get object: %w", err)
+ }
+ defer obj.Close()
+
+ buf := make([]byte, maxMetadataSize)
+ n, err := obj.Read(buf)
+ if (err != nil && err != io.EOF) || n == 0 {
+ return nil, fmt.Errorf("failed to read metadata: %w", err)
}
- return metadata, metadataLength, nil
+ return readObjectMetadata(buf[:n])
}
-func (e *S3Engine) writeObjectMetadata(metadata *objectMetadata) []byte {
- data := make([]byte, 4, 16)
- binary.BigEndian.PutUint32(data, metadata.objVersion)
+// readMetadataWithRange reads metadata using HTTP Range request.
+func (e *S3Engine) readMetadataWithRange(ctx context.Context, key string) (*objectMetadata, error) {
+ opts := minio.GetObjectOptions{}
+ if err := opts.SetRange(0, int64(maxMetadataSize-1)); err != nil {
+ return nil, err
+ }
- switch metadata.objVersion {
- case 1:
- data = binary.BigEndian.AppendUint32(data, metadata.headerLength)
- data = binary.BigEndian.AppendUint32(data, metadata.bodyVersion)
- data = binary.BigEndian.AppendUint32(data, metadata.bodyLength)
+ obj, err := e.client.GetObject(ctx, e.bucket, key, opts)
+ if err != nil {
+ errResp := minio.ToErrorResponse(err)
+ if errResp.Code == "NoSuchKey" {
+ return nil, nil
+ }
+ return nil, fmt.Errorf("failed to get object with range: %w", err)
+ }
+ defer obj.Close()
+
+ buf := make([]byte, maxMetadataSize)
+ n, err := obj.Read(buf)
+ if err != nil && err != io.EOF {
+ return nil, fmt.Errorf("failed to read range: %w", err)
}
- return data
+ return readObjectMetadata(buf[:n])
}
-func (e *S3Engine) GetBlock(ctx context.Context, slot uint64, root []byte, parseBlock func(uint64, []byte) (interface{}, error)) (*types.BlockData, error) {
+// GetBlock retrieves block data with selective loading based on flags.
+func (e *S3Engine) GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
key := e.getObjectKey(root, slot)
- e.getCount.Add(1)
+ // Try range-based loading if enabled
+ if e.config.EnableRangeRequests && e.rangeRequestsEnabled {
+ data, err := e.getBlockWithRanges(ctx, key, flags, parseBlock, parsePayload)
+ if err == nil {
+ return data, nil
+ }
+ // Fall through to full read on error
+ }
+
+ // Full read fallback
+ return e.getBlockFull(ctx, key, flags, parseBlock, parsePayload)
+}
+
+// getBlockWithRanges uses a single range request for selective loading.
+// Makes exactly 2 GET requests: one for metadata, one for all requested data.
+func (e *S3Engine) getBlockWithRanges(
+ ctx context.Context,
+ key string,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ // First, get metadata (1 GET request)
+ meta, err := e.readMetadataWithRange(ctx, key)
+ if err != nil {
+ return nil, err
+ }
+ if meta == nil {
+ return nil, nil
+ }
+
+ // Calculate the single byte range spanning all requested components
+ rangeStart, rangeEnd := meta.getDataRange(flags)
+ if rangeStart < 0 {
+ // No data to fetch
+ return &types.BlockData{
+ HeaderVersion: uint64(meta.ObjVersion),
+ BodyVersion: uint64(meta.BodyVersion),
+ PayloadVersion: uint64(meta.PayloadVersion),
+ BalVersion: uint64(meta.BalVersion),
+ }, nil
+ }
+
+ // Fetch all requested data in a single GET request
+ opts := minio.GetObjectOptions{}
+ if err := opts.SetRange(rangeStart, rangeEnd); err != nil {
+ return nil, err
+ }
+
+ obj, err := e.client.GetObject(ctx, e.bucket, key, opts)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get object range: %w", err)
+ }
+ defer obj.Close()
+
+ data, err := io.ReadAll(obj)
+ if err != nil {
+ return nil, fmt.Errorf("failed to read object range: %w", err)
+ }
+
+ // Extract requested components from the fetched data
+ return e.extractComponents(meta, flags, data, rangeStart, parseBlock, parsePayload)
+}
+
// extractComponents slices the requested components out of a contiguous
// chunk of object bytes that was fetched starting at absolute object offset
// dataStartOffset. Component positions come from the metadata's absolute
// offsets, rebased against dataStartOffset.
//
// Components whose rebased range falls outside data are silently omitted
// (no error is returned for them).
// NOTE(review): silent omission of out-of-range components means a
// truncated fetch degrades to missing fields — confirm intended.
func (e *S3Engine) extractComponents(
	meta *objectMetadata,
	flags types.BlockDataFlags,
	data []byte,
	dataStartOffset int64,
	parseBlock func(uint64, []byte) (any, error),
	parsePayload func(uint64, []byte) (any, error),
) (*types.BlockData, error) {
	// Version info is always populated, even when no component was requested.
	blockData := &types.BlockData{
		HeaderVersion:  uint64(meta.ObjVersion),
		BodyVersion:    uint64(meta.BodyVersion),
		PayloadVersion: uint64(meta.PayloadVersion),
		BalVersion:     uint64(meta.BalVersion),
	}

	// Extract header if requested; headers are returned raw (never parsed).
	if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 {
		start := int64(meta.headerOffset()) - dataStartOffset
		end := start + int64(meta.HeaderLength)
		if start >= 0 && end <= int64(len(data)) {
			blockData.HeaderData = data[start:end]
		}
	}

	// Extract body if requested; parsed via parseBlock when provided,
	// otherwise returned raw in BodyData.
	if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 {
		start := int64(meta.bodyOffset()) - dataStartOffset
		end := start + int64(meta.BodyLength)
		if start >= 0 && end <= int64(len(data)) {
			bodyData := data[start:end]
			if parseBlock != nil {
				body, err := parseBlock(uint64(meta.BodyVersion), bodyData)
				if err != nil {
					return nil, fmt.Errorf("failed to parse body: %w", err)
				}
				blockData.Body = body
			} else {
				blockData.BodyData = bodyData
			}
		}
	}

	// Extract payload if requested; only v2+ objects carry a payload section.
	if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 {
		start := int64(meta.payloadOffset()) - dataStartOffset
		end := start + int64(meta.PayloadLength)
		if start >= 0 && end <= int64(len(data)) {
			payloadData := data[start:end]
			if parsePayload != nil {
				payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData)
				if err != nil {
					return nil, fmt.Errorf("failed to parse payload: %w", err)
				}
				blockData.Payload = payload
			} else {
				blockData.PayloadData = payloadData
			}
		}
	}

	// Extract block access list if requested; only v2+ objects carry it.
	// BAL data is always returned raw.
	if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 {
		start := int64(meta.balOffset()) - dataStartOffset
		end := start + int64(meta.BalLength)
		if start >= 0 && end <= int64(len(data)) {
			blockData.BalData = data[start:end]
		}
	}

	return blockData, nil
}
+
// getBlockFull performs a full object read (fallback when range requests are
// disabled or have failed). The whole object is downloaded, then the
// requested components are sliced out by their sequential offsets:
// metadata | header | body | payload | bal.
// Returns (nil, nil) when the object does not exist.
// NOTE(review): unlike the range path, this path does not update the
// getBytes counter — confirm whether full-read traffic should be tracked.
func (e *S3Engine) getBlockFull(
	ctx context.Context,
	key string,
	flags types.BlockDataFlags,
	parseBlock func(uint64, []byte) (any, error),
	parsePayload func(uint64, []byte) (any, error),
) (*types.BlockData, error) {
	obj, err := e.client.GetObject(ctx, e.bucket, key, minio.GetObjectOptions{})
	if err != nil {
		errResp := minio.ToErrorResponse(err)
		if errResp.Code == "NoSuchKey" {
			return nil, nil
		}
		return nil, fmt.Errorf("failed to get object: %w", err)
	}
	defer obj.Close()

	// Read entire object into memory.
	data, err := io.ReadAll(obj)
	if err != nil {
		return nil, fmt.Errorf("failed to read object: %w", err)
	}

	// Parse the metadata prefix; it determines all component offsets below.
	meta, err := readObjectMetadata(data)
	if err != nil {
		return nil, fmt.Errorf("failed to read metadata: %w", err)
	}

	// Version info is always populated, even when no component was requested.
	blockData := &types.BlockData{
		HeaderVersion:  uint64(meta.ObjVersion),
		BodyVersion:    uint64(meta.BodyVersion),
		PayloadVersion: uint64(meta.PayloadVersion),
		BalVersion:     uint64(meta.BalVersion),
	}

	metaSize := meta.metadataSize()

	// Extract header if requested (raw bytes, never parsed).
	if flags.Has(types.BlockDataFlagHeader) && meta.HeaderLength > 0 {
		headerEnd := metaSize + int(meta.HeaderLength)
		if headerEnd <= len(data) {
			blockData.HeaderData = data[metaSize:headerEnd]
		}
	}

	// Extract body if requested; parsed via parseBlock when provided.
	if flags.Has(types.BlockDataFlagBody) && meta.BodyLength > 0 {
		bodyStart := metaSize + int(meta.HeaderLength)
		bodyEnd := bodyStart + int(meta.BodyLength)
		if bodyEnd <= len(data) {
			bodyData := data[bodyStart:bodyEnd]
			if parseBlock != nil {
				body, err := parseBlock(uint64(meta.BodyVersion), bodyData)
				if err != nil {
					return nil, fmt.Errorf("failed to parse body: %w", err)
				}
				blockData.Body = body
			} else {
				blockData.BodyData = bodyData
			}
		}
	}

	// Extract payload if requested; only v2+ objects carry a payload section.
	if flags.Has(types.BlockDataFlagPayload) && meta.PayloadLength > 0 && meta.ObjVersion >= 2 {
		payloadStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength)
		payloadEnd := payloadStart + int(meta.PayloadLength)
		if payloadEnd <= len(data) {
			payloadData := data[payloadStart:payloadEnd]
			if parsePayload != nil {
				payload, err := parsePayload(uint64(meta.PayloadVersion), payloadData)
				if err != nil {
					return nil, fmt.Errorf("failed to parse payload: %w", err)
				}
				blockData.Payload = payload
			} else {
				blockData.PayloadData = payloadData
			}
		}
	}

	// Extract block access list if requested; only v2+ objects carry it
	// (the earlier "v3+" comment contradicted the ObjVersion >= 2 check).
	if flags.Has(types.BlockDataFlagBal) && meta.BalLength > 0 && meta.ObjVersion >= 2 {
		balStart := metaSize + int(meta.HeaderLength) + int(meta.BodyLength) + int(meta.PayloadLength)
		balEnd := balStart + int(meta.BalLength)
		if balEnd <= len(data) {
			blockData.BalData = data[balStart:balEnd]
		}
	}

	return blockData, nil
}
-func (e *S3Engine) AddBlock(ctx context.Context, slot uint64, root []byte, dataCb func() (*types.BlockData, error)) (bool, error) {
+// AddBlock stores block data. Returns (added, updated, error).
+func (e *S3Engine) AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
key := e.getObjectKey(root, slot)
e.statCount.Add(1)
- // Check if object already exists
- stat, err := e.client.StatObject(ctx, e.bucket, key, minio.StatObjectOptions{})
- if err == nil && stat.Size > 0 {
- return false, nil
+ // Check what components already exist
+ existingMeta, err := e.readMetadata(ctx, key)
+ if err != nil && err.Error() != "failed to get object: NoSuchKey" {
+ // Ignore "not found" errors
+ existingFlags, _ := e.GetStoredComponents(ctx, slot, root)
+ if existingFlags == 0 {
+ existingMeta = nil
+ }
}
+ // Get the new data
blockData, err := dataCb()
if err != nil {
- return false, fmt.Errorf("failed to get block data: %w", err)
+ return false, false, fmt.Errorf("failed to get block data: %w", err)
+ }
+
+ // Calculate what we already have
+ var existingFlags types.BlockDataFlags
+ if existingMeta != nil {
+ existingFlags = existingMeta.storedFlags()
+ }
+
+ // Calculate what the new data provides
+ var newFlags types.BlockDataFlags
+ if len(blockData.HeaderData) > 0 {
+ newFlags |= types.BlockDataFlagHeader
+ }
+ if len(blockData.BodyData) > 0 {
+ newFlags |= types.BlockDataFlagBody
+ }
+ if blockData.PayloadVersion != 0 && len(blockData.PayloadData) > 0 {
+ newFlags |= types.BlockDataFlagPayload
+ }
+ if blockData.BalVersion != 0 && len(blockData.BalData) > 0 {
+ newFlags |= types.BlockDataFlagBal
+ }
+
+ // Check if we need to update (new data has more components)
+ needsUpdate := (newFlags &^ existingFlags) != 0
+ isNew := existingFlags == 0
+
+ if !isNew && !needsUpdate {
+ // Already have all the data
+ return false, false, nil
}
- metadata := &objectMetadata{
- objVersion: uint32(blockData.HeaderVersion),
- headerLength: uint32(len(blockData.HeaderData)),
- bodyVersion: uint32(blockData.BodyVersion),
- bodyLength: uint32(len(blockData.BodyData)),
+ // If updating, merge with existing data
+ finalData := blockData
+ if !isNew && needsUpdate {
+ // Fetch existing data and merge
+ existingData, err := e.GetBlock(ctx, slot, root, types.BlockDataFlagAll, nil, nil)
+ if err == nil && existingData != nil {
+ finalData = mergeBlockData(existingData, blockData)
+ }
}
- metadataBytes := e.writeObjectMetadata(metadata)
- metadataLength := len(metadataBytes)
+ // Write object (v1 for pre-gloas, v2 for gloas+)
+ metaBytes := writeObjectMetadata(finalData)
+
+ // Calculate total size and build reader chain (avoids copying to concatenated buffer)
+ totalSize := int64(len(metaBytes) + len(finalData.HeaderData) + len(finalData.BodyData))
+ readers := []io.Reader{
+ bytes.NewReader(metaBytes),
+ bytes.NewReader(finalData.HeaderData),
+ bytes.NewReader(finalData.BodyData),
+ }
- // Prepare data with header and body versions and lengths
- data := make([]byte, metadataLength+int(metadata.headerLength)+int(metadata.bodyLength))
- copy(data[:metadataLength], metadataBytes)
- copy(data[metadataLength:metadataLength+int(metadata.headerLength)], blockData.HeaderData)
- copy(data[metadataLength+int(metadata.headerLength):], blockData.BodyData)
+ if finalData.BodyVersion >= uint64(spec.DataVersionGloas) {
+ totalSize += int64(len(finalData.PayloadData) + len(finalData.BalData))
+ readers = append(readers,
+ bytes.NewReader(finalData.PayloadData),
+ bytes.NewReader(finalData.BalData),
+ )
+ }
// Upload object
e.putCount.Add(1)
@@ -241,14 +504,55 @@ func (e *S3Engine) AddBlock(ctx context.Context, slot uint64, root []byte, dataC
ctx,
e.bucket,
key,
- bytes.NewReader(data),
- int64(len(data)),
+ io.MultiReader(readers...),
+ totalSize,
minio.PutObjectOptions{ContentType: "application/octet-stream"},
)
if err != nil {
- return false, fmt.Errorf("failed to upload block: %w", err)
+ return false, false, fmt.Errorf("failed to upload block: %w", err)
+ }
+
+ e.putBytes.Add(totalSize)
+
+ return isNew, !isNew && needsUpdate, nil
+}
+
+// mergeBlockData merges existing data with new data (new takes precedence for non-empty fields).
+func mergeBlockData(existing, new *types.BlockData) *types.BlockData {
+ result := &types.BlockData{}
+
+ // Use new data if available, otherwise keep existing
+ if len(new.HeaderData) > 0 {
+ result.HeaderVersion = new.HeaderVersion
+ result.HeaderData = new.HeaderData
+ } else {
+ result.HeaderVersion = existing.HeaderVersion
+ result.HeaderData = existing.HeaderData
+ }
+
+ if len(new.BodyData) > 0 {
+ result.BodyVersion = new.BodyVersion
+ result.BodyData = new.BodyData
+ } else {
+ result.BodyVersion = existing.BodyVersion
+ result.BodyData = existing.BodyData
+ }
+
+ if new.PayloadVersion != 0 && len(new.PayloadData) > 0 {
+ result.PayloadVersion = new.PayloadVersion
+ result.PayloadData = new.PayloadData
+ } else {
+ result.PayloadVersion = existing.PayloadVersion
+ result.PayloadData = existing.PayloadData
+ }
+
+ if new.BalVersion != 0 && len(new.BalData) > 0 {
+ result.BalVersion = new.BalVersion
+ result.BalData = new.BalData
+ } else {
+ result.BalVersion = existing.BalVersion
+ result.BalData = existing.BalData
}
- e.putBytes.Add(int64(len(data)))
- return true, nil
+ return result
}
diff --git a/blockdb/tiered/tiered.go b/blockdb/tiered/tiered.go
new file mode 100644
index 000000000..04f05a16f
--- /dev/null
+++ b/blockdb/tiered/tiered.go
@@ -0,0 +1,278 @@
+package tiered
+
import (
	"context"
	"errors"
	"fmt"

	"github.com/sirupsen/logrus"

	"github.com/ethpandaops/dora/blockdb/pebble"
	"github.com/ethpandaops/dora/blockdb/s3"
	"github.com/ethpandaops/dora/blockdb/types"
	dtypes "github.com/ethpandaops/dora/types"
)
+
// TieredEngine combines Pebble (cache) and S3 (primary storage) in a tiered architecture.
// Reads check cache first, then fall back to S3.
// Writes go to both (write-through).
type TieredEngine struct {
	cache   *pebble.PebbleEngine // fast local cache tier
	primary *s3.S3Engine         // authoritative remote storage tier
	cleanup *pebble.CacheCleanup // background LRU eviction for the cache tier
	logger  logrus.FieldLogger   // scoped with component=tiered-blockdb
}
+
+// NewTieredEngine creates a new tiered storage engine.
+func NewTieredEngine(config dtypes.TieredBlockDBConfig, logger logrus.FieldLogger) (types.BlockDbEngine, error) {
+ // Initialize Pebble cache
+ cacheEngine, err := pebble.NewPebbleEngine(config.Pebble)
+ if err != nil {
+ return nil, fmt.Errorf("failed to initialize pebble cache: %w", err)
+ }
+
+ pebbleEngine, ok := cacheEngine.(*pebble.PebbleEngine)
+ if !ok {
+ return nil, fmt.Errorf("unexpected pebble engine type")
+ }
+
+ // Initialize S3 primary storage
+ primaryEngine, err := s3.NewS3Engine(config.S3)
+ if err != nil {
+ cacheEngine.Close()
+ return nil, fmt.Errorf("failed to initialize s3 primary storage: %w", err)
+ }
+
+ s3Engine, ok := primaryEngine.(*s3.S3Engine)
+ if !ok {
+ cacheEngine.Close()
+ return nil, fmt.Errorf("unexpected s3 engine type")
+ }
+
+ // Initialize cache cleanup
+ cleanup := pebble.NewCacheCleanup(pebbleEngine, logger)
+ cleanup.Start()
+
+ return &TieredEngine{
+ cache: pebbleEngine,
+ primary: s3Engine,
+ cleanup: cleanup,
+ logger: logger.WithField("component", "tiered-blockdb"),
+ }, nil
+}
+
+// Close closes both storage engines.
+func (e *TieredEngine) Close() error {
+ if e.cleanup != nil {
+ e.cleanup.Stop()
+ }
+
+ var errs []error
+ if err := e.cache.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("cache close: %w", err))
+ }
+ if err := e.primary.Close(); err != nil {
+ errs = append(errs, fmt.Errorf("primary close: %w", err))
+ }
+
+ if len(errs) > 0 {
+ return errs[0]
+ }
+ return nil
+}
+
+// GetStoredComponents returns which components exist for a block.
+// Checks cache first, then S3.
+func (e *TieredEngine) GetStoredComponents(ctx context.Context, slot uint64, root []byte) (types.BlockDataFlags, error) {
+ // Check cache first
+ cacheFlags, err := e.cache.GetStoredComponents(ctx, slot, root)
+ if err != nil {
+ e.logger.Debugf("cache GetStoredComponents error: %v", err)
+ }
+
+ // If cache has all components, return early
+ if cacheFlags == types.BlockDataFlagAll {
+ return cacheFlags, nil
+ }
+
+ // Check S3 for additional components
+ s3Flags, err := e.primary.GetStoredComponents(ctx, slot, root)
+ if err != nil {
+ return cacheFlags, nil // Return cache result on S3 error
+ }
+
+ return cacheFlags | s3Flags, nil
+}
+
+// GetBlock retrieves block data with selective loading.
+// Checks cache first, fetches missing components from S3.
+func (e *TieredEngine) GetBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ flags types.BlockDataFlags,
+ parseBlock func(uint64, []byte) (any, error),
+ parsePayload func(uint64, []byte) (any, error),
+) (*types.BlockData, error) {
+ // Check what's in cache
+ cacheFlags, _ := e.cache.GetStoredComponents(ctx, slot, root)
+
+ // Determine what we can get from cache vs S3
+ cacheRequestFlags := flags & cacheFlags
+ s3RequestFlags := flags &^ cacheFlags
+
+ result := &types.BlockData{}
+
+ // Get from cache
+ if cacheRequestFlags != 0 {
+ cacheData, err := e.cache.GetBlock(ctx, slot, root, cacheRequestFlags, parseBlock, parsePayload)
+ if err != nil {
+ e.logger.Debugf("cache GetBlock error: %v", err)
+ } else if cacheData != nil {
+ mergeBlockDataInto(result, cacheData)
+
+ // Record LRU access
+ if e.cleanup != nil {
+ e.cleanup.RecordAccess(root, cacheRequestFlags)
+ }
+ }
+ }
+
+ // Get missing components from S3
+ if s3RequestFlags != 0 {
+ s3Data, err := e.primary.GetBlock(ctx, slot, root, s3RequestFlags, parseBlock, parsePayload)
+ if err != nil {
+ e.logger.Debugf("s3 GetBlock error: %v", err)
+ } else if s3Data != nil {
+ mergeBlockDataInto(result, s3Data)
+
+ // Cache the S3 data for future reads
+ e.cacheS3Data(ctx, slot, root, s3Data, s3RequestFlags)
+ }
+ }
+
+ return result, nil
+}
+
+// cacheS3Data stores S3 data in the cache for future reads.
+func (e *TieredEngine) cacheS3Data(ctx context.Context, slot uint64, root []byte, data *types.BlockData, flags types.BlockDataFlags) {
+ // Build cache data with only the components we fetched from S3
+ cacheData := &types.BlockData{}
+
+ if flags.Has(types.BlockDataFlagHeader) && len(data.HeaderData) > 0 {
+ cacheData.HeaderVersion = data.HeaderVersion
+ cacheData.HeaderData = data.HeaderData
+ }
+ if flags.Has(types.BlockDataFlagBody) && len(data.BodyData) > 0 {
+ cacheData.BodyVersion = data.BodyVersion
+ cacheData.BodyData = data.BodyData
+ }
+ if flags.Has(types.BlockDataFlagPayload) && len(data.PayloadData) > 0 {
+ cacheData.PayloadVersion = data.PayloadVersion
+ cacheData.PayloadData = data.PayloadData
+ }
+ if flags.Has(types.BlockDataFlagBal) && len(data.BalData) > 0 {
+ cacheData.BalVersion = data.BalVersion
+ cacheData.BalData = data.BalData
+ }
+
+ // Add to cache (ignore errors - caching is best effort)
+ _, _, err := e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
+ return cacheData, nil
+ })
+ if err != nil {
+ e.logger.Debugf("failed to cache S3 data: %v", err)
+ }
+
+ // Flush LRU updates since we did a write
+ if e.cleanup != nil {
+ e.cleanup.FlushLRU()
+ }
+}
+
+// AddBlock stores block data using write-through to both cache and S3.
+// Returns (added, updated, error).
+func (e *TieredEngine) AddBlock(
+ ctx context.Context,
+ slot uint64,
+ root []byte,
+ dataCb func() (*types.BlockData, error),
+) (bool, bool, error) {
+ // Get the data once
+ data, err := dataCb()
+ if err != nil {
+ return false, false, err
+ }
+
+ // Check what components already exist (in cache or S3)
+ existingFlags, _ := e.GetStoredComponents(ctx, slot, root)
+
+ // Determine what new data provides
+ var newFlags types.BlockDataFlags
+ if len(data.HeaderData) > 0 {
+ newFlags |= types.BlockDataFlagHeader
+ }
+ if len(data.BodyData) > 0 {
+ newFlags |= types.BlockDataFlagBody
+ }
+ if data.PayloadVersion != 0 && len(data.PayloadData) > 0 {
+ newFlags |= types.BlockDataFlagPayload
+ }
+ if data.BalVersion != 0 && len(data.BalData) > 0 {
+ newFlags |= types.BlockDataFlagBal
+ }
+
+ // Check if we need to update
+ needsUpdate := (newFlags &^ existingFlags) != 0
+ isNew := existingFlags == 0
+
+ if !isNew && !needsUpdate {
+ return false, false, nil
+ }
+
+ // Write-through: write to S3 first (primary), then cache
+ // S3 handles merging with existing data
+ s3Added, s3Updated, err := e.primary.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
+ return data, nil
+ })
+ if err != nil {
+ return false, false, fmt.Errorf("failed to write to S3: %w", err)
+ }
+
+ // Write to cache
+ _, _, err = e.cache.AddBlock(ctx, slot, root, func() (*types.BlockData, error) {
+ return data, nil
+ })
+ if err != nil {
+ e.logger.Warnf("failed to write to cache: %v", err)
+ // Don't fail - S3 write succeeded
+ }
+
+ // Flush LRU updates after write
+ if e.cleanup != nil {
+ e.cleanup.FlushLRU()
+ }
+
+ return s3Added, s3Updated, nil
+}
+
+// mergeBlockDataInto merges source data into target (source values take precedence for non-empty fields).
+func mergeBlockDataInto(target, source *types.BlockData) {
+ if source.HeaderVersion != 0 || len(source.HeaderData) > 0 {
+ target.HeaderVersion = source.HeaderVersion
+ target.HeaderData = source.HeaderData
+ }
+ if source.BodyVersion != 0 || len(source.BodyData) > 0 {
+ target.BodyVersion = source.BodyVersion
+ target.BodyData = source.BodyData
+ target.Body = source.Body
+ }
+ if source.PayloadVersion != 0 || len(source.PayloadData) > 0 {
+ target.PayloadVersion = source.PayloadVersion
+ target.PayloadData = source.PayloadData
+ target.Payload = source.Payload
+ }
+ if source.BalVersion != 0 || len(source.BalData) > 0 {
+ target.BalVersion = source.BalVersion
+ target.BalData = source.BalData
+ }
+}
diff --git a/blockdb/types/engine.go b/blockdb/types/engine.go
index 80db81f67..86b352ddf 100644
--- a/blockdb/types/engine.go
+++ b/blockdb/types/engine.go
@@ -2,13 +2,25 @@ package types
import "context"
-// BlockData holds beacon block header and body data.
// BlockData contains all data components for a block. The raw []byte fields
// hold the serialized form; the `any` fields hold the parsed form when a
// parse callback was supplied to GetBlock.
type BlockData struct {
	// Header data
	HeaderVersion uint64
	HeaderData    []byte

	// Body data
	BodyVersion uint64
	BodyData    []byte
	Body        any // Parsed body (optional)

	// Execution payload data (ePBS)
	PayloadVersion uint64
	PayloadData    []byte
	Payload        any // Parsed payload (optional)

	// Block access list data
	BalVersion uint64
	BalData    []byte
}
// ExecDataTxSections holds all compressed section data for a single
@@ -22,11 +34,35 @@ type ExecDataTxSections struct {
StateChangeData []byte // snappy-compressed, nil if section not present
}
-// BlockDbEngine is the interface for beacon block storage.
// BlockDbEngine defines the interface for block database engines.
type BlockDbEngine interface {
	// Close closes the database engine.
	Close() error

	// GetBlock retrieves block data with selective loading based on flags.
	// If parseBlock is nil, raw body data is stored in BlockData.BodyData.
	// If parsePayload is nil, raw payload data is stored in BlockData.PayloadData.
	// Implementations in this package return (nil, nil) when the block is
	// not stored.
	GetBlock(
		ctx context.Context,
		slot uint64,
		root []byte,
		flags BlockDataFlags,
		parseBlock func(uint64, []byte) (any, error),
		parsePayload func(uint64, []byte) (any, error),
	) (*BlockData, error)

	// AddBlock stores block data. dataCb is only invoked when a write may be
	// needed. Returns:
	//  - added: true if a new block was created
	//  - updated: true if an existing block was updated with new components
	AddBlock(
		ctx context.Context,
		slot uint64,
		root []byte,
		dataCb func() (*BlockData, error),
	) (added bool, updated bool, err error)

	// GetStoredComponents returns which components exist for a block.
	GetStoredComponents(ctx context.Context, slot uint64, root []byte) (BlockDataFlags, error)
}
// ExecDataEngine is the interface for per-block execution data storage.
diff --git a/blockdb/types/flags.go b/blockdb/types/flags.go
new file mode 100644
index 000000000..34aff4dbb
--- /dev/null
+++ b/blockdb/types/flags.go
@@ -0,0 +1,38 @@
+package types
+
// BlockDataFlags is a bit set selecting which block components an operation
// should load or has stored.
type BlockDataFlags uint8

const (
	// BlockDataFlagHeader selects the block header component.
	BlockDataFlagHeader BlockDataFlags = 0x01
	// BlockDataFlagBody selects the block body component.
	BlockDataFlagBody BlockDataFlags = 0x02
	// BlockDataFlagPayload selects the execution payload component.
	BlockDataFlagPayload BlockDataFlags = 0x04
	// BlockDataFlagBal selects the block access list component.
	BlockDataFlagBal BlockDataFlags = 0x08

	// BlockDataFlagAll selects every component.
	BlockDataFlagAll = BlockDataFlagHeader | BlockDataFlagBody | BlockDataFlagPayload | BlockDataFlagBal
)

// Has reports whether every bit of flag is set in f.
func (f BlockDataFlags) Has(flag BlockDataFlags) bool {
	return f&flag == flag
}

// HasAny reports whether at least one bit of flags is set in f.
func (f BlockDataFlags) HasAny(flags BlockDataFlags) bool {
	return f&flags != 0
}

// Add returns f with flag set.
func (f BlockDataFlags) Add(flag BlockDataFlags) BlockDataFlags {
	return f | flag
}

// Remove returns f with flag cleared.
func (f BlockDataFlags) Remove(flag BlockDataFlags) BlockDataFlags {
	return f &^ flag
}
diff --git a/clients/consensus/chainspec.go b/clients/consensus/chainspec.go
index 8272c9531..4713cc0b8 100644
--- a/clients/consensus/chainspec.go
+++ b/clients/consensus/chainspec.go
@@ -53,6 +53,8 @@ type ChainSpecConfig struct {
ElectraForkEpoch *uint64 `yaml:"ELECTRA_FORK_EPOCH" check-if-fork:"ElectraForkEpoch"`
FuluForkVersion phase0.Version `yaml:"FULU_FORK_VERSION" check-if-fork:"FuluForkEpoch"`
FuluForkEpoch *uint64 `yaml:"FULU_FORK_EPOCH" check-if-fork:"FuluForkEpoch"`
+ GloasForkVersion phase0.Version `yaml:"GLOAS_FORK_VERSION" check-if-fork:"GloasForkEpoch"`
+ GloasForkEpoch *uint64 `yaml:"GLOAS_FORK_EPOCH" check-if-fork:"GloasForkEpoch"`
// Time parameters
SlotDurationMs uint64 `yaml:"SLOT_DURATION_MS"`
@@ -84,7 +86,6 @@ type ChainSpecConfig struct {
MaxPayloadSize uint64 `yaml:"MAX_PAYLOAD_SIZE"`
MaxRequestBlocks uint64 `yaml:"MAX_REQUEST_BLOCKS"`
EpochsPerSubnetSubscription uint64 `yaml:"EPOCHS_PER_SUBNET_SUBSCRIPTION"`
- MinEpochsForBlockRequests uint64 `yaml:"MIN_EPOCHS_FOR_BLOCK_REQUESTS"`
AttestationPropoagationSlotRange uint64 `yaml:"ATTESTATION_PROPAGATION_SLOT_RANGE"`
MaximumGossipClockDisparity uint64 `yaml:"MAXIMUM_GOSSIP_CLOCK_DISPARITY"`
MessageDomainInvalidSnappy phase0.DomainType `yaml:"MESSAGE_DOMAIN_INVALID_SNAPPY"`
@@ -92,32 +93,31 @@ type ChainSpecConfig struct {
SubnetsPerNode uint64 `yaml:"SUBNETS_PER_NODE"`
AttestationSubnetCount uint64 `yaml:"ATTESTATION_SUBNET_COUNT"`
AttestationSubnetExtraBits uint64 `yaml:"ATTESTATION_SUBNET_EXTRA_BITS"`
- AttestationSubnetPrefixBits uint64 `yaml:"ATTESTATION_SUBNET_PREFIX_BITS"`
// Deneb
MaxRequestBlocksDeneb uint64 `yaml:"MAX_REQUEST_BLOCKS_DENEB" check-if-fork:"DenebForkEpoch"`
MinEpochsForBlobSidecarsRequests uint64 `yaml:"MIN_EPOCHS_FOR_BLOB_SIDECARS_REQUESTS" check-if-fork:"DenebForkEpoch"`
BlobSidecarSubnetCount uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT" check-if-fork:"DenebForkEpoch"`
MaxBlobsPerBlock uint64 `yaml:"MAX_BLOBS_PER_BLOCK" check-if-fork:"DenebForkEpoch"`
- MaxRequestBlobSidecars uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS" check-if-fork:"DenebForkEpoch"`
// Electra
MinPerEpochChurnLimitElectra uint64 `yaml:"MIN_PER_EPOCH_CHURN_LIMIT_ELECTRA" check-if-fork:"ElectraForkEpoch"`
MaxPerEpochActivationExitChurnLimit uint64 `yaml:"MAX_PER_EPOCH_ACTIVATION_EXIT_CHURN_LIMIT" check-if-fork:"ElectraForkEpoch"`
BlobSidecarSubnetCountElectra uint64 `yaml:"BLOB_SIDECAR_SUBNET_COUNT_ELECTRA" check-if-fork:"ElectraForkEpoch"`
MaxBlobsPerBlockElectra uint64 `yaml:"MAX_BLOBS_PER_BLOCK_ELECTRA" check-if-fork:"ElectraForkEpoch"`
- MaxRequestBlobSidecarsElectra uint64 `yaml:"MAX_REQUEST_BLOB_SIDECARS_ELECTRA" check-if-fork:"ElectraForkEpoch"`
// Fulu
MinEpochsForDataColumnSidecars uint64 `yaml:"MIN_EPOCHS_FOR_DATA_COLUMN_SIDECARS_REQUESTS" check-if-fork:"FuluForkEpoch"`
NumberOfCustodyGroups *uint64 `yaml:"NUMBER_OF_CUSTODY_GROUPS" check-if-fork:"FuluForkEpoch"`
DataColumnSidecarSubnetCount *uint64 `yaml:"DATA_COLUMN_SIDECAR_SUBNET_COUNT" check-if-fork:"FuluForkEpoch"`
- MaxRequestDataColumnSidecars uint64 `yaml:"MAX_REQUEST_DATA_COLUMN_SIDECARS" check-if-fork:"FuluForkEpoch"`
SamplesPerSlot uint64 `yaml:"SAMPLES_PER_SLOT" check-if-fork:"FuluForkEpoch"`
CustodyRequirement *uint64 `yaml:"CUSTODY_REQUIREMENT" check-if-fork:"FuluForkEpoch"`
ValidatorCustodyRequirement *uint64 `yaml:"VALIDATOR_CUSTODY_REQUIREMENT" check-if-fork:"FuluForkEpoch"`
BalancePerAdditionalCustodyGroup *uint64 `yaml:"BALANCE_PER_ADDITIONAL_CUSTODY_GROUP" check-if-fork:"FuluForkEpoch"`
BlobSchedule []BlobScheduleEntry `yaml:"BLOB_SCHEDULE" check-if-fork:"FuluForkEpoch"`
+
+ // Gloas
+ MinBuilderWithdrawabilityDelay uint64 `yaml:"MIN_BUILDER_WITHDRAWABILITY_DELAY" check-if-fork:"GloasForkEpoch"`
}
type ChainSpecPreset struct {
@@ -205,6 +205,13 @@ type ChainSpecPreset struct {
FieldElementsPerExtBlob uint64 `yaml:"FIELD_ELEMENTS_PER_EXT_BLOB" check-if-fork:"FuluForkEpoch"`
CellsPerExtBlob uint64 `yaml:"CELLS_PER_EXT_BLOB" check-if-fork:"FuluForkEpoch"`
NumberOfColumns *uint64 `yaml:"NUMBER_OF_COLUMNS" check-if-fork:"FuluForkEpoch"`
+
+ // Gloas
+ PtcSize uint64 `yaml:"PTC_SIZE" check-if-fork:"GloasForkEpoch"`
+ MaxPayloadAttestations uint64 `yaml:"MAX_PAYLOAD_ATTESTATIONS" check-if-fork:"GloasForkEpoch"`
+ BuilderRegistryLimit uint64 `yaml:"BUILDER_REGISTRY_LIMIT" check-if-fork:"GloasForkEpoch"`
+ BuilderPendingWithdrawalsLimit uint64 `yaml:"BUILDER_PENDING_WITHDRAWALS_LIMIT" check-if-fork:"GloasForkEpoch"`
+ MaxBuildersPerWithdrawalsSweep uint64 `yaml:"MAX_BUILDERS_PER_WITHDRAWALS_SWEEP" check-if-fork:"GloasForkEpoch"`
}
type ChainSpecDomainTypes struct {
@@ -219,6 +226,9 @@ type ChainSpecDomainTypes struct {
DomainSyncCommitteeSelectionProof phase0.DomainType `yaml:"DOMAIN_SYNC_COMMITTEE_SELECTION_PROOF"`
DomainContributionAndProof phase0.DomainType `yaml:"DOMAIN_CONTRIBUTION_AND_PROOF"`
DomainBlsToExecutionChange phase0.DomainType `yaml:"DOMAIN_BLS_TO_EXECUTION_CHANGE"`
+ DomainBeaconBuilder phase0.DomainType `yaml:"DOMAIN_BEACON_BUILDER" check-if-fork:"GloasForkEpoch"`
+ DomainPtcAttester phase0.DomainType `yaml:"DOMAIN_PTC_ATTESTER" check-if-fork:"GloasForkEpoch"`
+ DomainProposerPreferences phase0.DomainType `yaml:"DOMAIN_PROPOSER_PREFERENCES" check-if-fork:"GloasForkEpoch"`
}
type ChainSpec struct {
diff --git a/clients/consensus/chainstate.go b/clients/consensus/chainstate.go
index 0a1eb2230..63213c013 100644
--- a/clients/consensus/chainstate.go
+++ b/clients/consensus/chainstate.go
@@ -361,6 +361,34 @@ func (cs *ChainState) GetForkDigestForEpoch(epoch phase0.Epoch) phase0.ForkDiges
return cs.GetForkDigest(currentForkVersion, currentBlobParams)
}
+func (cs *ChainState) GetBlobScheduleForEpoch(epoch phase0.Epoch) *BlobScheduleEntry {
+ if cs.specs == nil {
+ return nil
+ }
+
+ var blobSchedule *BlobScheduleEntry
+
+ if cs.specs.ElectraForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.ElectraForkEpoch) {
+ blobSchedule = &BlobScheduleEntry{
+ Epoch: *cs.specs.ElectraForkEpoch,
+ MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlockElectra,
+ }
+ } else if cs.specs.DenebForkEpoch != nil && epoch >= phase0.Epoch(*cs.specs.DenebForkEpoch) {
+ blobSchedule = &BlobScheduleEntry{
+ Epoch: *cs.specs.DenebForkEpoch,
+ MaxBlobsPerBlock: cs.specs.MaxBlobsPerBlock,
+ }
+ }
+
+ for i, blobScheduleEntry := range cs.specs.BlobSchedule {
+ if blobScheduleEntry.Epoch <= uint64(epoch) {
+ blobSchedule = &cs.specs.BlobSchedule[i]
+ }
+ }
+
+ return blobSchedule
+}
+
func (cs *ChainState) GetForkDigest(forkVersion phase0.Version, blobParams *BlobScheduleEntry) phase0.ForkDigest {
if cs.specs == nil || cs.genesis == nil {
return phase0.ForkDigest{}
@@ -444,6 +472,22 @@ func (cs *ChainState) GetValidatorChurnLimit(validatorCount uint64) uint64 {
return adaptable
}
+func (cs *ChainState) IsEip7732Enabled(epoch phase0.Epoch) bool {
+ if cs.specs == nil {
+ return false
+ }
+
+ return cs.specs.GloasForkEpoch != nil && phase0.Epoch(*cs.specs.GloasForkEpoch) <= epoch
+}
+
+func (cs *ChainState) IsFuluEnabled(epoch phase0.Epoch) bool {
+ if cs.specs == nil {
+ return false
+ }
+
+ return cs.specs.FuluForkEpoch != nil && phase0.Epoch(*cs.specs.FuluForkEpoch) <= epoch
+}
+
func (cs *ChainState) GetBalanceChurnLimit(totalActiveBalance uint64) uint64 {
if cs.specs == nil {
return 0
diff --git a/clients/consensus/client.go b/clients/consensus/client.go
index 234c48eeb..ad514acdf 100644
--- a/clients/consensus/client.go
+++ b/clients/consensus/client.go
@@ -6,6 +6,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/sirupsen/logrus"
@@ -23,36 +24,38 @@ type ClientConfig struct {
}
type Client struct {
- pool *Pool
- clientIdx uint16
- endpointConfig *ClientConfig
- clientCtx context.Context
- clientCtxCancel context.CancelFunc
- rpcClient *rpc.BeaconClient
- logger *logrus.Entry
- isOnline bool
- isSyncing bool
- isOptimistic bool
- versionStr string
- nodeIdentity *rpc.NodeIdentity
- clientType ClientType
- lastEvent time.Time
- retryCounter uint64
- lastError error
- headMutex sync.RWMutex
- headRoot phase0.Root
- headSlot phase0.Slot
- justifiedRoot phase0.Root
- justifiedEpoch phase0.Epoch
- finalizedRoot phase0.Root
- finalizedEpoch phase0.Epoch
- lastFinalityUpdateEpoch phase0.Epoch
- lastMetadataUpdateEpoch phase0.Epoch
- lastMetadataUpdateTime time.Time
- lastSyncUpdateEpoch phase0.Epoch
- peers []*v1.Peer
- streamDispatcher utils.Dispatcher[*rpc.BeaconStreamEvent]
- checkpointDispatcher utils.Dispatcher[*v1.Finality]
+ pool *Pool
+ clientIdx uint16
+ endpointConfig *ClientConfig
+ clientCtx context.Context
+ clientCtxCancel context.CancelFunc
+ rpcClient *rpc.BeaconClient
+ logger *logrus.Entry
+ isOnline bool
+ isSyncing bool
+ isOptimistic bool
+ versionStr string
+ nodeIdentity *rpc.NodeIdentity
+ clientType ClientType
+ lastEvent time.Time
+ retryCounter uint64
+ lastError error
+ headMutex sync.RWMutex
+ headRoot phase0.Root
+ headSlot phase0.Slot
+ justifiedRoot phase0.Root
+ justifiedEpoch phase0.Epoch
+ finalizedRoot phase0.Root
+ finalizedEpoch phase0.Epoch
+ lastFinalityUpdateEpoch phase0.Epoch
+ lastMetadataUpdateEpoch phase0.Epoch
+ lastMetadataUpdateTime time.Time
+ lastSyncUpdateEpoch phase0.Epoch
+ peers []*v1.Peer
+ streamDispatcher utils.Dispatcher[*rpc.BeaconStreamEvent]
+ checkpointDispatcher utils.Dispatcher[*v1.Finality]
+ executionPayloadDispatcher utils.Dispatcher[*v1.ExecutionPayloadAvailableEvent]
+ executionPayloadBidDispatcher utils.Dispatcher[*gloas.SignedExecutionPayloadBid]
specWarnings []string // warnings from incomplete spec checks
specs map[string]interface{}
@@ -99,6 +102,14 @@ func (client *Client) SubscribeFinalizedEvent(capacity int) *utils.Subscription[
return client.checkpointDispatcher.Subscribe(capacity, false)
}
+func (client *Client) SubscribeExecutionPayloadAvailableEvent(capacity int, blocking bool) *utils.Subscription[*v1.ExecutionPayloadAvailableEvent] {
+ return client.executionPayloadDispatcher.Subscribe(capacity, blocking)
+}
+
+func (client *Client) SubscribeExecutionPayloadBidEvent(capacity int, blocking bool) *utils.Subscription[*gloas.SignedExecutionPayloadBid] {
+ return client.executionPayloadBidDispatcher.Subscribe(capacity, blocking)
+}
+
func (client *Client) GetPool() *Pool {
return client.pool
}
diff --git a/clients/consensus/clientlogic.go b/clients/consensus/clientlogic.go
index 146af4072..84f63bb83 100644
--- a/clients/consensus/clientlogic.go
+++ b/clients/consensus/clientlogic.go
@@ -8,6 +8,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/sirupsen/logrus"
@@ -133,7 +134,11 @@ func (client *Client) runClientLogic() error {
}
// start event stream
- blockStream := client.rpcClient.NewBlockStream(client.clientCtx, client.logger, rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent)
+ blockStream := client.rpcClient.NewBlockStream(
+ client.clientCtx,
+ client.logger,
+ rpc.StreamBlockEvent|rpc.StreamHeadEvent|rpc.StreamFinalizedEvent|rpc.StreamExecutionPayloadEvent|rpc.StreamExecutionPayloadBidEvent,
+ )
defer blockStream.Close()
// process events
@@ -165,6 +170,12 @@ func (client *Client) runClientLogic() error {
if err != nil {
client.logger.Warnf("failed processing finalized event: %v", err)
}
+
+ case rpc.StreamExecutionPayloadEvent:
+ client.executionPayloadDispatcher.Fire(evt.Data.(*v1.ExecutionPayloadAvailableEvent))
+
+ case rpc.StreamExecutionPayloadBidEvent:
+ client.executionPayloadBidDispatcher.Fire(evt.Data.(*gloas.SignedExecutionPayloadBid))
}
// fire through stream dispatcher first to preserve SSE ordering
diff --git a/clients/consensus/rpc/beaconapi.go b/clients/consensus/rpc/beaconapi.go
index 6768091bc..7435764e1 100644
--- a/clients/consensus/rpc/beaconapi.go
+++ b/clients/consensus/rpc/beaconapi.go
@@ -19,6 +19,7 @@ import (
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/capella"
"github.com/attestantio/go-eth2-client/spec/deneb"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/rs/zerolog"
"github.com/sirupsen/logrus"
@@ -406,6 +407,22 @@ func (bc *BeaconClient) GetBlockBodyByBlockroot(ctx context.Context, blockroot p
return result.Data, nil
}
+func (bc *BeaconClient) GetExecutionPayloadByBlockroot(ctx context.Context, blockroot phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ provider, isProvider := bc.clientSvc.(eth2client.ExecutionPayloadProvider)
+ if !isProvider {
+ return nil, fmt.Errorf("get execution payload not supported")
+ }
+
+ result, err := provider.SignedExecutionPayloadEnvelope(ctx, &api.SignedExecutionPayloadEnvelopeOpts{
+ Block: fmt.Sprintf("0x%x", blockroot),
+ })
+ if err != nil {
+ return nil, err
+ }
+
+ return result.Data, nil
+}
+
func (bc *BeaconClient) GetState(ctx context.Context, stateRef string) (*spec.VersionedBeaconState, error) {
provider, isProvider := bc.clientSvc.(eth2client.BeaconStateProvider)
if !isProvider {
diff --git a/clients/consensus/rpc/beaconstream.go b/clients/consensus/rpc/beaconstream.go
index be6fd92c9..6721c24dc 100644
--- a/clients/consensus/rpc/beaconstream.go
+++ b/clients/consensus/rpc/beaconstream.go
@@ -10,16 +10,18 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
- "github.com/donovanhide/eventsource"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/sirupsen/logrus"
"github.com/ethpandaops/dora/clients/consensus/rpc/eventstream"
)
const (
- StreamBlockEvent uint16 = 0x01
- StreamHeadEvent uint16 = 0x02
- StreamFinalizedEvent uint16 = 0x04
+ StreamBlockEvent uint16 = 0x01
+ StreamHeadEvent uint16 = 0x02
+ StreamFinalizedEvent uint16 = 0x04
+ StreamExecutionPayloadEvent uint16 = 0x08
+ StreamExecutionPayloadBidEvent uint16 = 0x10
)
type BeaconStreamEvent struct {
@@ -71,48 +73,127 @@ func (bs *BeaconStream) startStream() {
bs.running = false
}()
- stream := bs.subscribeStream(bs.client.endpoint, bs.events)
- if stream != nil {
- defer stream.Close()
-
- for {
+ // Subscribe to basic events (block, head, finalized_checkpoint)
+ basicEvents := bs.events & (StreamBlockEvent | StreamHeadEvent | StreamFinalizedEvent)
+ basicStream := bs.subscribeStream(bs.client.endpoint, basicEvents)
+ if basicStream == nil {
+ return
+ }
+ defer basicStream.Close()
+
+ // Subscribe to advanced events (execution_payload_available, execution_payload_bid)
+ // These are in a separate stream because clients may not support them yet,
+ // and subscribing to unsupported topics can cause the entire subscription to fail.
+ // Run in a separate goroutine so it doesn't block the basic stream.
+ advancedEvents := bs.events & (StreamExecutionPayloadEvent | StreamExecutionPayloadBidEvent)
+ advancedStreamChan := make(chan *eventstream.Stream, 1)
+ if advancedEvents > 0 {
+ go func() {
+ stream := bs.subscribeStream(bs.client.endpoint, advancedEvents)
select {
+ case advancedStreamChan <- stream:
case <-bs.ctx.Done():
- return
- case evt := <-stream.Events:
- switch evt.Event() {
- case "block":
- bs.processBlockEvent(evt)
- case "head":
- bs.processHeadEvent(evt)
- case "finalized_checkpoint":
- bs.processFinalizedEvent(evt)
- }
- case <-stream.Ready:
- bs.ReadyChan <- &BeaconStreamStatus{
- Ready: true,
- }
- case err := <-stream.Errors:
- if strings.Contains(err.Error(), "INTERNAL_ERROR; received from peer") {
- // this seems to be a go bug, silently reconnect to the stream
- time.Sleep(10 * time.Millisecond)
- stream.RetryNow()
- } else {
- bs.logger.Warnf("beacon block stream error: %v", err)
+ if stream != nil {
+ stream.Close()
}
+ }
+ }()
+ }
- select {
- case bs.ReadyChan <- &BeaconStreamStatus{
- Ready: false,
- Error: err,
- }:
- case <-bs.ctx.Done():
- }
+ var advancedStream *eventstream.Stream
+ defer func() {
+ if advancedStream != nil {
+ advancedStream.Close()
+ }
+ }()
+
+ for {
+ select {
+ case <-bs.ctx.Done():
+ return
+
+ // Basic stream events
+ case evt := <-basicStream.Events:
+ switch evt.Event() {
+ case "block":
+ bs.processBlockEvent(evt)
+ case "head":
+ bs.processHeadEvent(evt)
+ case "finalized_checkpoint":
+ bs.processFinalizedEvent(evt)
+ }
+ case <-basicStream.Ready:
+ bs.ReadyChan <- &BeaconStreamStatus{
+ Ready: true,
}
+ case err := <-basicStream.Errors:
+ bs.handleStreamError(basicStream, err)
+
+ // Advanced stream connection established
+ case stream := <-advancedStreamChan:
+ advancedStream = stream
+
+ // Advanced stream events (no Ready/Error forwarding)
+ case evt := <-bs.getAdvancedStreamEvents(advancedStream):
+ switch evt.Event() {
+ case "execution_payload_available":
+ bs.processExecutionPayloadAvailableEvent(evt)
+ case "execution_payload_bid":
+ bs.processExecutionPayloadBidEvent(evt)
+ }
+ case <-bs.getAdvancedStreamReady(advancedStream):
+ // Don't forward ready events from advanced stream
+ case <-bs.getAdvancedStreamErrors(advancedStream):
+ // Silently retry - clients may not support these events yet
+ time.Sleep(10 * time.Millisecond)
+ advancedStream.RetryNow()
}
}
}
+// getAdvancedStreamEvents returns the events channel or a nil channel if stream is nil.
+func (bs *BeaconStream) getAdvancedStreamEvents(stream *eventstream.Stream) chan eventstream.StreamEvent {
+ if stream == nil {
+ return nil
+ }
+ return stream.Events
+}
+
+// getAdvancedStreamReady returns the ready channel or a nil channel if stream is nil.
+func (bs *BeaconStream) getAdvancedStreamReady(stream *eventstream.Stream) chan bool {
+ if stream == nil {
+ return nil
+ }
+ return stream.Ready
+}
+
+// getAdvancedStreamErrors returns the errors channel or a nil channel if stream is nil.
+func (bs *BeaconStream) getAdvancedStreamErrors(stream *eventstream.Stream) chan error {
+ if stream == nil {
+ return nil
+ }
+ return stream.Errors
+}
+
+// handleStreamError handles stream errors and forwards them to the ReadyChan.
+func (bs *BeaconStream) handleStreamError(stream *eventstream.Stream, err error) {
+ if strings.Contains(err.Error(), "INTERNAL_ERROR; received from peer") {
+ // this seems to be a go bug, silently reconnect to the stream
+ time.Sleep(10 * time.Millisecond)
+ stream.RetryNow()
+ } else {
+ bs.logger.Warnf("beacon block stream error: %v", err)
+ }
+
+ select {
+ case bs.ReadyChan <- &BeaconStreamStatus{
+ Ready: false,
+ Error: err,
+ }:
+ case <-bs.ctx.Done():
+ }
+}
+
func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventstream.Stream {
var topics strings.Builder
@@ -148,6 +229,26 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst
topicsCount++
}
+ if events&StreamExecutionPayloadEvent > 0 {
+ if topicsCount > 0 {
+ fmt.Fprintf(&topics, ",")
+ }
+
+ fmt.Fprintf(&topics, "execution_payload_available")
+
+ topicsCount++
+ }
+
+ if events&StreamExecutionPayloadBidEvent > 0 {
+ if topicsCount > 0 {
+ fmt.Fprintf(&topics, ",")
+ }
+
+ fmt.Fprintf(&topics, "execution_payload_bid")
+
+ topicsCount++
+ }
+
if topicsCount == 0 {
return nil
}
@@ -179,7 +280,7 @@ func (bs *BeaconStream) subscribeStream(endpoint string, events uint16) *eventst
}
}
-func (bs *BeaconStream) processBlockEvent(evt eventsource.Event) {
+func (bs *BeaconStream) processBlockEvent(evt eventstream.StreamEvent) {
var parsed v1.BlockEvent
err := json.Unmarshal([]byte(evt.Data()), &parsed)
@@ -194,7 +295,7 @@ func (bs *BeaconStream) processBlockEvent(evt eventsource.Event) {
}
}
-func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) {
+func (bs *BeaconStream) processHeadEvent(evt eventstream.StreamEvent) {
var parsed v1.HeadEvent
err := json.Unmarshal([]byte(evt.Data()), &parsed)
@@ -210,7 +311,7 @@ func (bs *BeaconStream) processHeadEvent(evt eventsource.Event) {
}
}
-func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) {
+func (bs *BeaconStream) processFinalizedEvent(evt eventstream.StreamEvent) {
var parsed v1.FinalizedCheckpointEvent
err := json.Unmarshal([]byte(evt.Data()), &parsed)
@@ -225,6 +326,36 @@ func (bs *BeaconStream) processFinalizedEvent(evt eventsource.Event) {
}
}
+func (bs *BeaconStream) processExecutionPayloadAvailableEvent(evt eventstream.StreamEvent) {
+ var parsed v1.ExecutionPayloadAvailableEvent
+
+ err := json.Unmarshal([]byte(evt.Data()), &parsed)
+ if err != nil {
+ bs.logger.Warnf("beacon block stream failed to decode execution_payload event: %v", err)
+ return
+ }
+
+ bs.EventChan <- &BeaconStreamEvent{
+ Event: StreamExecutionPayloadEvent,
+ Data: &parsed,
+ }
+}
+
+func (bs *BeaconStream) processExecutionPayloadBidEvent(evt eventstream.StreamEvent) {
+ var parsed gloas.SignedExecutionPayloadBid
+
+ err := json.Unmarshal([]byte(evt.Data()), &parsed)
+ if err != nil {
+ bs.logger.Warnf("beacon block stream failed to decode execution_payload_bid event: %v", err)
+ return
+ }
+
+ bs.EventChan <- &BeaconStreamEvent{
+ Event: StreamExecutionPayloadBidEvent,
+ Data: &parsed,
+ }
+}
+
func getRedactedURL(requrl string) string {
var logurl string
diff --git a/cmd/dora-explorer/main.go b/cmd/dora-explorer/main.go
index 5eaabb516..c05d2f726 100644
--- a/cmd/dora-explorer/main.go
+++ b/cmd/dora-explorer/main.go
@@ -233,6 +233,8 @@ func startFrontend(router *mux.Router) {
router.HandleFunc("/validators/submit_withdrawals", handlers.SubmitWithdrawal).Methods("GET")
router.HandleFunc("/validator/{idxOrPubKey}", handlers.Validator).Methods("GET")
router.HandleFunc("/validator/{index}/slots", handlers.ValidatorSlots).Methods("GET")
+ router.HandleFunc("/builders", handlers.Builders).Methods("GET")
+ router.HandleFunc("/builder/{idxOrPubKey}", handlers.BuilderDetail).Methods("GET")
if utils.Config.Frontend.Pprof {
// add pprof handler
diff --git a/cmd/dora-utils/blockdb_sync.go b/cmd/dora-utils/blockdb_sync.go
index fdebcb81a..7905d7e9f 100644
--- a/cmd/dora-utils/blockdb_sync.go
+++ b/cmd/dora-utils/blockdb_sync.go
@@ -271,7 +271,7 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs
return slotResult{slot: slot, err: fmt.Errorf("failed to marshal block header for slot %d: %v", slot, err), time: time.Since(t1)}
}
- added, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) {
+ added, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, slot, blockHeader.Root[:], func() (*btypes.BlockData, error) {
blockBody, err := client.GetRPCClient().GetBlockBodyByBlockroot(ctx, blockHeader.Root)
if err != nil {
return nil, fmt.Errorf("failed to get block body for slot %d: %v", slot, err)
@@ -282,11 +282,29 @@ func processSlot(ctx context.Context, pool *consensus.Pool, dynSsz *dynssz.DynSs
return nil, fmt.Errorf("failed to marshal block body for slot %d: %v", slot, err)
}
+ var payloadVersion uint64
+ var payloadBytes []byte
+
+ chainState := pool.GetChainState()
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(phase0.Slot(slot))) {
+ blockPayload, err := client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, blockHeader.Root)
+ if err != nil {
+ return nil, fmt.Errorf("failed to get block execution payload for slot %d: %v", slot, err)
+ }
+
+ payloadVersion, payloadBytes, err = beacon.MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz, blockPayload, true)
+ if err != nil {
+ return nil, fmt.Errorf("failed to marshal block execution payload for slot %d: %v", slot, err)
+ }
+ }
+
return &btypes.BlockData{
- HeaderVersion: 1,
- HeaderData: headerBytes,
- BodyVersion: version,
- BodyData: bodyBytes,
+ HeaderVersion: 1,
+ HeaderData: headerBytes,
+ BodyVersion: version,
+ BodyData: bodyBytes,
+ PayloadVersion: payloadVersion,
+ PayloadData: payloadBytes,
}, nil
})
if err != nil {
diff --git a/db/block_bids.go b/db/block_bids.go
new file mode 100644
index 000000000..7b2b9c6ca
--- /dev/null
+++ b/db/block_bids.go
@@ -0,0 +1,193 @@
+package db
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/jmoiron/sqlx"
+)
+
+func InsertBids(bids []*dbtypes.BlockBid, tx *sqlx.Tx) error {
+ var sql strings.Builder
+ fmt.Fprint(&sql,
+ EngineQuery(map[dbtypes.DBEngineType]string{
+ dbtypes.DBEnginePgsql: "INSERT INTO block_bids ",
+ dbtypes.DBEngineSqlite: "INSERT OR REPLACE INTO block_bids ",
+ }),
+ "(parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment)",
+ " VALUES ",
+ )
+ argIdx := 0
+ fieldCount := 9
+
+ args := make([]any, len(bids)*fieldCount)
+ for i, bid := range bids {
+ if i > 0 {
+ fmt.Fprintf(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "(")
+ for f := 0; f < fieldCount; f++ {
+ if f > 0 {
+ fmt.Fprintf(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "$%v", argIdx+f+1)
+ }
+ fmt.Fprintf(&sql, ")")
+
+ args[argIdx+0] = bid.ParentRoot
+ args[argIdx+1] = bid.ParentHash
+ args[argIdx+2] = bid.BlockHash
+ args[argIdx+3] = bid.FeeRecipient
+ args[argIdx+4] = bid.GasLimit
+ args[argIdx+5] = bid.BuilderIndex
+ args[argIdx+6] = bid.Slot
+ args[argIdx+7] = bid.Value
+ args[argIdx+8] = bid.ElPayment
+ argIdx += fieldCount
+ }
+ fmt.Fprint(&sql, EngineQuery(map[dbtypes.DBEngineType]string{
+ dbtypes.DBEnginePgsql: " ON CONFLICT (parent_root, parent_hash, block_hash, builder_index) DO UPDATE SET " +
+ "fee_recipient = excluded.fee_recipient, " +
+ "gas_limit = excluded.gas_limit, " +
+ "slot = excluded.slot, " +
+ "value = excluded.value, " +
+ "el_payment = excluded.el_payment",
+ dbtypes.DBEngineSqlite: "",
+ }))
+
+ _, err := tx.Exec(sql.String(), args...)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
+func GetBidsForBlockRoot(ctx context.Context, blockRoot []byte) []*dbtypes.BlockBid {
+ var sql strings.Builder
+ args := []any{
+ blockRoot,
+ }
+ fmt.Fprint(&sql, `
+ SELECT
+ parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment
+ FROM block_bids
+ WHERE parent_root = $1
+ ORDER BY value DESC
+ `)
+
+ bids := []*dbtypes.BlockBid{}
+ err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching bids for block root: %v", err)
+ return nil
+ }
+ return bids
+}
+
+func GetBidsForSlotRange(ctx context.Context, minSlot uint64) []*dbtypes.BlockBid {
+ var sql strings.Builder
+ args := []any{
+ minSlot,
+ }
+ fmt.Fprint(&sql, `
+ SELECT
+ parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment
+ FROM block_bids
+ WHERE slot >= $1
+ ORDER BY slot DESC, value DESC
+ `)
+
+ bids := []*dbtypes.BlockBid{}
+ err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching bids for slot range: %v", err)
+ return nil
+ }
+ return bids
+}
+
+func DeleteBidsBeforeSlot(minSlot uint64, tx *sqlx.Tx) error {
+ _, err := tx.Exec(`DELETE FROM block_bids WHERE slot < $1`, minSlot)
+ return err
+}
+
+// GetBidsByBlockHashes returns bids for multiple block hashes and a specific builder index
+// Returns a map keyed by block hash (hex string) for easy lookup
+func GetBidsByBlockHashes(ctx context.Context, blockHashes [][]byte, builderIndex uint64) map[string]*dbtypes.BlockBid {
+ result := make(map[string]*dbtypes.BlockBid, len(blockHashes))
+ if len(blockHashes) == 0 {
+ return result
+ }
+
+ var sql strings.Builder
+ args := make([]any, 0, len(blockHashes)+1)
+
+ fmt.Fprint(&sql, `
+ SELECT
+ parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment
+ FROM block_bids
+ WHERE builder_index = $1 AND block_hash IN (`)
+
+ args = append(args, builderIndex)
+ for i, hash := range blockHashes {
+ if i > 0 {
+ fmt.Fprint(&sql, ", ")
+ }
+ fmt.Fprintf(&sql, "$%d", i+2)
+ args = append(args, hash)
+ }
+ fmt.Fprint(&sql, ")")
+
+ bids := []*dbtypes.BlockBid{}
+ err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching bids by block hashes: %v", err)
+ return result
+ }
+
+ for _, bid := range bids {
+ key := fmt.Sprintf("%x", bid.BlockHash)
+ result[key] = bid
+ }
+
+ return result
+}
+
+// GetBidsByBuilderIndex returns bids submitted by a specific builder, ordered by slot descending
+func GetBidsByBuilderIndex(ctx context.Context, builderIndex uint64, offset uint64, limit uint32) ([]*dbtypes.BlockBid, uint64) {
+ var sql strings.Builder
+ args := []any{
+ builderIndex,
+ }
+ fmt.Fprint(&sql, `
+ SELECT
+ parent_root, parent_hash, block_hash, fee_recipient, gas_limit, builder_index, slot, value, el_payment
+ FROM block_bids
+ WHERE builder_index = $1
+ ORDER BY slot DESC, value DESC
+ `)
+
+ if limit > 0 {
+ fmt.Fprintf(&sql, " LIMIT $%d OFFSET $%d", len(args)+1, len(args)+2)
+ args = append(args, limit, offset)
+ }
+
+ bids := []*dbtypes.BlockBid{}
+ err := ReaderDb.SelectContext(ctx, &bids, sql.String(), args...)
+ if err != nil {
+ logger.Errorf("Error while fetching bids for builder index %d: %v", builderIndex, err)
+ return nil, 0
+ }
+
+ // Get total count
+ var totalCount uint64
+ err = ReaderDb.GetContext(ctx, &totalCount, `SELECT COUNT(*) FROM block_bids WHERE builder_index = $1`, builderIndex)
+ if err != nil {
+ logger.Errorf("Error while counting bids for builder index %d: %v", builderIndex, err)
+ return bids, 0
+ }
+
+ return bids, totalCount
+}
diff --git a/db/builders.go b/db/builders.go
new file mode 100644
index 000000000..26d67bfa1
--- /dev/null
+++ b/db/builders.go
@@ -0,0 +1,450 @@
+package db
+
+import (
+ "context"
+ "fmt"
+ "strings"
+
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/jmoiron/sqlx"
+)
+
+// InsertBuilder inserts a single builder into the database (upsert keyed on
+// pubkey). On Postgres an existing row is updated in place via ON CONFLICT;
+// on SQLite the row is replaced via INSERT OR REPLACE. Must be called within
+// an open transaction.
+func InsertBuilder(builder *dbtypes.Builder, tx *sqlx.Tx) error {
+	_, err := tx.Exec(EngineQuery(map[dbtypes.DBEngineType]string{
+		dbtypes.DBEnginePgsql: `
+		INSERT INTO builders (
+			pubkey, builder_index, version, execution_address,
+			deposit_epoch, withdrawable_epoch, superseded
+		) VALUES ($1, $2, $3, $4, $5, $6, $7)
+		ON CONFLICT (pubkey) DO UPDATE SET
+			builder_index = excluded.builder_index,
+			version = excluded.version,
+			execution_address = excluded.execution_address,
+			deposit_epoch = excluded.deposit_epoch,
+			withdrawable_epoch = excluded.withdrawable_epoch,
+			superseded = excluded.superseded`,
+		dbtypes.DBEngineSqlite: `
+		INSERT OR REPLACE INTO builders (
+			pubkey, builder_index, version, execution_address,
+			deposit_epoch, withdrawable_epoch, superseded
+		) VALUES ($1, $2, $3, $4, $5, $6, $7)`,
+	}),
+		builder.Pubkey,
+		builder.BuilderIndex,
+		builder.Version,
+		builder.ExecutionAddress,
+		builder.DepositEpoch,
+		builder.WithdrawableEpoch,
+		builder.Superseded)
+
+	if err != nil {
+		return fmt.Errorf("error inserting builder: %w", err)
+	}
+	return nil
+}
+
+// InsertBuilderBatch inserts multiple builders in a single multi-row statement
+// (upsert keyed on pubkey, same per-engine semantics as InsertBuilder).
+// A no-op for an empty slice. Must be called within an open transaction.
+func InsertBuilderBatch(builders []*dbtypes.Builder, tx *sqlx.Tx) error {
+	if len(builders) == 0 {
+		return nil
+	}
+
+	// Build one "($a, $b, ... $g)" tuple per builder; placeholders are numbered
+	// sequentially across rows (row i uses $i*7+1 .. $i*7+7, 7 columns per row).
+	valueStrings := make([]string, len(builders))
+	valueArgs := make([]any, 0, len(builders)*7)
+	for i, b := range builders {
+		valueStrings[i] = fmt.Sprintf("($%v, $%v, $%v, $%v, $%v, $%v, $%v)",
+			i*7+1, i*7+2, i*7+3, i*7+4, i*7+5, i*7+6, i*7+7)
+		valueArgs = append(valueArgs,
+			b.Pubkey,
+			b.BuilderIndex,
+			b.Version,
+			b.ExecutionAddress,
+			b.DepositEpoch,
+			b.WithdrawableEpoch,
+			b.Superseded)
+	}
+
+	stmt := fmt.Sprintf(EngineQuery(map[dbtypes.DBEngineType]string{
+		dbtypes.DBEnginePgsql: `
+		INSERT INTO builders (
+			pubkey, builder_index, version, execution_address,
+			deposit_epoch, withdrawable_epoch, superseded
+		) VALUES %s
+		ON CONFLICT (pubkey) DO UPDATE SET
+			builder_index = excluded.builder_index,
+			version = excluded.version,
+			execution_address = excluded.execution_address,
+			deposit_epoch = excluded.deposit_epoch,
+			withdrawable_epoch = excluded.withdrawable_epoch,
+			superseded = excluded.superseded`,
+		dbtypes.DBEngineSqlite: `
+		INSERT OR REPLACE INTO builders (
+			pubkey, builder_index, version, execution_address,
+			deposit_epoch, withdrawable_epoch, superseded
+		) VALUES %s`,
+	}), strings.Join(valueStrings, ","))
+
+	_, err := tx.Exec(stmt, valueArgs...)
+	if err != nil {
+		return fmt.Errorf("error inserting builder batch: %w", err)
+	}
+
+	return nil
+}
+
+// GetBuilderByPubkey returns a builder by pubkey (primary key)
+func GetBuilderByPubkey(ctx context.Context, pubkey []byte) *dbtypes.Builder {
+ builder := dbtypes.Builder{}
+ err := ReaderDb.GetContext(ctx, &builder, `
+ SELECT * FROM builders WHERE pubkey = $1
+ `, pubkey)
+ if err != nil {
+ return nil
+ }
+ return &builder
+}
+
+// GetActiveBuilderByIndex returns the active (non-superseded) builder for a given index
+func GetActiveBuilderByIndex(ctx context.Context, index uint64) *dbtypes.Builder {
+ builder := dbtypes.Builder{}
+ err := ReaderDb.GetContext(ctx, &builder, `
+ SELECT * FROM builders WHERE builder_index = $1 AND superseded = false
+ `, index)
+ if err != nil {
+ return nil
+ }
+ return &builder
+}
+
+// GetBuildersByIndex returns every builder row (superseded ones included) that
+// has been registered under the given index, ordered by the superseded flag
+// ascending so active entries come first. Returns nil on query failure.
+func GetBuildersByIndex(ctx context.Context, index uint64) []*dbtypes.Builder {
+	result := []*dbtypes.Builder{}
+	if err := ReaderDb.SelectContext(ctx, &result, `
+		SELECT * FROM builders WHERE builder_index = $1 ORDER BY superseded ASC
+	`, index); err != nil {
+		logger.Errorf("Error while fetching builders by index: %v", err)
+		return nil
+	}
+	return result
+}
+
+// GetBuilderRange returns the active (non-superseded) builders whose index
+// lies in the inclusive range [startIndex, endIndex], ordered by index
+// ascending. Returns nil on query failure.
+func GetBuilderRange(ctx context.Context, startIndex uint64, endIndex uint64) []*dbtypes.Builder {
+	result := []*dbtypes.Builder{}
+	if err := ReaderDb.SelectContext(ctx, &result, `
+		SELECT * FROM builders
+		WHERE builder_index >= $1 AND builder_index <= $2 AND superseded = false
+		ORDER BY builder_index ASC
+	`, startIndex, endIndex); err != nil {
+		logger.Errorf("Error while fetching builder range: %v", err)
+		return nil
+	}
+	return result
+}
+
+// GetMaxBuilderIndex returns the highest builder index present in the builders
+// table, or 0 when the table is empty.
+func GetMaxBuilderIndex(ctx context.Context) (uint64, error) {
+	var maxIndex uint64
+	if err := ReaderDb.GetContext(ctx, &maxIndex, "SELECT COALESCE(MAX(builder_index), 0) FROM builders"); err != nil {
+		return 0, fmt.Errorf("error getting max builder index: %w", err)
+	}
+	return maxIndex, nil
+}
+
+// GetBuilderCount returns the number of builder rows. When activeOnly is set,
+// superseded builders are excluded from the count.
+func GetBuilderCount(ctx context.Context, activeOnly bool) (uint64, error) {
+	query := "SELECT COUNT(*) FROM builders"
+	if activeOnly {
+		query = "SELECT COUNT(*) FROM builders WHERE superseded = false"
+	}
+
+	var count uint64
+	if err := ReaderDb.GetContext(ctx, &count, query); err != nil {
+		return 0, fmt.Errorf("error getting builder count: %w", err)
+	}
+	return count, nil
+}
+
+// SetBuilderSuperseded marks the builder identified by pubkey as superseded.
+// Must be called within an open transaction.
+func SetBuilderSuperseded(pubkey []byte, tx *sqlx.Tx) error {
+	if _, err := tx.Exec(`
+		UPDATE builders SET superseded = true WHERE pubkey = $1
+	`, pubkey); err != nil {
+		return fmt.Errorf("error setting builder superseded: %w", err)
+	}
+	return nil
+}
+
+// SetBuildersSuperseded marks multiple builders as superseded in a batch
+func SetBuildersSuperseded(pubkeys [][]byte, tx *sqlx.Tx) error {
+ if len(pubkeys) == 0 {
+ return nil
+ }
+
+ var sql strings.Builder
+ sql.WriteString("UPDATE builders SET superseded = true WHERE pubkey IN (")
+
+ args := make([]any, len(pubkeys))
+ for i, pk := range pubkeys {
+ if i > 0 {
+ sql.WriteString(", ")
+ }
+ fmt.Fprintf(&sql, "$%d", i+1)
+ args[i] = pk
+ }
+ sql.WriteString(")")
+
+ _, err := tx.Exec(sql.String(), args...)
+ if err != nil {
+ return fmt.Errorf("error setting builders superseded: %w", err)
+ }
+ return nil
+}
+
+// StreamBuildersByPubkeys streams builders for the given pubkeys in batches of
+// 1000, invoking cb once per builder found, in the order of the input pubkeys.
+// Pubkeys without a matching row are silently skipped. Streaming stops early
+// (without error) when cb returns false.
+func StreamBuildersByPubkeys(ctx context.Context, pubkeys [][]byte, cb func(builder *dbtypes.Builder) bool) error {
+	const batchSize = 1000
+
+	for i := 0; i < len(pubkeys); i += batchSize {
+		end := min(i+batchSize, len(pubkeys))
+		batch := pubkeys[i:end]
+
+		var sql strings.Builder
+		fmt.Fprintf(&sql, `
+			SELECT
+				pubkey, builder_index, version, execution_address,
+				deposit_epoch, withdrawable_epoch, superseded
+			FROM builders
+			WHERE pubkey in (`)
+
+		args := make([]any, len(batch))
+		for j, pk := range batch {
+			if j > 0 {
+				fmt.Fprintf(&sql, ", ")
+			}
+			fmt.Fprintf(&sql, "$%v", j+1)
+			args[j] = pk
+		}
+		fmt.Fprintf(&sql, ")")
+
+		// Map pubkey -> position within the batch so results can be re-ordered
+		// to match the caller's input order.
+		pubkeyMap := make(map[string]int, len(batch))
+		for pos, pk := range batch {
+			pubkeyMap[string(pk)] = pos
+		}
+
+		// Fetch all builders for this batch. rows is closed explicitly on every
+		// path below: a `defer rows.Close()` here would run only at function
+		// return and pile up one open result set per batch (defer-in-loop leak).
+		builders := make([]*dbtypes.Builder, len(batch))
+		rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...)
+		if err != nil {
+			return fmt.Errorf("error querying builders: %w", err)
+		}
+
+		for rows.Next() {
+			builder := &dbtypes.Builder{}
+			err := rows.Scan(
+				&builder.Pubkey,
+				&builder.BuilderIndex,
+				&builder.Version,
+				&builder.ExecutionAddress,
+				&builder.DepositEpoch,
+				&builder.WithdrawableEpoch,
+				&builder.Superseded,
+			)
+			if err != nil {
+				rows.Close()
+				return fmt.Errorf("error scanning builder: %w", err)
+			}
+			pos := pubkeyMap[string(builder.Pubkey)]
+			builders[pos] = builder
+		}
+
+		if err = rows.Err(); err != nil {
+			rows.Close()
+			return fmt.Errorf("error iterating rows: %w", err)
+		}
+		rows.Close()
+
+		// Stream in original input order; nil entries are pubkeys with no row.
+		for _, b := range builders {
+			if b != nil && !cb(b) {
+				return nil
+			}
+		}
+	}
+
+	return nil
+}
+
+// GetBuildersByExecutionAddress returns all builders registered with the given
+// execution (fee recipient) address, ordered by builder index ascending.
+// Returns nil on query failure.
+func GetBuildersByExecutionAddress(ctx context.Context, address []byte) []*dbtypes.Builder {
+	result := []*dbtypes.Builder{}
+	if err := ReaderDb.SelectContext(ctx, &result, `
+		SELECT * FROM builders WHERE execution_address = $1 ORDER BY builder_index ASC
+	`, address); err != nil {
+		logger.Errorf("Error while fetching builders by execution address: %v", err)
+		return nil
+	}
+	return result
+}
+
+// GetBuilderIndexesByFilter returns the builder indexes matching the given
+// filter, ordered according to filter.OrderBy (no ORDER BY clause is added for
+// unknown/zero values). currentEpoch is used by the status filters to decide
+// whether a builder counts as active or exited.
+func GetBuilderIndexesByFilter(ctx context.Context, filter dbtypes.BuilderFilter, currentEpoch uint64) ([]uint64, error) {
+	var sql strings.Builder
+	args := []interface{}{}
+	fmt.Fprint(&sql, `
+	SELECT
+		builder_index
+	FROM builders
+	`)
+
+	// buildBuilderFilterSql appends the WHERE clause and returns the extended
+	// args slice; placeholder numbering must stay in sync with args length.
+	args = buildBuilderFilterSql(filter, currentEpoch, &sql, args)
+
+	switch filter.OrderBy {
+	case dbtypes.BuilderOrderIndexAsc:
+		fmt.Fprint(&sql, " ORDER BY builder_index ASC")
+	case dbtypes.BuilderOrderIndexDesc:
+		fmt.Fprint(&sql, " ORDER BY builder_index DESC")
+	case dbtypes.BuilderOrderPubKeyAsc:
+		fmt.Fprint(&sql, " ORDER BY pubkey ASC")
+	case dbtypes.BuilderOrderPubKeyDesc:
+		fmt.Fprint(&sql, " ORDER BY pubkey DESC")
+	case dbtypes.BuilderOrderDepositEpochAsc:
+		fmt.Fprint(&sql, " ORDER BY deposit_epoch ASC")
+	case dbtypes.BuilderOrderDepositEpochDesc:
+		fmt.Fprint(&sql, " ORDER BY deposit_epoch DESC")
+	case dbtypes.BuilderOrderWithdrawableEpochAsc:
+		fmt.Fprint(&sql, " ORDER BY withdrawable_epoch ASC")
+	case dbtypes.BuilderOrderWithdrawableEpochDesc:
+		fmt.Fprint(&sql, " ORDER BY withdrawable_epoch DESC")
+	}
+
+	builderIds := []uint64{}
+	err := ReaderDb.SelectContext(ctx, &builderIds, sql.String(), args...)
+	if err != nil {
+		logger.Errorf("Error while fetching builders by filter: %v", err)
+		return nil, err
+	}
+
+	return builderIds, nil
+}
+
+// buildBuilderFilterSql appends the WHERE clause for the given filter to sql
+// and returns the args slice extended with the corresponding bind values.
+// Placeholders are numbered from len(args)+1 so the clause composes with any
+// already-collected arguments.
+func buildBuilderFilterSql(filter dbtypes.BuilderFilter, currentEpoch uint64, sql *strings.Builder, args []interface{}) []interface{} {
+	filterOp := "WHERE"
+
+	if filter.MinIndex != nil {
+		fmt.Fprintf(sql, " %v builder_index >= $%v", filterOp, len(args)+1)
+		args = append(args, *filter.MinIndex)
+		filterOp = "AND"
+	}
+	if filter.MaxIndex != nil {
+		fmt.Fprintf(sql, " %v builder_index <= $%v", filterOp, len(args)+1)
+		args = append(args, *filter.MaxIndex)
+		filterOp = "AND"
+	}
+	if len(filter.PubKey) > 0 {
+		fmt.Fprintf(sql, " %v pubkey LIKE $%v", filterOp, len(args)+1)
+		// Copy before adding the wildcard: append(filter.PubKey, '%') could
+		// write into the caller's backing array if it has spare capacity,
+		// silently mutating filter.PubKey.
+		pattern := make([]byte, len(filter.PubKey), len(filter.PubKey)+1)
+		copy(pattern, filter.PubKey)
+		pattern = append(pattern, '%')
+		args = append(args, pattern)
+		filterOp = "AND"
+	}
+	if len(filter.ExecutionAddress) > 0 {
+		fmt.Fprintf(sql, " %v execution_address = $%v", filterOp, len(args)+1)
+		args = append(args, filter.ExecutionAddress)
+		filterOp = "AND"
+	}
+	if len(filter.Status) > 0 {
+		// Status conditions are OR-ed together within one parenthesized group.
+		statusConditions := make([]string, 0, len(filter.Status))
+		for _, status := range filter.Status {
+			switch status {
+			case dbtypes.BuilderStatusActiveFilter:
+				statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch > $%v)", len(args)+1))
+				args = append(args, ConvertUint64ToInt64(currentEpoch))
+			case dbtypes.BuilderStatusExitedFilter:
+				statusConditions = append(statusConditions, fmt.Sprintf("(superseded = false AND withdrawable_epoch <= $%v)", len(args)+1))
+				args = append(args, ConvertUint64ToInt64(currentEpoch))
+			case dbtypes.BuilderStatusSupersededFilter:
+				statusConditions = append(statusConditions, "superseded = true")
+			}
+		}
+		if len(statusConditions) > 0 {
+			fmt.Fprintf(sql, " %v (%v)", filterOp, strings.Join(statusConditions, " OR "))
+		}
+	}
+
+	return args
+}
+
+// StreamBuildersByIndexes streams builders for the given indexes in batches of
+// 1000, invoking cb once per builder found, in the order of the input indexes.
+// Indexes without a matching row are silently skipped; errors are logged and
+// abort the stream. Streaming stops early when cb returns false.
+func StreamBuildersByIndexes(ctx context.Context, indexes []uint64, cb func(builder *dbtypes.Builder) bool) {
+	const batchSize = 1000
+
+	for i := 0; i < len(indexes); i += batchSize {
+		end := min(i+batchSize, len(indexes))
+		batch := indexes[i:end]
+
+		var sql strings.Builder
+		fmt.Fprint(&sql, `
+			SELECT
+				pubkey, builder_index, version, execution_address,
+				deposit_epoch, withdrawable_epoch, superseded
+			FROM builders
+			WHERE builder_index IN (`)
+
+		args := make([]any, len(batch))
+		for j, idx := range batch {
+			if j > 0 {
+				fmt.Fprint(&sql, ", ")
+			}
+			fmt.Fprintf(&sql, "$%v", j+1)
+			args[j] = idx
+		}
+		fmt.Fprint(&sql, ")")
+
+		// Map index -> position within the batch so results can be re-ordered
+		// to match the caller's input order.
+		indexMap := make(map[uint64]int, len(batch))
+		for pos, idx := range batch {
+			indexMap[idx] = pos
+		}
+
+		// Fetch all builders for this batch; rows is closed explicitly on every
+		// path (no defer, to avoid accumulating open result sets across batches).
+		builders := make([]*dbtypes.Builder, len(batch))
+		rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...)
+		if err != nil {
+			logger.Errorf("Error querying builders: %v", err)
+			return
+		}
+
+		for rows.Next() {
+			builder := &dbtypes.Builder{}
+			err := rows.Scan(
+				&builder.Pubkey,
+				&builder.BuilderIndex,
+				&builder.Version,
+				&builder.ExecutionAddress,
+				&builder.DepositEpoch,
+				&builder.WithdrawableEpoch,
+				&builder.Superseded,
+			)
+			if err != nil {
+				logger.Errorf("Error scanning builder: %v", err)
+				rows.Close()
+				return
+			}
+			pos := indexMap[builder.BuilderIndex]
+			builders[pos] = builder
+		}
+
+		// Without this check a mid-iteration error would be silently treated as
+		// end-of-data and the partial batch streamed as if complete.
+		if err := rows.Err(); err != nil {
+			logger.Errorf("Error iterating builders: %v", err)
+			rows.Close()
+			return
+		}
+		rows.Close()
+
+		// Stream in original input order; nil entries are indexes with no row.
+		for _, b := range builders {
+			if b != nil && !cb(b) {
+				return
+			}
+		}
+	}
+}
diff --git a/db/deposits.go b/db/deposits.go
index c54a2c3a6..9adc88f1d 100644
--- a/db/deposits.go
+++ b/db/deposits.go
@@ -139,14 +139,18 @@ func GetDepositsFiltered(ctx context.Context, offset uint64, limit uint32, canon
}
if len(txFilter.WithdrawalAddress) > 0 {
+ // 0x01 = ETH1, 0x02 = compounding, 0x03 = builder deposit
wdcreds1 := make([]byte, 32)
wdcreds1[0] = 0x01
copy(wdcreds1[12:], txFilter.WithdrawalAddress)
wdcreds2 := make([]byte, 32)
wdcreds2[0] = 0x02
copy(wdcreds2[12:], txFilter.WithdrawalAddress)
- args = append(args, wdcreds1, wdcreds2)
- fmt.Fprintf(&sql, " %v (deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v)", filterOp, len(args)-1, len(args))
+ wdcreds3 := make([]byte, 32)
+ wdcreds3[0] = 0x03
+ copy(wdcreds3[12:], txFilter.WithdrawalAddress)
+ args = append(args, wdcreds1, wdcreds2, wdcreds3)
+ fmt.Fprintf(&sql, " %v (deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v OR deposits.withdrawalcredentials = $%v)", filterOp, len(args)-2, len(args)-1, len(args))
filterOp = "AND"
}
diff --git a/db/epochs.go b/db/epochs.go
index aafce0f59..b003009f6 100644
--- a/db/epochs.go
+++ b/db/epochs.go
@@ -14,8 +14,8 @@ func InsertEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Epoch) error {
epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count,
- eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22)
+ eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)
ON CONFLICT (epoch) DO UPDATE SET
validator_count = excluded.validator_count,
validator_balance = excluded.validator_balance,
@@ -37,18 +37,19 @@ func InsertEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Epoch) error {
sync_participation = excluded.sync_participation,
blob_count = excluded.blob_count,
eth_gas_used = excluded.eth_gas_used,
- eth_gas_limit = excluded.eth_gas_limit`,
+ eth_gas_limit = excluded.eth_gas_limit,
+ payload_count = excluded.payload_count`,
dbtypes.DBEngineSqlite: `
INSERT OR REPLACE INTO epochs (
epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count,
- eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22)`,
+ eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23)`,
}),
epoch.Epoch, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget, epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount,
epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount, epoch.WithdrawAmount, epoch.AttesterSlashingCount, epoch.ProposerSlashingCount,
- epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit)
+ epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation, epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount)
if err != nil {
return err
}
@@ -71,7 +72,7 @@ func GetEpochs(ctx context.Context, firstEpoch uint64, limit uint32) []*dbtypes.
epoch, validator_count, validator_balance, eligible, voted_target, voted_head, voted_total, block_count, orphaned_count,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation, blob_count,
- eth_gas_used, eth_gas_limit
+ eth_gas_used, eth_gas_limit, payload_count
FROM epochs
WHERE epoch <= $1
ORDER BY epoch DESC
diff --git a/db/orphaned_blocks.go b/db/orphaned_blocks.go
index a027b860a..3f6eddd4d 100644
--- a/db/orphaned_blocks.go
+++ b/db/orphaned_blocks.go
@@ -11,15 +11,15 @@ func InsertOrphanedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Orphan
_, err := tx.ExecContext(ctx, EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
INSERT INTO orphaned_blocks (
- root, header_ver, header_ssz, block_ver, block_ssz, block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6)
+ root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)
ON CONFLICT (root) DO NOTHING`,
dbtypes.DBEngineSqlite: `
INSERT OR IGNORE INTO orphaned_blocks (
- root, header_ver, header_ssz, block_ver, block_ssz, block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6)`,
+ root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8)`,
}),
- block.Root, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid)
+ block.Root, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.BlockUid, block.PayloadVer, block.PayloadSSZ)
if err != nil {
return err
}
@@ -29,7 +29,7 @@ func InsertOrphanedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Orphan
func GetOrphanedBlock(ctx context.Context, root []byte) *dbtypes.OrphanedBlock {
block := dbtypes.OrphanedBlock{}
err := ReaderDb.GetContext(ctx, &block, `
- SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid
+ SELECT root, header_ver, header_ssz, block_ver, block_ssz, block_uid, payload_ver, payload_ssz
FROM orphaned_blocks
WHERE root = $1
`, root)
diff --git a/db/schema/pgsql/20260108202212_epbs-payload.sql b/db/schema/pgsql/20260108202212_epbs-payload.sql
new file mode 100644
index 000000000..4a9eaf95c
--- /dev/null
+++ b/db/schema/pgsql/20260108202212_epbs-payload.sql
@@ -0,0 +1,83 @@
+-- +goose Up
+-- +goose StatementBegin
+
+-- ePBS schema additions: store execution payload envelopes next to block SSZ
+-- blobs, track per-slot payload status / builder attribution, and add the
+-- block_bids and builders tables.
+
+-- payload_ver = 0 with NULL payload_ssz means no payload envelope is stored.
+ALTER TABLE public."unfinalized_blocks"
+    ADD COLUMN "payload_ver" int NOT NULL DEFAULT 0,
+    ADD COLUMN "payload_ssz" bytea NULL;
+
+ALTER TABLE public."orphaned_blocks"
+    ADD COLUMN "payload_ver" int NOT NULL DEFAULT 0,
+    ADD COLUMN "payload_ssz" bytea NULL;
+
+-- builder_index defaults to -1, presumably meaning "no external builder" for
+-- the slot -- confirm against the application code that writes slots.
+ALTER TABLE public."slots"
+    ADD COLUMN "payload_status" smallint NOT NULL DEFAULT 0,
+    ADD COLUMN "builder_index" bigint NOT NULL DEFAULT -1,
+    ADD COLUMN "eth_block_parent_hash" bytea NULL;
+
+CREATE INDEX IF NOT EXISTS "slots_payload_status_idx"
+    ON public."slots"
+    ("payload_status" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "slots_eth_block_parent_hash_idx"
+    ON public."slots"
+    ("eth_block_parent_hash" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "slots_builder_index_idx"
+    ON public."slots"
+    ("builder_index" ASC NULLS LAST);
+
+ALTER TABLE public."epochs"
+    ADD COLUMN "payload_count" int NOT NULL DEFAULT 0;
+
+ALTER TABLE public."unfinalized_epochs"
+    ADD COLUMN "payload_count" int NOT NULL DEFAULT 0;
+
+-- Builder bid headers; one row per unique
+-- (parent_root, parent_hash, block_hash, builder_index) combination.
+CREATE TABLE IF NOT EXISTS public."block_bids" (
+    "parent_root" bytea NOT NULL,
+    "parent_hash" bytea NOT NULL,
+    "block_hash" bytea NOT NULL,
+    "fee_recipient" bytea NOT NULL,
+    "gas_limit" bigint NOT NULL,
+    "builder_index" bigint NOT NULL,
+    "slot" bigint NOT NULL,
+    "value" bigint NOT NULL,
+    "el_payment" bigint NOT NULL,
+    CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index)
+);
+
+CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx"
+    ON public."block_bids"
+    ("parent_root" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx"
+    ON public."block_bids"
+    ("builder_index" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "block_bids_slot_idx"
+    ON public."block_bids"
+    ("slot" ASC NULLS LAST);
+
+-- Builder registry, keyed by pubkey. "superseded" marks rows that were
+-- replaced by a newer registration for the same builder_index.
+CREATE TABLE IF NOT EXISTS public."builders" (
+    "pubkey" bytea NOT NULL,
+    "builder_index" bigint NOT NULL,
+    "version" smallint NOT NULL,
+    "execution_address" bytea NOT NULL,
+    "deposit_epoch" bigint NOT NULL,
+    "withdrawable_epoch" bigint NOT NULL,
+    "superseded" boolean NOT NULL DEFAULT false,
+    CONSTRAINT builders_pkey PRIMARY KEY (pubkey)
+);
+
+CREATE INDEX IF NOT EXISTS "builders_builder_index_idx"
+    ON public."builders"
+    ("builder_index" ASC NULLS LAST);
+
+CREATE INDEX IF NOT EXISTS "builders_execution_address_idx"
+    ON public."builders"
+    ("execution_address" ASC NULLS LAST);
+
+-- +goose StatementEnd
+-- +goose Down
+-- +goose StatementBegin
+SELECT 'NOT SUPPORTED';
+-- +goose StatementEnd
\ No newline at end of file
diff --git a/db/schema/sqlite/20260108202212_epbs-payload.sql b/db/schema/sqlite/20260108202212_epbs-payload.sql
new file mode 100644
index 000000000..2bf22624a
--- /dev/null
+++ b/db/schema/sqlite/20260108202212_epbs-payload.sql
@@ -0,0 +1,60 @@
+-- +goose Up
+-- +goose StatementBegin
+
+-- SQLite variant of the ePBS schema migration; mirrors the Postgres migration
+-- with the same timestamp (bytea -> BLOB, one ADD per ALTER TABLE statement).
+
+ALTER TABLE "unfinalized_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0;
+ALTER TABLE "unfinalized_blocks" ADD "payload_ssz" BLOB NULL;
+
+ALTER TABLE "orphaned_blocks" ADD "payload_ver" int NOT NULL DEFAULT 0;
+ALTER TABLE "orphaned_blocks" ADD "payload_ssz" BLOB NULL;
+
+-- builder_index defaults to -1, presumably meaning "no external builder" for
+-- the slot -- confirm against the application code that writes slots.
+ALTER TABLE "slots" ADD "payload_status" smallint NOT NULL DEFAULT 0;
+ALTER TABLE "slots" ADD "builder_index" BIGINT NOT NULL DEFAULT -1;
+ALTER TABLE "slots" ADD "eth_block_parent_hash" BLOB NULL;
+
+CREATE INDEX IF NOT EXISTS "slots_payload_status_idx" ON "slots" ("payload_status" ASC);
+CREATE INDEX IF NOT EXISTS "slots_eth_block_parent_hash_idx" ON "slots" ("eth_block_parent_hash" ASC);
+CREATE INDEX IF NOT EXISTS "slots_builder_index_idx" ON "slots" ("builder_index" ASC);
+
+ALTER TABLE "epochs" ADD "payload_count" int NOT NULL DEFAULT 0;
+
+ALTER TABLE "unfinalized_epochs" ADD "payload_count" int NOT NULL DEFAULT 0;
+
+-- Builder bid headers; one row per unique
+-- (parent_root, parent_hash, block_hash, builder_index) combination.
+CREATE TABLE IF NOT EXISTS "block_bids" (
+    "parent_root" BLOB NOT NULL,
+    "parent_hash" BLOB NOT NULL,
+    "block_hash" BLOB NOT NULL,
+    "fee_recipient" BLOB NOT NULL,
+    "gas_limit" BIGINT NOT NULL,
+    "builder_index" BIGINT NOT NULL,
+    "slot" BIGINT NOT NULL,
+    "value" BIGINT NOT NULL,
+    "el_payment" BIGINT NOT NULL,
+    CONSTRAINT block_bids_pkey PRIMARY KEY (parent_root, parent_hash, block_hash, builder_index)
+);
+
+CREATE INDEX IF NOT EXISTS "block_bids_parent_root_idx" ON "block_bids" ("parent_root" ASC);
+
+CREATE INDEX IF NOT EXISTS "block_bids_builder_index_idx" ON "block_bids" ("builder_index" ASC);
+
+CREATE INDEX IF NOT EXISTS "block_bids_slot_idx" ON "block_bids" ("slot" ASC);
+
+-- Builder registry, keyed by pubkey. "superseded" marks rows that were
+-- replaced by a newer registration for the same builder_index.
+CREATE TABLE IF NOT EXISTS "builders" (
+    "pubkey" BLOB NOT NULL,
+    "builder_index" BIGINT NOT NULL,
+    "version" SMALLINT NOT NULL,
+    "execution_address" BLOB NOT NULL,
+    "deposit_epoch" BIGINT NOT NULL,
+    "withdrawable_epoch" BIGINT NOT NULL,
+    "superseded" BOOLEAN NOT NULL DEFAULT false,
+    PRIMARY KEY (pubkey)
+);
+
+CREATE INDEX IF NOT EXISTS "builders_builder_index_idx" ON "builders" ("builder_index" ASC);
+
+CREATE INDEX IF NOT EXISTS "builders_execution_address_idx" ON "builders" ("execution_address" ASC);
+
+-- +goose StatementEnd
+-- +goose Down
+-- +goose StatementBegin
+SELECT 'NOT SUPPORTED';
+-- +goose StatementEnd
\ No newline at end of file
diff --git a/db/slots.go b/db/slots.go
index d9d689bdd..4968f0802 100644
--- a/db/slots.go
+++ b/db/slots.go
@@ -20,31 +20,32 @@ func InsertSlot(ctx context.Context, tx *sqlx.Tx, slot *dbtypes.Slot) error {
slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
- eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34)
+ eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count,
+ eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time,
+ max_exec_time, exec_times, block_uid, payload_status, builder_index
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37)
ON CONFLICT (slot, root) DO UPDATE SET
status = excluded.status,
eth_block_extra = excluded.eth_block_extra,
eth_block_extra_text = excluded.eth_block_extra_text,
- fork_id = excluded.fork_id`,
+ fork_id = excluded.fork_id,
+ payload_status = excluded.payload_status`,
dbtypes.DBEngineSqlite: `
INSERT OR REPLACE INTO slots (
slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
- eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34)`,
+ eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count,
+ eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time,
+ max_exec_time, exec_times, block_uid, payload_status, builder_index
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26, $27, $28, $29, $30, $31, $32, $33, $34, $35, $36, $37)`,
}),
slot.Slot, slot.Proposer, slot.Status, slot.Root, slot.ParentRoot, slot.StateRoot, slot.Graffiti, slot.GraffitiText,
slot.AttestationCount, slot.DepositCount, slot.ExitCount, slot.WithdrawCount, slot.WithdrawAmount, slot.AttesterSlashingCount,
slot.ProposerSlashingCount, slot.BLSChangeCount, slot.EthTransactionCount, slot.EthBlockNumber, slot.EthBlockHash,
- slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount, slot.EthGasUsed,
- slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime, slot.MaxExecTime,
- slot.ExecTimes, slot.BlockUid)
+ slot.EthBlockParentHash, slot.EthBlockExtra, slot.EthBlockExtraText, slot.SyncParticipation, slot.ForkId, slot.BlobCount,
+ slot.EthGasUsed, slot.EthGasLimit, slot.EthBaseFee, slot.EthFeeRecipient, slot.BlockSize, slot.RecvDelay, slot.MinExecTime,
+ slot.MaxExecTime, slot.ExecTimes, slot.BlockUid, slot.PayloadStatus, slot.BuilderIndex)
if err != nil {
return err
}
@@ -99,9 +100,9 @@ func GetSlotsRange(ctx context.Context, firstSlot uint64, lastSlot uint64, withM
"state_root", "root", "slot", "proposer", "status", "parent_root", "graffiti", "graffiti_text",
"attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count",
"proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash",
- "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used",
- "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times",
- "block_uid",
+ "eth_block_parent_hash", "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count",
+ "eth_gas_used", "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time",
+ "max_exec_time", "exec_times", "block_uid", "payload_status", "builder_index",
}
for _, blockField := range blockFields {
fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField)
@@ -133,9 +134,9 @@ func GetSlotsByParentRoot(ctx context.Context, parentRoot []byte) []*dbtypes.Slo
slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
- eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count,
+ eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time,
+ max_exec_time, exec_times, block_uid, payload_status, builder_index
FROM slots
WHERE parent_root = $1
ORDER BY slot DESC
@@ -154,9 +155,9 @@ func GetSlotByRoot(ctx context.Context, root []byte) *dbtypes.Slot {
root, slot, parent_root, state_root, status, proposer, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
- eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count,
+ eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time,
+ max_exec_time, exec_times, block_uid, payload_status, builder_index
FROM slots
WHERE root = $1
`, root)
@@ -182,9 +183,9 @@ func GetSlotsByRoots(ctx context.Context, roots [][]byte) map[phase0.Root]*dbtyp
root, slot, parent_root, state_root, status, proposer, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
- eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count,
+ eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time,
+ max_exec_time, exec_times, block_uid, payload_status, builder_index
FROM slots
WHERE root IN (%v)
ORDER BY slot DESC`,
@@ -258,9 +259,9 @@ func GetSlotsByBlockHash(ctx context.Context, blockHash []byte) []*dbtypes.Slot
slot, proposer, status, root, parent_root, state_root, graffiti, graffiti_text,
attestation_count, deposit_count, exit_count, withdraw_count, withdraw_amount, attester_slashing_count,
proposer_slashing_count, bls_change_count, eth_transaction_count, eth_block_number, eth_block_hash,
- eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count, eth_gas_used,
- eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
+ eth_block_parent_hash, eth_block_extra, eth_block_extra_text, sync_participation, fork_id, blob_count,
+ eth_gas_used, eth_gas_limit, eth_base_fee, eth_fee_recipient, block_size, recv_delay, min_exec_time,
+ max_exec_time, exec_times, block_uid, payload_status, builder_index
FROM slots
WHERE eth_block_hash = $1
ORDER BY slot DESC
@@ -320,9 +321,9 @@ func GetFilteredSlots(ctx context.Context, filter *dbtypes.BlockFilter, firstSlo
"state_root", "root", "slot", "proposer", "status", "parent_root", "graffiti", "graffiti_text",
"attestation_count", "deposit_count", "exit_count", "withdraw_count", "withdraw_amount", "attester_slashing_count",
"proposer_slashing_count", "bls_change_count", "eth_transaction_count", "eth_block_number", "eth_block_hash",
- "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count", "eth_gas_used",
- "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time", "max_exec_time", "exec_times",
- "block_uid",
+ "eth_block_parent_hash", "eth_block_extra", "eth_block_extra_text", "sync_participation", "fork_id", "blob_count",
+ "eth_gas_used", "eth_gas_limit", "eth_base_fee", "eth_fee_recipient", "block_size", "recv_delay", "min_exec_time",
+ "max_exec_time", "exec_times", "block_uid", "payload_status", "builder_index",
}
for _, blockField := range blockFields {
fmt.Fprintf(&sql, ", slots.%v AS \"block.%v\"", blockField, blockField)
@@ -477,6 +478,37 @@ func GetFilteredSlots(ctx context.Context, filter *dbtypes.BlockFilter, firstSlo
fmt.Fprintf(&sql, ` AND slots.eth_block_hash = $%v `, argIdx)
args = append(args, filter.EthBlockHash)
}
+ if filter.BuilderIndex != nil {
+ argIdx++
+ fmt.Fprintf(&sql, ` AND slots.builder_index = $%v `, argIdx)
+ args = append(args, *filter.BuilderIndex)
+ }
+
+ if filter.WithPayloadMask != dbtypes.PayloadStatusMaskAll {
+ allowedPayloadStatuses := []dbtypes.PayloadStatus{}
+ if filter.WithPayloadMask&dbtypes.PayloadStatusMaskMissing != 0 {
+ allowedPayloadStatuses = append(allowedPayloadStatuses, dbtypes.PayloadStatusMissing)
+ }
+ if filter.WithPayloadMask&dbtypes.PayloadStatusMaskCanonical != 0 {
+ allowedPayloadStatuses = append(allowedPayloadStatuses, dbtypes.PayloadStatusCanonical)
+ }
+ if filter.WithPayloadMask&dbtypes.PayloadStatusMaskOrphaned != 0 {
+ allowedPayloadStatuses = append(allowedPayloadStatuses, dbtypes.PayloadStatusOrphaned)
+ }
+
+ if len(allowedPayloadStatuses) > 0 {
+			allowedPayloadStatusValues := make([]string, len(allowedPayloadStatuses))
+			for i, payloadStatus := range allowedPayloadStatuses {
+				allowedPayloadStatusValues[i] = fmt.Sprintf("%v", payloadStatus)
+			}
+			fmt.Fprintf(&sql, ` AND slots.payload_status IN (%s) `, strings.Join(allowedPayloadStatusValues, ", "))
+ }
+ }
+ if len(filter.EthBlockParentHash) > 0 {
+ argIdx++
+ fmt.Fprintf(&sql, ` AND slots.eth_block_parent_hash = $%v `, argIdx)
+ args = append(args, filter.EthBlockParentHash)
+ }
if filter.MinGasUsed != nil {
argIdx++
fmt.Fprintf(&sql, ` AND slots.eth_gas_used >= $%v `, argIdx)
diff --git a/db/unfinalized_blocks.go b/db/unfinalized_blocks.go
index 914173fb5..668797776 100644
--- a/db/unfinalized_blocks.go
+++ b/db/unfinalized_blocks.go
@@ -13,18 +13,16 @@ func InsertUnfinalizedBlock(ctx context.Context, tx *sqlx.Tx, block *dbtypes.Unf
_, err := tx.ExecContext(ctx, EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
INSERT INTO unfinalized_blocks (
- root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)
+ root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)
ON CONFLICT (root) DO NOTHING`,
dbtypes.DBEngineSqlite: `
INSERT OR IGNORE INTO unfinalized_blocks (
- root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times,
- block_uid
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13)`,
+ root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15)`,
}),
- block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.Status, block.ForkId, block.RecvDelay, block.MinExecTime, block.MaxExecTime,
- block.ExecTimes, block.BlockUid,
+ block.Root, block.Slot, block.HeaderVer, block.HeaderSSZ, block.BlockVer, block.BlockSSZ, block.PayloadVer, block.PayloadSSZ, block.Status, block.ForkId, block.RecvDelay,
+ block.MinExecTime, block.MaxExecTime, block.ExecTimes, block.BlockUid,
)
if err != nil {
return err
@@ -90,6 +88,14 @@ func UpdateUnfinalizedBlockForkId(ctx context.Context, tx *sqlx.Tx, roots [][]by
return nil
}
+func UpdateUnfinalizedBlockPayload(ctx context.Context, tx *sqlx.Tx, root []byte, payloadVer uint64, payloadSSZ []byte) error {
+ _, err := tx.ExecContext(ctx, `UPDATE unfinalized_blocks SET payload_ver = $1, payload_ssz = $2 WHERE root = $3`, payloadVer, payloadSSZ, root)
+ if err != nil {
+ return err
+ }
+ return nil
+}
+
func UpdateUnfinalizedBlockExecutionTimes(ctx context.Context, tx *sqlx.Tx, root []byte, minExecTime uint32, maxExecTime uint32, execTimes []byte) error {
_, err := tx.ExecContext(ctx, `UPDATE unfinalized_blocks SET min_exec_time = $1, max_exec_time = $2, exec_times = $3 WHERE root = $4`, minExecTime, maxExecTime, execTimes, root)
if err != nil {
@@ -141,7 +147,7 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db
var sql strings.Builder
args := []any{slot}
- fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`)
+ fmt.Fprint(&sql, `SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, payload_ver, payload_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid FROM unfinalized_blocks WHERE slot >= $1`)
rows, err := ReaderDb.QueryContext(ctx, sql.String(), args...)
if err != nil {
@@ -152,7 +158,7 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db
for rows.Next() {
block := dbtypes.UnfinalizedBlock{}
err := rows.Scan(
- &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.Status, &block.ForkId, &block.RecvDelay,
+ &block.Root, &block.Slot, &block.HeaderVer, &block.HeaderSSZ, &block.BlockVer, &block.BlockSSZ, &block.PayloadVer, &block.PayloadSSZ, &block.Status, &block.ForkId, &block.RecvDelay,
&block.MinExecTime, &block.MaxExecTime, &block.ExecTimes, &block.BlockUid,
)
if err != nil {
@@ -165,13 +171,28 @@ func StreamUnfinalizedBlocks(ctx context.Context, slot uint64, cb func(block *db
return nil
}
-func GetUnfinalizedBlock(ctx context.Context, root []byte) *dbtypes.UnfinalizedBlock {
+func GetUnfinalizedBlock(ctx context.Context, root []byte, withHeader bool, withBody bool, withPayload bool) *dbtypes.UnfinalizedBlock {
+ var sql strings.Builder
+ fmt.Fprint(&sql, `SELECT root, slot`)
+
+ if withHeader {
+ fmt.Fprint(&sql, `, header_ver, header_ssz`)
+ }
+
+ if withBody {
+ fmt.Fprint(&sql, `, block_ver, block_ssz`)
+ }
+
+ if withPayload {
+ fmt.Fprint(&sql, `, payload_ver, payload_ssz`)
+ }
+
+ fmt.Fprint(&sql, `, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid`)
+
+ fmt.Fprint(&sql, ` FROM unfinalized_blocks WHERE root = $1`)
+
block := dbtypes.UnfinalizedBlock{}
- err := ReaderDb.GetContext(ctx, &block, `
- SELECT root, slot, header_ver, header_ssz, block_ver, block_ssz, status, fork_id, recv_delay, min_exec_time, max_exec_time, exec_times, block_uid
- FROM unfinalized_blocks
- WHERE root = $1
- `, root)
+ err := ReaderDb.GetContext(ctx, &block, sql.String(), root)
if err != nil {
logger.Errorf("Error while fetching unfinalized block 0x%x: %v", root, err)
return nil
diff --git a/db/unfinalized_epochs.go b/db/unfinalized_epochs.go
index 960fde0d4..c5a452591 100644
--- a/db/unfinalized_epochs.go
+++ b/db/unfinalized_epochs.go
@@ -14,8 +14,8 @@ func InsertUnfinalizedEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Unf
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)
ON CONFLICT (epoch, dependent_root, epoch_head_root) DO UPDATE SET
epoch_head_fork_id = excluded.epoch_head_fork_id,
validator_count = excluded.validator_count,
@@ -38,19 +38,20 @@ func InsertUnfinalizedEpoch(ctx context.Context, tx *sqlx.Tx, epoch *dbtypes.Unf
sync_participation = excluded.sync_participation,
blob_count = excluded.blob_count,
eth_gas_used = excluded.eth_gas_used,
- eth_gas_limit = excluded.eth_gas_limit`,
+ eth_gas_limit = excluded.eth_gas_limit,
+ payload_count = excluded.payload_count`,
dbtypes.DBEngineSqlite: `
INSERT OR REPLACE INTO unfinalized_epochs (
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
- ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25)`,
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
+ ) VALUES ($1, $2, $3, $4, $5, $6, $7, $8, $9, $10, $11, $12, $13, $14, $15, $16, $17, $18, $19, $20, $21, $22, $23, $24, $25, $26)`,
}),
epoch.Epoch, epoch.DependentRoot, epoch.EpochHeadRoot, epoch.EpochHeadForkId, epoch.ValidatorCount, epoch.ValidatorBalance, epoch.Eligible, epoch.VotedTarget,
epoch.VotedHead, epoch.VotedTotal, epoch.BlockCount, epoch.OrphanedCount, epoch.AttestationCount, epoch.DepositCount, epoch.ExitCount, epoch.WithdrawCount,
epoch.WithdrawAmount, epoch.AttesterSlashingCount, epoch.ProposerSlashingCount, epoch.BLSChangeCount, epoch.EthTransactionCount, epoch.SyncParticipation,
- epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit,
+ epoch.BlobCount, epoch.EthGasUsed, epoch.EthGasLimit, epoch.PayloadCount,
)
if err != nil {
return err
@@ -64,7 +65,7 @@ func StreamUnfinalizedEpochs(ctx context.Context, epoch uint64, cb func(duty *db
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
FROM unfinalized_epochs
WHERE epoch >= $1`, epoch)
if err != nil {
@@ -78,7 +79,7 @@ func StreamUnfinalizedEpochs(ctx context.Context, epoch uint64, cb func(duty *db
&e.Epoch, &e.DependentRoot, &e.EpochHeadRoot, &e.EpochHeadForkId, &e.ValidatorCount, &e.ValidatorBalance, &e.Eligible, &e.VotedTarget,
&e.VotedHead, &e.VotedTotal, &e.BlockCount, &e.OrphanedCount, &e.AttestationCount, &e.DepositCount, &e.ExitCount, &e.WithdrawCount,
&e.WithdrawAmount, &e.AttesterSlashingCount, &e.ProposerSlashingCount, &e.BLSChangeCount, &e.EthTransactionCount, &e.SyncParticipation,
- &e.BlobCount, &e.EthGasUsed, &e.EthGasLimit,
+ &e.BlobCount, &e.EthGasUsed, &e.EthGasLimit, &e.PayloadCount,
)
if err != nil {
logger.Errorf("Error while scanning unfinalized epoch: %v", err)
@@ -97,7 +98,7 @@ func GetUnfinalizedEpoch(ctx context.Context, epoch uint64, headRoot []byte) *db
epoch, dependent_root, epoch_head_root, epoch_head_fork_id, validator_count, validator_balance, eligible, voted_target,
voted_head, voted_total, block_count, orphaned_count, attestation_count, deposit_count, exit_count, withdraw_count,
withdraw_amount, attester_slashing_count, proposer_slashing_count, bls_change_count, eth_transaction_count, sync_participation,
- blob_count, eth_gas_used, eth_gas_limit
+ blob_count, eth_gas_used, eth_gas_limit, payload_count
FROM unfinalized_epochs
WHERE epoch = $1 AND epoch_head_root = $2
`, epoch, headRoot)
diff --git a/dbtypes/dbtypes.go b/dbtypes/dbtypes.go
index 7c90a09f6..dd0ee58ce 100644
--- a/dbtypes/dbtypes.go
+++ b/dbtypes/dbtypes.go
@@ -18,6 +18,14 @@ const (
Orphaned
)
+type PayloadStatus uint8
+
+const (
+ PayloadStatusMissing PayloadStatus = iota
+ PayloadStatusCanonical
+ PayloadStatusOrphaned
+)
+
type SlotHeader struct {
Slot uint64 `db:"slot"`
Proposer uint64 `db:"proposer"`
@@ -25,40 +33,43 @@ type SlotHeader struct {
}
type Slot struct {
- Slot uint64 `db:"slot"`
- Proposer uint64 `db:"proposer"`
- Status SlotStatus `db:"status"`
- Root []byte `db:"root"`
- ParentRoot []byte `db:"parent_root"`
- StateRoot []byte `db:"state_root"`
- Graffiti []byte `db:"graffiti"`
- GraffitiText string `db:"graffiti_text"`
- AttestationCount uint64 `db:"attestation_count"`
- DepositCount uint64 `db:"deposit_count"`
- ExitCount uint64 `db:"exit_count"`
- WithdrawCount uint64 `db:"withdraw_count"`
- WithdrawAmount uint64 `db:"withdraw_amount"`
- AttesterSlashingCount uint64 `db:"attester_slashing_count"`
- ProposerSlashingCount uint64 `db:"proposer_slashing_count"`
- BLSChangeCount uint64 `db:"bls_change_count"`
- EthTransactionCount uint64 `db:"eth_transaction_count"`
- BlobCount uint64 `db:"blob_count"`
- EthGasUsed uint64 `db:"eth_gas_used"`
- EthGasLimit uint64 `db:"eth_gas_limit"`
- EthBaseFee uint64 `db:"eth_base_fee"`
- EthFeeRecipient []byte `db:"eth_fee_recipient"`
- EthBlockNumber *uint64 `db:"eth_block_number"`
- EthBlockHash []byte `db:"eth_block_hash"`
- EthBlockExtra []byte `db:"eth_block_extra"`
- EthBlockExtraText string `db:"eth_block_extra_text"`
- SyncParticipation float32 `db:"sync_participation"`
- ForkId uint64 `db:"fork_id"`
- BlockSize uint64 `db:"block_size"`
- RecvDelay int32 `db:"recv_delay"`
- MinExecTime uint32 `db:"min_exec_time"`
- MaxExecTime uint32 `db:"max_exec_time"`
- ExecTimes []byte `db:"exec_times"`
- BlockUid uint64 `db:"block_uid"`
+ Slot uint64 `db:"slot"`
+ Proposer uint64 `db:"proposer"`
+ Status SlotStatus `db:"status"`
+ Root []byte `db:"root"`
+ ParentRoot []byte `db:"parent_root"`
+ StateRoot []byte `db:"state_root"`
+ Graffiti []byte `db:"graffiti"`
+ GraffitiText string `db:"graffiti_text"`
+ AttestationCount uint64 `db:"attestation_count"`
+ DepositCount uint64 `db:"deposit_count"`
+ ExitCount uint64 `db:"exit_count"`
+ WithdrawCount uint64 `db:"withdraw_count"`
+ WithdrawAmount uint64 `db:"withdraw_amount"`
+ AttesterSlashingCount uint64 `db:"attester_slashing_count"`
+ ProposerSlashingCount uint64 `db:"proposer_slashing_count"`
+ BLSChangeCount uint64 `db:"bls_change_count"`
+ EthTransactionCount uint64 `db:"eth_transaction_count"`
+ BlobCount uint64 `db:"blob_count"`
+ EthGasUsed uint64 `db:"eth_gas_used"`
+ EthGasLimit uint64 `db:"eth_gas_limit"`
+ EthBaseFee uint64 `db:"eth_base_fee"`
+ EthFeeRecipient []byte `db:"eth_fee_recipient"`
+ EthBlockNumber *uint64 `db:"eth_block_number"`
+ EthBlockHash []byte `db:"eth_block_hash"`
+ EthBlockParentHash []byte `db:"eth_block_parent_hash"`
+ EthBlockExtra []byte `db:"eth_block_extra"`
+ EthBlockExtraText string `db:"eth_block_extra_text"`
+ SyncParticipation float32 `db:"sync_participation"`
+ ForkId uint64 `db:"fork_id"`
+ BlockSize uint64 `db:"block_size"`
+ RecvDelay int32 `db:"recv_delay"`
+ MinExecTime uint32 `db:"min_exec_time"`
+ MaxExecTime uint32 `db:"max_exec_time"`
+ ExecTimes []byte `db:"exec_times"`
+ PayloadStatus PayloadStatus `db:"payload_status"`
+ BlockUid uint64 `db:"block_uid"`
+	BuilderIndex          int64         `db:"builder_index"` // builder index; -1 (math.MaxUint64 when read unsigned) marks self-built blocks
}
type Epoch struct {
@@ -84,15 +95,18 @@ type Epoch struct {
EthGasUsed uint64 `db:"eth_gas_used"`
EthGasLimit uint64 `db:"eth_gas_limit"`
SyncParticipation float32 `db:"sync_participation"`
+ PayloadCount uint64 `db:"payload_count"`
}
type OrphanedBlock struct {
- Root []byte `db:"root"`
- HeaderVer uint64 `db:"header_ver"`
- HeaderSSZ []byte `db:"header_ssz"`
- BlockVer uint64 `db:"block_ver"`
- BlockSSZ []byte `db:"block_ssz"`
- BlockUid uint64 `db:"block_uid"`
+ Root []byte `db:"root"`
+ HeaderVer uint64 `db:"header_ver"`
+ HeaderSSZ []byte `db:"header_ssz"`
+ BlockVer uint64 `db:"block_ver"`
+ BlockSSZ []byte `db:"block_ssz"`
+ PayloadVer uint64 `db:"payload_ver"`
+ PayloadSSZ []byte `db:"payload_ssz"`
+ BlockUid uint64 `db:"block_uid"`
}
type SlotAssignment struct {
@@ -121,6 +135,8 @@ type UnfinalizedBlock struct {
HeaderSSZ []byte `db:"header_ssz"`
BlockVer uint64 `db:"block_ver"`
BlockSSZ []byte `db:"block_ssz"`
+ PayloadVer uint64 `db:"payload_ver"`
+ PayloadSSZ []byte `db:"payload_ssz"`
Status UnfinalizedBlockStatus `db:"status"`
ForkId uint64 `db:"fork_id"`
RecvDelay int32 `db:"recv_delay"`
@@ -156,6 +172,7 @@ type UnfinalizedEpoch struct {
EthGasUsed uint64 `db:"eth_gas_used"`
EthGasLimit uint64 `db:"eth_gas_limit"`
SyncParticipation float32 `db:"sync_participation"`
+ PayloadCount uint64 `db:"payload_count"`
}
type OrphanedEpoch struct {
@@ -548,6 +565,30 @@ type ElTokenTransfer struct {
AmountRaw []byte `db:"amount_raw"`
}
+// ePBS types
+
+type BlockBid struct {
+ ParentRoot []byte `db:"parent_root"`
+ ParentHash []byte `db:"parent_hash"`
+ BlockHash []byte `db:"block_hash"`
+ FeeRecipient []byte `db:"fee_recipient"`
+ GasLimit uint64 `db:"gas_limit"`
+ BuilderIndex int64 `db:"builder_index"`
+ Slot uint64 `db:"slot"`
+ Value uint64 `db:"value"`
+ ElPayment uint64 `db:"el_payment"`
+}
+
+type Builder struct {
+ Pubkey []byte `db:"pubkey"`
+ BuilderIndex uint64 `db:"builder_index"`
+ Version uint8 `db:"version"`
+ ExecutionAddress []byte `db:"execution_address"`
+ DepositEpoch int64 `db:"deposit_epoch"`
+ WithdrawableEpoch int64 `db:"withdrawable_epoch"`
+ Superseded bool `db:"superseded"`
+}
+
// Withdrawal types
const (
WithdrawalTypeBeaconWithdrawal = 0
diff --git a/dbtypes/other.go b/dbtypes/other.go
index 7936d6aec..260cd6a14 100644
--- a/dbtypes/other.go
+++ b/dbtypes/other.go
@@ -43,6 +43,16 @@ type UnfinalizedBlockFilter struct {
WithBody bool
}
+type PayloadStatusMask uint8
+
+const (
+ PayloadStatusMaskMissing PayloadStatusMask = 0x01
+ PayloadStatusMaskCanonical PayloadStatusMask = 0x02
+ PayloadStatusMaskOrphaned PayloadStatusMask = 0x04
+
+ PayloadStatusMaskAll PayloadStatusMask = 0x07
+)
+
type BlockFilter struct {
Graffiti string
InvertGraffiti bool
@@ -53,6 +63,7 @@ type BlockFilter struct {
InvertProposer bool
WithOrphaned uint8
WithMissing uint8
+ WithPayloadMask PayloadStatusMask
MinSyncParticipation *float32
MaxSyncParticipation *float32
MinExecTime *uint32
@@ -67,6 +78,8 @@ type BlockFilter struct {
ForkIds []uint64 // Filter by fork IDs
EthBlockNumber *uint64 // Filter by EL block number
EthBlockHash []byte // Filter by EL block hash
+ EthBlockParentHash []byte // Filter by EL block parent hash
+ BuilderIndex *int64 // Filter by builder index (-1 for self-built blocks)
MinGasUsed *uint64 // Filter by minimum gas used
MaxGasUsed *uint64 // Filter by maximum gas used
MinGasLimit *uint64 // Filter by minimum gas limit
@@ -222,6 +235,43 @@ type ValidatorFilter struct {
Offset uint64
}
+// Builder filter types
+
+type BuilderOrder uint8
+
+const (
+ BuilderOrderIndexAsc BuilderOrder = iota
+ BuilderOrderIndexDesc
+ BuilderOrderPubKeyAsc
+ BuilderOrderPubKeyDesc
+ BuilderOrderBalanceAsc
+ BuilderOrderBalanceDesc
+ BuilderOrderDepositEpochAsc
+ BuilderOrderDepositEpochDesc
+ BuilderOrderWithdrawableEpochAsc
+ BuilderOrderWithdrawableEpochDesc
+)
+
+type BuilderStatus uint8
+
+const (
+ BuilderStatusActiveFilter BuilderStatus = iota
+ BuilderStatusExitedFilter
+ BuilderStatusSupersededFilter
+)
+
+type BuilderFilter struct {
+ MinIndex *uint64
+ MaxIndex *uint64
+ PubKey []byte
+ ExecutionAddress []byte
+ Status []BuilderStatus
+
+ OrderBy BuilderOrder
+ Limit uint64
+ Offset uint64
+}
+
// EL Explorer filters
type ElTransactionFilter struct {
diff --git a/go.mod b/go.mod
index 6daf7bfd3..c1ecb9cd7 100644
--- a/go.mod
+++ b/go.mod
@@ -254,3 +254,5 @@ require (
modernc.org/memory v1.11.0 // indirect
modernc.org/sqlite v1.46.1 // indirect
)
+
+replace github.com/attestantio/go-eth2-client => github.com/pk910/go-eth2-client v0.0.0-20260331085057-05aefaa5ce81
diff --git a/go.sum b/go.sum
index e93f3f843..a0e943de1 100644
--- a/go.sum
+++ b/go.sum
@@ -37,8 +37,6 @@ github.com/allegro/bigcache v1.2.1-0.20190218064605-e24eb225f156/go.mod h1:Cb/ax
github.com/allegro/bigcache/v3 v3.1.0 h1:H2Vp8VOvxcrB91o86fUSVJFqeuz8kpyyB02eH3bSzwk=
github.com/allegro/bigcache/v3 v3.1.0/go.mod h1:aPyh7jEvrog9zAwx5N7+JUQX5dZTSGpxF1LAR4dr35I=
github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c=
-github.com/attestantio/go-eth2-client v0.28.0 h1:2zIIIMPvSD+g6h3TgVXsoda/Yw3e+wjo1e8CZEanORU=
-github.com/attestantio/go-eth2-client v0.28.0/go.mod h1:PO9sHFCq+1RiG+Eh3eOR2GYvYV64Qzg7idM3kLgCs5k=
github.com/benbjohnson/clock v1.3.5 h1:VvXlSJBzZpA/zum6Sj74hxwYI2DIxRWuNIoXAzHZz5o=
github.com/benbjohnson/clock v1.3.5/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
@@ -562,6 +560,8 @@ github.com/pion/webrtc/v4 v4.1.4 h1:/gK1ACGHXQmtyVVbJFQDxNoODg4eSRiFLB7t9r9pg8M=
github.com/pion/webrtc/v4 v4.1.4/go.mod h1:Oab9npu1iZtQRMic3K3toYq5zFPvToe/QBw7dMI2ok4=
github.com/pk910/dynamic-ssz v1.2.3-0.20260318065836-323b83c1a387 h1:XkL2iLFDP6/NKAOF0fysZrhaa/qtcAtqzhLUxSIy79s=
github.com/pk910/dynamic-ssz v1.2.3-0.20260318065836-323b83c1a387/go.mod h1:NmeFF4jxzVwWC8cnEhUB7xMI++8hd/0OZvZHFrUvFfs=
+github.com/pk910/go-eth2-client v0.0.0-20260331085057-05aefaa5ce81 h1:mHgrerDiro/np9FCoJ19EYsCyl/CXMHFi34o+j3T+rE=
+github.com/pk910/go-eth2-client v0.0.0-20260331085057-05aefaa5ce81/go.mod h1:lwj0l8l51hIjqdQpODPea01JfE33nyM++1VGjBZau08=
github.com/pk910/hashtree-bindings v0.1.0 h1:w7NyRWFi2OaYEFvo9ADcE/QU6PMuVLl3hBgx92KiH9c=
github.com/pk910/hashtree-bindings v0.1.0/go.mod h1:zrWt88783JmhBfcgni6kkIMYRdXTZi/FL//OyI5T/l4=
github.com/pkg/diff v0.0.0-20210226163009-20ebb0f2a09e/go.mod h1:pJLUxLENpZxwdsKMEsNbx1VGcRFpLqf3715MtcvvzbA=
diff --git a/handlers/api/network_forks_v1.go b/handlers/api/network_forks_v1.go
index 7a2c82299..0ddfcce33 100644
--- a/handlers/api/network_forks_v1.go
+++ b/handlers/api/network_forks_v1.go
@@ -112,7 +112,8 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo {
// Helper function to add consensus fork
addConsensusFork := func(name string, forkEpoch *uint64, forkVersion phase0.Version) {
if forkEpoch != nil && *forkEpoch < uint64(18446744073709551615) {
- forkDigest := chainState.GetForkDigest(forkVersion, nil)
+ blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*forkEpoch))
+ forkDigest := chainState.GetForkDigest(forkVersion, blobParams)
version := fmt.Sprintf("0x%x", forkVersion)
epoch := *forkEpoch
forks = append(forks, &APINetworkForkInfo{
@@ -135,6 +136,7 @@ func buildNetworkForks(chainState *consensus.ChainState) []*APINetworkForkInfo {
addConsensusFork("Deneb", specs.DenebForkEpoch, specs.DenebForkVersion)
addConsensusFork("Electra", specs.ElectraForkEpoch, specs.ElectraForkVersion)
addConsensusFork("Fulu", specs.FuluForkEpoch, specs.FuluForkVersion)
+ addConsensusFork("Gloas", specs.GloasForkEpoch, specs.GloasForkVersion)
// Add BPO forks from BLOB_SCHEDULE
for i, blobSchedule := range specs.BlobSchedule {
diff --git a/handlers/builder.go b/handlers/builder.go
new file mode 100644
index 000000000..13dc4d4b0
--- /dev/null
+++ b/handlers/builder.go
@@ -0,0 +1,384 @@
+package handlers
+
+import (
+ "context"
+ "encoding/hex"
+ "errors"
+ "fmt"
+ "net/http"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/attestantio/go-eth2-client/spec/gloas"
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+ "github.com/ethereum/go-ethereum/common"
+ "github.com/gorilla/mux"
+ "github.com/sirupsen/logrus"
+
+ "github.com/ethpandaops/dora/clients/consensus"
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/ethpandaops/dora/indexer/beacon"
+ "github.com/ethpandaops/dora/services"
+ "github.com/ethpandaops/dora/templates"
+ "github.com/ethpandaops/dora/types/models"
+)
+
+// BuilderDetail will return the main "builder" page using a go template
+func BuilderDetail(w http.ResponseWriter, r *http.Request) {
+ var builderTemplateFiles = append(layoutTemplateFiles,
+ "builder/builder.html",
+ "builder/recentBlocks.html",
+ "builder/recentBids.html",
+ "builder/recentDeposits.html",
+ "_svg/timeline.html",
+ )
+ var notfoundTemplateFiles = append(layoutTemplateFiles,
+ "builder/notfound.html",
+ )
+
+ var pageTemplate = templates.GetTemplate(builderTemplateFiles...)
+ data := InitPageData(w, r, "builders", "/builder", "Builder", builderTemplateFiles)
+
+ var builder *gloas.Builder
+ var builderIndex uint64
+ var superseded bool
+
+ vars := mux.Vars(r)
+ idxOrPubKey := strings.Replace(vars["idxOrPubKey"], "0x", "", -1)
+ builderPubKey, err := hex.DecodeString(idxOrPubKey)
+ if err != nil || len(builderPubKey) != 48 {
+ // search by index
+ idx, err := strconv.ParseUint(vars["idxOrPubKey"], 10, 64)
+ if err == nil {
+ builderIndex = idx
+ builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(idx))
+ if builder == nil {
+ // Try from DB
+ dbBuilder := db.GetActiveBuilderByIndex(r.Context(), idx)
+ if dbBuilder != nil {
+ builder = beacon.UnwrapDbBuilder(dbBuilder)
+ superseded = dbBuilder.Superseded
+ }
+ }
+ }
+ } else {
+ // search by pubkey - check cache first (more accurate), then fall back to DB
+ var pubkey phase0.BLSPubKey
+ copy(pubkey[:], builderPubKey)
+ if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(pubkey); found {
+ idx := uint64(validatorIdx)
+ if idx&services.BuilderIndexFlag != 0 {
+ builderIndex = idx &^ services.BuilderIndexFlag
+ builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+ }
+ }
+
+ if builder == nil {
+ // Fall back to DB
+ dbBuilder := db.GetBuilderByPubkey(r.Context(), builderPubKey)
+ if dbBuilder != nil {
+ builderIndex = dbBuilder.BuilderIndex
+ superseded = dbBuilder.Superseded
+ builder = services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(dbBuilder.BuilderIndex))
+ if builder == nil {
+ builder = beacon.UnwrapDbBuilder(dbBuilder)
+ }
+ }
+ }
+ }
+
+ if builder == nil {
+ data := InitPageData(w, r, "builders", "/builder", "Builder not found", notfoundTemplateFiles)
+ w.Header().Set("Content-Type", "text/html")
+ handleTemplateError(w, r, "builder.go", "BuilderDetail", "", templates.GetTemplate(notfoundTemplateFiles...).ExecuteTemplate(w, "layout", data))
+ return
+ }
+
+ tabView := "blocks"
+ if r.URL.Query().Has("v") {
+ tabView = r.URL.Query().Get("v")
+ }
+
+ var pageError error
+ pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1)
+ if pageError == nil {
+ data.Data, pageError = getBuilderPageData(builderIndex, superseded, tabView)
+ }
+	if pageError == nil && data.Data == nil {
+ pageError = errors.New("builder not found")
+ }
+ if pageError != nil {
+ handlePageError(w, r, pageError)
+ return
+ }
+ w.Header().Set("Content-Type", "text/html")
+
+ if r.URL.Query().Has("lazy") {
+ // return the selected tab content only (lazy loaded)
+ handleTemplateError(w, r, "builder.go", "BuilderDetail", "", pageTemplate.ExecuteTemplate(w, "lazyPage", data.Data))
+ } else {
+ handleTemplateError(w, r, "builder.go", "BuilderDetail", "", pageTemplate.ExecuteTemplate(w, "layout", data))
+ }
+}
+
+func getBuilderPageData(builderIndex uint64, superseded bool, tabView string) (*models.BuilderPageData, error) {
+ pageData := &models.BuilderPageData{}
+ pageCacheKey := fmt.Sprintf("builder:%v:%v", builderIndex, tabView)
+ pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} {
+ pageData, cacheTimeout := buildBuilderPageData(pageCall.CallCtx, builderIndex, superseded, tabView)
+ pageCall.CacheTimeout = cacheTimeout
+ return pageData
+ })
+ if pageErr == nil && pageRes != nil {
+ resData, resOk := pageRes.(*models.BuilderPageData)
+ if !resOk {
+ return nil, ErrInvalidPageModel
+ }
+ pageData = resData
+ }
+ return pageData, pageErr
+}
+
+// buildBuilderPageData builds the page model for a single builder's detail
+// page. The builder is resolved from the beacon service's live builder set
+// first, falling back to the database for builders no longer in the live set.
+// Returns (nil, 0) when the builder cannot be found; otherwise the populated
+// model and a 10 minute cache timeout.
+//
+// superseded marks the builder record as replaced by a newer record; the DB
+// fallback below may override it with the stored value.
+// tabView selects which of the "blocks"/"bids"/"deposits" tabs to populate.
+func buildBuilderPageData(ctx context.Context, builderIndex uint64, superseded bool, tabView string) (*models.BuilderPageData, time.Duration) {
+	logrus.Debugf("builder page called: %v", builderIndex)
+
+	chainState := services.GlobalBeaconService.GetChainState()
+	specs := chainState.GetSpecs()
+	currentEpoch := chainState.CurrentEpoch()
+
+	// Get builder data from the live builder set
+	builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+	if builder == nil {
+		// Not in the live set - try the DB (covers superseded builders)
+		dbBuilder := db.GetActiveBuilderByIndex(ctx, builderIndex)
+		if dbBuilder != nil {
+			builder = beacon.UnwrapDbBuilder(dbBuilder)
+			// the DB record knows whether this builder entry was superseded
+			superseded = dbBuilder.Superseded
+		}
+	}
+	if builder == nil {
+		return nil, 0
+	}
+
+	// Determine state (priority: Superseded > Exited > Pending > Active)
+	finalizedEpoch, _ := chainState.GetFinalizedCheckpoint()
+	state := "Active"
+	if superseded {
+		state = "Superseded"
+	} else if builder.WithdrawableEpoch <= currentEpoch {
+		state = "Exited"
+	} else if builder.DepositEpoch > finalizedEpoch {
+		state = "Pending"
+	}
+
+	pageData := &models.BuilderPageData{
+		CurrentEpoch: uint64(currentEpoch),
+		Index:        builderIndex,
+		// builder names are registered under the index with the
+		// BuilderIndexFlag bit set, distinguishing them from validators
+		Name:             services.GlobalBeaconService.GetValidatorName(builderIndex | services.BuilderIndexFlag),
+		PublicKey:        builder.PublicKey[:],
+		Balance:          uint64(builder.Balance),
+		ExecutionAddress: builder.ExecutionAddress[:],
+		Version:          builder.Version,
+		State:            state,
+		IsSuperseded:     superseded,
+		TabView:          tabView,
+		GloasIsActive:    specs.GloasForkEpoch != nil && uint64(currentEpoch) >= *specs.GloasForkEpoch,
+	}
+
+	// Deposit epoch (18446744073709551615 == math.MaxUint64, the
+	// far-future "not set" sentinel)
+	if builder.DepositEpoch < 18446744073709551615 {
+		pageData.ShowDeposit = true
+		pageData.DepositEpoch = uint64(builder.DepositEpoch)
+		pageData.DepositTs = chainState.EpochToTime(builder.DepositEpoch)
+	}
+
+	// Withdrawable epoch (same far-future sentinel as above)
+	if builder.WithdrawableEpoch < 18446744073709551615 {
+		pageData.ShowWithdrawable = true
+		pageData.WithdrawableEpoch = uint64(builder.WithdrawableEpoch)
+		pageData.WithdrawableTs = chainState.EpochToTime(builder.WithdrawableEpoch)
+	}
+
+	// Check for exit reason if builder has exited or is exiting
+	if pageData.ShowWithdrawable {
+		builderIndexWithFlag := builderIndex | services.BuilderIndexFlag
+
+		// Check for voluntary exit (stored under the flagged index)
+		if exits, totalExits := services.GlobalBeaconService.GetVoluntaryExitsByFilter(ctx, &dbtypes.VoluntaryExitFilter{
+			MinIndex: builderIndexWithFlag,
+			MaxIndex: builderIndexWithFlag,
+		}, 0, 1); totalExits > 0 && len(exits) > 0 {
+			pageData.ExitReason = "Builder submitted a voluntary exit request"
+			pageData.ExitReasonVoluntaryExit = true
+			pageData.ExitReasonSlot = exits[0].SlotNumber
+
+			// Check for EL-triggered withdrawal request (full exit with amount=0)
+		} else {
+			zeroAmount := uint64(0)
+			if withdrawals, totalPendingTxs, totalReqs := services.GlobalBeaconService.GetWithdrawalRequestsByFilter(ctx, &services.CombinedWithdrawalRequestFilter{
+				Filter: &dbtypes.WithdrawalRequestFilter{
+					PublicKey: builder.PublicKey[:],
+					MaxAmount: &zeroAmount,
+				},
+			}, 0, 1); totalPendingTxs+totalReqs > 0 && len(withdrawals) > 0 {
+				withdrawal := withdrawals[0]
+				pageData.ExitReason = "Builder submitted a full withdrawal request"
+				pageData.ExitReasonWithdrawal = true
+				if withdrawal.Request != nil {
+					pageData.ExitReasonSlot = withdrawal.Request.SlotNumber
+				}
+
+				// surface the originating EL transaction when known
+				if withdrawal.Transaction != nil {
+					pageData.ExitReasonTxHash = withdrawal.Transaction.TxHash
+					pageData.ExitReasonTxDetails = &models.BuilderPageDataExitTxDetails{
+						BlockNumber: withdrawal.Transaction.BlockNumber,
+						BlockHash:   fmt.Sprintf("%#x", withdrawal.Transaction.BlockRoot),
+						BlockTime:   withdrawal.Transaction.BlockTime,
+						TxOrigin:    common.Address(withdrawal.Transaction.TxSender).Hex(),
+						TxTarget:    common.Address(withdrawal.Transaction.TxTarget).Hex(),
+						TxHash:      fmt.Sprintf("%#x", withdrawal.Transaction.TxHash),
+					}
+				}
+			}
+		}
+	}
+
+	// Load tab-specific data (only the requested tab is populated)
+	switch tabView {
+	case "blocks":
+		pageData.RecentBlocks = buildBuilderRecentBlocks(ctx, builderIndex, chainState)
+	case "bids":
+		pageData.RecentBids = buildBuilderRecentBids(ctx, builderIndex, chainState)
+	case "deposits":
+		pageData.RecentDeposits = buildBuilderRecentDeposits(ctx, builder.PublicKey[:], chainState)
+	}
+
+	return pageData, 10 * time.Minute
+}
+
+// buildBuilderRecentBlocks returns up to 20 of the builder's most recent
+// blocks, including both canonical and orphaned payloads. Bid values are
+// resolved through a single batched DB query keyed by execution block hash.
+func buildBuilderRecentBlocks(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataBlock {
+	// Filter blocks by builder index using the new DB filter
+	builderIndexInt64 := int64(builderIndex)
+	filter := &dbtypes.BlockFilter{
+		BuilderIndex: &builderIndexInt64,
+		WithOrphaned: 1, // Include both canonical and orphaned
+		WithMissing:  0, // Exclude missing blocks
+	}
+
+	// Get blocks built by this builder
+	dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, filter, 0, 20, 0)
+
+	// Collect block hashes for batch bid lookup
+	blockHashes := make([][]byte, 0, len(dbBlocks))
+	validBlocks := make([]*dbtypes.Slot, 0, len(dbBlocks))
+
+	for _, assignedSlot := range dbBlocks {
+		if assignedSlot.Block == nil {
+			continue
+		}
+		slot := assignedSlot.Block
+
+		// Only include blocks with actual payloads
+		if slot.PayloadStatus != dbtypes.PayloadStatusCanonical && slot.PayloadStatus != dbtypes.PayloadStatusOrphaned {
+			continue
+		}
+
+		// blocks without an execution block hash cannot be matched to a bid
+		if len(slot.EthBlockHash) > 0 {
+			blockHashes = append(blockHashes, slot.EthBlockHash)
+			validBlocks = append(validBlocks, slot)
+		}
+	}
+
+	// Batch fetch all bids for these block hashes (avoids one query per block)
+	bidsMap := db.GetBidsByBlockHashes(ctx, blockHashes, builderIndex)
+
+	// Build result
+	blocks := make([]*models.BuilderPageDataBlock, 0, len(validBlocks))
+	for _, slot := range validBlocks {
+		block := &models.BuilderPageDataBlock{
+			Epoch:        uint64(chainState.EpochOfSlot(phase0.Slot(slot.Slot))),
+			Slot:         slot.Slot,
+			Ts:           chainState.SlotToTime(phase0.Slot(slot.Slot)),
+			BlockRoot:    slot.Root,
+			BlockHash:    slot.EthBlockHash,
+			Status:       uint16(slot.PayloadStatus),
+			FeeRecipient: slot.EthFeeRecipient,
+			GasLimit:     slot.EthGasLimit,
+		}
+
+		// Look up bid info for Value and ElPayment from the batch result
+		// (lookup key is the lowercase hex encoding of the block hash)
+		blockHashKey := fmt.Sprintf("%x", slot.EthBlockHash)
+		if bid, ok := bidsMap[blockHashKey]; ok {
+			block.Value = bid.Value
+			block.ElPayment = bid.ElPayment
+		}
+
+		blocks = append(blocks, block)
+	}
+
+	return blocks
+}
+
+// buildBuilderRecentBids returns up to 20 of the builder's most recent bids,
+// marking a bid as winning when a canonical slot exists for its block hash.
+func buildBuilderRecentBids(ctx context.Context, builderIndex uint64, chainState *consensus.ChainState) []*models.BuilderPageDataBid {
+	bids, _ := db.GetBidsByBuilderIndex(ctx, builderIndex, 0, 20)
+
+	result := make([]*models.BuilderPageDataBid, 0, len(bids))
+	for _, bid := range bids {
+		bidData := &models.BuilderPageDataBid{
+			Slot:         bid.Slot,
+			Ts:           chainState.SlotToTime(phase0.Slot(bid.Slot)),
+			ParentRoot:   bid.ParentRoot,
+			ParentHash:   bid.ParentHash,
+			BlockHash:    bid.BlockHash,
+			FeeRecipient: bid.FeeRecipient,
+			GasLimit:     bid.GasLimit,
+			Value:        bid.Value,
+			ElPayment:    bid.ElPayment,
+			IsWinning:    false,
+		}
+
+		// Check if this bid won (payload was included)
+		// NOTE(review): this issues one DB query per bid (up to 20 per page
+		// build); consider a batched lookup similar to GetBidsByBlockHashes
+		// used in buildBuilderRecentBlocks.
+		slots := db.GetSlotsByBlockHash(ctx, bid.BlockHash)
+		for _, slot := range slots {
+			if slot.PayloadStatus == dbtypes.PayloadStatusCanonical {
+				bidData.IsWinning = true
+				break
+			}
+		}
+
+		result = append(result, bidData)
+	}
+
+	return result
+}
+
+// buildBuilderRecentDeposits returns up to 20 deposit requests matching the
+// builder's pubkey, including orphaned requests.
+func buildBuilderRecentDeposits(ctx context.Context, pubkey []byte, chainState *consensus.ChainState) []*models.BuilderPageDataDeposit {
+	result := make([]*models.BuilderPageDataDeposit, 0)
+
+	// Query deposit requests by builder pubkey
+	depositFilter := &services.CombinedDepositRequestFilter{
+		Filter: &dbtypes.DepositTxFilter{
+			PublicKey:    pubkey,
+			WithOrphaned: 1,
+		},
+	}
+	deposits, _ := services.GlobalBeaconService.GetDepositRequestsByFilter(ctx, depositFilter, 0, 20)
+	for _, deposit := range deposits {
+		entry := &models.BuilderPageDataDeposit{
+			Type: "deposit",
+		}
+		if deposit.Request != nil {
+			// deposit request was seen in a CL block
+			entry.SlotNumber = deposit.Request.SlotNumber
+			entry.SlotRoot = deposit.Request.SlotRoot
+			entry.Time = chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber))
+			entry.Orphaned = deposit.RequestOrphaned
+		} else if deposit.Transaction != nil {
+			// only the EL deposit transaction is known
+			// NOTE(review): BlockTime is fed to SlotToTime as if it were a
+			// slot number - if BlockTime is a unix timestamp this produces a
+			// wrong time; confirm against dbtypes.DepositTx.
+			entry.Time = chainState.SlotToTime(phase0.Slot(deposit.Transaction.BlockTime))
+		}
+		result = append(result, entry)
+	}
+
+	return result
+}
diff --git a/handlers/builders.go b/handlers/builders.go
new file mode 100644
index 000000000..e2d3ce73e
--- /dev/null
+++ b/handlers/builders.go
@@ -0,0 +1,298 @@
+package handlers
+
+import (
+ "context"
+ "encoding/hex"
+ "encoding/json"
+ "fmt"
+ "net/http"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+ "time"
+
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/ethpandaops/dora/services"
+ "github.com/ethpandaops/dora/templates"
+ "github.com/ethpandaops/dora/types/models"
+ "github.com/sirupsen/logrus"
+)
+
+// Builders will return the main "builders" page using a go template
+func Builders(w http.ResponseWriter, r *http.Request) {
+ var buildersTemplateFiles = append(layoutTemplateFiles,
+ "builders/builders.html",
+ "_svg/professor.html",
+ )
+
+ var pageTemplate = templates.GetTemplate(buildersTemplateFiles...)
+ data := InitPageData(w, r, "builders", "/builders", "Builders", buildersTemplateFiles)
+
+ urlArgs := r.URL.Query()
+ var pageNumber uint64 = 1
+ if urlArgs.Has("p") {
+ pageNumber, _ = strconv.ParseUint(urlArgs.Get("p"), 10, 64)
+ }
+ var pageSize uint64 = 50
+ if urlArgs.Has("c") {
+ pageSize, _ = strconv.ParseUint(urlArgs.Get("c"), 10, 64)
+ }
+ if urlArgs.Has("json") && pageSize > 10000 {
+ pageSize = 10000
+ } else if !urlArgs.Has("json") && pageSize > 1000 {
+ pageSize = 1000
+ }
+
+ var filterPubKey string
+ var filterIndex string
+ var filterExecutionAddr string
+ var filterStatus string
+ if urlArgs.Has("f") {
+ if urlArgs.Has("f.pubkey") {
+ filterPubKey = urlArgs.Get("f.pubkey")
+ }
+ if urlArgs.Has("f.index") {
+ filterIndex = urlArgs.Get("f.index")
+ }
+ if urlArgs.Has("f.execution_addr") {
+ filterExecutionAddr = urlArgs.Get("f.execution_addr")
+ }
+ if urlArgs.Has("f.status") {
+ filterStatus = strings.Join(urlArgs["f.status"], ",")
+ }
+ }
+ var sortOrder string
+ if urlArgs.Has("o") {
+ sortOrder = urlArgs.Get("o")
+ }
+
+ var pageError error
+ pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 1)
+ if pageError == nil {
+ data.Data, pageError = getBuildersPageData(pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+ }
+ if pageError != nil {
+ handlePageError(w, r, pageError)
+ return
+ }
+
+ if urlArgs.Has("json") {
+ w.Header().Set("Content-Type", "application/json")
+ err := json.NewEncoder(w).Encode(data.Data)
+ if err != nil {
+ logrus.WithError(err).Error("error encoding builders data")
+ http.Error(w, "Internal server error", http.StatusServiceUnavailable)
+ }
+ return
+ }
+
+ w.Header().Set("Content-Type", "text/html")
+ if handleTemplateError(w, r, "builders.go", "Builders", "", pageTemplate.ExecuteTemplate(w, "layout", data)) != nil {
+ return // an error has occurred and was processed
+ }
+}
+
+// getBuildersPageData returns the builders page model, served from the
+// frontend cache when available. All paging/sort/filter arguments are part
+// of the cache key; on a cache miss buildBuildersPageData is invoked and its
+// cache timeout applied to the stored entry.
+func getBuildersPageData(pageNumber uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, error) {
+	pageData := &models.BuildersPageData{}
+	pageCacheKey := fmt.Sprintf("builders:%v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+	pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} {
+		pageData, cacheTimeout := buildBuildersPageData(pageCall.CallCtx, pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+		pageCall.CacheTimeout = cacheTimeout
+		return pageData
+	})
+	if pageErr == nil && pageRes != nil {
+		resData, resOk := pageRes.(*models.BuildersPageData)
+		if !resOk {
+			// the cache returned an unexpected model type
+			return nil, ErrInvalidPageModel
+		}
+		pageData = resData
+	}
+	return pageData, pageErr
+}
+
+// buildBuildersPageData builds the builders overview page model: it applies
+// the requested filters and sort order, pages through the builder set and
+// derives a lifecycle state for each entry. Returns the model and a cache
+// timeout (shortened when the result set is empty).
+// pageNumber is 1-based; 0 is treated as the first/default page.
+func buildBuildersPageData(ctx context.Context, pageNumber uint64, pageSize uint64, sortOrder string, filterPubKey string, filterIndex string, filterExecutionAddr string, filterStatus string) (*models.BuildersPageData, time.Duration) {
+	logrus.Debugf("builders page called: %v:%v:%v:%v:%v:%v:%v", pageNumber, pageSize, sortOrder, filterPubKey, filterIndex, filterExecutionAddr, filterStatus)
+	pageData := &models.BuildersPageData{}
+	cacheTime := 10 * time.Minute
+
+	if pageSize == 0 {
+		// defensive: a zero page size would cause a division by zero below
+		pageSize = 50
+	}
+
+	chainState := services.GlobalBeaconService.GetChainState()
+
+	// 0-based index of the first entry on the requested page; guarded so a
+	// pageNumber of 0 does not underflow the unsigned subtraction
+	firstBuilderIdx := uint64(0)
+	if pageNumber > 1 {
+		firstBuilderIdx = (pageNumber - 1) * pageSize
+	}
+
+	builderFilter := dbtypes.BuilderFilter{
+		Limit:  pageSize,
+		Offset: firstBuilderIdx,
+	}
+
+	filterArgs := url.Values{}
+	if filterPubKey != "" || filterIndex != "" || filterExecutionAddr != "" || filterStatus != "" {
+		if filterPubKey != "" {
+			pageData.FilterPubKey = filterPubKey
+			filterArgs.Add("f.pubkey", filterPubKey)
+			filterPubKeyVal, _ := hex.DecodeString(strings.Replace(filterPubKey, "0x", "", -1))
+			builderFilter.PubKey = filterPubKeyVal
+		}
+		if filterIndex != "" {
+			pageData.FilterIndex = filterIndex
+			filterArgs.Add("f.index", filterIndex)
+			// an exact index match is expressed as min == max
+			filterIndexVal, _ := strconv.ParseUint(filterIndex, 10, 64)
+			builderFilter.MinIndex = &filterIndexVal
+			builderFilter.MaxIndex = &filterIndexVal
+		}
+		if filterExecutionAddr != "" {
+			pageData.FilterExecutionAddr = filterExecutionAddr
+			filterArgs.Add("f.execution_addr", filterExecutionAddr)
+			filterExecutionAddrVal, _ := hex.DecodeString(strings.Replace(filterExecutionAddr, "0x", "", -1))
+			builderFilter.ExecutionAddress = filterExecutionAddrVal
+		}
+		if filterStatus != "" {
+			pageData.FilterStatus = filterStatus
+			filterArgs.Add("f.status", filterStatus)
+			filterStatusVal := strings.Split(filterStatus, ",")
+			builderFilter.Status = make([]dbtypes.BuilderStatus, 0, len(filterStatusVal))
+			for _, status := range filterStatusVal {
+				switch status {
+				case "active":
+					builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusActiveFilter)
+				case "exited":
+					builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusExitedFilter)
+				case "superseded":
+					builderFilter.Status = append(builderFilter.Status, dbtypes.BuilderStatusSupersededFilter)
+				}
+			}
+		}
+	}
+
+	// apply sort order ("-d" suffix = descending; default is index ascending)
+	switch sortOrder {
+	case "index-d":
+		builderFilter.OrderBy = dbtypes.BuilderOrderIndexDesc
+	case "pubkey":
+		builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyAsc
+	case "pubkey-d":
+		builderFilter.OrderBy = dbtypes.BuilderOrderPubKeyDesc
+	case "balance":
+		builderFilter.OrderBy = dbtypes.BuilderOrderBalanceAsc
+	case "balance-d":
+		builderFilter.OrderBy = dbtypes.BuilderOrderBalanceDesc
+	case "deposit":
+		builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochAsc
+	case "deposit-d":
+		builderFilter.OrderBy = dbtypes.BuilderOrderDepositEpochDesc
+	case "withdrawable":
+		builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochAsc
+	case "withdrawable-d":
+		builderFilter.OrderBy = dbtypes.BuilderOrderWithdrawableEpochDesc
+	default:
+		builderFilter.OrderBy = dbtypes.BuilderOrderIndexAsc
+		pageData.IsDefaultSorting = true
+		sortOrder = "index"
+	}
+	pageData.Sorting = sortOrder
+
+	// get latest builder set
+	builderSetRsp, builderSetLen := services.GlobalBeaconService.GetFilteredBuilderSet(ctx, &builderFilter, true)
+	if len(builderSetRsp) == 0 {
+		// cache empty results for a shorter time so new builders show up soon
+		cacheTime = 5 * time.Minute
+	}
+
+	currentEpoch := chainState.CurrentEpoch()
+	finalizedEpoch, _ := chainState.GetFinalizedCheckpoint()
+
+	// get status options
+	// TODO(review): the per-status counts are never populated (always 0) -
+	// either fill them from the backend or drop the Count usage in the view.
+	pageData.FilterStatusOpts = []models.BuildersPageDataStatusOption{
+		{Status: "active", Count: 0},
+		{Status: "exited", Count: 0},
+		{Status: "superseded", Count: 0},
+	}
+
+	totalPages := builderSetLen / pageSize
+	if (builderSetLen % pageSize) > 0 {
+		totalPages++
+	}
+	if pageNumber == 0 {
+		pageData.IsDefaultPage = true
+	} else if pageNumber >= totalPages {
+		// clamp out-of-range page numbers to the last available page
+		if totalPages == 0 {
+			pageNumber = 0
+		} else {
+			pageNumber = totalPages
+		}
+	}
+
+	pageData.PageSize = pageSize
+	pageData.TotalPages = totalPages
+	pageData.CurrentPageIndex = pageNumber
+	if pageNumber > 1 {
+		pageData.PrevPageIndex = pageNumber - 1
+	}
+	if pageNumber < totalPages {
+		pageData.NextPageIndex = pageNumber + 1
+	}
+	if totalPages > 1 {
+		pageData.LastPageIndex = totalPages
+	}
+
+	// get builders
+	pageData.Builders = make([]*models.BuildersPageDataBuilder, 0, len(builderSetRsp))
+
+	for _, builder := range builderSetRsp {
+		if builder.Builder == nil {
+			continue
+		}
+
+		builderData := &models.BuildersPageDataBuilder{
+			Index:            uint64(builder.Index),
+			PublicKey:        builder.Builder.PublicKey[:],
+			ExecutionAddress: builder.Builder.ExecutionAddress[:],
+			Balance:          uint64(builder.Builder.Balance),
+		}
+
+		// Determine state (priority: Superseded > Exited > Pending > Active)
+		if builder.Superseded {
+			builderData.State = "Superseded"
+		} else if builder.Builder.WithdrawableEpoch <= currentEpoch {
+			builderData.State = "Exited"
+		} else if builder.Builder.DepositEpoch > finalizedEpoch {
+			builderData.State = "Pending"
+		} else {
+			builderData.State = "Active"
+		}
+
+		// Deposit epoch (18446744073709551615 == math.MaxUint64 sentinel)
+		if builder.Builder.DepositEpoch < 18446744073709551615 {
+			builderData.ShowDeposit = true
+			builderData.DepositEpoch = uint64(builder.Builder.DepositEpoch)
+			builderData.DepositTs = chainState.EpochToTime(builder.Builder.DepositEpoch)
+		}
+
+		// Withdrawable epoch (same sentinel as above)
+		if builder.Builder.WithdrawableEpoch < 18446744073709551615 {
+			builderData.ShowWithdrawable = true
+			builderData.WithdrawableEpoch = uint64(builder.Builder.WithdrawableEpoch)
+			builderData.WithdrawableTs = chainState.EpochToTime(builder.Builder.WithdrawableEpoch)
+		}
+
+		pageData.Builders = append(pageData.Builders, builderData)
+	}
+	pageData.BuilderCount = builderSetLen
+	// FirstBuilder/LastBuilder describe the 0-based range actually queried;
+	// using pageNumber*pageSize here would be off by one page for the
+	// 1-based page numbering (page 1 starts at offset 0, not pageSize)
+	pageData.FirstBuilder = firstBuilderIdx
+	pageData.LastBuilder = pageData.FirstBuilder + uint64(len(pageData.Builders))
+
+	// Populate UrlParams for page jump functionality
+	pageData.UrlParams = make(map[string]string)
+	for key, values := range filterArgs {
+		if len(values) > 0 {
+			pageData.UrlParams[key] = values[0]
+		}
+	}
+	pageData.UrlParams["c"] = fmt.Sprintf("%v", pageData.PageSize)
+
+	pageData.FilteredPageLink = fmt.Sprintf("/builders?f&%v&c=%v", filterArgs.Encode(), pageData.PageSize)
+
+	// Sort status options alphabetically
+	sort.Slice(pageData.FilterStatusOpts, func(a, b int) bool {
+		return strings.Compare(pageData.FilterStatusOpts[a].Status, pageData.FilterStatusOpts[b].Status) < 0
+	})
+
+	return pageData, cacheTime
+}
diff --git a/handlers/deposits.go b/handlers/deposits.go
index d83310b4f..bb83a0fa6 100644
--- a/handlers/deposits.go
+++ b/handlers/deposits.go
@@ -10,6 +10,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethereum/go-ethereum/common"
"github.com/ethpandaops/dora/db"
@@ -155,6 +156,9 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint
// load initiated deposits
dbDepositTxs := db.GetDepositTxs(ctx, 0, 20)
for _, depositTx := range dbDepositTxs {
+ // Check if this is a builder deposit (0x03 withdrawal credentials)
+ isBuilder := len(depositTx.WithdrawalCredentials) > 0 && depositTx.WithdrawalCredentials[0] == 0x03
+
depositTxData := &models.DepositsPageDataInitiatedDeposit{
Index: depositTx.Index,
Address: depositTx.TxSender,
@@ -166,12 +170,31 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint
Block: depositTx.BlockNumber,
Orphaned: depositTx.Orphaned,
Valid: depositTx.ValidSignature == 1 || depositTx.ValidSignature == 2,
+ IsBuilder: isBuilder,
}
validatorIndex, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositTx.PublicKey))
if !found {
depositTxData.ValidatorStatus = "Deposited"
depositTxData.ValidatorExists = false
+ } else if uint64(validatorIndex)&services.BuilderIndexFlag != 0 {
+ builderIndex := uint64(validatorIndex) &^ services.BuilderIndexFlag
+ depositTxData.IsBuilder = true
+ depositTxData.ValidatorExists = true
+ depositTxData.ValidatorIndex = builderIndex
+ depositTxData.ValidatorName = services.GlobalBeaconService.GetValidatorName(uint64(validatorIndex))
+
+ builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+ if builder == nil {
+ depositTxData.ValidatorStatus = "Deposited"
+ } else {
+ currentEpoch := chainState.CurrentEpoch()
+ if builder.WithdrawableEpoch <= currentEpoch {
+ depositTxData.ValidatorStatus = "Exited"
+ } else {
+ depositTxData.ValidatorStatus = "Active"
+ }
+ }
} else {
depositTxData.ValidatorExists = true
depositTxData.ValidatorIndex = uint64(validatorIndex)
@@ -218,15 +241,20 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint
dbDeposits, _ := services.GlobalBeaconService.GetDepositRequestsByFilter(ctx, depositFilter, 0, uint32(20))
for _, deposit := range dbDeposits {
+ // Check if this is a builder deposit (0x03 withdrawal credentials)
+ wdCreds := deposit.WithdrawalCredentials()
+ isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03
+
depositData := &models.DepositsPageDataIncludedDeposit{
PublicKey: deposit.PublicKey(),
- Withdrawalcredentials: deposit.WithdrawalCredentials(),
+ Withdrawalcredentials: wdCreds,
Amount: deposit.Amount(),
Time: chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)),
SlotNumber: deposit.Request.SlotNumber,
SlotRoot: deposit.Request.SlotRoot,
Orphaned: deposit.RequestOrphaned,
DepositorAddress: deposit.SourceAddress(),
+ IsBuilder: isBuilder,
}
if deposit.IsQueued {
@@ -258,6 +286,24 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint
validatorIndex, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(deposit.PublicKey()))
if !found {
depositData.ValidatorStatus = "Deposited"
+ } else if uint64(validatorIndex)&services.BuilderIndexFlag != 0 {
+ builderIndex := uint64(validatorIndex) &^ services.BuilderIndexFlag
+ depositData.IsBuilder = true
+ depositData.ValidatorExists = true
+ depositData.ValidatorIndex = builderIndex
+ depositData.ValidatorName = services.GlobalBeaconService.GetValidatorName(uint64(validatorIndex))
+
+ builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+ if builder == nil {
+ depositData.ValidatorStatus = "Deposited"
+ } else {
+ currentEpoch := chainState.CurrentEpoch()
+ if builder.WithdrawableEpoch <= currentEpoch {
+ depositData.ValidatorStatus = "Exited"
+ } else {
+ depositData.ValidatorStatus = "Active"
+ }
+ }
} else {
depositData.ValidatorExists = true
depositData.ValidatorIndex = uint64(validatorIndex)
@@ -318,16 +364,39 @@ func buildDepositsPageData(ctx context.Context, firstEpoch uint64, pageSize uint
}
for _, queueEntry := range queuedDeposits.Queue[:limit] {
+ // Check if this is a builder deposit (0x03 withdrawal credentials)
+ wdCreds := queueEntry.PendingDeposit.WithdrawalCredentials[:]
+ isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03
+
depositData := &models.DepositsPageDataQueuedDeposit{
QueuePosition: queueEntry.QueuePos,
EstimatedTime: chainState.EpochToTime(queueEntry.EpochEstimate),
PublicKey: queueEntry.PendingDeposit.Pubkey[:],
- Withdrawalcredentials: queueEntry.PendingDeposit.WithdrawalCredentials[:],
+ Withdrawalcredentials: wdCreds,
Amount: uint64(queueEntry.PendingDeposit.Amount),
+ IsBuilder: isBuilder,
}
if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositData.PublicKey)); !found {
depositData.ValidatorStatus = "Deposited"
+ } else if uint64(validatorIdx)&services.BuilderIndexFlag != 0 {
+ builderIndex := uint64(validatorIdx) &^ services.BuilderIndexFlag
+ depositData.IsBuilder = true
+ depositData.ValidatorExists = true
+ depositData.ValidatorIndex = builderIndex
+ depositData.ValidatorName = services.GlobalBeaconService.GetValidatorName(uint64(validatorIdx))
+
+ builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+ if builder == nil {
+ depositData.ValidatorStatus = "Deposited"
+ } else {
+ currentEpoch := chainState.CurrentEpoch()
+ if builder.WithdrawableEpoch <= currentEpoch {
+ depositData.ValidatorStatus = "Exited"
+ } else {
+ depositData.ValidatorStatus = "Active"
+ }
+ }
} else {
depositData.ValidatorExists = true
depositData.ValidatorIndex = uint64(validatorIdx)
diff --git a/handlers/el_withdrawals.go b/handlers/el_withdrawals.go
index 68149dca4..a5897fcab 100644
--- a/handlers/el_withdrawals.go
+++ b/handlers/el_withdrawals.go
@@ -213,7 +213,12 @@ func buildFilteredElWithdrawalsPageData(ctx context.Context, pageIdx uint64, pag
}
if validatorIndex := elWithdrawal.ValidatorIndex(); validatorIndex != nil {
- elWithdrawalData.ValidatorIndex = *validatorIndex
+ if *validatorIndex&services.BuilderIndexFlag != 0 {
+ elWithdrawalData.IsBuilder = true
+ elWithdrawalData.ValidatorIndex = *validatorIndex &^ services.BuilderIndexFlag
+ } else {
+ elWithdrawalData.ValidatorIndex = *validatorIndex
+ }
elWithdrawalData.ValidatorName = services.GlobalBeaconService.GetValidatorName(*validatorIndex)
elWithdrawalData.ValidatorValid = true
}
diff --git a/handlers/epoch.go b/handlers/epoch.go
index 807975d47..b9a1e2e90 100644
--- a/handlers/epoch.go
+++ b/handlers/epoch.go
@@ -91,7 +91,7 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat
specs := chainState.GetSpecs()
currentSlot := chainState.CurrentSlot()
currentEpoch := chainState.EpochOfSlot(currentSlot)
- if epoch > uint64(currentEpoch) {
+ if epoch > uint64(currentEpoch)+1 {
return nil, -1
}
@@ -105,7 +105,7 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat
}
nextEpoch := epoch + 1
- if nextEpoch > uint64(currentEpoch) {
+ if nextEpoch > uint64(currentEpoch)+1 {
nextEpoch = 0
}
firstSlot := chainState.EpochToSlot(phase0.Epoch(epoch))
@@ -170,12 +170,18 @@ func buildEpochPageData(ctx context.Context, epoch uint64) (*models.EpochPageDat
pageData.MissedCount++
}
+ payloadStatus := dbSlot.PayloadStatus
+ if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
slotData := &models.EpochPageDataSlot{
Slot: slot,
Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))),
Ts: chainState.SlotToTime(phase0.Slot(slot)),
Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing,
Status: uint8(dbSlot.Status),
+ PayloadStatus: uint8(payloadStatus),
Proposer: dbSlot.Proposer,
ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer),
AttestationCount: dbSlot.AttestationCount,
diff --git a/handlers/exits.go b/handlers/exits.go
index 534b4770a..82da09050 100644
--- a/handlers/exits.go
+++ b/handlers/exits.go
@@ -10,6 +10,7 @@ import (
"time"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/dbtypes"
"github.com/ethpandaops/dora/services"
@@ -159,43 +160,70 @@ func buildExitsPageData(ctx context.Context, firstEpoch uint64, pageSize uint64,
dbVoluntaryExits, _ := services.GlobalBeaconService.GetVoluntaryExitsByFilter(ctx, voluntaryExitFilter, 0, uint32(20))
for _, voluntaryExit := range dbVoluntaryExits {
exitData := &models.ExitsPageDataRecentExit{
- SlotNumber: voluntaryExit.SlotNumber,
- SlotRoot: voluntaryExit.SlotRoot,
- Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)),
- Orphaned: voluntaryExit.Orphaned,
- ValidatorIndex: voluntaryExit.ValidatorIndex,
- ValidatorName: services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex),
+ SlotNumber: voluntaryExit.SlotNumber,
+ SlotRoot: voluntaryExit.SlotRoot,
+ Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)),
+ Orphaned: voluntaryExit.Orphaned,
}
- validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false)
- if validator == nil {
- exitData.ValidatorStatus = "Unknown"
- } else {
- exitData.PublicKey = validator.Validator.PublicKey[:]
- exitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials
-
- if strings.HasPrefix(validator.Status.String(), "pending") {
- exitData.ValidatorStatus = "Pending"
- } else if validator.Status == v1.ValidatorStateActiveOngoing {
- exitData.ValidatorStatus = "Active"
- exitData.ShowUpcheck = true
- } else if validator.Status == v1.ValidatorStateActiveExiting {
- exitData.ValidatorStatus = "Exiting"
- exitData.ShowUpcheck = true
- } else if validator.Status == v1.ValidatorStateActiveSlashed {
- exitData.ValidatorStatus = "Slashed"
- exitData.ShowUpcheck = true
- } else if validator.Status == v1.ValidatorStateExitedUnslashed {
- exitData.ValidatorStatus = "Exited"
- } else if validator.Status == v1.ValidatorStateExitedSlashed {
- exitData.ValidatorStatus = "Slashed"
+ // Check if this is a builder exit (validator index has BuilderIndexFlag set)
+ if voluntaryExit.ValidatorIndex&services.BuilderIndexFlag != 0 {
+ builderIndex := voluntaryExit.ValidatorIndex &^ services.BuilderIndexFlag
+ exitData.IsBuilder = true
+ exitData.ValidatorIndex = builderIndex
+
+ // Resolve builder name via validatornames service (with BuilderIndexFlag)
+ exitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex)
+
+ builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+ if builder == nil {
+ exitData.ValidatorStatus = "Unknown"
} else {
- exitData.ValidatorStatus = validator.Status.String()
+ exitData.PublicKey = builder.PublicKey[:]
+
+ // Determine builder status
+ currentEpoch := chainState.CurrentEpoch()
+ if builder.WithdrawableEpoch <= currentEpoch {
+ exitData.ValidatorStatus = "Exited"
+ } else {
+ exitData.ValidatorStatus = "Exiting"
+ }
}
+ } else {
+ // Regular validator exit
+ exitData.ValidatorIndex = voluntaryExit.ValidatorIndex
+ exitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex)
- if exitData.ShowUpcheck {
- exitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3))
- exitData.UpcheckMaximum = uint8(3)
+ validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false)
+ if validator == nil {
+ exitData.ValidatorStatus = "Unknown"
+ } else {
+ exitData.PublicKey = validator.Validator.PublicKey[:]
+ exitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials
+
+ if strings.HasPrefix(validator.Status.String(), "pending") {
+ exitData.ValidatorStatus = "Pending"
+ } else if validator.Status == v1.ValidatorStateActiveOngoing {
+ exitData.ValidatorStatus = "Active"
+ exitData.ShowUpcheck = true
+ } else if validator.Status == v1.ValidatorStateActiveExiting {
+ exitData.ValidatorStatus = "Exiting"
+ exitData.ShowUpcheck = true
+ } else if validator.Status == v1.ValidatorStateActiveSlashed {
+ exitData.ValidatorStatus = "Slashed"
+ exitData.ShowUpcheck = true
+ } else if validator.Status == v1.ValidatorStateExitedUnslashed {
+ exitData.ValidatorStatus = "Exited"
+ } else if validator.Status == v1.ValidatorStateExitedSlashed {
+ exitData.ValidatorStatus = "Slashed"
+ } else {
+ exitData.ValidatorStatus = validator.Status.String()
+ }
+
+ if exitData.ShowUpcheck {
+ exitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3))
+ exitData.UpcheckMaximum = uint8(3)
+ }
}
}
diff --git a/handlers/included_deposits.go b/handlers/included_deposits.go
index 428540fa0..18de1b461 100644
--- a/handlers/included_deposits.go
+++ b/handlers/included_deposits.go
@@ -201,15 +201,19 @@ func buildFilteredIncludedDepositsPageData(ctx context.Context, pageIdx uint64,
chainState := services.GlobalBeaconService.GetChainState()
for _, deposit := range dbDeposits {
+ wdCreds := deposit.WithdrawalCredentials()
+ isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03
+
depositData := &models.IncludedDepositsPageDataDeposit{
PublicKey: deposit.PublicKey(),
- Withdrawalcredentials: deposit.WithdrawalCredentials(),
+ Withdrawalcredentials: wdCreds,
Amount: deposit.Amount(),
Time: chainState.SlotToTime(phase0.Slot(deposit.Request.SlotNumber)),
SlotNumber: deposit.Request.SlotNumber,
SlotRoot: deposit.Request.SlotRoot,
Orphaned: deposit.RequestOrphaned,
DepositorAddress: deposit.SourceAddress(),
+ IsBuilder: isBuilder,
}
if deposit.Request != nil {
diff --git a/handlers/index.go b/handlers/index.go
index 18faa8af6..f6902ee11 100644
--- a/handlers/index.go
+++ b/handlers/index.go
@@ -291,6 +291,19 @@ func buildIndexPageData(ctx context.Context) (*models.IndexPageData, time.Durati
ForkDigest: forkDigest[:],
})
}
+ if specs.GloasForkEpoch != nil && *specs.GloasForkEpoch < uint64(18446744073709551615) {
+ blobParams := chainState.GetBlobScheduleForEpoch(phase0.Epoch(*specs.GloasForkEpoch))
+ forkDigest := chainState.GetForkDigest(specs.GloasForkVersion, blobParams)
+ pageData.NetworkForks = append(pageData.NetworkForks, &models.IndexPageDataForks{
+ Name: "Gloas",
+ Epoch: *specs.GloasForkEpoch,
+ Version: specs.GloasForkVersion[:],
+ Time: uint64(chainState.EpochToTime(phase0.Epoch(*specs.GloasForkEpoch)).Unix()),
+ Active: uint64(currentEpoch) >= *specs.GloasForkEpoch,
+ Type: "consensus",
+ ForkDigest: forkDigest[:],
+ })
+ }
// Add BPO forks from BLOB_SCHEDULE
elBlobSchedule := services.GlobalBeaconService.GetExecutionChainState().GetFullBlobSchedule()
@@ -426,14 +439,23 @@ func buildIndexPageRecentBlocksData(ctx context.Context, pageData *models.IndexP
if blockData == nil {
continue
}
+
+ epoch := chainState.EpochOfSlot(phase0.Slot(blockData.Slot))
+
+ payloadStatus := blockData.PayloadStatus
+ if !chainState.IsEip7732Enabled(epoch) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
blockModel := &models.IndexPageDataBlocks{
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(blockData.Slot))),
- Slot: blockData.Slot,
- Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)),
- Proposer: blockData.Proposer,
- ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer),
- Status: uint64(blockData.Status),
- BlockRoot: blockData.Root,
+ Epoch: uint64(epoch),
+ Slot: blockData.Slot,
+ Ts: chainState.SlotToTime(phase0.Slot(blockData.Slot)),
+ Proposer: blockData.Proposer,
+ ProposerName: services.GlobalBeaconService.GetValidatorName(blockData.Proposer),
+ Status: uint64(blockData.Status),
+ PayloadStatus: uint8(payloadStatus),
+ BlockRoot: blockData.Root,
}
if blockData.EthBlockNumber != nil {
blockModel.WithEthBlock = true
@@ -471,16 +493,24 @@ func buildIndexPageRecentSlotsData(ctx context.Context, pageData *models.IndexPa
dbSlot := dbSlots[dbIdx]
dbIdx++
+ epoch := chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot))
+
+ payloadStatus := dbSlot.PayloadStatus
+ if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
slotData := &models.IndexPageDataSlots{
- Slot: slot,
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(dbSlot.Slot))),
- Ts: chainState.SlotToTime(phase0.Slot(slot)),
- Status: uint64(dbSlot.Status),
- Proposer: dbSlot.Proposer,
- ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer),
- BlockRoot: dbSlot.Root,
- ParentRoot: dbSlot.ParentRoot,
- ForkGraph: make([]*models.IndexPageDataForkGraph, 0),
+ Slot: slot,
+ Epoch: uint64(epoch),
+ Ts: chainState.SlotToTime(phase0.Slot(slot)),
+ Status: uint64(dbSlot.Status),
+ PayloadStatus: uint8(payloadStatus),
+ Proposer: dbSlot.Proposer,
+ ProposerName: services.GlobalBeaconService.GetValidatorName(dbSlot.Proposer),
+ BlockRoot: dbSlot.Root,
+ ParentRoot: dbSlot.ParentRoot,
+ ForkGraph: make([]*models.IndexPageDataForkGraph, 0),
}
pageData.RecentSlots = append(pageData.RecentSlots, slotData)
blockCount++
diff --git a/handlers/initiated_deposits.go b/handlers/initiated_deposits.go
index 173d72491..09ed47b14 100644
--- a/handlers/initiated_deposits.go
+++ b/handlers/initiated_deposits.go
@@ -181,6 +181,8 @@ func buildFilteredInitiatedDepositsPageData(ctx context.Context, pageIdx uint64,
}
for _, depositTx := range dbDepositTxs {
+ isBuilder := len(depositTx.WithdrawalCredentials) > 0 && depositTx.WithdrawalCredentials[0] == 0x03
+
depositTxData := &models.InitiatedDepositsPageDataDeposit{
Index: depositTx.Index,
Address: depositTx.TxSender,
@@ -193,6 +195,7 @@ func buildFilteredInitiatedDepositsPageData(ctx context.Context, pageIdx uint64,
Orphaned: depositTx.Orphaned,
Valid: depositTx.ValidSignature == 1 || depositTx.ValidSignature == 2,
ValidatorStatus: "",
+ IsBuilder: isBuilder,
}
if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositTx.PublicKey)); !found {
diff --git a/handlers/pageData.go b/handlers/pageData.go
index 5effba795..561f74849 100644
--- a/handlers/pageData.go
+++ b/handlers/pageData.go
@@ -90,6 +90,8 @@ func InitPageData(w http.ResponseWriter, r *http.Request, active, path, title st
}
func createMenuItems(active string) []types.MainMenuItem {
+ chainState := services.GlobalBeaconService.GetChainState()
+ specs := chainState.GetSpecs()
hiddenFor := []string{"confirmation", "login", "register"}
if utils.SliceContains(hiddenFor, active) {
@@ -203,6 +205,20 @@ func createMenuItems(active string) []types.MainMenuItem {
validatorMenu = append(validatorMenu, types.NavigationGroup{
Links: validatorMenuLinks,
})
+
+ if specs != nil && specs.GloasForkEpoch != nil && uint64(chainState.CurrentEpoch()) >= *specs.GloasForkEpoch {
+ builderMenu := []types.NavigationLink{
+ {
+ Label: "Builders",
+ Path: "/builders",
+ Icon: "fa-building",
+ },
+ }
+ validatorMenu = append(validatorMenu, types.NavigationGroup{
+ Links: builderMenu,
+ })
+ }
+
validatorMenu = append(validatorMenu, types.NavigationGroup{
Links: []types.NavigationLink{
{
@@ -223,8 +239,6 @@ func createMenuItems(active string) []types.MainMenuItem {
},
})
- chainState := services.GlobalBeaconService.GetChainState()
- specs := chainState.GetSpecs()
if specs != nil && specs.ElectraForkEpoch != nil && uint64(chainState.CurrentEpoch()) >= *specs.ElectraForkEpoch {
validatorMenu = append(validatorMenu, types.NavigationGroup{
Links: []types.NavigationLink{
diff --git a/handlers/queued_deposits.go b/handlers/queued_deposits.go
index 3b9f2ce67..dfe875a75 100644
--- a/handlers/queued_deposits.go
+++ b/handlers/queued_deposits.go
@@ -211,12 +211,16 @@ func buildQueuedDepositsPageData(ctx context.Context, pageIdx uint64, pageSize u
for i := start; i < end; i++ {
queueEntry := filteredQueue[i]
+ wdCreds := queueEntry.PendingDeposit.WithdrawalCredentials[:]
+ isBuilder := len(wdCreds) > 0 && wdCreds[0] == 0x03
+
depositData := &models.QueuedDepositsPageDataDeposit{
QueuePosition: queueEntry.QueuePos,
EstimatedTime: chainState.EpochToTime(queueEntry.EpochEstimate),
PublicKey: queueEntry.PendingDeposit.Pubkey[:],
Amount: uint64(queueEntry.PendingDeposit.Amount),
- Withdrawalcredentials: queueEntry.PendingDeposit.WithdrawalCredentials[:],
+ Withdrawalcredentials: wdCreds,
+ IsBuilder: isBuilder,
}
// Get validator status if exists
diff --git a/handlers/search.go b/handlers/search.go
index a0e9e7818..627d3a628 100644
--- a/handlers/search.go
+++ b/handlers/search.go
@@ -92,9 +92,9 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR
}
blockResult := &dbtypes.SearchBlockResult{}
- err = db.ReaderDb.Get(blockResult, `
- SELECT slot, root, status
- FROM slots
+ err = db.ReaderDb.GetContext(ctx, blockResult, `
+ SELECT slot, root, status
+ FROM slots
WHERE slot = $1 AND status != 0
LIMIT 1`, searchQuery)
if err == nil {
@@ -128,9 +128,9 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR
blockHash, err := hex.DecodeString(hashQuery)
if err == nil {
blockResult := &dbtypes.SearchBlockResult{}
- err = db.ReaderDb.Get(blockResult, `
- SELECT slot, root, orphaned
- FROM slots
+ err = db.ReaderDb.GetContext(ctx, blockResult, `
+ SELECT slot, root, orphaned
+ FROM slots
WHERE root = $1 OR
state_root = $1
LIMIT 1`, blockHash)
@@ -149,7 +149,7 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR
}
names := &dbtypes.SearchNameResult{}
- err = db.ReaderDb.Get(names, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.GetContext(ctx, names, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT name
FROM validator_names
@@ -166,7 +166,7 @@ func buildSearchResolverResult(ctx context.Context, searchQuery string) (searchR
}
graffiti := &dbtypes.SearchGraffitiResult{}
- err = db.ReaderDb.Get(graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.GetContext(ctx, graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT graffiti
FROM slots
@@ -261,7 +261,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
switch searchType {
case "epochs":
dbres := &dbtypes.SearchAheadEpochsResult{}
- err = db.ReaderDb.Select(dbres, "SELECT epoch FROM epochs WHERE CAST(epoch AS text) LIKE $1 ORDER BY epoch LIMIT 10", search+"%")
+ err = db.ReaderDb.SelectContext(ctx, dbres, "SELECT epoch FROM epochs WHERE CAST(epoch AS text) LIKE $1 ORDER BY epoch LIMIT 10", search+"%")
if err == nil {
model := make([]models.SearchAheadEpochsResult, len(*dbres))
for idx, entry := range *dbres {
@@ -299,9 +299,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
}
} else {
dbres := &dbtypes.SearchAheadSlotsResult{}
- err = db.ReaderDb.Select(dbres, `
- SELECT slot, root, status
- FROM slots
+ err = db.ReaderDb.SelectContext(ctx, dbres, `
+ SELECT slot, root, status
+ FROM slots
WHERE slot < $1 AND (root = $2 OR state_root = $2)
ORDER BY slot LIMIT 1`, minSlotIdx, blockHash)
if err != nil {
@@ -335,9 +335,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
result = res
} else {
dbres := &dbtypes.SearchAheadSlotsResult{}
- err = db.ReaderDb.Select(dbres, `
- SELECT slot, root, status
- FROM slots
+ err = db.ReaderDb.SelectContext(ctx, dbres, `
+ SELECT slot, root, status
+ FROM slots
WHERE slot = $1 AND status != 0
ORDER BY slot LIMIT 10`, blockNumber)
if err == nil {
@@ -383,9 +383,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
result = res
} else {
dbres := &dbtypes.SearchAheadExecBlocksResult{}
- err = db.ReaderDb.Select(dbres, `
- SELECT slot, root, eth_block_hash, eth_block_number, status
- FROM slots
+ err = db.ReaderDb.SelectContext(ctx, dbres, `
+ SELECT slot, root, eth_block_hash, eth_block_number, status
+ FROM slots
WHERE slot < $1 AND eth_block_hash = $2
ORDER BY slot LIMIT 10`, minSlotIdx, blockHash)
if err != nil {
@@ -422,9 +422,9 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
result = res
} else {
dbres := &dbtypes.SearchAheadExecBlocksResult{}
- err = db.ReaderDb.Select(dbres, `
- SELECT slot, root, eth_block_hash, eth_block_number, status
- FROM slots
+ err = db.ReaderDb.SelectContext(ctx, dbres, `
+ SELECT slot, root, eth_block_hash, eth_block_number, status
+ FROM slots
WHERE slot < $1 AND eth_block_number = $2
ORDER BY slot LIMIT 10`, minSlotIdx, blockNumber)
if err == nil {
@@ -444,7 +444,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
}
case "graffiti":
graffiti := &dbtypes.SearchAheadGraffitiResult{}
- err = db.ReaderDb.Select(graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.SelectContext(ctx, graffiti, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT graffiti, count(*) as count
FROM slots
@@ -472,7 +472,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
}
case "valname":
names := &dbtypes.SearchAheadValidatorNameResult{}
- err = db.ReaderDb.Select(names, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.SelectContext(ctx, names, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT name, count(*) as count
FROM validator_names
@@ -523,7 +523,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
} else if len(search) >= 2 && len(search) <= 96 {
// Search by pubkey prefix
validators := &dbtypes.SearchAheadValidatorResult{}
- err = db.ReaderDb.Select(validators, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.SelectContext(ctx, validators, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT v.validator_index, v.pubkey
FROM validators v
@@ -573,7 +573,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
} else if len(search) >= 2 && len(search) < 40 {
// Search by address prefix in DB
addresses := &dbtypes.SearchAheadAddressResult{}
- err = db.ReaderDb.Select(addresses, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.SelectContext(ctx, addresses, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT address, is_contract
FROM el_accounts
@@ -626,7 +626,7 @@ func buildSearchAheadResult(ctx context.Context, searchType, search string) (*se
} else if len(search) >= 2 && len(search) < 64 {
// Search by transaction hash prefix in DB
transactions := &dbtypes.SearchAheadTransactionResult{}
- err = db.ReaderDb.Select(transactions, db.EngineQuery(map[dbtypes.DBEngineType]string{
+ err = db.ReaderDb.SelectContext(ctx, transactions, db.EngineQuery(map[dbtypes.DBEngineType]string{
dbtypes.DBEnginePgsql: `
SELECT DISTINCT ON (tx_hash) tx_hash, block_number, reverted
FROM el_transactions
diff --git a/handlers/slot.go b/handlers/slot.go
index a99a4c3c9..c2b88a939 100644
--- a/handlers/slot.go
+++ b/handlers/slot.go
@@ -1,6 +1,7 @@
package handlers
import (
+ "bytes"
"context"
"encoding/hex"
"encoding/json"
@@ -46,6 +47,8 @@ func Slot(w http.ResponseWriter, r *http.Request) {
"slot/deposit_requests.html",
"slot/withdrawal_requests.html",
"slot/consolidation_requests.html",
+ "slot/bids.html",
+ "slot/ptc_votes.html",
)
var notfoundTemplateFiles = append(layoutTemplateFiles,
"slot/notfound.html",
@@ -530,6 +533,11 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock
includedValidators := []uint64{}
attEpochStatsValues := assignmentsMap[attEpoch]
+ if attVersioned.Version >= spec.DataVersionGloas {
+ payloadStatus := uint64(attData.Index)
+ attPageData.PayloadStatus = &payloadStatus
+ }
+
if attVersioned.Version >= spec.DataVersionElectra {
// EIP-7549 attestation
attAssignments = []uint64{}
@@ -614,9 +622,16 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock
pageData.VoluntaryExits = make([]*models.SlotPageVoluntaryExit, pageData.VoluntaryExitsCount)
for i, exit := range voluntaryExits {
+ validatorIndex := uint64(exit.Message.ValidatorIndex)
+ isBuilder := validatorIndex&services.BuilderIndexFlag != 0
+ displayIndex := validatorIndex
+ if isBuilder {
+ displayIndex = validatorIndex &^ services.BuilderIndexFlag
+ }
pageData.VoluntaryExits[i] = &models.SlotPageVoluntaryExit{
- ValidatorIndex: uint64(exit.Message.ValidatorIndex),
- ValidatorName: services.GlobalBeaconService.GetValidatorName(uint64(exit.Message.ValidatorIndex)),
+ ValidatorIndex: displayIndex,
+ ValidatorName: services.GlobalBeaconService.GetValidatorName(validatorIndex),
+ IsBuilder: isBuilder,
Epoch: uint64(exit.Message.Epoch),
Signature: exit.Signature[:],
}
@@ -732,7 +747,63 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock
pageData.SyncAggParticipation = utils.SyncCommitteeParticipation(pageData.SyncAggregateBits, specs.SyncCommitteeSize)
}
- if executionPayload, _ := blockData.Block.ExecutionPayload(); executionPayload != nil {
+ if payloadBid, err := blockData.Block.SignedExecutionPayloadBid(); err == nil {
+ commitments := make([][]byte, len(payloadBid.Message.BlobKZGCommitments))
+ for i := range payloadBid.Message.BlobKZGCommitments {
+ commitments[i] = payloadBid.Message.BlobKZGCommitments[i][:]
+ }
+
+ pageData.PayloadHeader = &models.SlotPagePayloadHeader{
+ PayloadStatus: uint16(0),
+ ParentBlockHash: payloadBid.Message.ParentBlockHash[:],
+ ParentBlockRoot: payloadBid.Message.ParentBlockRoot[:],
+ BlockHash: payloadBid.Message.BlockHash[:],
+ GasLimit: uint64(payloadBid.Message.GasLimit),
+ BuilderIndex: uint64(payloadBid.Message.BuilderIndex),
+ BuilderName: services.GlobalBeaconService.GetValidatorName(uint64(payloadBid.Message.BuilderIndex) | services.BuilderIndexFlag),
+ Slot: uint64(payloadBid.Message.Slot),
+ Value: uint64(payloadBid.Message.Value),
+ BlobKZGCommitments: commitments,
+ Signature: payloadBid.Signature[:],
+ }
+ }
+
+ var executionPayload *spec.VersionedExecutionPayload
+ if blockData.Block.Version >= spec.DataVersionGloas && blockData.Payload != nil {
+ executionPayload = &spec.VersionedExecutionPayload{
+ Version: spec.DataVersionGloas,
+ Gloas: blockData.Payload.Message.Payload,
+ }
+
+ // Determine payload status by checking if any canonical child
+ // builds on this block's execution payload.
+ pageData.PayloadHeader.PayloadStatus = uint16(dbtypes.PayloadStatusCanonical)
+ childSlots := services.GlobalBeaconService.GetDbBlocksByParentRoot(ctx, blockData.Root)
+ hasCanonicalChild := false
+ payloadIncluded := false
+
+ for _, child := range childSlots {
+ if child.Status != dbtypes.Canonical {
+ continue
+ }
+
+ hasCanonicalChild = true
+
+ if bytes.Equal(child.EthBlockParentHash, pageData.PayloadHeader.BlockHash) {
+ payloadIncluded = true
+
+ break
+ }
+ }
+
+ if hasCanonicalChild && !payloadIncluded {
+ pageData.PayloadHeader.PayloadStatus = uint16(dbtypes.PayloadStatusOrphaned)
+ }
+ } else {
+ executionPayload, _ = blockData.Block.ExecutionPayload()
+ }
+
+ if executionPayload != nil {
pageData.ExecutionData = &models.SlotPageExecutionData{}
if parentHash, err := executionPayload.ParentHash(); err == nil {
@@ -834,6 +905,36 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock
}
}
+ if specs.DenebForkEpoch != nil && uint64(epoch) >= *specs.DenebForkEpoch {
+ pageData.BlobsCount = uint64(len(blobKzgCommitments))
+ pageData.Blobs = make([]*models.SlotPageBlob, pageData.BlobsCount)
+ for i := range blobKzgCommitments {
+ blobData := &models.SlotPageBlob{
+ Index: uint64(i),
+ KzgCommitment: blobKzgCommitments[i][:],
+ }
+ pageData.Blobs[i] = blobData
+ }
+ }
+
+ if specs.ElectraForkEpoch != nil && uint64(epoch) >= *specs.ElectraForkEpoch {
+ var requests *electra.ExecutionRequests
+ if blockData.Block.Version >= spec.DataVersionGloas {
+ if blockData.Payload != nil {
+ requests = blockData.Payload.Message.ExecutionRequests
+ executionWithdrawals = blockData.Payload.Message.Payload.Withdrawals
+ }
+ } else {
+ requests, _ = blockData.Block.ExecutionRequests()
+ }
+
+ if requests != nil {
+ getSlotPageDepositRequests(pageData, requests.Deposits)
+ getSlotPageWithdrawalRequests(pageData, requests.Withdrawals)
+ getSlotPageConsolidationRequests(pageData, requests.Consolidations)
+ }
+ }
+
if specs.CapellaForkEpoch != nil && uint64(epoch) >= *specs.CapellaForkEpoch {
pageData.BLSChangesCount = uint64(len(blsToExecChanges))
pageData.BLSChanges = make([]*models.SlotPageBLSChange, pageData.BLSChangesCount)
@@ -850,32 +951,27 @@ func getSlotPageBlockData(ctx context.Context, blockData *services.CombinedBlock
pageData.WithdrawalsCount = uint64(len(executionWithdrawals))
pageData.Withdrawals = make([]*models.SlotPageWithdrawal, pageData.WithdrawalsCount)
for i, withdrawal := range executionWithdrawals {
+ validatorIndex := uint64(withdrawal.ValidatorIndex)
+ isBuilder := validatorIndex&services.BuilderIndexFlag != 0
+ displayIndex := validatorIndex
+ if isBuilder {
+ displayIndex = validatorIndex &^ services.BuilderIndexFlag
+ }
pageData.Withdrawals[i] = &models.SlotPageWithdrawal{
Index: uint64(withdrawal.Index),
- ValidatorIndex: uint64(withdrawal.ValidatorIndex),
- ValidatorName: services.GlobalBeaconService.GetValidatorName(uint64(withdrawal.ValidatorIndex)),
+ ValidatorIndex: displayIndex,
+ ValidatorName: services.GlobalBeaconService.GetValidatorName(validatorIndex),
+ IsBuilder: isBuilder,
Address: withdrawal.Address[:],
Amount: uint64(withdrawal.Amount),
}
}
}
- if specs.DenebForkEpoch != nil && uint64(epoch) >= *specs.DenebForkEpoch {
- pageData.BlobsCount = uint64(len(blobKzgCommitments))
- pageData.Blobs = make([]*models.SlotPageBlob, pageData.BlobsCount)
- for i := range blobKzgCommitments {
- blobData := &models.SlotPageBlob{
- Index: uint64(i),
- KzgCommitment: blobKzgCommitments[i][:],
- }
- pageData.Blobs[i] = blobData
- }
- }
-
- if requests, err := blockData.Block.ExecutionRequests(); err == nil && requests != nil {
- getSlotPageDepositRequests(pageData, requests.Deposits)
- getSlotPageWithdrawalRequests(pageData, requests.Withdrawals)
- getSlotPageConsolidationRequests(pageData, requests.Consolidations)
+	// Load execution payload bids and PTC votes for ePBS (gloas+) blocks
+ if blockData.Block.Version >= spec.DataVersionGloas {
+ getSlotPageBids(pageData)
+ getSlotPagePtcVotes(pageData, blockData, blockData.Header.Message.Slot)
}
return pageData
@@ -1028,8 +1124,14 @@ func getSlotPageDepositRequests(pageData *models.SlotPageBlockData, depositReque
if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(depositRequest.Pubkey)); found {
receiptData.Exists = true
- receiptData.ValidatorIndex = uint64(validatorIdx)
- receiptData.ValidatorName = services.GlobalBeaconService.GetValidatorName(receiptData.ValidatorIndex)
+ rawIndex := uint64(validatorIdx)
+ if rawIndex&services.BuilderIndexFlag != 0 {
+ receiptData.IsBuilder = true
+ receiptData.ValidatorIndex = rawIndex &^ services.BuilderIndexFlag
+ } else {
+ receiptData.ValidatorIndex = rawIndex
+ }
+ receiptData.ValidatorName = services.GlobalBeaconService.GetValidatorName(rawIndex)
}
pageData.DepositRequests = append(pageData.DepositRequests, receiptData)
@@ -1050,8 +1152,14 @@ func getSlotPageWithdrawalRequests(pageData *models.SlotPageBlockData, withdrawa
if validatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(withdrawalRequest.ValidatorPubkey)); found {
requestData.Exists = true
- requestData.ValidatorIndex = uint64(validatorIdx)
- requestData.ValidatorName = services.GlobalBeaconService.GetValidatorName(requestData.ValidatorIndex)
+ fullIndex := uint64(validatorIdx)
+ if fullIndex&services.BuilderIndexFlag != 0 {
+ requestData.IsBuilder = true
+ requestData.ValidatorIndex = fullIndex &^ services.BuilderIndexFlag
+ } else {
+ requestData.ValidatorIndex = fullIndex
+ }
+ requestData.ValidatorName = services.GlobalBeaconService.GetValidatorName(fullIndex)
}
pageData.WithdrawalRequests = append(pageData.WithdrawalRequests, requestData)
@@ -1072,14 +1180,26 @@ func getSlotPageConsolidationRequests(pageData *models.SlotPageBlockData, consol
if sourceValidatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(consolidationRequest.SourcePubkey)); found {
requestData.SourceFound = true
- requestData.SourceIndex = uint64(sourceValidatorIdx)
- requestData.SourceName = services.GlobalBeaconService.GetValidatorName(requestData.SourceIndex)
+ fullIndex := uint64(sourceValidatorIdx)
+ if fullIndex&services.BuilderIndexFlag != 0 {
+ requestData.SourceIsBuilder = true
+ requestData.SourceIndex = fullIndex &^ services.BuilderIndexFlag
+ } else {
+ requestData.SourceIndex = fullIndex
+ }
+ requestData.SourceName = services.GlobalBeaconService.GetValidatorName(fullIndex)
}
if targetValidatorIdx, found := services.GlobalBeaconService.GetValidatorIndexByPubkey(phase0.BLSPubKey(consolidationRequest.TargetPubkey)); found {
requestData.TargetFound = true
- requestData.TargetIndex = uint64(targetValidatorIdx)
- requestData.TargetName = services.GlobalBeaconService.GetValidatorName(requestData.TargetIndex)
+ fullIndex := uint64(targetValidatorIdx)
+ if fullIndex&services.BuilderIndexFlag != 0 {
+ requestData.TargetIsBuilder = true
+ requestData.TargetIndex = fullIndex &^ services.BuilderIndexFlag
+ } else {
+ requestData.TargetIndex = fullIndex
+ }
+ requestData.TargetName = services.GlobalBeaconService.GetValidatorName(fullIndex)
}
pageData.ConsolidationRequests = append(pageData.ConsolidationRequests, requestData)
@@ -1087,3 +1207,217 @@ func getSlotPageConsolidationRequests(pageData *models.SlotPageBlockData, consol
 	pageData.ConsolidationRequestsCount = uint64(len(pageData.ConsolidationRequests))
 }
+
+// getSlotPageBids collects all execution payload bids known for the block's
+// parent root, marks the bid matching the executed payload as winning and
+// sorts the result by total value (bid value + EL payment) descending.
+func getSlotPageBids(pageData *models.SlotPageBlockData) {
+	beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer()
+	bids := beaconIndexer.GetBlockBids(phase0.Root(pageData.ParentRoot))
+
+	pageData.Bids = make([]*models.SlotPageBid, 0, len(bids))
+
+	// The winning bid is the one whose block hash matches the payload that
+	// was actually executed for this slot (if any).
+	var winningBlockHash []byte
+	if pageData.ExecutionData != nil {
+		winningBlockHash = pageData.ExecutionData.BlockHash
+	}
+
+	for _, bid := range bids {
+		// NOTE(review): unlike SlotPagePayloadHeader.BuilderName, this name
+		// lookup does not OR in services.BuilderIndexFlag - confirm intended.
+		bidData := &models.SlotPageBid{
+			ParentRoot:   bid.ParentRoot,
+			ParentHash:   bid.ParentHash,
+			BlockHash:    bid.BlockHash,
+			FeeRecipient: bid.FeeRecipient,
+			GasLimit:     bid.GasLimit,
+			BuilderIndex: uint64(bid.BuilderIndex),
+			BuilderName:  services.GlobalBeaconService.GetValidatorName(uint64(bid.BuilderIndex)),
+			IsSelfBuilt:  bid.BuilderIndex < 0,
+			Slot:         bid.Slot,
+			Value:        bid.Value,
+			ElPayment:    bid.ElPayment,
+			TotalValue:   bid.Value + bid.ElPayment,
+		}
+
+		// Mark the bid whose block hash matches the executed payload.
+		bidData.IsWinning = winningBlockHash != nil && bytes.Equal(bid.BlockHash, winningBlockHash)
+
+		pageData.Bids = append(pageData.Bids, bidData)
+	}
+
+	// Sort by total value (value + el_payment) descending. Bid counts per
+	// slot are small, so a simple selection sort is sufficient here.
+	for i := 0; i < len(pageData.Bids)-1; i++ {
+		for j := i + 1; j < len(pageData.Bids); j++ {
+			if pageData.Bids[j].TotalValue > pageData.Bids[i].TotalValue {
+				pageData.Bids[i], pageData.Bids[j] = pageData.Bids[j], pageData.Bids[i]
+			}
+		}
+	}
+
+	pageData.BidsCount = uint64(len(pageData.Bids))
+}
+
+// getSlotPagePtcVotes extracts PTC (Payload Timeliness Committee) votes from a Gloas block.
+// PTC votes are included in blocks as payload attestations for the PREVIOUS slot.
+func getSlotPagePtcVotes(pageData *models.SlotPageBlockData, blockData *services.CombinedBlockResponse, blockSlot phase0.Slot) {
+	// Only Gloas+ blocks have payload attestations
+	if blockData.Block.Version < spec.DataVersionGloas || blockData.Block.Gloas == nil {
+		return
+	}
+
+	payloadAttestations := blockData.Block.Gloas.Message.Body.PayloadAttestations
+	if len(payloadAttestations) == 0 {
+		return
+	}
+
+	chainState := services.GlobalBeaconService.GetChainState()
+	specs := chainState.GetSpecs()
+
+	// PTC votes attest to the payload of the previous slot
+	votedSlot := blockSlot - 1
+	votedEpoch := chainState.EpochOfSlot(votedSlot)
+
+	// Get epoch stats for the voted slot to retrieve PTC duties
+	var ptcDuties []phase0.ValidatorIndex
+	beaconIndexer := services.GlobalBeaconService.GetBeaconIndexer()
+	epochStats := beaconIndexer.GetEpochStatsByEpoch(votedEpoch)
+	for _, es := range epochStats {
+		values := es.GetValues(true)
+		if values != nil && values.PtcDuties != nil {
+			slotInEpoch := uint64(votedSlot) % specs.SlotsPerEpoch
+			if slotInEpoch < uint64(len(values.PtcDuties)) && values.PtcDuties[slotInEpoch] != nil {
+				// Convert indices into the active validator set to absolute validator indices
+				ptcDuties = make([]phase0.ValidatorIndex, len(values.PtcDuties[slotInEpoch]))
+				for i, activeIdx := range values.PtcDuties[slotInEpoch] {
+					if int(activeIdx) < len(values.ActiveIndices) {
+						ptcDuties[i] = values.ActiveIndices[activeIdx]
+					}
+				}
+				break
+			}
+		}
+	}
+
+	// PTC_SIZE is a spec constant (512). The Bitvector is always PTC_SIZE bits.
+	// On small validator sets, validators appear multiple times in PTC duties
+	// via weighted selection, but voting is tracked by bit position.
+	ptcSize := specs.PtcSize
+
+	// Build PTC votes structure
+	ptcVotes := &models.SlotPagePtcVotes{
+		VotedSlot:    uint64(votedSlot),
+		TotalPtcSize: ptcSize,
+		Aggregates:   make([]*models.SlotPagePtcAggregate, 0, len(payloadAttestations)),
+	}
+
+	// Track voted bit positions across all aggregates
+	votedPositions := make(map[uint64]bool, ptcSize)
+	totalVotes := uint64(0)
+
+	for _, pa := range payloadAttestations {
+		if pa == nil || pa.Data == nil {
+			continue
+		}
+
+		// Set voted block root from first attestation
+		if ptcVotes.VotedBlockRoot == nil {
+			ptcVotes.VotedBlockRoot = pa.Data.BeaconBlockRoot[:]
+		}
+
+		aggregate := &models.SlotPagePtcAggregate{
+			PayloadPresent:    pa.Data.PayloadPresent,
+			BlobDataAvailable: pa.Data.BlobDataAvailable,
+			AggregationBits:   pa.AggregationBits,
+			Signature:         pa.Signature[:],
+			Validators:        make([]types.NamedValidator, 0),
+		}
+
+		// Count votes from aggregation bits and map to unique validators
+		bitCount := uint64(len(pa.AggregationBits)) * 8
+		if bitCount > ptcSize {
+			bitCount = ptcSize
+		}
+		aggValidatorSet := make(map[uint64]bool)
+		for i := uint64(0); i < bitCount; i++ {
+			if (pa.AggregationBits[i/8]>>(i%8))&1 == 1 {
+				votedPositions[i] = true
+				if int(i) < len(ptcDuties) {
+					vidx := uint64(ptcDuties[i])
+					if !aggValidatorSet[vidx] {
+						aggValidatorSet[vidx] = true
+						aggregate.Validators = append(aggregate.Validators, types.NamedValidator{
+							Index: vidx,
+							Name:  services.GlobalBeaconService.GetValidatorName(vidx),
+						})
+					}
+				}
+			}
+		}
+
+		aggregate.VoteCount = uint64(len(aggregate.Validators))
+		totalVotes += aggregate.VoteCount
+
+		ptcVotes.Aggregates = append(ptcVotes.Aggregates, aggregate)
+	}
+
+	// Calculate participation by unique validators, not bit positions.
+	// On small validator sets, the same validator occupies multiple PTC positions.
+	// A validator votes at their first PTC position only (ptc.index()), leaving
+	// duplicate positions unset. Count unique voters/non-voters for display.
+	voterSet := make(map[uint64]bool)
+	if len(ptcDuties) > 0 {
+		for i := range ptcDuties {
+			if votedPositions[uint64(i)] {
+				voterSet[uint64(ptcDuties[i])] = true
+			}
+		}
+
+		// Non-voters: unique validators with NO voted position at all
+		nonVoterSet := make(map[uint64]bool)
+		for _, vidx := range ptcDuties {
+			v := uint64(vidx)
+			if !voterSet[v] {
+				nonVoterSet[v] = true
+			}
+		}
+		nonVoters := make([]types.NamedValidator, 0, len(nonVoterSet))
+		for vidx := range nonVoterSet {
+			nonVoters = append(nonVoters, types.NamedValidator{
+				Index: vidx,
+				Name:  services.GlobalBeaconService.GetValidatorName(vidx),
+			})
+		}
+		ptcVotes.NonVoters = nonVoters
+		ptcVotes.NonVoterCount = uint64(len(nonVoters))
+	}
+
+	// Calculate participation rate based on unique validators
+	totalUniqueValidators := uint64(len(voterSet)) + ptcVotes.NonVoterCount
+	if totalUniqueValidators > 0 {
+		ptcVotes.TotalPtcSize = totalUniqueValidators
+		ptcVotes.Participation = float64(len(voterSet)) / float64(totalUniqueValidators)
+		ptcVotes.NonVoterPercent = float64(ptcVotes.NonVoterCount) / float64(totalUniqueValidators) * 100
+
+		// Recalculate aggregate vote percentages based on unique validators
+		for _, agg := range ptcVotes.Aggregates {
+			agg.VotePercent = float64(agg.VoteCount) / float64(totalUniqueValidators) * 100
+		}
+	} else if ptcSize > 0 {
+		// No duties available, use bit positions as approximation
+		totalVoted := uint64(len(votedPositions))
+		ptcVotes.NonVoterCount = ptcSize - totalVoted
+		ptcVotes.Participation = float64(totalVoted) / float64(ptcSize)
+		ptcVotes.NonVoterPercent = float64(ptcVotes.NonVoterCount) / float64(ptcSize) * 100
+
+		for _, agg := range ptcVotes.Aggregates {
+			agg.VotePercent = float64(agg.VoteCount) / float64(ptcSize) * 100
+		}
+	}
+
+	pageData.PtcVotes = ptcVotes
+	pageData.PtcVotesCount = totalVotes
+}
diff --git a/handlers/slots.go b/handlers/slots.go
index 8daa368fb..052400b3e 100644
--- a/handlers/slots.go
+++ b/handlers/slots.go
@@ -159,9 +159,6 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize uint64,
currentSlot := chainState.CurrentSlot()
currentEpoch := chainState.EpochOfSlot(currentSlot)
maxSlot := currentSlot + 8
- if maxSlot >= chainState.EpochToSlot(currentEpoch+1) {
- maxSlot = chainState.EpochToSlot(currentEpoch+1) - 1
- }
if firstSlot > uint64(maxSlot) {
pageData.IsDefaultPage = true
firstSlot = uint64(maxSlot)
@@ -255,12 +252,19 @@ func buildSlotsPageData(ctx context.Context, firstSlot uint64, pageSize uint64,
dbSlot := dbSlots[dbIdx]
dbIdx++
+ epoch := chainState.EpochOfSlot(phase0.Slot(slot))
+ payloadStatus := dbSlot.PayloadStatus
+ if !chainState.IsEip7732Enabled(phase0.Epoch(epoch)) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+
slotData := &models.SlotsPageDataSlot{
Slot: slot,
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))),
+ Epoch: uint64(epoch),
Ts: chainState.SlotToTime(phase0.Slot(slot)),
Finalized: finalized,
Status: uint8(dbSlot.Status),
+ PayloadStatus: uint8(payloadStatus),
Scheduled: slot >= uint64(currentSlot) && dbSlot.Status == dbtypes.Missing,
Synchronized: dbSlot.SyncParticipation != -1,
Proposer: dbSlot.Proposer,
diff --git a/handlers/slots_filtered.go b/handlers/slots_filtered.go
index 2cdcf8a07..83d3d042a 100644
--- a/handlers/slots_filtered.go
+++ b/handlers/slots_filtered.go
@@ -50,8 +50,8 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) {
var invertgraffiti bool
var invertextradata bool
var invertproposer bool
- var withOrphaned uint64
- var withMissing uint64
+ var statusMask uint64 = 0x07
+ var payloadMask uint64 = 0x07
var minSyncAgg string
var maxSyncAgg string
var minExecTime string
@@ -86,11 +86,11 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) {
if urlArgs.Has("f.pinvert") {
invertproposer = urlArgs.Get("f.pinvert") == "on"
}
- if urlArgs.Has("f.orphaned") {
- withOrphaned, _ = strconv.ParseUint(urlArgs.Get("f.orphaned"), 10, 64)
+ if urlArgs.Has("f.status") {
+ statusMask, _ = strconv.ParseUint(urlArgs.Get("f.status"), 0, 64)
}
- if urlArgs.Has("f.missing") {
- withMissing, _ = strconv.ParseUint(urlArgs.Get("f.missing"), 10, 64)
+ if urlArgs.Has("f.pstatus") {
+ payloadMask, _ = strconv.ParseUint(urlArgs.Get("f.pstatus"), 0, 64)
}
if urlArgs.Has("f.minsync") {
minSyncAgg = urlArgs.Get("f.minsync")
@@ -125,14 +125,11 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) {
if urlArgs.Has("f.maxepoch") {
maxEpoch = urlArgs.Get("f.maxepoch")
}
- } else {
- withOrphaned = 1
- withMissing = 1
}
var pageError error
pageError = services.GlobalCallRateLimiter.CheckCallLimit(r, 2)
if pageError == nil {
- data.Data, pageError = getFilteredSlotsPageData(pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, uint8(withOrphaned), uint8(withMissing), minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns)
+ data.Data, pageError = getFilteredSlotsPageData(pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, uint8(statusMask), uint8(payloadMask), minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns)
}
if pageError != nil {
handlePageError(w, r, pageError)
@@ -144,11 +141,11 @@ func SlotsFiltered(w http.ResponseWriter, r *http.Request) {
}
}
-func getFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, withOrphaned uint8, withMissing uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, displayColumns uint64) (*models.SlotsFilteredPageData, error) {
+func getFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, statusMask uint8, payloadMask uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, displayColumns uint64) (*models.SlotsFilteredPageData, error) {
pageData := &models.SlotsFilteredPageData{}
- pageCacheKey := fmt.Sprintf("slots_filtered:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, withOrphaned, withMissing, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns)
+ pageCacheKey := fmt.Sprintf("slots_filtered:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v:%v", pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, statusMask, payloadMask, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns)
pageRes, pageErr := services.GlobalFrontendCache.ProcessCachedPage(pageCacheKey, true, pageData, func(pageCall *services.FrontendCacheProcessingPage) interface{} {
- return buildFilteredSlotsPageData(pageCall.CallCtx, pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, withOrphaned, withMissing, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns)
+ return buildFilteredSlotsPageData(pageCall.CallCtx, pageIdx, pageSize, graffiti, invertgraffiti, extradata, invertextradata, proposer, pname, invertproposer, statusMask, payloadMask, minSyncAgg, maxSyncAgg, minExecTime, maxExecTime, minTxCount, maxTxCount, minBlobCount, maxBlobCount, forkIds, minEpoch, maxEpoch, displayColumns)
})
if pageErr == nil && pageRes != nil {
resData, resOk := pageRes.(*models.SlotsFilteredPageData)
@@ -160,7 +157,7 @@ func getFilteredSlotsPageData(pageIdx uint64, pageSize uint64, graffiti string,
return pageData, pageErr
}
-func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, withOrphaned uint8, withMissing uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, displayColumns uint64) *models.SlotsFilteredPageData {
+func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize uint64, graffiti string, invertgraffiti bool, extradata string, invertextradata bool, proposer string, pname string, invertproposer bool, statusMask uint8, payloadMask uint8, minSyncAgg string, maxSyncAgg string, minExecTime string, maxExecTime string, minTxCount string, maxTxCount string, minBlobCount string, maxBlobCount string, forkIds string, minEpoch string, maxEpoch string, displayColumns uint64) *models.SlotsFilteredPageData {
chainState := services.GlobalBeaconService.GetChainState()
filterArgs := url.Values{}
if graffiti != "" {
@@ -184,11 +181,11 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui
if invertproposer {
filterArgs.Add("f.pinvert", "on")
}
- if withOrphaned != 0 {
- filterArgs.Add("f.orphaned", fmt.Sprintf("%v", withOrphaned))
+ if statusMask != 0x07 {
+ filterArgs.Add("f.status", fmt.Sprintf("0x%x", statusMask))
}
- if withMissing != 0 {
- filterArgs.Add("f.missing", fmt.Sprintf("%v", withMissing))
+ if payloadMask != 0x07 {
+ filterArgs.Add("f.pstatus", fmt.Sprintf("0x%x", payloadMask))
}
if minSyncAgg != "" {
filterArgs.Add("f.minsync", minSyncAgg)
@@ -270,27 +267,52 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui
filterArgs.Add("d", fmt.Sprintf("0x%x", displayMask))
}
+ // Map statusMask to WithOrphaned/WithMissing for DB filter
+ hasMissing := statusMask&0x01 != 0
+ hasCanonical := statusMask&0x02 != 0
+ hasOrphaned := statusMask&0x04 != 0
+
+ var withMissing, withOrphaned uint8
+ if !hasMissing {
+ withMissing = 0
+ } else if !hasCanonical && !hasOrphaned {
+ withMissing = 2
+ } else {
+ withMissing = 1
+ }
+ if !hasOrphaned {
+ withOrphaned = 0
+ } else if !hasCanonical && !hasMissing {
+ withOrphaned = 2
+ } else {
+ withOrphaned = 1
+ }
+
pageData := &models.SlotsFilteredPageData{
- FilterGraffiti: graffiti,
- FilterExtraData: extradata,
- FilterProposer: proposer,
- FilterProposerName: pname,
- FilterInvertGraffiti: invertgraffiti,
- FilterInvertExtraData: invertextradata,
- FilterInvertProposer: invertproposer,
- FilterWithOrphaned: withOrphaned,
- FilterWithMissing: withMissing,
- FilterMinSyncAgg: minSyncAgg,
- FilterMaxSyncAgg: maxSyncAgg,
- FilterMinExecTime: minExecTime,
- FilterMaxExecTime: maxExecTime,
- FilterMinTxCount: minTxCount,
- FilterMaxTxCount: maxTxCount,
- FilterMinBlobCount: minBlobCount,
- FilterMaxBlobCount: maxBlobCount,
- FilterForkIds: forkIds,
- FilterMinEpoch: minEpoch,
- FilterMaxEpoch: maxEpoch,
+ FilterGraffiti: graffiti,
+ FilterExtraData: extradata,
+ FilterProposer: proposer,
+ FilterProposerName: pname,
+ FilterInvertGraffiti: invertgraffiti,
+ FilterInvertExtraData: invertextradata,
+ FilterInvertProposer: invertproposer,
+ FilterStatusMissing: hasMissing,
+ FilterStatusCanonical: hasCanonical,
+ FilterStatusOrphaned: hasOrphaned,
+ FilterPayloadMissing: payloadMask&0x01 != 0,
+ FilterPayloadCanonical: payloadMask&0x02 != 0,
+ FilterPayloadOrphaned: payloadMask&0x04 != 0,
+ FilterMinSyncAgg: minSyncAgg,
+ FilterMaxSyncAgg: maxSyncAgg,
+ FilterMinExecTime: minExecTime,
+ FilterMaxExecTime: maxExecTime,
+ FilterMinTxCount: minTxCount,
+ FilterMaxTxCount: maxTxCount,
+ FilterMinBlobCount: minBlobCount,
+ FilterMaxBlobCount: maxBlobCount,
+ FilterForkIds: forkIds,
+ FilterMinEpoch: minEpoch,
+ FilterMaxEpoch: maxEpoch,
DisplayEpoch: displayMap[1],
DisplaySlot: displayMap[2],
@@ -346,6 +368,7 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui
InvertProposer: invertproposer,
WithOrphaned: withOrphaned,
WithMissing: withMissing,
+ WithPayloadMask: dbtypes.PayloadStatusMask(payloadMask),
}
if proposer != "" {
pidx, _ := strconv.ParseUint(proposer, 10, 64)
@@ -436,12 +459,7 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui
}
}
- withScheduledCount := chainState.GetSpecs().SlotsPerEpoch - uint64(chainState.SlotToSlotIndex(currentSlot)) - 1
- if withScheduledCount > 16 {
- withScheduledCount = 16
- }
-
- dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, blockFilter, pageIdx, uint32(pageSize), withScheduledCount)
+ dbBlocks := services.GlobalBeaconService.GetDbBlocksByFilter(ctx, blockFilter, pageIdx, uint32(pageSize), 16)
mevBlocksMap := make(map[string]*dbtypes.MevBlock)
if pageData.DisplayMevBlock {
@@ -465,12 +483,13 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui
break
}
slot := phase0.Slot(dbBlock.Slot)
+ epoch := chainState.EpochOfSlot(slot)
slotData := &models.SlotsFilteredPageDataSlot{
Slot: uint64(slot),
- Epoch: uint64(chainState.EpochOfSlot(slot)),
+ Epoch: uint64(epoch),
Ts: chainState.SlotToTime(slot),
- Finalized: finalizedEpoch >= chainState.EpochOfSlot(slot),
+ Finalized: finalizedEpoch >= epoch,
Synchronized: true,
Scheduled: slot >= currentSlot,
Proposer: dbBlock.Proposer,
@@ -502,6 +521,12 @@ func buildFilteredSlotsPageData(ctx context.Context, pageIdx uint64, pageSize ui
slotData.EthBlockNumber = *dbBlock.Block.EthBlockNumber
}
+ payloadStatus := dbBlock.Block.PayloadStatus
+ if !chainState.IsEip7732Enabled(epoch) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+ slotData.PayloadStatus = uint8(payloadStatus)
+
if pageData.DisplayMevBlock && dbBlock.Block.EthBlockHash != nil {
if mevBlock, exists := mevBlocksMap[fmt.Sprintf("%x", dbBlock.Block.EthBlockHash)]; exists {
slotData.IsMevBlock = true
diff --git a/handlers/validator_slots.go b/handlers/validator_slots.go
index 3cf97d9a4..d4cb6417d 100644
--- a/handlers/validator_slots.go
+++ b/handlers/validator_slots.go
@@ -113,12 +113,13 @@ func buildValidatorSlotsPageData(ctx context.Context, validator uint64, pageIdx
break
}
slot := blockAssignment.Slot
+ epoch := chainState.EpochOfSlot(phase0.Slot(slot))
slotData := &models.ValidatorSlotsPageDataSlot{
Slot: slot,
- Epoch: uint64(chainState.EpochOfSlot(phase0.Slot(slot))),
+ Epoch: uint64(epoch),
Ts: chainState.SlotToTime(phase0.Slot(slot)),
- Finalized: finalizedEpoch >= chainState.EpochOfSlot(phase0.Slot(slot)),
+ Finalized: finalizedEpoch >= epoch,
Status: uint8(0),
Proposer: validator,
ProposerName: pageData.Name,
@@ -141,6 +142,12 @@ func buildValidatorSlotsPageData(ctx context.Context, validator uint64, pageIdx
slotData.WithEthBlock = true
slotData.EthBlockNumber = *dbBlock.EthBlockNumber
}
+
+ payloadStatus := dbBlock.PayloadStatus
+ if !chainState.IsEip7732Enabled(epoch) {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ }
+ slotData.PayloadStatus = uint8(payloadStatus)
}
pageData.Slots = append(pageData.Slots, slotData)
}
diff --git a/handlers/voluntary_exits.go b/handlers/voluntary_exits.go
index 9b8c883b7..53a6d1d87 100644
--- a/handlers/voluntary_exits.go
+++ b/handlers/voluntary_exits.go
@@ -9,6 +9,7 @@ import (
"strings"
v1 "github.com/attestantio/go-eth2-client/api/v1"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/dbtypes"
"github.com/ethpandaops/dora/services"
@@ -164,40 +165,67 @@ func buildFilteredVoluntaryExitsPageData(ctx context.Context, pageIdx uint64, pa
SlotRoot: voluntaryExit.SlotRoot,
Time: chainState.SlotToTime(phase0.Slot(voluntaryExit.SlotNumber)),
Orphaned: voluntaryExit.Orphaned,
- ValidatorIndex: voluntaryExit.ValidatorIndex,
- ValidatorName: services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex),
ValidatorStatus: "",
}
- validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false)
- if validator == nil {
- voluntaryExitData.ValidatorStatus = "Unknown"
- } else {
- voluntaryExitData.PublicKey = validator.Validator.PublicKey[:]
- voluntaryExitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials
-
- if strings.HasPrefix(validator.Status.String(), "pending") {
- voluntaryExitData.ValidatorStatus = "Pending"
- } else if validator.Status == v1.ValidatorStateActiveOngoing {
- voluntaryExitData.ValidatorStatus = "Active"
- voluntaryExitData.ShowUpcheck = true
- } else if validator.Status == v1.ValidatorStateActiveExiting {
- voluntaryExitData.ValidatorStatus = "Exiting"
- voluntaryExitData.ShowUpcheck = true
- } else if validator.Status == v1.ValidatorStateActiveSlashed {
- voluntaryExitData.ValidatorStatus = "Slashed"
- voluntaryExitData.ShowUpcheck = true
- } else if validator.Status == v1.ValidatorStateExitedUnslashed {
- voluntaryExitData.ValidatorStatus = "Exited"
- } else if validator.Status == v1.ValidatorStateExitedSlashed {
- voluntaryExitData.ValidatorStatus = "Slashed"
+ // Check if this is a builder exit (validator index has BuilderIndexFlag set)
+ if voluntaryExit.ValidatorIndex&services.BuilderIndexFlag != 0 {
+ builderIndex := voluntaryExit.ValidatorIndex &^ services.BuilderIndexFlag
+ voluntaryExitData.IsBuilder = true
+ voluntaryExitData.ValidatorIndex = builderIndex
+
+ // Resolve builder name via validatornames service (with BuilderIndexFlag)
+ voluntaryExitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex)
+
+ builder := services.GlobalBeaconService.GetBuilderByIndex(gloas.BuilderIndex(builderIndex))
+ if builder == nil {
+ voluntaryExitData.ValidatorStatus = "Unknown"
} else {
- voluntaryExitData.ValidatorStatus = validator.Status.String()
+ voluntaryExitData.PublicKey = builder.PublicKey[:]
+
+ // Determine builder status
+ currentEpoch := chainState.CurrentEpoch()
+ if builder.WithdrawableEpoch <= currentEpoch {
+ voluntaryExitData.ValidatorStatus = "Exited"
+ } else {
+ voluntaryExitData.ValidatorStatus = "Exiting"
+ }
}
+ } else {
+ // Regular validator exit
+ voluntaryExitData.ValidatorIndex = voluntaryExit.ValidatorIndex
+ voluntaryExitData.ValidatorName = services.GlobalBeaconService.GetValidatorName(voluntaryExit.ValidatorIndex)
+
+ validator := services.GlobalBeaconService.GetValidatorByIndex(phase0.ValidatorIndex(voluntaryExit.ValidatorIndex), false)
+ if validator == nil {
+ voluntaryExitData.ValidatorStatus = "Unknown"
+ } else {
+ voluntaryExitData.PublicKey = validator.Validator.PublicKey[:]
+ voluntaryExitData.WithdrawalCreds = validator.Validator.WithdrawalCredentials
+
+ if strings.HasPrefix(validator.Status.String(), "pending") {
+ voluntaryExitData.ValidatorStatus = "Pending"
+ } else if validator.Status == v1.ValidatorStateActiveOngoing {
+ voluntaryExitData.ValidatorStatus = "Active"
+ voluntaryExitData.ShowUpcheck = true
+ } else if validator.Status == v1.ValidatorStateActiveExiting {
+ voluntaryExitData.ValidatorStatus = "Exiting"
+ voluntaryExitData.ShowUpcheck = true
+ } else if validator.Status == v1.ValidatorStateActiveSlashed {
+ voluntaryExitData.ValidatorStatus = "Slashed"
+ voluntaryExitData.ShowUpcheck = true
+ } else if validator.Status == v1.ValidatorStateExitedUnslashed {
+ voluntaryExitData.ValidatorStatus = "Exited"
+ } else if validator.Status == v1.ValidatorStateExitedSlashed {
+ voluntaryExitData.ValidatorStatus = "Slashed"
+ } else {
+ voluntaryExitData.ValidatorStatus = validator.Status.String()
+ }
- if voluntaryExitData.ShowUpcheck {
- voluntaryExitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3))
- voluntaryExitData.UpcheckMaximum = uint8(3)
+ if voluntaryExitData.ShowUpcheck {
+ voluntaryExitData.UpcheckActivity = uint8(services.GlobalBeaconService.GetValidatorLiveness(validator.Index, 3))
+ voluntaryExitData.UpcheckMaximum = uint8(3)
+ }
}
}
diff --git a/handlers/withdrawals.go b/handlers/withdrawals.go
index 2be5fbf20..49fff3ed6 100644
--- a/handlers/withdrawals.go
+++ b/handlers/withdrawals.go
@@ -141,7 +141,12 @@ func buildWithdrawalsPageData(ctx context.Context, firstEpoch uint64, pageSize u
}
if validatorIndex := withdrawal.ValidatorIndex(); validatorIndex != nil {
- withdrawalData.ValidatorIndex = *validatorIndex
+ if *validatorIndex&services.BuilderIndexFlag != 0 {
+ withdrawalData.IsBuilder = true
+ withdrawalData.ValidatorIndex = *validatorIndex &^ services.BuilderIndexFlag
+ } else {
+ withdrawalData.ValidatorIndex = *validatorIndex
+ }
withdrawalData.ValidatorName = services.GlobalBeaconService.GetValidatorName(*validatorIndex)
withdrawalData.ValidatorValid = true
}
diff --git a/indexer/beacon/bidcache.go b/indexer/beacon/bidcache.go
new file mode 100644
index 000000000..3cdbe4bc5
--- /dev/null
+++ b/indexer/beacon/bidcache.go
@@ -0,0 +1,211 @@
+package beacon
+
+import (
+ "sync"
+
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/jmoiron/sqlx"
+)
+
+const (
+ // bidCacheMaxSlots is the maximum accepted bid age in slots; AddBid ignores bids more than this many slots behind maxSlot
+ bidCacheMaxSlots = 15
+ // bidCacheFlushThreshold is the slot span that triggers a flush
+ bidCacheFlushThreshold = 15
+ // bidCacheRetainSlots is the number of slots to retain after a flush
+ bidCacheRetainSlots = 10
+)
+
+// bidCacheKey uniquely identifies a bid in the cache
+type bidCacheKey struct {
+ ParentRoot phase0.Root
+ ParentHash phase0.Hash32
+ BlockHash phase0.Hash32
+ BuilderIndex int64
+}
+
+// blockBidCache caches execution payload bids for recent blocks.
+// Bids for older slots are ignored. The cache is flushed to DB on shutdown
+// or when the slot span exceeds the threshold.
+type blockBidCache struct {
+ indexer *Indexer
+ cacheMutex sync.RWMutex
+ bids map[bidCacheKey]*dbtypes.BlockBid
+ minSlot phase0.Slot
+ maxSlot phase0.Slot
+}
+
+// newBlockBidCache creates a new instance of blockBidCache.
+func newBlockBidCache(indexer *Indexer) *blockBidCache {
+ return &blockBidCache{
+ indexer: indexer,
+ bids: make(map[bidCacheKey]*dbtypes.BlockBid, 64),
+ }
+}
+
+// loadFromDB loads bids for the most recent slots (back to currentSlot-bidCacheRetainSlots) from the database.
+func (cache *blockBidCache) loadFromDB(currentSlot phase0.Slot) {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ minSlot := phase0.Slot(0)
+ if currentSlot > bidCacheRetainSlots {
+ minSlot = currentSlot - bidCacheRetainSlots
+ }
+
+ dbBids := db.GetBidsForSlotRange(cache.indexer.ctx, uint64(minSlot))
+ for _, bid := range dbBids {
+ key := bidCacheKey{
+ ParentRoot: phase0.Root(bid.ParentRoot),
+ ParentHash: phase0.Hash32(bid.ParentHash),
+ BlockHash: phase0.Hash32(bid.BlockHash),
+ BuilderIndex: bid.BuilderIndex,
+ }
+ cache.bids[key] = bid
+
+ slot := phase0.Slot(bid.Slot)
+ if cache.minSlot == 0 || slot < cache.minSlot {
+ cache.minSlot = slot
+ }
+ if slot > cache.maxSlot {
+ cache.maxSlot = slot
+ }
+ }
+
+ if len(dbBids) > 0 {
+ cache.indexer.logger.Infof("loaded %d bids from DB (slots %d-%d)", len(dbBids), cache.minSlot, cache.maxSlot)
+ }
+}
+
+// AddBid adds a bid to the cache. Returns true if the bid was added,
+// false if it was ignored (too old) or already exists.
+func (cache *blockBidCache) AddBid(bid *dbtypes.BlockBid) bool {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ slot := phase0.Slot(bid.Slot)
+
+ // Ignore bids for slots that are too old
+ if cache.maxSlot > 0 && slot+bidCacheMaxSlots < cache.maxSlot {
+ return false
+ }
+
+ key := bidCacheKey{
+ ParentRoot: phase0.Root(bid.ParentRoot),
+ ParentHash: phase0.Hash32(bid.ParentHash),
+ BlockHash: phase0.Hash32(bid.BlockHash),
+ BuilderIndex: bid.BuilderIndex,
+ }
+
+ // Check if bid already exists
+ if _, exists := cache.bids[key]; exists {
+ return false
+ }
+
+ cache.bids[key] = bid
+
+ // Update slot bounds
+ if cache.minSlot == 0 || slot < cache.minSlot {
+ cache.minSlot = slot
+ }
+ if slot > cache.maxSlot {
+ cache.maxSlot = slot
+ }
+
+ return true
+}
+
+// GetBidsForBlockRoot returns all bids for a given parent block root.
+func (cache *blockBidCache) GetBidsForBlockRoot(blockRoot phase0.Root) []*dbtypes.BlockBid {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ result := make([]*dbtypes.BlockBid, 0)
+ for key, bid := range cache.bids {
+ if key.ParentRoot == blockRoot {
+ result = append(result, bid)
+ }
+ }
+
+ return result
+}
+
+// checkAndFlush checks if the cache needs to be flushed and performs the flush if necessary.
+// This should be called periodically (e.g., on each new block).
+func (cache *blockBidCache) checkAndFlush() error {
+ cache.cacheMutex.Lock()
+
+ // Check if we need to flush
+ if cache.maxSlot == 0 || cache.maxSlot-cache.minSlot < bidCacheFlushThreshold {
+ cache.cacheMutex.Unlock()
+ return nil
+ }
+
+ // Calculate the cutoff slot - we'll flush bids older than this
+ cutoffSlot := cache.maxSlot - bidCacheRetainSlots
+
+ // Collect bids to flush (from minSlot to cutoffSlot)
+ bidsToFlush := make([]*dbtypes.BlockBid, 0)
+ for key, bid := range cache.bids {
+ if phase0.Slot(bid.Slot) < cutoffSlot {
+ bidsToFlush = append(bidsToFlush, bid)
+ delete(cache.bids, key)
+ }
+ }
+
+ // Update minSlot
+ cache.minSlot = cutoffSlot
+
+ cache.cacheMutex.Unlock()
+
+ // Write to DB outside of lock
+ if len(bidsToFlush) > 0 {
+ err := db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ return db.InsertBids(bidsToFlush, tx)
+ })
+ if err != nil {
+ cache.indexer.logger.Errorf("error flushing bids to db: %v", err)
+ return err
+ }
+ cache.indexer.logger.Debugf("flushed %d bids to DB (slots < %d)", len(bidsToFlush), cutoffSlot)
+ }
+
+ return nil
+}
+
+// flushAll flushes all cached bids to the database.
+// This should be called on shutdown.
+func (cache *blockBidCache) flushAll() error {
+ cache.cacheMutex.Lock()
+
+ if len(cache.bids) == 0 {
+ cache.cacheMutex.Unlock()
+ return nil
+ }
+
+ bidsToFlush := make([]*dbtypes.BlockBid, 0, len(cache.bids))
+ for _, bid := range cache.bids {
+ bidsToFlush = append(bidsToFlush, bid)
+ }
+
+ // Clear the cache
+ cache.bids = make(map[bidCacheKey]*dbtypes.BlockBid, 64)
+ cache.minSlot = 0
+ cache.maxSlot = 0
+
+ cache.cacheMutex.Unlock()
+
+ // Write to DB outside of lock
+ err := db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ return db.InsertBids(bidsToFlush, tx)
+ })
+ if err != nil {
+ cache.indexer.logger.Errorf("error flushing all bids to db: %v", err)
+ return err
+ }
+
+ cache.indexer.logger.Infof("flushed %d bids to DB on shutdown", len(bidsToFlush))
+ return nil
+}
diff --git a/indexer/beacon/block.go b/indexer/beacon/block.go
index c3914047e..b2d4de18b 100644
--- a/indexer/beacon/block.go
+++ b/indexer/beacon/block.go
@@ -3,11 +3,13 @@ package beacon
import (
"context"
"fmt"
+ "math"
"math/rand/v2"
"sync"
"time"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
btypes "github.com/ethpandaops/dora/blockdb/types"
@@ -20,36 +22,41 @@ import (
// Block represents a beacon block.
type Block struct {
- Root phase0.Root
- Slot phase0.Slot
- BlockUID uint64
- dynSsz *dynssz.DynSsz
- parentRoot *phase0.Root
- dependentRoot *phase0.Root
- forkId ForkKey
- forkChecked bool
- headerMutex sync.Mutex
- headerChan chan bool
- header *phase0.SignedBeaconBlockHeader
- blockMutex sync.Mutex
- blockChan chan bool
- block *spec.VersionedSignedBeaconBlock
- blockIndex *BlockBodyIndex
- recvDelay int32
- executionTimes []ExecutionTime // execution times from snooper clients
- minExecutionTime uint16
- maxExecutionTime uint16
- execTimeUpdate *time.Ticker
- executionTimesMux sync.RWMutex
- isInFinalizedDb bool // block is in finalized table (slots)
- isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks)
- isDisposed bool // block is disposed
- processingStatus dbtypes.UnfinalizedBlockStatus
- seenMutex sync.RWMutex
- seenMap map[uint16]*Client
- processedActivity uint8
- blockResults [][]uint8
- blockResultsMutex sync.Mutex
+ Root phase0.Root
+ Slot phase0.Slot
+ BlockUID uint64
+ dynSsz *dynssz.DynSsz
+ parentRoot *phase0.Root
+ dependentRoot *phase0.Root
+ forkId ForkKey
+ forkChecked bool
+ headerMutex sync.Mutex
+ headerChan chan bool
+ header *phase0.SignedBeaconBlockHeader
+ blockMutex sync.Mutex
+ blockChan chan bool
+ block *spec.VersionedSignedBeaconBlock
+ executionPayloadMutex sync.Mutex
+ executionPayloadChan chan bool
+ executionPayload *gloas.SignedExecutionPayloadEnvelope
+ blockIndex *BlockBodyIndex
+ recvDelay int32
+ executionTimes []ExecutionTime // execution times from snooper clients
+ minExecutionTime uint16
+ maxExecutionTime uint16
+ execTimeUpdate *time.Ticker
+ executionTimesMux sync.RWMutex
+ isInFinalizedDb bool // block is in finalized table (slots)
+ isInUnfinalizedDb bool // block is in unfinalized table (unfinalized_blocks)
+ hasExecutionPayload bool // block has an execution payload (either in cache or db)
+ isPayloadOrphaned bool // payload is orphaned (next block doesn't build on it)
+ isDisposed bool // block is disposed
+ processingStatus dbtypes.UnfinalizedBlockStatus
+ seenMutex sync.RWMutex
+ seenMap map[uint16]*Client
+ processedActivity uint8
+ blockResults [][]uint8
+ blockResultsMutex sync.Mutex
}
// BlockBodyIndex holds important block properties that are used as index for cache lookups.
@@ -58,10 +65,12 @@ type BlockBodyIndex struct {
Graffiti [32]byte
ExecutionExtraData []byte
ExecutionHash phase0.Hash32
+ ExecutionParentHash phase0.Hash32
ExecutionNumber uint64
SyncParticipation float32
EthTransactionCount uint64
BlobCount uint64
+ BuilderIndex uint64
GasUsed uint64
GasLimit uint64
BlockSize uint64
@@ -69,21 +78,16 @@ type BlockBodyIndex struct {
// newBlock creates a new Block instance.
func newBlock(dynSsz *dynssz.DynSsz, root phase0.Root, slot phase0.Slot, blockUID uint64) *Block {
- if blockUID == 0 {
- // use highest possible block UID as default
- blockUID = (uint64(slot) << 16) | 0xffff
+ return &Block{
+ Root: root,
+ Slot: slot,
+ BlockUID: blockUID,
+ dynSsz: dynSsz,
+ seenMap: make(map[uint16]*Client),
+ headerChan: make(chan bool),
+ blockChan: make(chan bool),
+ executionPayloadChan: make(chan bool),
}
- block := &Block{
- Root: root,
- Slot: slot,
- BlockUID: blockUID,
- dynSsz: dynSsz,
- seenMap: make(map[uint16]*Client),
- headerChan: make(chan bool),
- blockChan: make(chan bool),
- }
-
- return block
}
func (block *Block) Dispose() {
@@ -170,7 +174,7 @@ func (block *Block) GetBlock(ctx context.Context) *spec.VersionedSignedBeaconBlo
}
if block.isInUnfinalizedDb {
- dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:])
+ dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, true, false)
if dbBlock != nil {
blockBody, err := UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ)
if err == nil {
@@ -188,6 +192,10 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec
return nil
}
+ if block.block != nil {
+ return block.block
+ }
+
if ctx == nil {
ctx = context.Background()
}
@@ -201,6 +209,45 @@ func (block *Block) AwaitBlock(ctx context.Context, timeout time.Duration) *spec
return block.block
}
+// GetExecutionPayload returns the execution payload of this block.
+func (block *Block) GetExecutionPayload(ctx context.Context) *gloas.SignedExecutionPayloadEnvelope {
+ if block.executionPayload != nil {
+ return block.executionPayload
+ }
+
+ if block.hasExecutionPayload && block.isInUnfinalizedDb {
+ dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, false, true)
+ if dbBlock != nil {
+ payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ)
+ if err == nil {
+ return payload
+ }
+ }
+ }
+
+ return nil
+}
+
+// AwaitExecutionPayload waits for the execution payload of this block to be available. NOTE(review): once the payload is set, executionPayloadChan is nil and the select blocks on a nil channel until timeout/ctx — consider an early-return check on executionPayload like AwaitBlock has.
+func (block *Block) AwaitExecutionPayload(ctx context.Context, timeout time.Duration) *gloas.SignedExecutionPayloadEnvelope {
+ if ctx == nil {
+ ctx = context.Background()
+ }
+
+ select {
+ case <-block.executionPayloadChan:
+ case <-time.After(timeout):
+ case <-ctx.Done():
+ }
+
+ return block.executionPayload
+}
+
+// HasExecutionPayload returns true if this block has an execution payload.
+func (block *Block) HasExecutionPayload() bool {
+ return block.hasExecutionPayload
+}
+
// GetParentRoot returns the parent root of this block.
func (block *Block) GetParentRoot() *phase0.Root {
if block.isDisposed {
@@ -264,7 +311,7 @@ func (block *Block) SetBlock(body *spec.VersionedSignedBeaconBlock) {
return
}
- block.setBlockIndex(body)
+ block.setBlockIndex(body, nil)
block.block = body
if block.blockChan != nil {
@@ -295,7 +342,7 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo
return false, err
}
- block.setBlockIndex(blockBody)
+ block.setBlockIndex(blockBody, nil)
block.block = blockBody
if block.blockChan != nil {
close(block.blockChan)
@@ -305,35 +352,106 @@ func (block *Block) EnsureBlock(loadBlock func() (*spec.VersionedSignedBeaconBlo
return true, nil
}
+// SetExecutionPayload sets the execution payload of this block. NOTE(review): if block.block is still nil here, setBlockIndex returns early and the payload fields are not indexed until GetBlockIndex re-derives them — confirm this ordering is intended.
+func (block *Block) SetExecutionPayload(payload *gloas.SignedExecutionPayloadEnvelope) {
+ block.setBlockIndex(block.block, payload)
+ block.executionPayload = payload
+ block.hasExecutionPayload = true
+
+ if block.executionPayloadChan != nil {
+ close(block.executionPayloadChan)
+ block.executionPayloadChan = nil
+ }
+}
+
+// EnsureExecutionPayload ensures that the execution payload of this block is available.
+func (block *Block) EnsureExecutionPayload(loadExecutionPayload func() (*gloas.SignedExecutionPayloadEnvelope, error)) (bool, error) {
+ if block.executionPayload != nil {
+ return false, nil
+ }
+
+ if block.hasExecutionPayload {
+ return false, nil
+ }
+
+ block.executionPayloadMutex.Lock()
+ defer block.executionPayloadMutex.Unlock()
+
+ if block.executionPayload != nil {
+ return false, nil
+ }
+
+ payload, err := loadExecutionPayload()
+ if err != nil {
+ return false, err
+ }
+
+ if payload == nil {
+ return false, nil
+ }
+
+ block.setBlockIndex(block.block, payload)
+ block.executionPayload = payload
+ block.hasExecutionPayload = true
+ if block.executionPayloadChan != nil {
+ close(block.executionPayloadChan)
+ block.executionPayloadChan = nil
+ }
+
+ return true, nil
+}
+
// setBlockIndex sets the block index of this block.
-func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock) {
+func (block *Block) setBlockIndex(body *spec.VersionedSignedBeaconBlock, payload *gloas.SignedExecutionPayloadEnvelope) {
if body == nil {
return
}
- blockIndex := &BlockBodyIndex{}
- blockIndex.Graffiti, _ = body.Graffiti()
+ blockIndex := block.blockIndex
+ if blockIndex == nil {
+ blockIndex = &BlockBodyIndex{}
+ }
+
+ if body != nil {
+ blockIndex.Graffiti, _ = body.Graffiti()
+ blockIndex.ExecutionExtraData, _ = getBlockExecutionExtraData(body)
+ blockIndex.ExecutionHash, _ = body.ExecutionBlockHash()
+ if execNumber, err := body.ExecutionBlockNumber(); err == nil {
+ blockIndex.ExecutionNumber = uint64(execNumber)
+ }
+ if transactions, err := body.ExecutionTransactions(); err == nil {
+ blockIndex.EthTransactionCount = uint64(len(transactions))
+ }
+ if blobKzgCommitments, err := body.BlobKZGCommitments(); err == nil {
+ blockIndex.BlobCount = uint64(len(blobKzgCommitments))
+ }
+ if builderIndex, err := getBlockPayloadBuilderIndex(body); err == nil {
+ blockIndex.BuilderIndex = uint64(builderIndex)
+ } else {
+ blockIndex.BuilderIndex = math.MaxUint64
+ }
+ if parentHash, err := getBlockExecutionParentHash(body); err == nil {
+ blockIndex.ExecutionParentHash = parentHash
+ }
+ if executionPayload, err := body.ExecutionPayload(); err == nil {
+ gasUsed, _ := executionPayload.GasUsed()
+ blockIndex.GasUsed = gasUsed
- executionPayload, _ := body.ExecutionPayload()
- if executionPayload != nil {
- blockIndex.ExecutionExtraData, _ = executionPayload.ExtraData()
- blockIndex.ExecutionHash, _ = executionPayload.BlockHash()
- blockIndex.ExecutionNumber, _ = executionPayload.BlockNumber()
+ gasLimit, _ := executionPayload.GasLimit()
+ blockIndex.GasLimit = gasLimit
+ }
+ }
+ if payload != nil {
+ blockIndex.ExecutionNumber = uint64(payload.Message.Payload.BlockNumber)
+ blockIndex.ExecutionParentHash = payload.Message.Payload.ParentHash
// Calculate transaction count
- executionTransactions, _ := executionPayload.Transactions()
+ executionTransactions := payload.Message.Payload.Transactions
blockIndex.EthTransactionCount = uint64(len(executionTransactions))
- // Calculate blob count
- blobKzgCommitments, _ := body.BlobKZGCommitments()
- blockIndex.BlobCount = uint64(len(blobKzgCommitments))
-
// Get gas used and gas limit
- gasUsed, _ := executionPayload.GasUsed()
- blockIndex.GasUsed = gasUsed
-
- gasLimit, _ := executionPayload.GasLimit()
- blockIndex.GasLimit = gasLimit
+ blockIndex.GasUsed = payload.Message.Payload.GasUsed
+ blockIndex.GasLimit = payload.Message.Payload.GasLimit
}
// Calculate block size
@@ -372,7 +490,7 @@ func (block *Block) GetBlockIndex(ctx context.Context) *BlockBodyIndex {
blockBody := block.GetBlock(ctx)
if blockBody != nil {
- block.setBlockIndex(blockBody)
+ block.setBlockIndex(blockBody, block.GetExecutionPayload(ctx))
}
return block.blockIndex
@@ -399,7 +517,7 @@ func (block *Block) buildUnfinalizedBlock(ctx context.Context, compress bool) (*
return nil, fmt.Errorf("marshal exec times ssz failed: %v", err)
}
- return &dbtypes.UnfinalizedBlock{
+ unfinalizedBlock := &dbtypes.UnfinalizedBlock{
Root: block.Root[:],
Slot: uint64(block.Slot),
HeaderVer: 1,
@@ -413,7 +531,18 @@ func (block *Block) buildUnfinalizedBlock(ctx context.Context, compress bool) (*
MaxExecTime: uint32(block.maxExecutionTime),
ExecTimes: execTimesSSZ,
BlockUid: block.BlockUID,
- }, nil
+ }
+
+ if block.executionPayload != nil {
+ payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, compress)
+ if err != nil {
+ return nil, fmt.Errorf("marshal execution payload ssz failed: %v", err)
+ }
+ unfinalizedBlock.PayloadVer = payloadVer
+ unfinalizedBlock.PayloadSSZ = payloadSSZ
+ }
+
+ return unfinalizedBlock, nil
}
// buildOrphanedBlock builds an orphaned block from the block data.
@@ -432,14 +561,25 @@ func (block *Block) buildOrphanedBlock(ctx context.Context, compress bool) (*dbt
return nil, fmt.Errorf("marshal block ssz failed: %v", err)
}
- return &dbtypes.OrphanedBlock{
+ orphanedBlock := &dbtypes.OrphanedBlock{
Root: block.Root[:],
HeaderVer: 1,
HeaderSSZ: headerSSZ,
BlockVer: blockVer,
BlockSSZ: blockSSZ,
BlockUid: block.BlockUID,
- }, nil
+ }
+
+ if block.executionPayload != nil {
+ payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, compress)
+ if err != nil {
+ return nil, fmt.Errorf("marshal execution payload ssz failed: %v", err)
+ }
+ orphanedBlock.PayloadVer = payloadVer
+ orphanedBlock.PayloadSSZ = payloadSSZ
+ }
+
+ return orphanedBlock, nil
}
func (block *Block) writeToBlockDb(ctx context.Context) error {
@@ -447,7 +587,7 @@ func (block *Block) writeToBlockDb(ctx context.Context) error {
return nil
}
- _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) {
+ _, _, err := blockdb.GlobalBlockDb.AddBlockWithCallback(ctx, uint64(block.Slot), block.Root[:], func() (*btypes.BlockData, error) {
headerSSZ, err := block.header.MarshalSSZ()
if err != nil {
return nil, fmt.Errorf("marshal header ssz failed: %v", err)
@@ -478,9 +618,12 @@ func (block *Block) unpruneBlockBody(ctx context.Context) {
return
}
- dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:])
+ dbBlock := db.GetUnfinalizedBlock(ctx, block.Root[:], false, true, true)
if dbBlock != nil {
block.block, _ = UnmarshalVersionedSignedBeaconBlockSSZ(block.dynSsz, dbBlock.BlockVer, dbBlock.BlockSSZ)
+ if len(dbBlock.PayloadSSZ) > 0 {
+ block.executionPayload, _ = UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ)
+ }
}
}
diff --git a/indexer/beacon/block_helper.go b/indexer/beacon/block_helper.go
index c943ede1c..39deea640 100644
--- a/indexer/beacon/block_helper.go
+++ b/indexer/beacon/block_helper.go
@@ -10,6 +10,7 @@ import (
"github.com/attestantio/go-eth2-client/spec/capella"
"github.com/attestantio/go-eth2-client/spec/deneb"
"github.com/attestantio/go-eth2-client/spec/electra"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/utils"
dynssz "github.com/pk910/dynamic-ssz"
@@ -47,6 +48,9 @@ func MarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, block *spec.Ver
case spec.DataVersionFulu:
version = uint64(block.Version)
ssz, err = dynSsz.MarshalSSZ(block.Fulu)
+ case spec.DataVersionGloas:
+ version = uint64(block.Version)
+ ssz, err = dynSsz.MarshalSSZ(block.Gloas)
default:
err = fmt.Errorf("unknown block version")
}
@@ -118,6 +122,11 @@ func UnmarshalVersionedSignedBeaconBlockSSZ(dynSsz *dynssz.DynSsz, version uint6
if err := dynSsz.UnmarshalSSZ(block.Fulu, ssz); err != nil {
return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err)
}
+ case spec.DataVersionGloas:
+ block.Gloas = &gloas.SignedBeaconBlock{}
+ if err := dynSsz.UnmarshalSSZ(block.Gloas, ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err)
+ }
default:
return nil, fmt.Errorf("unknown block version")
}
@@ -148,6 +157,9 @@ func MarshalVersionedSignedBeaconBlockJson(block *spec.VersionedSignedBeaconBloc
case spec.DataVersionFulu:
version = uint64(block.Version)
jsonRes, err = block.Fulu.MarshalJSON()
+ case spec.DataVersionGloas:
+ version = uint64(block.Version)
+ jsonRes, err = block.Gloas.MarshalJSON()
default:
err = fmt.Errorf("unknown block version")
}
@@ -201,12 +213,195 @@ func unmarshalVersionedSignedBeaconBlockJson(version uint64, ssz []byte) (*spec.
if err := block.Fulu.UnmarshalJSON(ssz); err != nil {
return nil, fmt.Errorf("failed to decode fulu signed beacon block: %v", err)
}
+ case spec.DataVersionGloas:
+ block.Gloas = &gloas.SignedBeaconBlock{}
+ if err := block.Gloas.UnmarshalJSON(ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed beacon block: %v", err)
+ }
default:
return nil, fmt.Errorf("unknown block version")
}
return block, nil
}
+// MarshalVersionedSignedExecutionPayloadEnvelopeSSZ marshals a signed execution payload envelope using SSZ encoding.
+func MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, payload *gloas.SignedExecutionPayloadEnvelope, compress bool) (version uint64, ssz []byte, err error) {
+ if utils.Config.KillSwitch.DisableSSZEncoding {
+ // SSZ encoding disabled, use json instead
+ version, ssz, err = marshalVersionedSignedExecutionPayloadEnvelopeJson(payload)
+ } else {
+ // SSZ encoding
+ version = uint64(spec.DataVersionGloas)
+ ssz, err = dynSsz.MarshalSSZ(payload)
+ }
+
+ if compress {
+ ssz = compressBytes(ssz)
+ version |= compressionFlag
+ }
+
+ return
+}
+
+// UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ unmarshals a versioned signed execution payload envelope using SSZ encoding.
+func UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(dynSsz *dynssz.DynSsz, version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ if (version & compressionFlag) != 0 {
+ // decompress
+ if d, err := decompressBytes(ssz); err != nil {
+ return nil, fmt.Errorf("failed to decompress: %v", err)
+ } else {
+ ssz = d
+ version &= ^compressionFlag
+ }
+ }
+
+ if (version & jsonVersionFlag) != 0 {
+ // JSON encoding
+ return unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version, ssz)
+ }
+
+ if version != uint64(spec.DataVersionGloas) {
+ return nil, fmt.Errorf("unknown version")
+ }
+
+ // SSZ encoding
+ payload := &gloas.SignedExecutionPayloadEnvelope{}
+ if err := dynSsz.UnmarshalSSZ(payload, ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err)
+ }
+
+ return payload, nil
+}
+
+// marshalVersionedSignedExecutionPayloadEnvelopeJson marshals a versioned signed execution payload envelope using JSON encoding.
+func marshalVersionedSignedExecutionPayloadEnvelopeJson(payload *gloas.SignedExecutionPayloadEnvelope) (version uint64, jsonRes []byte, err error) {
+ version = uint64(spec.DataVersionGloas)
+ jsonRes, err = payload.MarshalJSON()
+
+ version |= jsonVersionFlag
+
+ return
+}
+
+// unmarshalVersionedSignedExecutionPayloadEnvelopeJson unmarshals a versioned signed execution payload envelope using JSON encoding.
+func unmarshalVersionedSignedExecutionPayloadEnvelopeJson(version uint64, ssz []byte) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ if version&jsonVersionFlag == 0 {
+ return nil, fmt.Errorf("no json encoding")
+ }
+
+ if version-jsonVersionFlag != uint64(spec.DataVersionGloas) {
+ return nil, fmt.Errorf("unknown version")
+ }
+
+ payload := &gloas.SignedExecutionPayloadEnvelope{}
+ if err := payload.UnmarshalJSON(ssz); err != nil {
+ return nil, fmt.Errorf("failed to decode gloas signed execution payload envelope: %v", err)
+ }
+ return payload, nil
+}
+
+// getBlockExecutionExtraData returns the extra data from the execution payload of a versioned signed beacon block.
+func getBlockExecutionExtraData(v *spec.VersionedSignedBeaconBlock) ([]byte, error) {
+ switch v.Version {
+ case spec.DataVersionBellatrix:
+ if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no bellatrix block")
+ }
+
+ return v.Bellatrix.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionCapella:
+ if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no capella block")
+ }
+
+ return v.Capella.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionDeneb:
+ if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no deneb block")
+ }
+
+ return v.Deneb.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionElectra:
+ if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil {
+ return nil, errors.New("no electra block")
+ }
+
+ return v.Electra.Message.Body.ExecutionPayload.ExtraData, nil
+ case spec.DataVersionGloas:
+ return nil, nil
+ default:
+ return nil, errors.New("unknown version")
+ }
+}
+
+// getBlockPayloadBuilderIndex returns the builder index from the execution payload of a versioned signed beacon block.
+func getBlockPayloadBuilderIndex(v *spec.VersionedSignedBeaconBlock) (gloas.BuilderIndex, error) {
+ switch v.Version {
+ case spec.DataVersionPhase0:
+ return 0, errors.New("no builder index in phase0 block")
+ case spec.DataVersionAltair:
+ return 0, errors.New("no builder index in altair block")
+ case spec.DataVersionBellatrix:
+ return 0, errors.New("no builder index in bellatrix block")
+ case spec.DataVersionCapella:
+ return 0, errors.New("no builder index in capella block")
+ case spec.DataVersionDeneb:
+ return 0, errors.New("no builder index in deneb block")
+ case spec.DataVersionElectra:
+ return 0, errors.New("no builder index in electra block")
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.Message == nil || v.Gloas.Message.Body == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid.Message == nil {
+ return 0, errors.New("no gloas block")
+ }
+
+ return v.Gloas.Message.Body.SignedExecutionPayloadBid.Message.BuilderIndex, nil
+ default:
+ return 0, errors.New("unknown version")
+ }
+}
+
+// getBlockExecutionParentHash returns the parent hash from the execution payload of a versioned signed beacon block.
+func getBlockExecutionParentHash(v *spec.VersionedSignedBeaconBlock) (phase0.Hash32, error) {
+ switch v.Version {
+ case spec.DataVersionPhase0:
+ return phase0.Hash32{}, errors.New("no parent hash in phase0 block")
+ case spec.DataVersionAltair:
+ return phase0.Hash32{}, errors.New("no parent hash in altair block")
+ case spec.DataVersionBellatrix:
+ if v.Bellatrix == nil || v.Bellatrix.Message == nil || v.Bellatrix.Message.Body == nil || v.Bellatrix.Message.Body.ExecutionPayload == nil {
+ return phase0.Hash32{}, errors.New("no bellatrix block")
+ }
+
+ return v.Bellatrix.Message.Body.ExecutionPayload.ParentHash, nil
+ case spec.DataVersionCapella:
+ if v.Capella == nil || v.Capella.Message == nil || v.Capella.Message.Body == nil || v.Capella.Message.Body.ExecutionPayload == nil {
+ return phase0.Hash32{}, errors.New("no capella block")
+ }
+
+ return v.Capella.Message.Body.ExecutionPayload.ParentHash, nil
+ case spec.DataVersionDeneb:
+ if v.Deneb == nil || v.Deneb.Message == nil || v.Deneb.Message.Body == nil || v.Deneb.Message.Body.ExecutionPayload == nil {
+ return phase0.Hash32{}, errors.New("no deneb block")
+ }
+
+ return v.Deneb.Message.Body.ExecutionPayload.ParentHash, nil
+ case spec.DataVersionElectra:
+ if v.Electra == nil || v.Electra.Message == nil || v.Electra.Message.Body == nil || v.Electra.Message.Body.ExecutionPayload == nil {
+ return phase0.Hash32{}, errors.New("no electra block")
+ }
+
+ return v.Electra.Message.Body.ExecutionPayload.ParentHash, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.Message == nil || v.Gloas.Message.Body == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid == nil || v.Gloas.Message.Body.SignedExecutionPayloadBid.Message == nil {
+ return phase0.Hash32{}, errors.New("no gloas block")
+ }
+
+ return v.Gloas.Message.Body.SignedExecutionPayloadBid.Message.ParentBlockHash, nil
+ default:
+ return phase0.Hash32{}, errors.New("unknown version")
+ }
+}
+
// getStateRandaoMixes returns the RANDAO mixes from a versioned beacon state.
func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) {
switch v.Version {
@@ -252,6 +447,12 @@ func getStateRandaoMixes(v *spec.VersionedBeaconState) ([]phase0.Root, error) {
}
return v.Fulu.RANDAOMixes, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.RANDAOMixes == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.RANDAOMixes, nil
default:
return nil, errors.New("unknown version")
}
@@ -274,6 +475,8 @@ func getStateDepositIndex(state *spec.VersionedBeaconState) uint64 {
return state.Electra.ETH1DepositIndex
case spec.DataVersionFulu:
return state.Fulu.ETH1DepositIndex
+ case spec.DataVersionGloas:
+ return state.Gloas.ETH1DepositIndex
}
return 0
}
@@ -319,6 +522,12 @@ func getStateCurrentSyncCommittee(v *spec.VersionedBeaconState) ([]phase0.BLSPub
}
return v.Fulu.CurrentSyncCommittee.Pubkeys, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.CurrentSyncCommittee == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.CurrentSyncCommittee.Pubkeys, nil
default:
return nil, errors.New("unknown version")
}
@@ -349,6 +558,12 @@ func getStateDepositBalanceToConsume(v *spec.VersionedBeaconState) (phase0.Gwei,
}
return v.Fulu.DepositBalanceToConsume, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil {
+ return 0, errors.New("no gloas block")
+ }
+
+ return v.Gloas.DepositBalanceToConsume, nil
default:
return 0, errors.New("unknown version")
}
@@ -368,17 +583,23 @@ func getStatePendingDeposits(v *spec.VersionedBeaconState) ([]*electra.PendingDe
case spec.DataVersionDeneb:
return nil, errors.New("no pending deposits in deneb")
case spec.DataVersionElectra:
- if v.Electra == nil || v.Electra.PendingDeposits == nil {
+ if v.Electra == nil {
return nil, errors.New("no electra block")
}
return v.Electra.PendingDeposits, nil
case spec.DataVersionFulu:
- if v.Fulu == nil || v.Fulu.PendingDeposits == nil {
+ if v.Fulu == nil {
return nil, errors.New("no fulu block")
}
return v.Fulu.PendingDeposits, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.PendingDeposits, nil
default:
return nil, errors.New("unknown version")
}
@@ -398,17 +619,23 @@ func getStatePendingWithdrawals(v *spec.VersionedBeaconState) ([]*electra.Pendin
case spec.DataVersionDeneb:
return nil, errors.New("no pending withdrawals in deneb")
case spec.DataVersionElectra:
- if v.Electra == nil || v.Electra.PendingPartialWithdrawals == nil {
+ if v.Electra == nil {
return nil, errors.New("no electra block")
}
return v.Electra.PendingPartialWithdrawals, nil
case spec.DataVersionFulu:
- if v.Fulu == nil || v.Fulu.PendingPartialWithdrawals == nil {
+ if v.Fulu == nil {
return nil, errors.New("no fulu block")
}
return v.Fulu.PendingPartialWithdrawals, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.PendingPartialWithdrawals, nil
default:
return nil, errors.New("unknown version")
}
@@ -428,17 +655,23 @@ func getStatePendingConsolidations(v *spec.VersionedBeaconState) ([]*electra.Pen
case spec.DataVersionDeneb:
return nil, errors.New("no pending consolidations in deneb")
case spec.DataVersionElectra:
- if v.Electra == nil || v.Electra.PendingConsolidations == nil {
+ if v.Electra == nil {
return nil, errors.New("no electra block")
}
return v.Electra.PendingConsolidations, nil
case spec.DataVersionFulu:
- if v.Fulu == nil || v.Fulu.PendingConsolidations == nil {
+ if v.Fulu == nil {
return nil, errors.New("no fulu block")
}
return v.Fulu.PendingConsolidations, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.PendingConsolidations, nil
default:
return nil, errors.New("unknown version")
}
@@ -460,16 +693,70 @@ func getStateProposerLookahead(v *spec.VersionedBeaconState) ([]phase0.Validator
case spec.DataVersionElectra:
return nil, errors.New("no proposer lookahead in electra")
case spec.DataVersionFulu:
- if v.Fulu == nil || v.Fulu.ProposerLookahead == nil {
+ if v.Fulu == nil {
return nil, errors.New("no fulu block")
}
return v.Fulu.ProposerLookahead, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil {
+ return nil, errors.New("no gloas block")
+ }
+
+ return v.Gloas.ProposerLookahead, nil
default:
return nil, errors.New("unknown version")
}
}
+// getLatestBlockHeaderParentRoot returns the parent root from the latest block header in the state.
+func getLatestBlockHeaderParentRoot(v *spec.VersionedBeaconState) (phase0.Root, error) {
+ switch v.Version {
+ case spec.DataVersionPhase0:
+ return phase0.Root{}, errors.New("no latest block header parent root in phase0 state")
+ case spec.DataVersionAltair:
+ return phase0.Root{}, errors.New("no latest block header parent root in altair state")
+ case spec.DataVersionBellatrix:
+ if v.Bellatrix == nil || v.Bellatrix.LatestBlockHeader == nil {
+ return phase0.Root{}, errors.New("no bellatrix state")
+ }
+
+ return v.Bellatrix.LatestBlockHeader.ParentRoot, nil
+ case spec.DataVersionCapella:
+ if v.Capella == nil || v.Capella.LatestBlockHeader == nil {
+ return phase0.Root{}, errors.New("no capella state")
+ }
+
+ return v.Capella.LatestBlockHeader.ParentRoot, nil
+ case spec.DataVersionDeneb:
+ if v.Deneb == nil || v.Deneb.LatestBlockHeader == nil {
+ return phase0.Root{}, errors.New("no deneb state")
+ }
+
+ return v.Deneb.LatestBlockHeader.ParentRoot, nil
+ case spec.DataVersionElectra:
+ if v.Electra == nil || v.Electra.LatestBlockHeader == nil {
+ return phase0.Root{}, errors.New("no electra state")
+ }
+
+ return v.Electra.LatestBlockHeader.ParentRoot, nil
+ case spec.DataVersionFulu:
+ if v.Fulu == nil || v.Fulu.LatestBlockHeader == nil {
+ return phase0.Root{}, errors.New("no fulu state")
+ }
+
+ return v.Fulu.LatestBlockHeader.ParentRoot, nil
+ case spec.DataVersionGloas:
+ if v.Gloas == nil || v.Gloas.LatestBlockHeader == nil {
+ return phase0.Root{}, errors.New("no gloas state")
+ }
+
+ return v.Gloas.LatestBlockHeader.ParentRoot, nil
+ default:
+ return phase0.Root{}, errors.New("unknown version")
+ }
+}
+
// getBlockSize returns the block size from a versioned beacon block.
func getBlockSize(dynSsz *dynssz.DynSsz, block *spec.VersionedSignedBeaconBlock) (int, error) {
switch block.Version {
@@ -487,6 +774,8 @@ func getBlockSize(dynSsz *dynssz.DynSsz, block *spec.VersionedSignedBeaconBlock)
return dynSsz.SizeSSZ(block.Electra)
case spec.DataVersionFulu:
return dynSsz.SizeSSZ(block.Fulu)
+ case spec.DataVersionGloas:
+ return dynSsz.SizeSSZ(block.Gloas)
default:
return 0, errors.New("unknown version")
}
diff --git a/indexer/beacon/buildercache.go b/indexer/beacon/buildercache.go
new file mode 100644
index 000000000..188c5aa6f
--- /dev/null
+++ b/indexer/beacon/buildercache.go
@@ -0,0 +1,740 @@
+package beacon
+
+import (
+ "bytes"
+ "fmt"
+ "hash/crc64"
+ "math"
+ "runtime/debug"
+ "sync"
+ "time"
+
+ "github.com/attestantio/go-eth2-client/spec/gloas"
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+ "github.com/jmoiron/sqlx"
+
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+)
+
+// BuilderIndexFlag separates builder indices from validator indices in the pubkey cache
+const BuilderIndexFlag = uint64(1 << 40)
+
+// Builder status flag constants representing different builder states
+const (
+ BuilderStatusExited uint16 = 1 << iota // Builder has exited (withdrawable_epoch reached)
+ BuilderStatusSuperseded // Builder index was reused, this pubkey is no longer active
+)
+
+// builderCache manages the in-memory cache of builder states and handles updates
+type builderCache struct {
+ indexer *Indexer
+ builderSetCache []*builderEntry
+ cacheMutex sync.RWMutex
+ triggerDbUpdate chan bool
+}
+
+// builderEntry represents a single builder's state in the cache
+type builderEntry struct {
+ builderDiffs []*builderDiff
+ finalChecksum uint64
+ finalBuilder *gloas.Builder
+ activeData *BuilderData
+ statusFlags uint16
+}
+
+// BuilderData contains the essential builder state information for active builders.
+// Only WithdrawableEpoch can change during a builder's lifetime; all other fields are static.
+type BuilderData struct {
+ WithdrawableEpoch phase0.Epoch
+}
+
+// builderDiff represents an updated builder entry in the builder set cache.
+type builderDiff struct {
+ epoch phase0.Epoch
+ dependentRoot phase0.Root
+ builder *gloas.Builder
+}
+
+// newBuilderCache initializes a new builder cache instance and starts the persist loop
+func newBuilderCache(indexer *Indexer) *builderCache {
+ cache := &builderCache{
+ indexer: indexer,
+ triggerDbUpdate: make(chan bool, 1),
+ }
+
+ go cache.runPersistLoop()
+
+ return cache
+}
+
+// updateBuilderSet processes builder set updates and maintains the cache state
+func (cache *builderCache) updateBuilderSet(slot phase0.Slot, dependentRoot phase0.Root, builders []*gloas.Builder) {
+ chainState := cache.indexer.consensusPool.GetChainState()
+ epoch := chainState.EpochOfSlot(slot)
+ currentEpoch := chainState.CurrentEpoch()
+ finalizedEpoch, finalizedRoot := chainState.GetFinalizedCheckpoint()
+ cutOffEpoch := phase0.Epoch(0)
+ if currentEpoch > phase0.Epoch(cache.indexer.inMemoryEpochs) {
+ cutOffEpoch = currentEpoch - phase0.Epoch(cache.indexer.inMemoryEpochs)
+ }
+ if cutOffEpoch > finalizedEpoch {
+ cutOffEpoch = finalizedEpoch
+ }
+
+ if epoch < cutOffEpoch {
+ cache.indexer.logger.Infof("ignoring old builder set update for epoch %d", epoch)
+ return
+ }
+
+ isFinalizedBuilderSet := false
+ if slot == 0 {
+ isFinalizedBuilderSet = true // genesis
+ } else if epoch <= finalizedEpoch {
+ finalizedBlock := cache.indexer.blockCache.getBlockByRoot(finalizedRoot)
+ if finalizedBlock != nil {
+ finalizedDependentBlock := cache.indexer.blockCache.getDependentBlock(chainState, finalizedBlock, nil)
+ if finalizedDependentBlock != nil && bytes.Equal(finalizedDependentBlock.Root[:], dependentRoot[:]) {
+ isFinalizedBuilderSet = true
+ }
+ }
+ }
+
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ t1 := time.Now()
+
+ if len(cache.builderSetCache) < len(builders) {
+ if len(builders) > cap(cache.builderSetCache) {
+ newCache := make([]*builderEntry, len(builders), len(builders)+1000)
+ copy(newCache, cache.builderSetCache)
+ cache.builderSetCache = newCache
+ } else {
+ cache.builderSetCache = cache.builderSetCache[:len(builders)]
+ }
+ }
+
+ isParentMap := map[phase0.Root]bool{}
+ isAheadMap := map[phase0.Root]bool{}
+ updatedCount := uint64(0)
+
+ for i := range builders {
+ var parentChecksum uint64
+ var parentBuilder *gloas.Builder
+ parentEpoch := phase0.Epoch(0)
+
+ aheadDiffIdx := 0
+ foundAhead := false
+ aheadEpoch := phase0.Epoch(math.MaxInt64)
+
+ cachedBuilder := cache.builderSetCache[i]
+ if cachedBuilder == nil {
+ cachedBuilder = &builderEntry{}
+ cache.builderSetCache[i] = cachedBuilder
+
+ cache.indexer.pubkeyCache.Add(builders[i].PublicKey, phase0.ValidatorIndex(uint64(i)|BuilderIndexFlag))
+ } else {
+ parentBuilder = cachedBuilder.finalBuilder
+ parentChecksum = cachedBuilder.finalChecksum
+ }
+
+ deleteKeys := []int{}
+
+ if !isFinalizedBuilderSet {
+ for diffkey, diff := range cachedBuilder.builderDiffs {
+ if diff.epoch < cutOffEpoch {
+ deleteKeys = append(deleteKeys, diffkey)
+ continue
+ }
+
+ if diff.epoch < epoch {
+ isParent, checkedParent := isParentMap[diff.dependentRoot]
+ if !checkedParent {
+ isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, dependentRoot)
+ isParentMap[diff.dependentRoot] = isParent
+ }
+
+ if isParent && diff.epoch > parentEpoch {
+ parentBuilder = diff.builder
+ parentEpoch = diff.epoch
+ }
+ }
+
+ if diff.epoch > epoch {
+ isAhead, checkedAhead := isAheadMap[diff.dependentRoot]
+ if !checkedAhead {
+ isAhead = cache.indexer.blockCache.isCanonicalBlock(dependentRoot, diff.dependentRoot)
+ isAheadMap[diff.dependentRoot] = isAhead
+ }
+
+ if isAhead && diff.epoch < aheadEpoch {
+ aheadDiffIdx = diffkey
+ aheadEpoch = diff.epoch
+ foundAhead = true
+ }
+ }
+ }
+
+ if parentBuilder != nil {
+ parentChecksum = calculateBuilderChecksum(parentBuilder)
+ }
+ }
+
+ checksum := calculateBuilderChecksum(builders[i])
+ if checksum == parentChecksum {
+ continue
+ }
+
+ if isFinalizedBuilderSet {
+ cachedBuilder.finalBuilder = builders[i]
+ cachedBuilder.finalChecksum = checksum
+ cachedBuilder.statusFlags = GetBuilderStatusFlags(builders[i])
+ updatedCount++
+
+ activeData := &BuilderData{
+ WithdrawableEpoch: builders[i].WithdrawableEpoch,
+ }
+ if cache.isActiveBuilder(activeData) {
+ cachedBuilder.activeData = activeData
+ }
+ }
+
+ if foundAhead && cache.checkBuilderEqual(cachedBuilder.builderDiffs[aheadDiffIdx].builder, builders[i]) {
+ if isFinalizedBuilderSet {
+ deleteKeys = append(deleteKeys, aheadDiffIdx)
+ } else {
+ diff := cachedBuilder.builderDiffs[aheadDiffIdx]
+ diff.epoch = epoch
+ diff.dependentRoot = dependentRoot
+ cachedBuilder.builderDiffs[aheadDiffIdx] = diff
+ }
+ } else if isFinalizedBuilderSet {
+ } else if len(deleteKeys) == 0 {
+ cachedBuilder.builderDiffs = append(cachedBuilder.builderDiffs, &builderDiff{
+ epoch: epoch,
+ dependentRoot: dependentRoot,
+ builder: builders[i],
+ })
+ } else {
+ cachedBuilder.builderDiffs[deleteKeys[0]] = &builderDiff{
+ epoch: epoch,
+ dependentRoot: dependentRoot,
+ builder: builders[i],
+ }
+ deleteKeys = deleteKeys[1:]
+ }
+
+ if len(deleteKeys) > 0 {
+ lastIdx := len(cachedBuilder.builderDiffs) - 1
+ delLen := len(deleteKeys)
+ for delIdx := 0; delIdx < delLen; delIdx++ {
+ for delLen > 0 && deleteKeys[delLen-1] == lastIdx {
+ lastIdx--
+ delLen--
+ }
+ if delLen == 0 {
+ break
+ }
+ cachedBuilder.builderDiffs[deleteKeys[delIdx]] = cachedBuilder.builderDiffs[lastIdx]
+ lastIdx--
+ }
+
+ cachedBuilder.builderDiffs = cachedBuilder.builderDiffs[:lastIdx+1]
+ }
+ }
+
+ if updatedCount > 0 {
+ select {
+ case cache.triggerDbUpdate <- true:
+ default:
+ }
+ }
+
+ isFinalizedStr := ""
+ if isFinalizedBuilderSet {
+ isFinalizedStr = "finalized "
+ }
+ cache.indexer.logger.Infof("processed %vbuilder set update for epoch %d in %v", isFinalizedStr, epoch, time.Since(t1))
+}
+
+// checkBuilderEqual compares two builder states for equality
+func (cache *builderCache) checkBuilderEqual(builder1 *gloas.Builder, builder2 *gloas.Builder) bool {
+ if builder1 == nil && builder2 == nil {
+ return true
+ }
+ if builder1 == nil || builder2 == nil {
+ return false
+ }
+ return bytes.Equal(builder1.PublicKey[:], builder2.PublicKey[:]) &&
+ builder1.Version == builder2.Version &&
+ bytes.Equal(builder1.ExecutionAddress[:], builder2.ExecutionAddress[:]) &&
+ builder1.DepositEpoch == builder2.DepositEpoch &&
+ builder1.WithdrawableEpoch == builder2.WithdrawableEpoch
+}
+
+// GetBuilderStatusFlags calculates the status flags for a builder
+func GetBuilderStatusFlags(builder *gloas.Builder) uint16 {
+ flags := uint16(0)
+ if builder.WithdrawableEpoch != FarFutureEpoch {
+ flags |= BuilderStatusExited
+ }
+ return flags
+}
+
+// getBuilderSetSize returns the current number of builders in the builder set
+func (cache *builderCache) getBuilderSetSize() uint64 {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ return uint64(len(cache.builderSetCache))
+}
+
+// setFinalizedEpoch updates the builder cache when a new epoch is finalized.
+// dependentRoot is the dependent root of the finalized epoch (last block of the parent epoch).
+func (cache *builderCache) setFinalizedEpoch(epoch phase0.Epoch, dependentRoot phase0.Root) {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
+ updatedCount := uint64(0)
+
+ for _, cachedBuilder := range cache.builderSetCache {
+ if cachedBuilder == nil {
+ continue
+ }
+
+ // Find the finalized builder state
+ for _, diff := range cachedBuilder.builderDiffs {
+ if diff.dependentRoot == dependentRoot {
+ cachedBuilder.finalBuilder = diff.builder
+ cachedBuilder.finalChecksum = calculateBuilderChecksum(diff.builder)
+ cachedBuilder.statusFlags = GetBuilderStatusFlags(diff.builder)
+ updatedCount++
+
+ cachedBuilder.activeData = &BuilderData{
+ WithdrawableEpoch: diff.builder.WithdrawableEpoch,
+ }
+ break
+ }
+ }
+
+ // Clean up old diffs
+ newDiffs := make([]*builderDiff, 0)
+ for _, diff := range cachedBuilder.builderDiffs {
+ if diff.epoch > epoch {
+ newDiffs = append(newDiffs, diff)
+ }
+ }
+ cachedBuilder.builderDiffs = newDiffs
+
+ // Clear old active data
+ if cachedBuilder.activeData != nil {
+ if !cache.isActiveBuilder(cachedBuilder.activeData) {
+ cachedBuilder.activeData = nil
+ }
+ }
+ }
+
+ if updatedCount > 0 {
+ select {
+ case cache.triggerDbUpdate <- true:
+ default:
+ }
+ }
+}
+
+// BuilderSetStreamer is a callback for streaming builder data
+type BuilderSetStreamer func(index gloas.BuilderIndex, flags uint16, activeData *BuilderData, builder *gloas.Builder) error
+
+// streamBuilderSetForRoot streams the builder set for a given blockRoot
+func (cache *builderCache) streamBuilderSetForRoot(blockRoot phase0.Root, onlyActive bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ isParentMap := map[phase0.Root]bool{}
+ isAheadMap := map[phase0.Root]bool{}
+
+ for index, cachedBuilder := range cache.builderSetCache {
+ if cachedBuilder == nil {
+ continue
+ }
+
+ latestBuilder := cachedBuilder.finalBuilder
+ builderData := cachedBuilder.activeData
+ builderEpoch := phase0.Epoch(0)
+
+ var aheadBuilder *gloas.Builder
+ aheadEpoch := phase0.Epoch(math.MaxInt64)
+
+ for _, diff := range cachedBuilder.builderDiffs {
+ isParent, checkedParent := isParentMap[diff.dependentRoot]
+ if !checkedParent {
+ isParent = cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot)
+ isParentMap[diff.dependentRoot] = isParent
+ }
+
+ if isParent && diff.epoch >= builderEpoch {
+ builderData = &BuilderData{
+ WithdrawableEpoch: diff.builder.WithdrawableEpoch,
+ }
+ builderEpoch = diff.epoch
+ latestBuilder = diff.builder
+ }
+
+ if !isParent && builderData == nil {
+ isAhead, checkedAhead := isAheadMap[diff.dependentRoot]
+ if !checkedAhead {
+ isAhead = cache.indexer.blockCache.isCanonicalBlock(blockRoot, diff.dependentRoot)
+ isAheadMap[diff.dependentRoot] = isAhead
+ }
+
+ if isAhead && diff.epoch < aheadEpoch {
+ aheadBuilder = diff.builder
+ aheadEpoch = diff.epoch
+ }
+ }
+ }
+
+ if builderData == nil && aheadBuilder != nil {
+ builderData = &BuilderData{
+ WithdrawableEpoch: aheadBuilder.WithdrawableEpoch,
+ }
+ latestBuilder = aheadBuilder
+ }
+
+ if onlyActive && (builderData == nil || (epoch != nil && builderData.WithdrawableEpoch <= *epoch)) {
+ continue
+ }
+
+ builderFlags := cachedBuilder.statusFlags
+ if latestBuilder != nil {
+ builderFlags = GetBuilderStatusFlags(latestBuilder)
+ }
+
+ err := cb(gloas.BuilderIndex(index), builderFlags, builderData, latestBuilder)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+// UnwrapDbBuilder converts a dbtypes.Builder to a gloas.Builder
+func UnwrapDbBuilder(dbBuilder *dbtypes.Builder) *gloas.Builder {
+ builder := &gloas.Builder{
+ Version: dbBuilder.Version,
+ Balance: 0, // Balance not persisted
+ DepositEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.DepositEpoch)),
+ WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)),
+ }
+ copy(builder.PublicKey[:], dbBuilder.Pubkey)
+ copy(builder.ExecutionAddress[:], dbBuilder.ExecutionAddress)
+ return builder
+}
+
+// isActiveBuilder determines if a builder is currently active
+func (cache *builderCache) isActiveBuilder(builder *BuilderData) bool {
+ currentEpoch := cache.indexer.consensusPool.GetChainState().CurrentEpoch()
+ cutOffEpoch := phase0.Epoch(0)
+ if currentEpoch > 10 {
+ cutOffEpoch = currentEpoch - 10
+ }
+
+ return builder.WithdrawableEpoch > cutOffEpoch
+}
+
+// getBuilderByIndex returns the builder by index for a given forkId
+func (cache *builderCache) getBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder {
+ canonicalHead := cache.indexer.GetCanonicalHead(overrideForkId)
+ if canonicalHead == nil {
+ return nil
+ }
+
+ return cache.getBuilderByIndexAndRoot(index, canonicalHead.Root)
+}
+
+// getBuilderByIndexAndRoot returns the builder by index for a given blockRoot
+func (cache *builderCache) getBuilderByIndexAndRoot(index gloas.BuilderIndex, blockRoot phase0.Root) *gloas.Builder {
+ cache.cacheMutex.RLock()
+ defer cache.cacheMutex.RUnlock()
+
+ if uint64(index) >= uint64(len(cache.builderSetCache)) {
+ return nil
+ }
+
+ cachedBuilder := cache.builderSetCache[index]
+ if cachedBuilder == nil {
+ return nil
+ }
+
+ builder := cachedBuilder.finalBuilder
+ builderEpoch := phase0.Epoch(0)
+
+ // Find the latest valid diff
+ for _, diff := range cachedBuilder.builderDiffs {
+ if cache.indexer.blockCache.isCanonicalBlock(diff.dependentRoot, blockRoot) && diff.epoch >= builderEpoch {
+ builder = diff.builder
+ builderEpoch = diff.epoch
+ }
+ }
+
+ // Fallback to db if builder is not found in cache
+ if builder == nil {
+ if dbBuilder := db.GetActiveBuilderByIndex(cache.indexer.ctx, uint64(index)); dbBuilder != nil {
+ builder = UnwrapDbBuilder(dbBuilder)
+ }
+ } else {
+ // Return a copy
+ builder = &gloas.Builder{
+ PublicKey: builder.PublicKey,
+ Version: builder.Version,
+ ExecutionAddress: builder.ExecutionAddress,
+ Balance: builder.Balance,
+ DepositEpoch: builder.DepositEpoch,
+ WithdrawableEpoch: builder.WithdrawableEpoch,
+ }
+ }
+
+ return builder
+}
+
+// calculateBuilderChecksum generates a CRC64 checksum of all builder fields (except balance)
+func calculateBuilderChecksum(b *gloas.Builder) uint64 {
+	if b == nil {
+		return 0
+	}
+
+	data := make([]byte, 0, 85) // 48B pubkey + 1B version + 20B address + 2x8B epochs (assumed sizes — was 80, which forced a regrow)
+	data = append(data, b.PublicKey[:]...)
+	data = append(data, b.Version)
+	data = append(data, b.ExecutionAddress[:]...)
+	data = append(data, uint64ToBytes(uint64(b.DepositEpoch))...)
+	data = append(data, uint64ToBytes(uint64(b.WithdrawableEpoch))...)
+
+	return crc64.Checksum(data, crc64Table)
+}
+
+// prepopulateFromDB pre-populates the builder set cache from the database
+func (cache *builderCache) prepopulateFromDB() (uint64, error) {
+	cache.cacheMutex.Lock()
+	defer cache.cacheMutex.Unlock()
+
+	maxIndex, err := db.GetMaxBuilderIndex(cache.indexer.ctx)
+	if err != nil {
+		return 0, fmt.Errorf("error getting max builder index: %w", err)
+	}
+
+	if maxIndex == 0 {
+		return 0, nil
+	}
+
+	cache.builderSetCache = make([]*builderEntry, maxIndex+1, maxIndex+1+1000)
+
+	restoreCount := uint64(0)
+
+	batchSize := uint64(10000)
+	for start := uint64(0); start <= maxIndex; start += batchSize {
+		end := min(start+batchSize-1, maxIndex) // inclusive range: without -1 the boundary index is fetched twice and double-counted
+
+		builders := db.GetBuilderRange(cache.indexer.ctx, start, end)
+		for _, dbBuilder := range builders {
+			if dbBuilder.Superseded {
+				continue
+			}
+
+			builder := UnwrapDbBuilder(dbBuilder)
+			builderEntry := &builderEntry{
+				finalChecksum: calculateBuilderChecksum(builder),
+			}
+			builderData := &BuilderData{
+				WithdrawableEpoch: phase0.Epoch(db.ConvertInt64ToUint64(dbBuilder.WithdrawableEpoch)),
+			}
+			if cache.isActiveBuilder(builderData) {
+				builderEntry.activeData = builderData
+			}
+			builderEntry.statusFlags = GetBuilderStatusFlags(builder)
+
+			cache.builderSetCache[dbBuilder.BuilderIndex] = builderEntry
+
+			cache.indexer.pubkeyCache.Add(builder.PublicKey, phase0.ValidatorIndex(dbBuilder.BuilderIndex|BuilderIndexFlag))
+
+			restoreCount++
+		}
+	}
+
+	return restoreCount, nil
+}
+
+// runPersistLoop handles the background persistence of builder states to the database
+func (cache *builderCache) runPersistLoop() {
+	defer func() {
+		if err := recover(); err != nil {
+			// recover() may return any value, not necessarily an error; the previous
+			// err.(error) assertion itself panicked on e.g. string panics.
+			cache.indexer.logger.Errorf("uncaught panic in indexer.beacon.builderCache.runPersistLoop subroutine: %v, stack: %v", err, string(debug.Stack()))
+			time.Sleep(10 * time.Second)
+
+			go cache.runPersistLoop()
+		}
+	}()
+
+	for range cache.triggerDbUpdate {
+		time.Sleep(2 * time.Second)
+		err := db.RunDBTransaction(func(tx *sqlx.Tx) error {
+			hasMore, err := cache.persistBuilders(tx)
+			if hasMore {
+				select {
+				case cache.triggerDbUpdate <- true:
+				default:
+				}
+			}
+			return err
+		})
+		if err != nil {
+			cache.indexer.logger.WithError(err).Errorf("error persisting builders")
+		}
+	}
+}
+
+// persistBuilders writes a batch of builder states to the database
+func (cache *builderCache) persistBuilders(tx *sqlx.Tx) (bool, error) {
+	cache.cacheMutex.Lock() // exclusive lock: finalBuilder is cleared below; mutating under RLock races with concurrent readers
+	defer cache.cacheMutex.Unlock()
+
+	const batchSize = 1000
+	const maxPerRun = 10000
+
+	batch := make([]*dbtypes.Builder, 0, batchSize)
+	batchIndices := make([]uint64, 0, batchSize)
+	supersededPubkeys := make([][]byte, 0)
+	persisted := 0
+	firstIndex := uint64(0)
+	lastIndex := uint64(0)
+	hasMore := false
+
+	for index, entry := range cache.builderSetCache {
+		if entry == nil || entry.finalBuilder == nil {
+			continue
+		}
+
+		if persisted == 0 && len(batch) == 0 {
+			firstIndex = uint64(index)
+		}
+		lastIndex = uint64(index)
+
+		dbBuilder := &dbtypes.Builder{
+			Pubkey:            entry.finalBuilder.PublicKey[:],
+			BuilderIndex:      uint64(index),
+			Version:           entry.finalBuilder.Version,
+			ExecutionAddress:  entry.finalBuilder.ExecutionAddress[:],
+			DepositEpoch:      db.ConvertUint64ToInt64(uint64(entry.finalBuilder.DepositEpoch)),
+			WithdrawableEpoch: db.ConvertUint64ToInt64(uint64(entry.finalBuilder.WithdrawableEpoch)),
+			Superseded:        false,
+		}
+
+		batch = append(batch, dbBuilder)
+		batchIndices = append(batchIndices, uint64(index))
+
+		if len(batch) >= batchSize {
+			superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices)
+			if err != nil {
+				return false, err
+			}
+			supersededPubkeys = append(supersededPubkeys, superseded...)
+
+			// Clear finalBuilder for persisted entries
+			for _, idx := range batchIndices {
+				if cache.builderSetCache[idx] != nil {
+					cache.builderSetCache[idx].finalBuilder = nil
+				}
+			}
+
+			batch = batch[:0]
+			batchIndices = batchIndices[:0]
+			persisted += batchSize
+
+			if persisted >= maxPerRun {
+				hasMore = true
+				break
+			}
+		}
+	}
+
+	// Persist remaining batch
+	if len(batch) > 0 {
+		superseded, err := cache.persistBuilderBatch(tx, batch, batchIndices)
+		if err != nil {
+			return false, err
+		}
+		supersededPubkeys = append(supersededPubkeys, superseded...)
+
+		// Clear finalBuilder for persisted entries
+		for _, idx := range batchIndices {
+			if cache.builderSetCache[idx] != nil {
+				cache.builderSetCache[idx].finalBuilder = nil
+			}
+		}
+
+		persisted += len(batch)
+	}
+
+	// Batch mark superseded builders
+	if len(supersededPubkeys) > 0 {
+		err := db.SetBuildersSuperseded(supersededPubkeys, tx)
+		if err != nil {
+			return false, fmt.Errorf("error marking builders as superseded: %w", err)
+		}
+	}
+
+	if persisted > 0 || len(supersededPubkeys) > 0 {
+		cache.indexer.logger.Infof("persisted %d builders to db [%d-%d], marked %d as superseded",
+			persisted, firstIndex, lastIndex, len(supersededPubkeys))
+	}
+
+	return hasMore, nil
+}
+
+// persistBuilderBatch persists a batch of builders and returns pubkeys that were superseded
+func (cache *builderCache) persistBuilderBatch(tx *sqlx.Tx, batch []*dbtypes.Builder, indices []uint64) ([][]byte, error) {
+ if len(batch) == 0 {
+ return nil, nil
+ }
+
+ // Get range for this batch
+ minIndex := indices[0]
+ maxIndex := indices[0]
+ for _, idx := range indices[1:] {
+ if idx < minIndex {
+ minIndex = idx
+ }
+ if idx > maxIndex {
+ maxIndex = idx
+ }
+ }
+
+ // Fetch existing builders in this batch's range
+ existingBuilders := db.GetBuilderRange(cache.indexer.ctx, minIndex, maxIndex)
+ existingByIndex := make(map[uint64]*dbtypes.Builder, len(existingBuilders))
+ for _, b := range existingBuilders {
+ existingByIndex[b.BuilderIndex] = b
+ }
+
+ // Find superseded pubkeys
+ supersededPubkeys := make([][]byte, 0)
+ for i, dbBuilder := range batch {
+ if existing, ok := existingByIndex[indices[i]]; ok {
+ if !bytes.Equal(existing.Pubkey, dbBuilder.Pubkey) {
+ supersededPubkeys = append(supersededPubkeys, existing.Pubkey)
+ }
+ }
+ }
+
+ // Insert batch
+ err := db.InsertBuilderBatch(batch, tx)
+ if err != nil {
+ return nil, fmt.Errorf("error persisting builder batch: %w", err)
+ }
+
+ return supersededPubkeys, nil
+}
diff --git a/indexer/beacon/canonical.go b/indexer/beacon/canonical.go
index c0d51dde0..4467af3ad 100644
--- a/indexer/beacon/canonical.go
+++ b/indexer/beacon/canonical.go
@@ -61,6 +61,10 @@ func (indexer *Indexer) IsCanonicalBlockByHead(block *Block, headBlock *Block) b
return false
}
+ if block == headBlock {
+ return true
+ }
+
if block.forkChecked && headBlock.forkChecked {
parentForkIds := indexer.forkCache.getParentForkIds(headBlock.forkId)
return slices.Contains(parentForkIds, block.forkId)
diff --git a/indexer/beacon/client.go b/indexer/beacon/client.go
index 7176d925a..2d724c508 100644
--- a/indexer/beacon/client.go
+++ b/indexer/beacon/client.go
@@ -10,6 +10,7 @@ import (
v1 "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethereum/go-ethereum/common"
"github.com/ethpandaops/dora/clients/consensus"
@@ -33,7 +34,9 @@ type Client struct {
archive bool
skipValidators bool
- streamSubscription *utils.Subscription[*rpc.BeaconStreamEvent]
+ streamSubscription *utils.Subscription[*rpc.BeaconStreamEvent]
+ executionPayloadSubscription *utils.Subscription[*v1.ExecutionPayloadAvailableEvent]
+ executionPayloadBidSubscription *utils.Subscription[*gloas.SignedExecutionPayloadBid]
headRoot phase0.Root
}
@@ -80,6 +83,8 @@ func (c *Client) startIndexing() {
// single ordered subscription for block & head events to preserve SSE ordering
c.streamSubscription = c.client.SubscribeStreamEvent(100, true)
+ c.executionPayloadSubscription = c.client.SubscribeExecutionPayloadAvailableEvent(100, true)
+ c.executionPayloadBidSubscription = c.client.SubscribeExecutionPayloadBidEvent(100, true)
go c.startClientLoop()
}
@@ -144,7 +149,7 @@ func (c *Client) runClientLoop() error {
c.headRoot = headRoot
- headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false)
+ headBlock, isNew, processingTimes, err := c.processBlock(headSlot, headRoot, nil, false, true)
if err != nil {
return fmt.Errorf("failed processing head block: %v", err)
}
@@ -185,6 +190,16 @@ func (c *Client) runClientLoop() error {
headEvent.Slot, headEvent.Block.String(), err)
}
}
+ case executionPayloadEvent := <-c.executionPayloadSubscription.Channel():
+ err := c.processExecutionPayloadAvailableEvent(executionPayloadEvent)
+ if err != nil {
+ c.logger.Errorf("failed processing execution payload %v (%v): %v", executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot.String(), err)
+ }
+ case executionPayloadBidEvent := <-c.executionPayloadBidSubscription.Channel():
+ err := c.processExecutionPayloadBidEvent(executionPayloadBidEvent)
+ if err != nil {
+ c.logger.Errorf("failed processing execution payload bid %v (%v): %v", executionPayloadBidEvent.Message.Slot, executionPayloadBidEvent.Message.ParentBlockRoot.String(), err)
+ }
}
}
@@ -245,50 +260,59 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error {
chainState := c.client.GetPool().GetChainState()
dependentRoot := headEvent.CurrentDutyDependentRoot
-
- var dependentBlock *Block
if !bytes.Equal(dependentRoot[:], consensus.NullRoot[:]) {
block.dependentRoot = &dependentRoot
-
- dependentBlock = c.indexer.blockCache.getBlockByRoot(dependentRoot)
- if dependentBlock == nil {
- c.logger.Warnf("dependent block (%v) not found after backfilling", dependentRoot.String())
- }
- } else {
- dependentBlock = c.indexer.blockCache.getDependentBlock(chainState, block, c)
}
// walk back the chain of epoch stats to ensure we have all duties & epoch specific data for the clients chain
currentBlock := block
- currentEpoch := chainState.EpochOfSlot(currentBlock.Slot)
+ headEpoch := chainState.EpochOfSlot(currentBlock.Slot)
+ currentEpoch := headEpoch
minInMemorySlot := c.indexer.getMinInMemorySlot()
absoluteMinInMemoryEpoch := c.indexer.getAbsoluteMinInMemoryEpoch()
for {
- if dependentBlock != nil && currentBlock.Slot >= minInMemorySlot {
- epoch := chainState.EpochOfSlot(currentBlock.Slot)
+ parentRoot := currentBlock.GetParentRoot()
+ if parentRoot == nil {
+ break
+ }
- // only request state for epochs that are allowed in memory by configuration
- // we accept some gaps here, these will be fixed by the pruning/finalization process
- requestState := epoch >= absoluteMinInMemoryEpoch
+ isEpochStart := false
+ parentBlock := c.indexer.blockCache.getBlockByRoot(*parentRoot)
+
+ if currentBlock.Slot == 0 {
+ isEpochStart = true
+ } else if currentBlock.dependentRoot != nil && *parentRoot == *currentBlock.dependentRoot && (parentBlock == nil || parentBlock.Slot > 0) {
+ isEpochStart = true
+ } else if parentBlock != nil && chainState.EpochOfSlot(parentBlock.Slot) < currentEpoch {
+ isEpochStart = true
+ } else if parentBlock == nil && chainState.EpochOfSlot(currentBlock.Slot) == currentEpoch {
+ // parent block is not in cache, but we're still in currentEpoch.
+ // this block is the oldest block in cache for this epoch, so treat its
+ // parent root as the dependent root for epoch boundary detection.
+ isEpochStart = true
+ }
+
+ if isEpochStart {
+ epoch := chainState.EpochOfSlot(currentBlock.Slot)
+ dependentRoot := *parentRoot
// ensure epoch stats for the epoch
- epochStats := c.indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root, requestState)
+ epochStats := c.indexer.epochCache.createOrGetEpochStats(epoch, dependentRoot)
+
+ if epoch >= absoluteMinInMemoryEpoch {
+ c.indexer.epochCache.ensureEpochDependentState(epochStats, currentBlock.Root)
+ }
if !epochStats.addRequestedBy(c) {
break
}
- if epochStats.dependentState == nil && epoch == currentEpoch {
- // always load most recent dependent state to ensure we have the latest validator set
- c.indexer.epochCache.addEpochStateRequest(epochStats)
- }
- } else {
- if dependentBlock == nil {
- c.logger.Debugf("epoch stats check failed: dependent block for %v:%v (%v) not found", currentBlock.Slot, chainState.EpochOfSlot(currentBlock.Slot), currentBlock.Root.String())
- }
+ }
+
+ if parentBlock == nil || parentBlock.Slot < minInMemorySlot {
break
}
- currentBlock = dependentBlock
- dependentBlock = c.indexer.blockCache.getDependentBlock(chainState, currentBlock, c)
+ currentBlock = parentBlock
+ currentEpoch = chainState.EpochOfSlot(currentBlock.Slot)
}
c.headRoot = block.Root
@@ -297,7 +321,7 @@ func (c *Client) processHeadEvent(headEvent *v1.HeadEvent) error {
// processStreamBlock processes a block received from the stream (either via block or head events).
func (c *Client) processStreamBlock(slot phase0.Slot, root phase0.Root) (*Block, error) {
- block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true)
+ block, isNew, processingTimes, err := c.processBlock(slot, root, nil, true, false)
if err != nil {
return nil, err
}
@@ -351,7 +375,7 @@ func (c *Client) processReorg(oldHead *Block, newHead *Block) error {
}
// processBlock processes a block (from stream & polling).
-func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) {
+func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0.SignedBeaconBlockHeader, trackRecvDelay bool, loadPayload bool) (block *Block, isNew bool, processingTimes []time.Duration, err error) {
chainState := c.client.GetPool().GetChainState()
finalizedSlot := chainState.GetFinalizedSlot()
processingTimes = make([]time.Duration, 3)
@@ -409,6 +433,25 @@ func (c *Client) processBlock(slot phase0.Slot, root phase0.Root, header *phase0
return
}
+ if loadPayload {
+ newPayload, _ := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
+ t1 := time.Now()
+ defer func() {
+ processingTimes[0] += time.Since(t1)
+ }()
+
+ return LoadExecutionPayload(c.getContext(), c, root)
+ })
+
+ if !isNew && newPayload {
+ // write payload to db
+ err = c.persistExecutionPayload(block)
+ if err != nil {
+ return
+ }
+ }
+ }
+
if slot >= finalizedSlot && isNew {
c.indexer.blockCache.addBlockToParentMap(block)
c.indexer.blockCache.addBlockToExecBlockMap(block)
@@ -532,7 +575,7 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error {
if parentBlock == nil {
var err error
- parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false)
+ parentBlock, isNewBlock, processingTimes, err = c.processBlock(parentSlot, parentRoot, parentHead, false, true)
if err != nil {
return fmt.Errorf("could not process block [0x%x]: %v", parentRoot, err)
}
@@ -559,3 +602,87 @@ func (c *Client) backfillParentBlocks(headBlock *Block) error {
}
return nil
}
+
+// processExecutionPayloadAvailableEvent processes an execution payload availability event from the event stream.
+func (c *Client) processExecutionPayloadAvailableEvent(executionPayloadEvent *v1.ExecutionPayloadAvailableEvent) error {
+ if c.client.GetStatus() != consensus.ClientStatusOnline && c.client.GetStatus() != consensus.ClientStatusOptimistic {
+ // client is not ready, skip
+ return nil
+ }
+
+ chainState := c.client.GetPool().GetChainState()
+ finalizedSlot := chainState.GetFinalizedSlot()
+
+ var block *Block
+
+ if executionPayloadEvent.Slot < finalizedSlot {
+ // block is in finalized epoch
+ // known block or a new orphaned block
+
+ // don't add to cache, process this block right after loading the details
+ block = newBlock(c.indexer.dynSsz, executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot, 0)
+
+ dbBlockHead := db.GetBlockHeadByRoot(c.getContext(), executionPayloadEvent.BlockRoot[:])
+ if dbBlockHead != nil {
+ block.isInFinalizedDb = true
+ block.parentRoot = (*phase0.Root)(dbBlockHead.ParentRoot)
+ }
+
+ } else {
+ block, _ = c.indexer.blockCache.createOrGetBlock(executionPayloadEvent.BlockRoot, executionPayloadEvent.Slot)
+ }
+
+ if block == nil {
+ c.logger.Warnf("execution payload event for unknown block %v:%v [0x%x]", chainState.EpochOfSlot(executionPayloadEvent.Slot), executionPayloadEvent.Slot, executionPayloadEvent.BlockRoot)
+ return nil
+ }
+
+ newPayload, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
+ return LoadExecutionPayload(c.getContext(), c, executionPayloadEvent.BlockRoot)
+ })
+ if err != nil {
+ return err
+ }
+
+ if newPayload {
+ // write payload to db
+ err = c.persistExecutionPayload(block)
+ if err != nil {
+ return err
+ }
+ }
+
+ return nil
+}
+
+func (c *Client) persistExecutionPayload(block *Block) error {
+ payloadVer, payloadSSZ, err := MarshalVersionedSignedExecutionPayloadEnvelopeSSZ(block.dynSsz, block.executionPayload, c.indexer.blockCompression)
+ if err != nil {
+ return fmt.Errorf("marshal execution payload ssz failed: %v", err)
+ }
+
+ return db.RunDBTransaction(func(tx *sqlx.Tx) error {
+ err := db.UpdateUnfinalizedBlockPayload(c.getContext(), tx, block.Root[:], payloadVer, payloadSSZ)
+ if err != nil {
+ return err
+ }
+
+ return nil
+ })
+}
+
+func (c *Client) processExecutionPayloadBidEvent(executionPayloadBidEvent *gloas.SignedExecutionPayloadBid) error {
+ bid := &dbtypes.BlockBid{
+ ParentRoot: executionPayloadBidEvent.Message.ParentBlockRoot[:],
+ ParentHash: executionPayloadBidEvent.Message.ParentBlockHash[:],
+ BlockHash: executionPayloadBidEvent.Message.BlockHash[:],
+ FeeRecipient: executionPayloadBidEvent.Message.FeeRecipient[:],
+ GasLimit: uint64(executionPayloadBidEvent.Message.GasLimit),
+ BuilderIndex: int64(executionPayloadBidEvent.Message.BuilderIndex),
+ Slot: uint64(executionPayloadBidEvent.Message.Slot),
+ Value: uint64(executionPayloadBidEvent.Message.Value),
+ ElPayment: uint64(executionPayloadBidEvent.Message.ExecutionPayment),
+ }
+ c.indexer.blockBidCache.AddBid(bid)
+ return nil
+}
diff --git a/indexer/beacon/duties/duties.go b/indexer/beacon/duties/duties.go
index 9a6ee6402..dd9647edc 100644
--- a/indexer/beacon/duties/duties.go
+++ b/indexer/beacon/duties/duties.go
@@ -357,3 +357,62 @@ func swapOrNot(buf []byte, byteV byte, i ActiveIndiceIndex, input []ActiveIndice
}
return byteV, source
}
+
+// GetPtcDuties returns the Payload Timeliness Committee (PTC) members for a given slot.
+// The PTC is selected from the concatenated attestation committees for the slot using
+// balance-weighted selection without shuffling.
+func GetPtcDuties(
+ spec *consensus.ChainSpec,
+ state *BeaconState,
+ attesterDuties [][]ActiveIndiceIndex,
+ slot phase0.Slot,
+) ([]ActiveIndiceIndex, error) {
+ if spec.PtcSize == 0 {
+ return nil, nil
+ }
+
+ epoch := phase0.Epoch(slot / phase0.Slot(spec.SlotsPerEpoch))
+
+ // Derive PTC seed: hash(get_seed(state, epoch, DOMAIN_PTC_ATTESTER) + uint_to_bytes(slot))
+ seedData := []byte{}
+ seedHash := GetSeed(spec, state, epoch, spec.DomainPtcAttester)
+ seedData = append(seedData, seedHash[:]...)
+ seedData = append(seedData, UintToBytes(uint64(slot))...)
+ seed := Hash(seedData)
+
+ // Concatenate all committee indices for the slot (in order)
+ indices := make([]ActiveIndiceIndex, 0)
+ for _, committee := range attesterDuties {
+ indices = append(indices, committee...)
+ }
+
+ if len(indices) == 0 {
+ return nil, errors.New("empty committee indices")
+ }
+
+ // Balance-weighted selection without shuffling (shuffle_indices=false)
+ // Uses same acceptance logic as GetProposerIndex (Electra-style 16-bit random values)
+ maxRandomValue := uint64(1<<16 - 1)
+ total := uint64(len(indices))
+ selected := make([]ActiveIndiceIndex, 0, spec.PtcSize)
+
+ for i := uint64(0); uint64(len(selected)) < spec.PtcSize; i++ {
+ // No shuffling - traverse indices in order
+ nextIndex := i % total
+ candidateIndex := indices[nextIndex]
+
+ // Balance-weighted acceptance check (same as proposer selection)
+ b := append(seed[:], UintToBytes(i/16)...)
+ offset := (i % 16) * 2
+ hash := Hash(b)
+ randomValue := BytesToUint(hash[offset : offset+2])
+
+ effectiveBal := uint64(state.GetEffectiveBalance(candidateIndex))
+
+ if effectiveBal*maxRandomValue >= spec.MaxEffectiveBalanceElectra*randomValue {
+ selected = append(selected, candidateIndex)
+ }
+ }
+
+ return selected, nil
+}
diff --git a/indexer/beacon/epochcache.go b/indexer/beacon/epochcache.go
index 2ed805ef6..a43b9a2f9 100644
--- a/indexer/beacon/epochcache.go
+++ b/indexer/beacon/epochcache.go
@@ -65,7 +65,7 @@ func newEpochCache(indexer *Indexer) *epochCache {
}
// createOrGetEpochStats gets an existing EpochStats entry for the given epoch and dependentRoot or creates a new instance if not found.
-func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root, createStateRequest bool) *EpochStats {
+func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root) *EpochStats {
cache.cacheMutex.Lock()
defer cache.cacheMutex.Unlock()
@@ -77,43 +77,40 @@ func (cache *epochCache) createOrGetEpochStats(epoch phase0.Epoch, dependentRoot
cache.statsMap[statsKey] = epochStats
}
- // get or create beacon state which the epoch status depends on (dependentRoot beacon state)
- epochState := cache.stateMap[dependentRoot]
- if epochState == nil && !epochStats.ready && createStateRequest {
- epochState = newEpochState(dependentRoot)
- cache.stateMap[dependentRoot] = epochState
-
- cache.indexer.logger.Infof("added epoch state request for epoch %v (%v) to queue", epoch, dependentRoot.String())
- }
-
- if epochState != nil {
- epochStats.dependentState = epochState
-
- if epochState.loadingStatus == 2 && !epochStats.ready {
- // dependent state is already loaded, process it
- go epochStats.processState(cache.indexer, nil)
- }
- }
-
return epochStats
}
-func (cache *epochCache) addEpochStateRequest(epochStats *EpochStats) {
+func (cache *epochCache) ensureEpochDependentState(epochStats *EpochStats, firstBlockRoot phase0.Root) {
+ cache.cacheMutex.Lock()
+ defer cache.cacheMutex.Unlock()
+
if epochStats.dependentState != nil {
return
}
- cache.cacheMutex.Lock()
- defer cache.cacheMutex.Unlock()
-
+ // get or create beacon state which the epoch status depends on (dependentRoot beacon state)
epochState := cache.stateMap[epochStats.dependentRoot]
- if epochState == nil {
- epochState = newEpochState(epochStats.dependentRoot)
+ if epochState == nil && !epochStats.ready {
+ stateRoot := epochStats.dependentRoot
+ chainState := cache.indexer.consensusPool.GetChainState()
+ if chainState.IsFuluEnabled(epochStats.epoch) {
+ stateRoot = firstBlockRoot
+ }
+
+ epochState = newEpochState(stateRoot)
cache.stateMap[epochStats.dependentRoot] = epochState
cache.indexer.logger.Infof("added epoch state request for epoch %v (%v) to queue", epochStats.epoch, epochStats.dependentRoot.String())
}
- epochStats.dependentState = epochState
+
+ if epochState != nil {
+ epochStats.dependentState = epochState
+
+ if epochState.loadingStatus == 2 && !epochStats.ready {
+ // dependent state is already loaded, process it
+ go epochStats.processState(cache.indexer, nil, 0)
+ }
+ }
}
func (cache *epochCache) getEpochStats(epoch phase0.Epoch, dependentRoot phase0.Root) *EpochStats {
@@ -251,7 +248,7 @@ func (cache *epochCache) removeUnreferencedEpochStates() uint64 {
defer cache.cacheMutex.Unlock()
removed := uint64(0)
- for _, state := range cache.stateMap {
+ for root, state := range cache.stateMap {
found := false
for _, stats := range cache.statsMap {
if stats.dependentState == state {
@@ -262,7 +259,7 @@ func (cache *epochCache) removeUnreferencedEpochStates() uint64 {
if !found {
state.dispose()
- delete(cache.stateMap, state.slotRoot)
+ delete(cache.stateMap, root)
removed++
}
}
@@ -468,11 +465,14 @@ func (cache *epochCache) loadEpochStats(epochStats *EpochStats) bool {
log.Infof("loading epoch %v stats (dep: %v, req: %v)", epochStats.epoch, epochStats.dependentRoot.String(), len(epochStats.requestedBy))
+ t1 := time.Now()
state, err := epochStats.dependentState.loadState(client.getContext(), client, cache)
if err != nil && epochStats.dependentState.loadingStatus == 0 {
client.logger.Warnf("failed loading epoch %v stats (dep: %v): %v", epochStats.epoch, epochStats.dependentRoot.String(), err)
}
+ loadDuration := time.Since(t1)
+
if epochStats.dependentState.loadingStatus != 2 {
// epoch state could not be loaded
epochStats.dependentState.retryCount++
@@ -497,7 +497,7 @@ func (cache *epochCache) loadEpochStats(epochStats *EpochStats) bool {
cache.cacheMutex.Unlock()
for _, stats := range dependentStats {
- go stats.processState(cache.indexer, validatorSet)
+ go stats.processState(cache.indexer, validatorSet, loadDuration)
}
return true
diff --git a/indexer/beacon/epochstate.go b/indexer/beacon/epochstate.go
index 4d696f480..6ab42317b 100644
--- a/indexer/beacon/epochstate.go
+++ b/indexer/beacon/epochstate.go
@@ -8,7 +8,9 @@ import (
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/electra"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
+ "github.com/ethpandaops/dora/clients/consensus"
)
// epochState represents a beacon state which a epoch status depends on.
@@ -25,6 +27,7 @@ type epochState struct {
stateSlot phase0.Slot
validatorBalances []phase0.Gwei
+ builderBalances []phase0.Gwei
randaoMixes []phase0.Root
depositIndex uint64
syncCommittee []phase0.ValidatorIndex
@@ -90,7 +93,6 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch
}
s.loadingStatus = 1
- client.logger.Debugf("loading state for slot %v", s.slotRoot.String())
ctx, cancel := context.WithTimeout(ctx, beaconStateRequestTimeout+(beaconHeaderRequestTimeout*2))
s.loadingCancel = cancel
@@ -104,29 +106,48 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch
}
}()
- var blockHeader *phase0.SignedBeaconBlockHeader
+ var beaconBlock *spec.VersionedSignedBeaconBlock
block := client.indexer.blockCache.getBlockByRoot(s.slotRoot)
if block != nil {
- blockHeader = block.AwaitHeader(ctx, beaconHeaderRequestTimeout)
+ beaconBlock = block.AwaitBlock(ctx, beaconHeaderRequestTimeout)
}
- if blockHeader == nil {
+ if beaconBlock == nil {
var err error
- blockHeader, err = LoadBeaconHeader(ctx, client, s.slotRoot)
+ beaconBlock, err = LoadBeaconBlock(ctx, client, s.slotRoot)
if err != nil {
return nil, err
}
}
- s.stateRoot = blockHeader.Message.StateRoot
+ if beaconBlock != nil {
+ slot, _ := beaconBlock.Slot()
+ client.logger.Infof("loading state for block root %v (slot %v)", s.slotRoot.String(), slot)
- resState, err := LoadBeaconState(ctx, client, blockHeader.Message.StateRoot)
+ var err error
+ s.stateRoot, err = beaconBlock.StateRoot()
+ if err != nil {
+ return nil, fmt.Errorf("error getting state root from beacon block %v: %v", s.slotRoot.String(), err)
+ }
+ }
+
+ resState, err := LoadBeaconState(ctx, client, s.stateRoot)
if err != nil {
return nil, err
}
- err = s.processState(resState, cache)
+ var executionPayload *gloas.SignedExecutionPayloadEnvelope
+ if beaconBlock != nil && beaconBlock.Version >= spec.DataVersionGloas {
+ if block != nil {
+ executionPayload = block.GetExecutionPayload(ctx)
+ }
+ if executionPayload == nil {
+ executionPayload, _ = LoadExecutionPayload(ctx, client, s.slotRoot)
+ }
+ }
+
+ err = s.processState(resState, beaconBlock, executionPayload, cache, client.indexer.consensusPool.GetChainState().GetSpecs())
if err != nil {
return nil, err
}
@@ -144,7 +165,7 @@ func (s *epochState) loadState(ctx context.Context, client *Client, cache *epoch
// processState processes the state and updates the epochState instance.
// the function extracts and unifies all relevant information from the beacon state, so the full beacon state can be dropped from memory afterwards.
-func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epochCache) error {
+func (s *epochState) processState(state *spec.VersionedBeaconState, beaconBlock *spec.VersionedSignedBeaconBlock, executionPayload *gloas.SignedExecutionPayloadEnvelope, cache *epochCache, specs *consensus.ChainSpec) error {
slot, err := state.Slot()
if err != nil {
return fmt.Errorf("error getting slot from state %v: %v", s.slotRoot.String(), err)
@@ -152,13 +173,37 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch
s.stateSlot = slot
+ dependentRoot := s.slotRoot
+ if state.Version >= spec.DataVersionFulu {
+ parentRoot, err := getLatestBlockHeaderParentRoot(state)
+ if err != nil {
+ return fmt.Errorf("error getting latest block header parent root from state %v: %v", s.slotRoot.String(), err)
+ }
+
+ dependentRoot = parentRoot
+ }
+
validatorList, err := state.Validators()
if err != nil {
return fmt.Errorf("error getting validators from state %v: %v", s.slotRoot.String(), err)
}
if cache != nil {
- cache.indexer.validatorCache.updateValidatorSet(slot, s.slotRoot, validatorList)
+ cache.indexer.validatorCache.updateValidatorSet(slot, dependentRoot, validatorList)
+ }
+
+ // Process builder set for Gloas
+ if state.Version >= spec.DataVersionGloas && state.Gloas != nil {
+ if cache != nil {
+ cache.indexer.builderCache.updateBuilderSet(slot, dependentRoot, state.Gloas.Builders)
+ }
+
+ // Extract builder balances
+ builderBalances := make([]phase0.Gwei, len(state.Gloas.Builders))
+ for i, builder := range state.Gloas.Builders {
+ builderBalances[i] = builder.Balance
+ }
+ s.builderBalances = builderBalances
}
validatorPubkeyMap := make(map[phase0.BLSPubKey]phase0.ValidatorIndex)
@@ -179,7 +224,33 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch
}
s.randaoMixes = randaoMixes
- s.depositIndex = getStateDepositIndex(state)
+
+ if state.Version >= spec.DataVersionFulu {
+ if state.Version >= spec.DataVersionGloas {
+ isPostPayload := isGloasPostPayloadState(state, slot)
+ if isPostPayload && executionPayload != nil &&
+ executionPayload.Message != nil &&
+ executionPayload.Message.ExecutionRequests != nil &&
+ len(executionPayload.Message.ExecutionRequests.Deposits) > 0 {
+ s.depositIndex = executionPayload.Message.ExecutionRequests.Deposits[0].Index
+ } else {
+ s.depositIndex = getStateDepositIndex(state)
+ }
+ } else if beaconBlock != nil {
+ blockRequests, err := beaconBlock.ExecutionRequests()
+ if err != nil {
+ return fmt.Errorf("error getting execution requests from block %v: %v",
+ s.slotRoot.String(), err)
+ }
+ if len(blockRequests.Deposits) > 0 {
+ s.depositIndex = blockRequests.Deposits[0].Index
+ } else {
+ s.depositIndex = getStateDepositIndex(state)
+ }
+ } else {
+ // no beacon block available for this state (beaconBlock may be nil, see caller guard)
+ // fall back to the state's own deposit index to avoid a nil dereference
+ s.depositIndex = getStateDepositIndex(state)
+ }
+ } else {
+ s.depositIndex = getStateDepositIndex(state)
+ }
if state.Version >= spec.DataVersionAltair {
currentSyncCommittee, err := getStateCurrentSyncCommittee(state)
@@ -232,3 +303,17 @@ func (s *epochState) processState(state *spec.VersionedBeaconState, cache *epoch
return nil
}
+
+// isGloasPostPayloadState checks whether the Gloas state is post-payload
+// (i.e. execution payload deposits have been applied) for the given slot.
+// It tests the per-slot bit in the state's ExecutionPayloadAvailability
+// bitfield, which is indexed slot-modulo-bitfield-length (ring buffer —
+// presumably SLOTS_PER_HISTORICAL_ROOT bits; confirm against the Gloas spec).
+func isGloasPostPayloadState(state *spec.VersionedBeaconState, slot phase0.Slot) bool {
+ if state.Gloas == nil {
+ // not a Gloas state – cannot be post-payload
+ return false
+ }
+ // bitfield length in bits (stored as a byte slice, 8 bits per byte)
+ bitfieldLen := uint64(len(state.Gloas.ExecutionPayloadAvailability)) * 8
+ if bitfieldLen == 0 {
+ // defensive: empty bitfield – treat as pre-payload
+ return false
+ }
+ // map the slot onto the ring buffer and test the corresponding bit
+ idx := uint64(slot) % bitfieldLen
+ return state.Gloas.ExecutionPayloadAvailability[idx/8]&(1<<(idx%8)) != 0
+}
diff --git a/indexer/beacon/epochstats.go b/indexer/beacon/epochstats.go
index 7f1fbc9cb..8f08fd53a 100644
--- a/indexer/beacon/epochstats.go
+++ b/indexer/beacon/epochstats.go
@@ -47,10 +47,11 @@ type EpochStatsValues struct {
RandaoMix phase0.Hash32
NextRandaoMix phase0.Hash32
ActiveIndices []phase0.ValidatorIndex
- EffectiveBalances []uint32
+ EffectiveBalances []uint32 // effective balance in full ETH of last epoch for pre-fulu stats, effective balance in full ETH of current epoch for fulu+ stats
ProposerDuties []phase0.ValidatorIndex
AttesterDuties [][][]duties.ActiveIndiceIndex
SyncCommitteeDuties []phase0.ValidatorIndex
+ PtcDuties [][]duties.ActiveIndiceIndex // [slot_index][ptc_member_index] - PTC duties for Gloas+ epochs
ActiveValidators uint64
TotalBalance phase0.Gwei
ActiveBalance phase0.Gwei
@@ -265,7 +266,9 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt
proposerDuties = append(proposerDuties, proposerIndex)
}
- values.ProposerDuties = proposerDuties
+ if len(values.ProposerDuties) == 0 {
+ values.ProposerDuties = proposerDuties
+ }
if beaconState.RandaoMix != nil {
values.RandaoMix = *beaconState.RandaoMix
}
@@ -273,6 +276,17 @@ func (es *EpochStats) parsePackedSSZ(chainState *consensus.ChainState, ssz []byt
// compute committees
attesterDuties, _ := duties.GetAttesterDuties(chainState.GetSpecs(), beaconState, es.epoch)
values.AttesterDuties = attesterDuties
+
+ // compute PTC duties (Gloas+ only)
+ if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil {
+ ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch)
+ for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ {
+ slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex)
+ ptc, _ := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot)
+ ptcDuties[slotIndex] = ptc
+ }
+ values.PtcDuties = ptcDuties
+ }
}
return values, nil
@@ -291,6 +305,7 @@ func (es *EpochStats) pruneValues() {
ProposerDuties: es.values.ProposerDuties,
AttesterDuties: nil, // prune
SyncCommitteeDuties: es.values.SyncCommitteeDuties,
+ PtcDuties: nil, // prune - only needed for recent epochs
ActiveValidators: es.values.ActiveValidators,
TotalBalance: es.values.TotalBalance,
ActiveBalance: es.values.ActiveBalance,
@@ -322,7 +337,7 @@ func (es *EpochStats) loadValuesFromDb(ctx context.Context, chainState *consensu
}
// processState processes the epoch state and computes proposer and attester duties.
-func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Validator) {
+func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Validator, loadDuration time.Duration) {
if es.dependentState == nil || es.dependentState.loadingStatus != 2 {
return
}
@@ -424,7 +439,7 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali
offset = slotsPerEpoch
}
- values.ProposerDuties = dependentState.proposerLookahead[offset : offset+slotsPerEpoch]
+ values.ProposerDuties = dependentState.proposerLookahead[offset:]
} else {
proposerDuties := []phase0.ValidatorIndex{}
for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ {
@@ -450,6 +465,20 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali
}
values.AttesterDuties = attesterDuties
+ // compute PTC duties (Gloas+ only)
+ if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil {
+ ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch)
+ for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ {
+ slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex)
+ ptc, ptcErr := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot)
+ if ptcErr != nil {
+ indexer.logger.Warnf("failed computing PTC duties for slot %v: %v", slot, ptcErr)
+ }
+ ptcDuties[slotIndex] = ptc
+ }
+ values.PtcDuties = ptcDuties
+ }
+
if beaconState.RandaoMix != nil {
values.RandaoMix = *beaconState.RandaoMix
values.NextRandaoMix = *beaconState.NextRandaoMix
@@ -475,12 +504,13 @@ func (es *EpochStats) processState(indexer *Indexer, validatorSet []*phase0.Vali
es.isInDb = true
indexer.logger.Infof(
- "processed epoch %v stats (root: %v / state: %v, validators: %v/%v, %v ms), %v bytes",
+ "processed epoch %v stats (root: %v / state: %v, validators: %v/%v, load: %v ms, process: %v ms), %v bytes",
es.epoch,
es.dependentRoot.String(),
dependentState.stateRoot.String(),
values.ActiveValidators,
len(validatorSet),
+ loadDuration.Milliseconds(),
time.Since(t1).Milliseconds(),
len(packedSsz),
)
@@ -555,14 +585,20 @@ func (es *EpochStats) precomputeFromParentState(indexer *Indexer, parentState *E
// compute proposers
proposerDuties := []phase0.ValidatorIndex{}
- for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ {
- proposer, err := duties.GetProposerIndex(chainState.GetSpecs(), beaconState, slot)
- proposerIndex := phase0.ValidatorIndex(math.MaxInt64)
- if err == nil {
- proposerIndex = values.ActiveIndices[proposer]
- }
- proposerDuties = append(proposerDuties, proposerIndex)
+ specs := chainState.GetSpecs()
+ if parentState.dependentState != nil && uint64(len(parentState.dependentState.proposerLookahead)) > specs.SlotsPerEpoch {
+ proposerDuties = parentState.dependentState.proposerLookahead[specs.SlotsPerEpoch:]
+ } else {
+ for slot := chainState.EpochToSlot(es.epoch); slot < chainState.EpochToSlot(es.epoch+1); slot++ {
+ proposer, err := duties.GetProposerIndex(chainState.GetSpecs(), beaconState, slot)
+ proposerIndex := phase0.ValidatorIndex(math.MaxInt64)
+ if err == nil {
+ proposerIndex = values.ActiveIndices[proposer]
+ }
+
+ proposerDuties = append(proposerDuties, proposerIndex)
+ }
}
values.ProposerDuties = proposerDuties
@@ -571,6 +607,17 @@ func (es *EpochStats) precomputeFromParentState(indexer *Indexer, parentState *E
attesterDuties, _ := duties.GetAttesterDuties(chainState.GetSpecs(), beaconState, es.epoch)
values.AttesterDuties = attesterDuties
+ // compute PTC duties (Gloas+ only)
+ if chainState.IsEip7732Enabled(es.epoch) && attesterDuties != nil {
+ ptcDuties := make([][]duties.ActiveIndiceIndex, chainState.GetSpecs().SlotsPerEpoch)
+ for slotIndex := uint64(0); slotIndex < chainState.GetSpecs().SlotsPerEpoch; slotIndex++ {
+ slot := chainState.EpochToSlot(es.epoch) + phase0.Slot(slotIndex)
+ ptc, _ := duties.GetPtcDuties(chainState.GetSpecs(), beaconState, attesterDuties[slotIndex], slot)
+ ptcDuties[slotIndex] = ptc
+ }
+ values.PtcDuties = ptcDuties
+ }
+
es.precalcValues = values
indexer.logger.Infof(
diff --git a/indexer/beacon/finalization.go b/indexer/beacon/finalization.go
index 2bb70ba17..194305fd6 100644
--- a/indexer/beacon/finalization.go
+++ b/indexer/beacon/finalization.go
@@ -9,6 +9,7 @@ import (
v1 "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
"github.com/ethpandaops/dora/db"
@@ -149,6 +150,15 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
if block.block == nil {
return true, fmt.Errorf("missing block body for canonical block %v (%v)", block.Slot, block.Root.String())
}
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ if _, err := block.EnsureExecutionPayload(func() (*gloas.SignedExecutionPayloadEnvelope, error) {
+ return LoadExecutionPayload(client.getContext(), client, block.Root)
+ }); err != nil {
+ client.logger.Warnf("failed loading finalized execution payload %v (%v): %v", block.Slot, block.Root.String(), err)
+ }
+ }
+
canonicalBlocks = append(canonicalBlocks, block)
} else {
if block.block == nil {
@@ -206,6 +216,10 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
}
}
+ if firstBlock.Slot == 0 {
+ dependentRoot = phase0.Root{}
+ }
+
if !isValid {
return false, fmt.Errorf("first canonical block %v (%v) is not the first block of epoch %v", firstBlock.Slot, firstBlock.Root.String(), epoch)
}
@@ -268,7 +282,7 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
// if the state is not yet loaded, we set it to high priority and wait for it to be loaded
if !epochStats.ready {
if epochStats.dependentState == nil {
- indexer.epochCache.addEpochStateRequest(epochStats)
+ indexer.epochCache.ensureEpochDependentState(epochStats, canonicalBlocks[0].Root)
}
if epochStats.dependentState != nil && epochStats.dependentState.loadingStatus != 2 && epochStats.dependentState.retryCount < 10 {
indexer.logger.Infof("epoch %d state (%v) not yet loaded, waiting for state to be loaded", epoch, dependentRoot.String())
@@ -314,6 +328,45 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
finalizedForkIds[block.GetForkId()] = true
}
+ // Determine payload status for canonical blocks (ePBS only)
+ // A payload is orphaned if the next canonical block doesn't build on it
+ // build a detached combined slice – a plain append(canonicalBlocks, ...) could
+ // alias canonicalBlocks' backing array if it has spare capacity
+ allCanonicalBlocks := make([]*Block, 0, len(canonicalBlocks)+len(nextEpochCanonicalBlocks))
+ allCanonicalBlocks = append(allCanonicalBlocks, canonicalBlocks...)
+ allCanonicalBlocks = append(allCanonicalBlocks, nextEpochCanonicalBlocks...)
+ if chainState.IsEip7732Enabled(epoch) {
+ for i, block := range canonicalBlocks {
+ blockIndex := block.GetBlockIndex(indexer.ctx)
+ if blockIndex == nil || blockIndex.ExecutionNumber == 0 {
+ indexer.logger.Debugf("payload status for slot %v: no execution payload", block.Slot)
+ continue // no execution payload
+ }
+
+ // Find the next canonical block
+ if i+1 >= len(allCanonicalBlocks) {
+ indexer.logger.Debugf("payload status for slot %v: no next canonical block", block.Slot)
+ continue
+ }
+
+ nextBlock := allCanonicalBlocks[i+1]
+ if nextBlock == nil {
+ indexer.logger.Debugf("payload status for slot %v: no next canonical block", block.Slot)
+ continue
+ }
+
+ nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx)
+ if nextBlockIndex == nil {
+ indexer.logger.Debugf("payload status for slot %v: no next canonical block index", block.Slot)
+ continue
+ }
+
+ // Check if next block builds on this block's payload
+ if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) {
+ indexer.logger.Debugf("payload status for slot %v: orphaned", block.Slot)
+ block.isPayloadOrphaned = true
+ } else {
+ indexer.logger.Debugf("payload status for slot %v: canonical", block.Slot)
+ }
+ }
+ }
+
dependentGroups := map[phase0.Root][]*Block{}
for _, block := range orphanedBlocks {
var dependentRoot phase0.Root
@@ -383,6 +436,36 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
}
}
+ // Determine payload status for orphaned chain blocks (ePBS only)
+ // A payload is orphaned if the next block in the chain doesn't build on it
+ // detached copy – avoid aliasing chain's backing array via append with spare capacity
+ allChainBlocks := make([]*Block, 0, len(chain)+len(nextBlocks))
+ allChainBlocks = append(allChainBlocks, chain...)
+ allChainBlocks = append(allChainBlocks, nextBlocks...)
+ for i, block := range chain {
+ if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ continue
+ }
+
+ blockIndex := block.GetBlockIndex(indexer.ctx)
+ if blockIndex == nil || blockIndex.ExecutionNumber == 0 {
+ continue // no execution payload
+ }
+
+ // Find the next block in this orphaned chain
+ var nextBlock *Block
+ if i+1 < len(allChainBlocks) {
+ nextBlock = allChainBlocks[i+1]
+ }
+
+ if nextBlock != nil {
+ nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx)
+ if nextBlockIndex != nil {
+ // Check if next block builds on this block's payload
+ if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) {
+ block.isPayloadOrphaned = true
+ }
+ }
+ }
+ }
+
// compute votes for canonical blocks
votingBlocks := make([]*Block, len(chain)+len(nextBlocks))
copy(votingBlocks, chain)
@@ -528,10 +611,9 @@ func (indexer *Indexer) finalizeEpoch(epoch phase0.Epoch, justifiedRoot phase0.R
t1 = time.Now()
- // update validator cache
- if len(canonicalBlocks) > 0 {
- indexer.validatorCache.setFinalizedEpoch(epoch, canonicalBlocks[len(canonicalBlocks)-1].Root)
- }
+ // update validator & builder cache with the epoch's dependent root (last block of parent epoch)
+ indexer.validatorCache.setFinalizedEpoch(epoch, dependentRoot)
+ indexer.builderCache.setFinalizedEpoch(epoch, dependentRoot)
// clean fork cache
indexer.forkCache.setFinalizedEpoch(deleteBeforeSlot, justifiedRoot)
diff --git a/indexer/beacon/indexer.go b/indexer/beacon/indexer.go
index b49bc187e..015551057 100644
--- a/indexer/beacon/indexer.go
+++ b/indexer/beacon/indexer.go
@@ -47,6 +47,8 @@ type Indexer struct {
pubkeyCache *pubkeyCache
validatorCache *validatorCache
validatorActivity *validatorActivityCache
+ blockBidCache *blockBidCache
+ builderCache *builderCache
// indexer state
clients []*Client
@@ -118,6 +120,8 @@ func NewIndexer(ctx context.Context, logger logrus.FieldLogger, consensusPool *c
indexer.pubkeyCache = newPubkeyCache(indexer, utils.Config.Indexer.PubkeyCachePath)
indexer.validatorCache = newValidatorCache(indexer)
indexer.validatorActivity = newValidatorActivityCache(indexer)
+ indexer.blockBidCache = newBlockBidCache(indexer)
+ indexer.builderCache = newBuilderCache(indexer)
indexer.dbWriter = newDbWriter(indexer)
badChainRoots := utils.Config.Indexer.BadChainRoots
@@ -275,6 +279,14 @@ func (indexer *Indexer) StartIndexer() {
indexer.logger.Infof("restored %v validators from DB (%.3f sec)", validatorCount, time.Since(t1).Seconds())
}
+ // restore finalized builder set from db
+ t1 = time.Now()
+ if builderCount, err := indexer.builderCache.prepopulateFromDB(); err != nil {
+ indexer.logger.WithError(err).Errorf("failed loading builder set")
+ } else if builderCount > 0 {
+ indexer.logger.Infof("restored %v builders from DB (%.3f sec)", builderCount, time.Since(t1).Seconds())
+ }
+
// restore unfinalized epoch stats from db
restoredEpochStats := 0
t1 = time.Now()
@@ -292,7 +304,7 @@ func (indexer *Indexer) StartIndexer() {
processingWaitGroup.Done()
}()
- epochStats := indexer.epochCache.createOrGetEpochStats(phase0.Epoch(dbDuty.Epoch), phase0.Root(dbDuty.DependentRoot), false)
+ epochStats := indexer.epochCache.createOrGetEpochStats(phase0.Epoch(dbDuty.Epoch), phase0.Root(dbDuty.DependentRoot))
pruneStats := dbDuty.Epoch < uint64(indexer.lastPrunedEpoch)
err := epochStats.restoreFromDb(dbDuty, chainState, !pruneStats)
@@ -340,6 +352,7 @@ func (indexer *Indexer) StartIndexer() {
// restore unfinalized blocks from db
restoredBlockCount := 0
restoredBodyCount := 0
+ restoredPayloadCount := 0
t1 = time.Now()
err = db.StreamUnfinalizedBlocks(indexer.ctx, uint64(finalizedSlot), func(dbBlock *dbtypes.UnfinalizedBlock) {
block, _ := indexer.blockCache.createOrGetBlock(phase0.Root(dbBlock.Root), phase0.Slot(dbBlock.Slot))
@@ -377,10 +390,23 @@ func (indexer *Indexer) StartIndexer() {
block.SetBlock(blockBody)
restoredBodyCount++
} else {
- block.setBlockIndex(blockBody)
+ block.setBlockIndex(blockBody, nil)
block.isInFinalizedDb = true
}
+ if len(dbBlock.PayloadSSZ) > 0 {
+ blockPayload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, dbBlock.PayloadVer, dbBlock.PayloadSSZ)
+ if err != nil {
+ indexer.logger.Warnf("could not restore unfinalized block payload %v [%x] from db: %v", dbBlock.Slot, dbBlock.Root, err)
+ } else if block.processingStatus == 0 {
+ block.SetExecutionPayload(blockPayload)
+ restoredPayloadCount++
+ } else {
+ block.setBlockIndex(blockBody, blockPayload)
+ block.hasExecutionPayload = true
+ }
+ }
+
indexer.blockCache.addBlockToExecBlockMap(block)
blockFork := indexer.forkCache.getForkById(block.forkId)
@@ -404,6 +430,9 @@ func (indexer *Indexer) StartIndexer() {
indexer.logger.Infof("restored %v unfinalized blocks from DB (%v with bodies, %.3f sec)", restoredBlockCount, restoredBodyCount, time.Since(t1).Seconds())
}
+ // restore block bids from db
+ indexer.blockBidCache.loadFromDB(chainState.CurrentSlot())
+
// start indexing for all clients
for _, client := range indexer.clients {
client.startIndexing()
@@ -424,7 +453,8 @@ func (indexer *Indexer) StartIndexer() {
if len(genesisBlock) == 0 {
indexer.logger.Warnf("genesis block not found in cache")
} else {
- indexer.epochCache.createOrGetEpochStats(0, genesisBlock[0].Root, true)
+ epochStats := indexer.epochCache.createOrGetEpochStats(0, genesisBlock[0].Root)
+ indexer.epochCache.ensureEpochDependentState(epochStats, genesisBlock[0].Root)
}
}
@@ -438,6 +468,11 @@ func (indexer *Indexer) StartIndexer() {
}
func (indexer *Indexer) StopIndexer() {
+ // flush block bids to db before shutdown
+ if err := indexer.blockBidCache.flushAll(); err != nil {
+ indexer.logger.WithError(err).Errorf("error flushing block bids on shutdown")
+ }
+
indexer.pubkeyCache.Close()
}
@@ -489,6 +524,11 @@ func (indexer *Indexer) runIndexerLoop() {
slotIndex := chainState.SlotToSlotIndex(phase0.Slot(slotEvent.Number()))
slotProgress := uint8(100 / chainState.GetSpecs().SlotsPerEpoch * uint64(slotIndex))
+ // flush old block bids if needed
+ if err := indexer.blockBidCache.checkAndFlush(); err != nil {
+ indexer.logger.WithError(err).Errorf("failed flushing block bids")
+ }
+
// precalc next canonical duties on epoch start
if epoch >= indexer.lastPrecalcRunEpoch {
err := indexer.precalcNextEpochStats(epoch)
diff --git a/indexer/beacon/indexer_getter.go b/indexer/beacon/indexer_getter.go
index 46817c0b5..2d7988408 100644
--- a/indexer/beacon/indexer_getter.go
+++ b/indexer/beacon/indexer_getter.go
@@ -9,9 +9,11 @@ import (
v1 "github.com/attestantio/go-eth2-client/api/v1"
"github.com/attestantio/go-eth2-client/spec/electra"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/clients/consensus"
"github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
dynssz "github.com/pk910/dynamic-ssz"
)
@@ -222,6 +224,14 @@ func (indexer *Indexer) GetOrphanedBlockByRoot(blockRoot phase0.Root) (*Block, e
block.SetHeader(header)
block.SetBlock(blockBody)
+ if len(orphanedBlock.PayloadSSZ) > 0 {
+ payload, err := UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(indexer.dynSsz, orphanedBlock.PayloadVer, orphanedBlock.PayloadSSZ)
+ if err != nil {
+ return nil, fmt.Errorf("could not restore orphaned block payload %v [%x] from db: %v", header.Message.Slot, orphanedBlock.Root, err)
+ }
+ block.SetExecutionPayload(payload)
+ }
+
return block, nil
}
@@ -505,3 +515,71 @@ func (indexer *Indexer) GetFullValidatorByIndex(validatorIndex phase0.ValidatorI
return validatorData
}
+
+// GetBlockBids returns the execution payload bids for a given parent block root.
+// It first checks the in-memory cache; the database is only queried when the
+// cache holds no bids for the root (cached and persisted bids are not merged).
+func (indexer *Indexer) GetBlockBids(parentBlockRoot phase0.Root) []*dbtypes.BlockBid {
+ // First check the in-memory cache
+ bids := indexer.blockBidCache.GetBidsForBlockRoot(parentBlockRoot)
+ if len(bids) > 0 {
+ return bids
+ }
+
+ // Fall back to database
+ return db.GetBidsForBlockRoot(indexer.ctx, parentBlockRoot[:])
+}
+
+// StreamActiveBuilderDataForRoot streams the available builder set data for a given blockRoot.
+// activeOnly presumably restricts the stream to active builders and epoch pins the
+// view to a specific epoch (semantics live in builderCache – confirm there).
+// Delegates directly to the builder cache; returns its error unchanged.
+func (indexer *Indexer) StreamActiveBuilderDataForRoot(blockRoot phase0.Root, activeOnly bool, epoch *phase0.Epoch, cb BuilderSetStreamer) error {
+ return indexer.builderCache.streamBuilderSetForRoot(blockRoot, activeOnly, epoch, cb)
+}
+
+// GetBuilderSetSize returns the size of the builder set cache
+// (delegates to builderCache).
+func (indexer *Indexer) GetBuilderSetSize() uint64 {
+ return indexer.builderCache.getBuilderSetSize()
+}
+
+// GetBuilderByIndex returns the builder by index for the canonical head.
+// overrideForkId optionally selects an alternate fork view; may return nil
+// for unknown indices (exact behavior defined by builderCache).
+func (indexer *Indexer) GetBuilderByIndex(index gloas.BuilderIndex, overrideForkId *ForkKey) *gloas.Builder {
+ return indexer.builderCache.getBuilderByIndex(index, overrideForkId)
+}
+
+// GetRecentBuilderBalances returns the most recent builder balances for the given fork.
+// It walks backwards from the canonical head through epoch dependent blocks until it
+// finds an epoch state that has finished loading (loadingStatus == 2), giving up
+// after walking more than 2 epochs back from the head. Returns nil when the head
+// is unknown or no loaded dependent state is found in that window.
+func (indexer *Indexer) GetRecentBuilderBalances(overrideForkId *ForkKey) []phase0.Gwei {
+ chainState := indexer.consensusPool.GetChainState()
+
+ canonicalHead := indexer.GetCanonicalHead(overrideForkId)
+ if canonicalHead == nil {
+ return nil
+ }
+
+ headEpoch := chainState.EpochOfSlot(canonicalHead.Slot)
+
+ var epochStats *EpochStats
+ for {
+ cEpoch := chainState.EpochOfSlot(canonicalHead.Slot)
+ // don't walk back more than 2 epochs from the head
+ if headEpoch-cEpoch > 2 {
+ return nil
+ }
+
+ // step to the epoch's dependent block (last block of the parent epoch)
+ dependentBlock := indexer.blockCache.getDependentBlock(chainState, canonicalHead, nil)
+ if dependentBlock == nil {
+ return nil
+ }
+ canonicalHead = dependentBlock
+
+ stats := indexer.epochCache.getEpochStats(cEpoch, dependentBlock.Root)
+ // loadingStatus == 2 means the dependent state finished loading; for epoch 0
+ // there is no earlier state, so accept whatever we have
+ if cEpoch > 0 && (stats == nil || stats.dependentState == nil || stats.dependentState.loadingStatus != 2) {
+ continue // retry previous state
+ }
+
+ epochStats = stats
+ break
+ }
+
+ if epochStats == nil || epochStats.dependentState == nil {
+ return nil
+ }
+
+ return epochStats.dependentState.builderBalances
+}
diff --git a/indexer/beacon/precalc.go b/indexer/beacon/precalc.go
index 078370eda..ada710034 100644
--- a/indexer/beacon/precalc.go
+++ b/indexer/beacon/precalc.go
@@ -32,7 +32,7 @@ func (indexer *Indexer) precalcNextEpochStats(epoch phase0.Epoch) error {
}
// precompute epoch stats for the epoch if we have the parent epoch stats ready
- epochStats := indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root, false)
+ epochStats := indexer.epochCache.createOrGetEpochStats(epoch, dependentBlock.Root)
if !epochStats.ready {
var parentDependentBlock *Block
if chainState.EpochOfSlot(dependentBlock.Slot) == epoch-1 {
diff --git a/indexer/beacon/pruning.go b/indexer/beacon/pruning.go
index ff64840ad..92a02a010 100644
--- a/indexer/beacon/pruning.go
+++ b/indexer/beacon/pruning.go
@@ -117,7 +117,7 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui
// if the state is not yet loaded, we set it to high priority and wait for it to be loaded
if epochStats != nil && !epochStats.ready {
if epochStats.dependentState == nil {
- indexer.epochCache.addEpochStateRequest(epochStats)
+ indexer.epochCache.ensureEpochDependentState(epochStats, blocks[0].Root)
}
if epochStats.dependentState != nil && epochStats.dependentState.loadingStatus != 2 && epochStats.dependentState.retryCount < 10 {
indexer.logger.Infof("epoch %d state (%v) not yet loaded, waiting for state to be loaded", pruneEpoch, dependentRoot.String())
@@ -169,6 +169,36 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui
}
}
+ // Determine payload status for chain blocks (ePBS only)
+ // A payload is orphaned if the next block in the chain doesn't build on it
+ // detached copy – avoid aliasing chain's backing array via append with spare capacity
+ allChainBlocks := make([]*Block, 0, len(chain)+len(nextBlocks))
+ allChainBlocks = append(allChainBlocks, chain...)
+ allChainBlocks = append(allChainBlocks, nextBlocks...)
+ for i, block := range chain {
+ if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ continue
+ }
+
+ blockIndex := block.GetBlockIndex(indexer.ctx)
+ if blockIndex == nil || blockIndex.ExecutionNumber == 0 {
+ continue // no execution payload
+ }
+
+ // Find the next block in this chain
+ var nextBlock *Block
+ if i+1 < len(allChainBlocks) {
+ nextBlock = allChainBlocks[i+1]
+ }
+
+ if nextBlock != nil {
+ nextBlockIndex := nextBlock.GetBlockIndex(indexer.ctx)
+ if nextBlockIndex != nil {
+ // Check if next block builds on this block's payload
+ if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) {
+ block.isPayloadOrphaned = true
+ }
+ }
+ }
+ }
+
// compute votes for canonical blocks
votingBlocks := make([]*Block, len(chain)+len(nextBlocks))
copy(votingBlocks, chain)
@@ -257,8 +287,9 @@ func (indexer *Indexer) processEpochPruning(pruneEpoch phase0.Epoch) (uint64, ui
for _, block := range pruningBlocks {
block.isInFinalizedDb = true
block.processingStatus = dbtypes.UnfinalizedBlockStatusPruned
- block.setBlockIndex(block.block)
+ block.setBlockIndex(block.block, block.executionPayload)
block.block = nil
+ block.executionPayload = nil
block.blockResults = nil
}
diff --git a/indexer/beacon/requests.go b/indexer/beacon/requests.go
index 604033f8f..cdbfe587d 100644
--- a/indexer/beacon/requests.go
+++ b/indexer/beacon/requests.go
@@ -6,6 +6,7 @@ import (
"time"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
)
@@ -18,6 +19,9 @@ const beaconBodyRequestTimeout time.Duration = 30 * time.Second
// BeaconStateRequestTimeout is the timeout duration for beacon state requests.
const beaconStateRequestTimeout time.Duration = 600 * time.Second
+// ExecutionPayloadRequestTimeout is the timeout duration for execution payload requests.
+const executionPayloadRequestTimeout time.Duration = 30 * time.Second
+
const beaconStateRetryCount = 10
const beaconStateHighPriorityRetryCount uint64 = 3
@@ -69,10 +73,29 @@ func LoadBeaconState(ctx context.Context, client *Client, root phase0.Root) (*sp
ctx, cancel := context.WithTimeout(ctx, beaconStateRequestTimeout)
defer cancel()
- resState, err := client.client.GetRPCClient().GetState(ctx, fmt.Sprintf("0x%x", root[:]))
+ stateRef := fmt.Sprintf("0x%x", root[:])
+ nullRoot := phase0.Root{}
+ if root == nullRoot {
+ stateRef = "genesis"
+ }
+
+ resState, err := client.client.GetRPCClient().GetState(ctx, stateRef)
if err != nil {
return nil, err
}
return resState, nil
}
+
+// LoadExecutionPayload loads the signed execution payload envelope for the
+// given beacon block root via the client's RPC endpoint (ePBS / Gloas).
+// The request is bounded by executionPayloadRequestTimeout.
+func LoadExecutionPayload(ctx context.Context, client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ ctx, cancel := context.WithTimeout(ctx, executionPayloadRequestTimeout)
+ defer cancel()
+
+ payload, err := client.client.GetRPCClient().GetExecutionPayloadByBlockroot(ctx, root)
+ if err != nil {
+ return nil, err
+ }
+
+ return payload, nil
+}
diff --git a/indexer/beacon/synchronizer.go b/indexer/beacon/synchronizer.go
index 436ce6fe3..76ca8f687 100644
--- a/indexer/beacon/synchronizer.go
+++ b/indexer/beacon/synchronizer.go
@@ -10,6 +10,7 @@ import (
"time"
"github.com/attestantio/go-eth2-client/spec"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
"github.com/ethpandaops/dora/clients/consensus"
@@ -264,11 +265,17 @@ func (s *synchronizer) loadBlockHeader(client *Client, slot phase0.Slot) (*phase
}
func (s *synchronizer) loadBlockBody(client *Client, root phase0.Root) (*spec.VersionedSignedBeaconBlock, error) {
- ctx, cancel := context.WithTimeout(s.syncCtx, beaconHeaderRequestTimeout)
+ ctx, cancel := context.WithTimeout(s.syncCtx, beaconBodyRequestTimeout)
defer cancel()
return LoadBeaconBlock(ctx, client, root)
}
+// loadBlockPayload fetches the execution payload envelope for a block root
+// using the synchronizer's context, bounded by executionPayloadRequestTimeout.
+func (s *synchronizer) loadBlockPayload(client *Client, root phase0.Root) (*gloas.SignedExecutionPayloadEnvelope, error) {
+ ctx, cancel := context.WithTimeout(s.syncCtx, executionPayloadRequestTimeout)
+ defer cancel()
+ return LoadExecutionPayload(ctx, client, root)
+}
+
func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry bool) (bool, error) {
if !utils.Config.Indexer.ResyncForceUpdate && db.IsEpochSynchronized(s.syncCtx, uint64(syncEpoch)) {
return true, nil
@@ -327,6 +334,17 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry
block.SetBlock(blockBody)
}
+ if slot > 0 && chainState.IsEip7732Enabled(chainState.EpochOfSlot(slot)) {
+ blockPayload, err := s.loadBlockPayload(client, phase0.Root(blockRoot))
+ if err != nil && !lastTry {
+ return false, fmt.Errorf("error fetching slot %v execution payload: %v", slot, err)
+ }
+
+ if blockPayload != nil {
+ block.SetExecutionPayload(blockPayload)
+ }
+ }
+
s.cachedBlocks[slot] = block
}
@@ -365,7 +383,9 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry
}
epochState := newEpochState(dependentRoot)
+ t1 := time.Now()
state, err := epochState.loadState(s.syncCtx, client, nil)
+ loadDuration := time.Since(t1)
if (err != nil || epochState.loadingStatus != 2) && !lastTry {
return false, fmt.Errorf("error fetching epoch %v state: %v", syncEpoch, err)
}
@@ -385,7 +405,7 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry
if epochState != nil && epochState.loadingStatus == 2 {
epochStats = newEpochStats(syncEpoch, dependentRoot)
epochStats.dependentState = epochState
- epochStats.processState(s.indexer, validatorSet)
+ epochStats.processState(s.indexer, validatorSet, loadDuration)
epochStatsValues = epochStats.GetValues(false)
}
@@ -410,6 +430,36 @@ func (s *synchronizer) syncEpoch(syncEpoch phase0.Epoch, client *Client, lastTry
sim.validatorSet = validatorSet
}
+ // Determine payload status for canonical blocks (ePBS only)
+ // A payload is orphaned if the next canonical block doesn't build on it
+ allCanonicalBlocks := append(canonicalBlocks, nextEpochCanonicalBlocks...)
+ for i, block := range canonicalBlocks {
+ if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ continue
+ }
+
+ blockIndex := block.GetBlockIndex(s.indexer.ctx)
+ if blockIndex == nil || blockIndex.ExecutionNumber == 0 {
+ continue // no execution payload
+ }
+
+ // Find the next canonical block
+ var nextBlock *Block
+ if i+1 < len(allCanonicalBlocks) {
+ nextBlock = allCanonicalBlocks[i+1]
+ }
+
+ if nextBlock != nil {
+ nextBlockIndex := nextBlock.GetBlockIndex(s.indexer.ctx)
+ if nextBlockIndex != nil {
+ // Check if next block builds on this block's payload
+ if !bytes.Equal(nextBlockIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) {
+ block.isPayloadOrphaned = true
+ }
+ }
+ }
+ }
+
// save blocks
err = db.RunDBTransaction(func(tx *sqlx.Tx) error {
err = s.indexer.dbWriter.persistEpochData(tx, syncEpoch, canonicalBlocks, epochStats, epochVotes, sim)
diff --git a/indexer/beacon/validatorcache.go b/indexer/beacon/validatorcache.go
index 54e3b86cf..43f593136 100644
--- a/indexer/beacon/validatorcache.go
+++ b/indexer/beacon/validatorcache.go
@@ -345,8 +345,9 @@ func (cache *validatorCache) getValidatorFlags(validatorIndex phase0.ValidatorIn
return cache.valsetCache[validatorIndex].statusFlags
}
-// setFinalizedEpoch sets the last finalized epoch and updates the validator set
-func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDependentRoot phase0.Root) {
+// setFinalizedEpoch sets the last finalized epoch and updates the validator set.
+// dependentRoot is the dependent root of the finalized epoch (last block of the parent epoch).
+func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, dependentRoot phase0.Root) {
cache.cacheMutex.Lock()
defer cache.cacheMutex.Unlock()
@@ -361,7 +362,7 @@ func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDepe
// Find the finalized validator state
for _, diff := range cachedValidator.validatorDiffs {
- if diff.dependentRoot == nextEpochDependentRoot {
+ if diff.dependentRoot == dependentRoot {
cachedValidator.finalValidator = diff.validator
cachedValidator.finalChecksum = calculateValidatorChecksum(diff.validator)
cachedValidator.statusFlags = GetValidatorStatusFlags(diff.validator)
@@ -398,6 +399,9 @@ func (cache *validatorCache) setFinalizedEpoch(epoch phase0.Epoch, nextEpochDepe
cache.lastFinalizedActiveCount = activeCount
+ cache.indexer.logger.Infof("finalized validator set for epoch %v (dependent root: %v, updated: %v, total: %v)",
+ epoch, dependentRoot.String(), updatedCount, len(cache.valsetCache))
+
if updatedCount > 0 {
select {
case cache.triggerDbUpdate <- true:
diff --git a/indexer/beacon/writedb.go b/indexer/beacon/writedb.go
index 8f7f5f581..a62932d10 100644
--- a/indexer/beacon/writedb.go
+++ b/indexer/beacon/writedb.go
@@ -79,6 +79,11 @@ func (dbw *dbWriter) persistBlockData(tx *sqlx.Tx, block *Block, epochStats *Epo
dbBlock.Status = dbtypes.Orphaned
}
+ // Apply payload orphaned status from block flag (set during finalization/sync)
+ if block.isPayloadOrphaned {
+ dbBlock.PayloadStatus = dbtypes.PayloadStatusOrphaned
+ }
+
err := db.InsertSlot(dbw.indexer.ctx, tx, dbBlock)
if err != nil {
return nil, fmt.Errorf("error inserting slot: %v", err)
@@ -245,6 +250,8 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
epochStatsValues = epochStats.GetValues(true)
}
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
graffiti, _ := blockBody.Graffiti()
attestations, _ := blockBody.Attestations()
deposits, _ := blockBody.Deposits()
@@ -253,28 +260,56 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
proposerSlashings, _ := blockBody.ProposerSlashings()
blsToExecChanges, _ := blockBody.BLSToExecutionChanges()
syncAggregate, _ := blockBody.SyncAggregate()
+ executionBlockHash, _ := blockBody.ExecutionBlockHash()
blobKzgCommitments, _ := blockBody.BlobKZGCommitments()
- var executionExtraData []byte
var executionBlockNumber uint64
- var executionBlockHash phase0.Hash32
+ var executionBlockParentHash []byte
+ var executionExtraData []byte
var executionTransactions []bellatrix.Transaction
var executionWithdrawals []*capella.Withdrawal
-
- executionPayload, _ := blockBody.ExecutionPayload()
- if executionPayload != nil {
- executionExtraData, _ = executionPayload.ExtraData()
- executionBlockHash, _ = executionPayload.BlockHash()
- executionBlockNumber, _ = executionPayload.BlockNumber()
- executionTransactions, _ = executionPayload.Transactions()
- executionWithdrawals, _ = executionPayload.Withdrawals()
- }
-
var depositRequests []*electra.DepositRequest
+ var payloadStatus dbtypes.PayloadStatus
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ blockPayload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if blockPayload != nil {
+ executionBlockNumber = blockPayload.Message.Payload.BlockNumber
+ executionBlockParentHash = blockPayload.Message.Payload.ParentHash[:]
+ executionExtraData = blockPayload.Message.Payload.ExtraData
+ executionTransactions = blockPayload.Message.Payload.Transactions
+ executionWithdrawals = blockPayload.Message.Payload.Withdrawals
+ depositRequests = blockPayload.Message.ExecutionRequests.Deposits
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ } else {
+ payloadStatus = dbtypes.PayloadStatusMissing
+ }
+ } else {
+ payloadStatus = dbtypes.PayloadStatusCanonical
+ executionBlockNumber, _ = blockBody.ExecutionBlockNumber()
+ executionPayload, _ := blockBody.ExecutionPayload()
+ if executionPayload != nil {
+ executionExtraData, _ = executionPayload.ExtraData()
+ executionTransactions, _ = executionPayload.Transactions()
+ executionWithdrawals, _ = executionPayload.Withdrawals()
+ if parentHash, err := executionPayload.ParentHash(); err == nil {
+ executionBlockParentHash = parentHash[:]
+ }
+ }
+ executionRequests, _ := blockBody.ExecutionRequests()
+ if executionRequests != nil {
+ depositRequests = executionRequests.Deposits
+ }
+ }
- executionRequests, _ := blockBody.ExecutionRequests()
- if executionRequests != nil {
- depositRequests = executionRequests.Deposits
+ // Get builder index from block, default to -1 (self-built/MaxUint64)
+ var builderIndexInt64 int64 = -1
+ if blockIndex := block.GetBlockIndex(dbw.indexer.ctx); blockIndex != nil {
+ if blockIndex.BuilderIndex == math.MaxUint64 {
+ builderIndexInt64 = -1
+ } else {
+ builderIndexInt64 = int64(blockIndex.BuilderIndex)
+ }
}
dbBlock := dbtypes.Slot{
@@ -295,7 +330,9 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
BLSChangeCount: uint64(len(blsToExecChanges)),
BlobCount: uint64(len(blobKzgCommitments)),
RecvDelay: block.recvDelay,
+ PayloadStatus: payloadStatus,
BlockUid: block.BlockUID,
+ BuilderIndex: builderIndexInt64,
}
blockSize, err := getBlockSize(block.dynSsz, blockBody)
@@ -331,6 +368,7 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
dbBlock.EthTransactionCount = uint64(len(executionTransactions))
dbBlock.EthBlockNumber = &executionBlockNumber
dbBlock.EthBlockHash = executionBlockHash[:]
+ dbBlock.EthBlockParentHash = executionBlockParentHash
dbBlock.EthBlockExtra = executionExtraData
dbBlock.EthBlockExtraText = utils.GraffitiToString(executionExtraData[:])
dbBlock.WithdrawCount = uint64(len(executionWithdrawals))
@@ -409,6 +447,15 @@ func (dbw *dbWriter) buildDbBlock(block *Block, epochStats *EpochStats, override
dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas)
dbBlock.EthFeeRecipient = payload.FeeRecipient[:]
}
+ case spec.DataVersionGloas:
+ blockPayload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if blockPayload != nil {
+ payload := blockPayload.Message.Payload
+ dbBlock.EthGasUsed = payload.GasUsed
+ dbBlock.EthGasLimit = payload.GasLimit
+ dbBlock.EthBaseFee = utils.GetBaseFeeAsUint64(payload.BaseFeePerGas)
+ dbBlock.EthFeeRecipient = payload.FeeRecipient[:]
+ }
}
}
@@ -482,15 +529,27 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat
proposerSlashings, _ := blockBody.ProposerSlashings()
blsToExecChanges, _ := blockBody.BLSToExecutionChanges()
syncAggregate, _ := blockBody.SyncAggregate()
- executionTransactions, _ := blockBody.ExecutionTransactions()
- executionWithdrawals, _ := blockBody.Withdrawals()
blobKzgCommitments, _ := blockBody.BlobKZGCommitments()
+ var executionTransactions []bellatrix.Transaction
+ var executionWithdrawals []*capella.Withdrawal
var depositRequests []*electra.DepositRequest
- executionRequests, _ := blockBody.ExecutionRequests()
- if executionRequests != nil {
- depositRequests = executionRequests.Deposits
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ blockPayload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if blockPayload != nil {
+ dbEpoch.PayloadCount++
+ executionTransactions = blockPayload.Message.Payload.Transactions
+ executionWithdrawals = blockPayload.Message.Payload.Withdrawals
+ depositRequests = blockPayload.Message.ExecutionRequests.Deposits
+ }
+ } else {
+ executionTransactions, _ = blockBody.ExecutionTransactions()
+ executionWithdrawals, _ = blockBody.Withdrawals()
+ executionRequests, _ := blockBody.ExecutionRequests()
+ if executionRequests != nil {
+ depositRequests = executionRequests.Deposits
+ }
}
dbEpoch.AttestationCount += uint64(len(attestations))
@@ -564,6 +623,13 @@ func (dbw *dbWriter) buildDbEpoch(epoch phase0.Epoch, blocks []*Block, epochStat
dbEpoch.EthGasUsed += payload.GasUsed
dbEpoch.EthGasLimit += payload.GasLimit
}
+ case spec.DataVersionGloas:
+ blockPayload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if blockPayload != nil {
+ payload := blockPayload.Message.Payload
+ dbEpoch.EthGasUsed += payload.GasUsed
+ dbEpoch.EthGasLimit += payload.GasLimit
+ }
}
}
}
@@ -652,14 +718,26 @@ func (dbw *dbWriter) persistBlockDepositRequests(tx *sqlx.Tx, block *Block, orph
}
func (dbw *dbWriter) buildDbDepositRequests(block *Block, orphaned bool, overrideForkId *ForkKey) []*dbtypes.Deposit {
- blockBody := block.GetBlock(dbw.indexer.ctx)
- if blockBody == nil {
- return nil
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
+ var requests *electra.ExecutionRequests
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ payload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if payload != nil {
+ requests = payload.Message.ExecutionRequests
+ }
+ } else {
+ blockBody := block.GetBlock(dbw.indexer.ctx)
+ if blockBody == nil {
+ return nil
+ }
+
+ requests, _ = blockBody.ExecutionRequests()
}
- requests, err := blockBody.ExecutionRequests()
- if err != nil {
- return nil
+ if requests == nil {
+ return []*dbtypes.Deposit{}
}
deposits := requests.Deposits
@@ -839,14 +917,29 @@ func (dbw *dbWriter) persistBlockConsolidationRequests(tx *sqlx.Tx, block *Block
}
func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.ConsolidationRequest {
- blockBody := block.GetBlock(dbw.indexer.ctx)
- if blockBody == nil {
- return nil
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
+ var requests *electra.ExecutionRequests
+ var blockNumber uint64
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ payload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if payload != nil {
+ requests = payload.Message.ExecutionRequests
+ blockNumber = payload.Message.Payload.BlockNumber
+ }
+ } else {
+ blockBody := block.GetBlock(dbw.indexer.ctx)
+ if blockBody == nil {
+ return nil
+ }
+
+ requests, _ = blockBody.ExecutionRequests()
+ blockNumber, _ = blockBody.ExecutionBlockNumber()
}
- requests, err := blockBody.ExecutionRequests()
- if err != nil {
- return nil
+ if requests == nil {
+ return []*dbtypes.ConsolidationRequest{}
}
if sim == nil {
@@ -868,8 +961,6 @@ func (dbw *dbWriter) buildDbConsolidationRequests(block *Block, orphaned bool, o
blockResults = sim.replayBlockResults(block)
}
- blockNumber, _ := blockBody.ExecutionBlockNumber()
-
dbConsolidations := make([]*dbtypes.ConsolidationRequest, len(consolidations))
for idx, consolidation := range consolidations {
dbConsolidation := &dbtypes.ConsolidationRequest{
@@ -920,14 +1011,29 @@ func (dbw *dbWriter) persistBlockWithdrawalRequests(tx *sqlx.Tx, block *Block, o
}
func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, overrideForkId *ForkKey, sim *stateSimulator) []*dbtypes.WithdrawalRequest {
- blockBody := block.GetBlock(dbw.indexer.ctx)
- if blockBody == nil {
- return nil
+ chainState := dbw.indexer.consensusPool.GetChainState()
+
+ var requests *electra.ExecutionRequests
+ var blockNumber uint64
+
+ if chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ payload := block.GetExecutionPayload(dbw.indexer.ctx)
+ if payload != nil {
+ requests = payload.Message.ExecutionRequests
+ blockNumber = payload.Message.Payload.BlockNumber
+ }
+ } else {
+ blockBody := block.GetBlock(dbw.indexer.ctx)
+ if blockBody == nil {
+ return nil
+ }
+
+ requests, _ = blockBody.ExecutionRequests()
+ blockNumber, _ = blockBody.ExecutionBlockNumber()
}
- requests, err := blockBody.ExecutionRequests()
- if err != nil {
- return nil
+ if requests == nil {
+ return []*dbtypes.WithdrawalRequest{}
}
if sim == nil {
@@ -949,8 +1055,6 @@ func (dbw *dbWriter) buildDbWithdrawalRequests(block *Block, orphaned bool, over
blockResults = sim.replayBlockResults(block)
}
- blockNumber, _ := blockBody.ExecutionBlockNumber()
-
dbWithdrawalRequests := make([]*dbtypes.WithdrawalRequest, len(withdrawalRequests))
for idx, withdrawalRequest := range withdrawalRequests {
dbWithdrawalRequest := &dbtypes.WithdrawalRequest{
diff --git a/services/chainservice.go b/services/chainservice.go
index 0c8c99e7b..7f666078f 100644
--- a/services/chainservice.go
+++ b/services/chainservice.go
@@ -265,6 +265,13 @@ func (cs *ChainService) StartService() error {
return fmt.Errorf("failed initializing s3 blockdb: %v", err)
}
cs.logger.Infof("S3 blockdb initialized at %v", utils.Config.BlockDb.S3.Bucket)
+ case "tiered":
+ err := blockdb.InitWithTiered(utils.Config.BlockDb.Tiered, cs.logger)
+ if err != nil {
+ return fmt.Errorf("failed initializing tiered blockdb: %v", err)
+ }
+ cs.logger.Infof("Tiered blockdb initialized (Pebble cache: %v, S3: %v)",
+ utils.Config.BlockDb.Tiered.Pebble.Path, utils.Config.BlockDb.Tiered.S3.Bucket)
default:
cs.logger.Infof("Blockdb disabled")
}
diff --git a/services/chainservice_blocks.go b/services/chainservice_blocks.go
index e063ef81c..8364c23c2 100644
--- a/services/chainservice_blocks.go
+++ b/services/chainservice_blocks.go
@@ -9,9 +9,11 @@ import (
"github.com/attestantio/go-eth2-client/spec"
"github.com/attestantio/go-eth2-client/spec/deneb"
+ "github.com/attestantio/go-eth2-client/spec/gloas"
"github.com/attestantio/go-eth2-client/spec/phase0"
"github.com/ethpandaops/dora/blockdb"
+ btypes "github.com/ethpandaops/dora/blockdb/types"
"github.com/ethpandaops/dora/db"
"github.com/ethpandaops/dora/dbtypes"
"github.com/ethpandaops/dora/indexer/beacon"
@@ -22,6 +24,7 @@ type CombinedBlockResponse struct {
Root phase0.Root
Header *phase0.SignedBeaconBlockHeader
Block *spec.VersionedSignedBeaconBlock
+ Payload *gloas.SignedExecutionPayloadEnvelope
Orphaned bool
}
@@ -103,6 +106,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
Root: blockInfo.Root,
Header: blockInfo.GetHeader(),
Block: blockInfo.GetBlock(ctx),
+ Payload: blockInfo.GetExecutionPayload(ctx),
Orphaned: !bs.beaconIndexer.IsCanonicalBlock(blockInfo, nil),
}
}
@@ -115,6 +119,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
Root: blockInfo.Root,
Header: blockInfo.GetHeader(),
Block: blockInfo.GetBlock(ctx),
+ Payload: blockInfo.GetExecutionPayload(ctx),
Orphaned: true,
}
}
@@ -127,18 +132,34 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
}
var block *spec.VersionedSignedBeaconBlock
+ var payload *gloas.SignedExecutionPayloadEnvelope
bodyRetry := 0
for ; bodyRetry < 3; bodyRetry++ {
client := clients[bodyRetry%len(clients)]
- block, err = beacon.LoadBeaconBlock(ctx, client, blockroot)
- if block != nil {
- break
- } else if err != nil {
- log := logrus.WithError(err)
- if client != nil {
- log = log.WithField("client", client.GetClient().GetName())
+ if block == nil {
+ block, err = beacon.LoadBeaconBlock(ctx, client, blockroot)
+ if err != nil {
+ log := logrus.WithError(err)
+ if client != nil {
+ log = log.WithField("client", client.GetClient().GetName())
+ }
+ log.Warnf("Error loading block body for root 0x%x", blockroot)
+ }
+ }
+
+ if block.Version >= spec.DataVersionGloas {
+ payload, err = beacon.LoadExecutionPayload(ctx, client, blockroot)
+ if payload != nil {
+ break
+ } else if err != nil {
+ log := logrus.WithError(err)
+ if client != nil {
+ log = log.WithField("client", client.GetClient().GetName())
+ }
+ log.Warnf("Error loading block payload for root 0x%x", blockroot)
}
- log.Warnf("Error loading block body for root 0x%x", blockroot)
+ } else if block != nil {
+ break
}
}
if err == nil && block != nil {
@@ -146,6 +167,7 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
Root: blockroot,
Header: header,
Block: block,
+ Payload: payload,
Orphaned: false,
}
}
@@ -153,16 +175,24 @@ func (bs *ChainService) GetSlotDetailsByBlockroot(ctx context.Context, blockroot
// try loading from block db
if result == nil && header != nil && blockdb.GlobalBlockDb != nil {
- blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:], func(version uint64, block []byte) (interface{}, error) {
- return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
- })
- if err == nil && blockData != nil {
- result = &CombinedBlockResponse{
+ blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(header.Message.Slot), blockroot[:],
+ btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload,
+ func(version uint64, block []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
+ }, func(version uint64, payload []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload)
+ })
+ if err == nil && blockData != nil && blockData.Body != nil {
+ resp := &CombinedBlockResponse{
Root: blockroot,
Header: header,
Block: blockData.Body.(*spec.VersionedSignedBeaconBlock),
Orphaned: false,
}
+ if blockData.Payload != nil {
+ resp.Payload = blockData.Payload.(*gloas.SignedExecutionPayloadEnvelope)
+ }
+ result = resp
}
}
@@ -232,6 +262,7 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl
Root: cachedBlock.Root,
Header: blockHeader,
Block: blockBody,
+ Payload: cachedBlock.GetExecutionPayload(ctx),
Orphaned: isOrphaned,
}
}
@@ -248,25 +279,40 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl
var err error
var block *spec.VersionedSignedBeaconBlock
+ var payload *gloas.SignedExecutionPayloadEnvelope
bodyRetry := 0
for ; bodyRetry < 3; bodyRetry++ {
client := clients[bodyRetry%len(clients)]
block, err = beacon.LoadBeaconBlock(ctx, client, blockRoot)
- if block != nil {
- break
- } else if err != nil {
+ if err != nil {
log := logrus.WithError(err)
if client != nil {
log = log.WithField("client", client.GetClient().GetName())
}
log.Warnf("Error loading block body for slot %v", slot)
}
+
+ if block != nil && block.Version >= spec.DataVersionGloas {
+ payload, err = beacon.LoadExecutionPayload(ctx, client, blockRoot)
+ if payload != nil {
+ break
+ } else if err != nil {
+ log := logrus.WithError(err)
+ if client != nil {
+ log = log.WithField("client", client.GetClient().GetName())
+ }
+ log.Warnf("Error loading block payload for root 0x%x", blockRoot)
+ }
+ } else if block != nil {
+ break
+ }
}
if err == nil && block != nil {
result = &CombinedBlockResponse{
Root: blockRoot,
Header: header,
Block: block,
+ Payload: payload,
Orphaned: orphaned,
}
}
@@ -274,22 +320,30 @@ func (bs *ChainService) GetSlotDetailsBySlot(ctx context.Context, slot phase0.Sl
// try loading from block db
if result == nil && header != nil && blockdb.GlobalBlockDb != nil {
- blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:], func(version uint64, block []byte) (interface{}, error) {
- return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
- })
- if err == nil && blockData != nil {
+ blockData, err := blockdb.GlobalBlockDb.GetBlock(ctx, uint64(slot), blockRoot[:],
+ btypes.BlockDataFlagHeader|btypes.BlockDataFlagBody|btypes.BlockDataFlagPayload,
+ func(version uint64, block []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedBeaconBlockSSZ(bs.beaconIndexer.GetDynSSZ(), version, block)
+ }, func(version uint64, payload []byte) (any, error) {
+ return beacon.UnmarshalVersionedSignedExecutionPayloadEnvelopeSSZ(bs.beaconIndexer.GetDynSSZ(), version, payload)
+ })
+ if err == nil && blockData != nil && blockData.Body != nil {
header := &phase0.SignedBeaconBlockHeader{}
err = header.UnmarshalSSZ(blockData.HeaderData)
if err != nil {
return nil, err
}
- result = &CombinedBlockResponse{
+ resp := &CombinedBlockResponse{
Root: blockRoot,
Header: header,
Block: blockData.Body.(*spec.VersionedSignedBeaconBlock),
Orphaned: false,
}
+ if blockData.Payload != nil {
+ resp.Payload = blockData.Payload.(*gloas.SignedExecutionPayloadEnvelope)
+ }
+ result = resp
}
}
@@ -309,6 +363,49 @@ func (bs *ChainService) GetBlobSidecarsByBlockRoot(ctx context.Context, blockroo
return client.GetClient().GetRPCClient().GetBlobSidecarsByBlockroot(ctx, blockroot)
}
+// getPayloadStatus computes the payload status for a given block.
+func (bs *ChainService) getPayloadStatus(ctx context.Context, block *beacon.Block, canonicalHead *beacon.Block) dbtypes.PayloadStatus {
+ chainState := bs.consensusPool.GetChainState()
+ if !chainState.IsEip7732Enabled(chainState.EpochOfSlot(block.Slot)) {
+ return dbtypes.PayloadStatusCanonical
+ }
+
+ if !block.HasExecutionPayload() {
+ return dbtypes.PayloadStatusMissing
+ }
+
+ blockIndex := block.GetBlockIndex(ctx)
+ if blockIndex == nil {
+ return dbtypes.PayloadStatusCanonical
+ }
+
+ // Get child blocks and check if any canonical child builds on this payload
+ childBlocks := bs.beaconIndexer.GetBlockByParentRoot(block.Root)
+
+ if len(childBlocks) == 0 {
+ // no children, so it's canonical for now
+ return dbtypes.PayloadStatusCanonical
+ }
+
+ for _, child := range childBlocks {
+ childIndex := child.GetBlockIndex(ctx)
+ if childIndex == nil {
+ continue
+ }
+ // Check if child is in the canonical chain (use original head since
+ // children are at higher slots than the updated lastCanonicalBlock)
+ if !bs.beaconIndexer.IsCanonicalBlockByHead(child, canonicalHead) {
+ continue
+ }
+ // Check if child builds on this block's execution payload
+ if bytes.Equal(childIndex.ExecutionParentHash[:], blockIndex.ExecutionHash[:]) {
+ return dbtypes.PayloadStatusCanonical
+ }
+ }
+
+ return dbtypes.PayloadStatusOrphaned
+}
+
// GetDbBlocksForSlots retrieves blocks for a range of slots from cache & database.
// The firstSlot parameter specifies the starting slot.
// The slotLimit parameter limits the number of slots to retrieve.
@@ -334,7 +431,10 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
proposerAssignmentsEpoch := phase0.Epoch(math.MaxInt64)
getCanonicalProposer := func(slot phase0.Slot) phase0.ValidatorIndex {
epoch := chainState.EpochOfSlot(slot)
- if epoch != proposerAssignmentsEpoch {
+ if proposerAssignmentsEpoch != phase0.Epoch(math.MaxInt64) && epoch == proposerAssignmentsEpoch+1 && chainState.IsFuluEnabled(epoch) {
+ // extended proposer lookahead in fulu, use the same proposer assignments as the previous epoch
+ } else if epoch != proposerAssignmentsEpoch {
+ assignmentsEpoch := epoch
if epochStats := bs.beaconIndexer.GetEpochStats(epoch, nil); epochStats != nil {
if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil {
proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{}
@@ -343,8 +443,20 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
proposerAssignments[slot] = proposer
}
}
+ } else if epoch > 0 && chainState.IsFuluEnabled(epoch-1) {
+ if epochStats := bs.beaconIndexer.GetEpochStats(epoch-1, nil); epochStats != nil {
+ if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil {
+ assignmentsEpoch = epoch - 1
+ proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{}
+ for slotIdx, proposer := range epochStatsValues.ProposerDuties {
+ slot := chainState.EpochToSlot(assignmentsEpoch) + phase0.Slot(slotIdx)
+ proposerAssignments[slot] = proposer
+ }
+ }
+ }
+
}
- proposerAssignmentsEpoch = epoch
+ proposerAssignmentsEpoch = assignmentsEpoch
}
proposer, ok := proposerAssignments[slot]
@@ -364,6 +476,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
blocks := bs.beaconIndexer.GetBlocksBySlot(slot)
for _, block := range blocks {
isCanonical := bs.beaconIndexer.IsCanonicalBlockByHead(block, lastCanonicalBlock)
+ payloadStatus := bs.getPayloadStatus(ctx, block, lastCanonicalBlock)
if isCanonical {
lastCanonicalBlock = block
}
@@ -372,6 +485,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
}
dbBlock := block.GetDbBlock(bs.beaconIndexer, isCanonical)
if dbBlock != nil {
+ dbBlock.PayloadStatus = payloadStatus
resBlocks = append(resBlocks, dbBlock)
}
}
@@ -434,6 +548,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
}
isCanonical := bs.beaconIndexer.IsCanonicalBlockByHead(block, lastCanonicalBlock)
+ payloadStatus := bs.getPayloadStatus(ctx, block, lastCanonicalBlock)
if isCanonical {
lastCanonicalBlock = block
}
@@ -450,9 +565,10 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
blockRoots = append(blockRoots, block.Root[:])
blockRootsIdx = append(blockRootsIdx, len(resBlocks))
resBlocks = append(resBlocks, &dbtypes.Slot{
- Slot: uint64(slot),
- Proposer: uint64(blockHeader.Message.ProposerIndex),
- Status: blockStatus,
+ Slot: uint64(slot),
+ Proposer: uint64(blockHeader.Message.ProposerIndex),
+ Status: blockStatus,
+ PayloadStatus: payloadStatus,
})
}
@@ -495,6 +611,7 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
for idx, blockRoot := range blockRoots {
if dbBlock, ok := blockMap[phase0.Root(blockRoot)]; ok {
dbBlock.Status = resBlocks[blockRootsIdx[idx]].Status
+ dbBlock.PayloadStatus = resBlocks[blockRootsIdx[idx]].PayloadStatus
resBlocks[blockRootsIdx[idx]] = dbBlock
}
}
@@ -549,10 +666,11 @@ func (bs *ChainService) GetDbBlocksForSlots(ctx context.Context, firstSlot uint6
}
type cachedDbBlock struct {
- slot uint64
- proposer uint64
- orphaned bool
- block *beacon.Block
+ slot uint64
+ proposer uint64
+ orphaned bool
+ payloadStatus dbtypes.PayloadStatus
+ block *beacon.Block
}
// GetDbBlocksByFilter retrieves a filtered range of blocks from cache & database.
@@ -580,7 +698,10 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
proposerAssignmentsEpoch := phase0.Epoch(math.MaxInt64)
getCanonicalProposer := func(slot phase0.Slot) phase0.ValidatorIndex {
epoch := chainState.EpochOfSlot(slot)
- if epoch != proposerAssignmentsEpoch {
+ if proposerAssignmentsEpoch != phase0.Epoch(math.MaxInt64) && epoch == proposerAssignmentsEpoch+1 && chainState.IsFuluEnabled(epoch) {
+ // extended proposer lookahead in fulu, use the same proposer assignments as the previous epoch
+ } else if epoch != proposerAssignmentsEpoch {
+ assignmentsEpoch := epoch
if epochStats := bs.beaconIndexer.GetEpochStats(epoch, nil); epochStats != nil {
if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil {
proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{}
@@ -589,8 +710,20 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
proposerAssignments[slot] = proposer
}
}
+ } else if epoch > 0 && chainState.IsFuluEnabled(epoch-1) {
+ if epochStats := bs.beaconIndexer.GetEpochStats(epoch-1, nil); epochStats != nil {
+ if epochStatsValues := epochStats.GetValues(true); epochStatsValues != nil {
+ assignmentsEpoch = epoch - 1
+ proposerAssignments = map[phase0.Slot]phase0.ValidatorIndex{}
+ for slotIdx, proposer := range epochStatsValues.ProposerDuties {
+ slot := chainState.EpochToSlot(assignmentsEpoch) + phase0.Slot(slotIdx)
+ proposerAssignments[slot] = proposer
+ }
+ }
+ }
+
}
- proposerAssignmentsEpoch = epoch
+ proposerAssignmentsEpoch = assignmentsEpoch
}
proposer, ok := proposerAssignments[slot]
@@ -611,9 +744,14 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
filter.MaxSlot = &maxSlot
}
+ if filter.WithPayloadMask == 0 {
+ filter.WithPayloadMask = dbtypes.PayloadStatusMaskAll
+ }
+
// get blocks from cache
// iterate from current slot to finalized slot
- lastCanonicalBlock := bs.beaconIndexer.GetCanonicalHead(nil)
+ canonicalHead := bs.beaconIndexer.GetCanonicalHead(nil)
+ lastCanonicalBlock := canonicalHead
// apply epoch filter to slot range
cacheStartSlot := startSlot
@@ -687,6 +825,8 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
}
isCanonical := bs.beaconIndexer.IsCanonicalBlockByHead(block, lastCanonicalBlock)
+ payloadStatus := bs.getPayloadStatus(ctx, block, lastCanonicalBlock)
+
if isCanonical {
lastCanonicalBlock = block
}
@@ -707,6 +847,17 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
continue
}
+ // filter by payload status
+ if filter.WithPayloadMask&dbtypes.PayloadStatusMaskMissing == 0 && payloadStatus == dbtypes.PayloadStatusMissing {
+ continue
+ }
+ if filter.WithPayloadMask&dbtypes.PayloadStatusMaskCanonical == 0 && payloadStatus == dbtypes.PayloadStatusCanonical {
+ continue
+ }
+ if filter.WithPayloadMask&dbtypes.PayloadStatusMaskOrphaned == 0 && payloadStatus == dbtypes.PayloadStatusOrphaned {
+ continue
+ }
+
// filter by graffiti
if filter.Graffiti != "" {
blockGraffiti := string(blockIndex.Graffiti[:])
@@ -820,6 +971,28 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
}
}
+ // filter by builder index
+ if filter.BuilderIndex != nil {
+ builderIndex := blockIndex.BuilderIndex
+ // Convert uint64 to int64 for comparison (-1 means self-built/MaxUint64)
+ var builderIndexInt64 int64
+ if builderIndex == math.MaxUint64 {
+ builderIndexInt64 = -1
+ } else {
+ builderIndexInt64 = int64(builderIndex)
+ }
+ if builderIndexInt64 != *filter.BuilderIndex {
+ continue
+ }
+ }
+
+ // filter by EL block parent hash
+ if len(filter.EthBlockParentHash) > 0 {
+ if !bytes.Equal(blockIndex.ExecutionParentHash[:], filter.EthBlockParentHash) {
+ continue
+ }
+ }
+
// filter by gas used
if filter.MinGasUsed != nil || filter.MaxGasUsed != nil {
gasUsed := blockIndex.GasUsed
@@ -854,16 +1027,22 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
}
cachedMatches = append(cachedMatches, cachedDbBlock{
- slot: uint64(block.Slot),
- proposer: uint64(blockHeader.Message.ProposerIndex),
- orphaned: !isCanonical,
- block: block,
+ slot: uint64(block.Slot),
+ proposer: uint64(blockHeader.Message.ProposerIndex),
+ orphaned: !isCanonical,
+ payloadStatus: payloadStatus,
+ block: block,
})
}
// reconstruct missing blocks from epoch duties
// For slot/root filtering, we still need to check if we need missing blocks for that specific slot
- shouldCheckMissing := filter.WithMissing != 0 && filter.Graffiti == "" && filter.ExtraData == "" && filter.WithOrphaned != 2 && filter.MinSyncParticipation == nil && filter.MaxSyncParticipation == nil && filter.MinExecTime == nil && filter.MaxExecTime == nil && filter.MinTxCount == nil && filter.MaxTxCount == nil && filter.MinBlobCount == nil && filter.MaxBlobCount == nil && len(filter.ForkIds) == 0 && filter.MinGasUsed == nil && filter.MaxGasUsed == nil && filter.MinGasLimit == nil && filter.MaxGasLimit == nil && filter.MinBlockSize == nil && filter.MaxBlockSize == nil && filter.WithMevBlock == 0
+ shouldCheckMissing := filter.WithMissing != 0 && filter.Graffiti == "" && filter.ExtraData == "" && filter.WithOrphaned != 2 &&
+ filter.MinSyncParticipation == nil && filter.MaxSyncParticipation == nil && filter.MinExecTime == nil && filter.MaxExecTime == nil &&
+ filter.MinTxCount == nil && filter.MaxTxCount == nil && filter.MinBlobCount == nil && filter.MaxBlobCount == nil && len(filter.ForkIds) == 0 &&
+ filter.BuilderIndex == nil && filter.WithPayloadMask&dbtypes.PayloadStatusMaskMissing != 0 && len(filter.EthBlockParentHash) == 0 && filter.MinGasUsed == nil &&
+ filter.MaxGasUsed == nil && filter.MinGasLimit == nil && filter.MaxGasLimit == nil && filter.MinBlockSize == nil && filter.MaxBlockSize == nil &&
+ filter.WithMevBlock == 0 && filter.ProposerIndex == nil && filter.ProposerName == ""
// If filtering by slot, only check missing for that specific slot
if filter.Slot != nil {
@@ -975,6 +1154,7 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
if block.block != nil {
if block.slot >= uint64(prunedSlot) {
assignedBlock.Block = block.block.GetDbBlock(bs.beaconIndexer, !block.orphaned)
+ assignedBlock.Block.PayloadStatus = block.payloadStatus
} else {
blockRoots = append(blockRoots, block.block.Root[:])
blockRootsIdx = append(blockRootsIdx, resIdx)
@@ -992,12 +1172,14 @@ func (bs *ChainService) GetDbBlocksByFilter(ctx context.Context, filter *dbtypes
if blockMap != nil {
for idx, blockRoot := range blockRoots {
if dbBlock, ok := blockMap[phase0.Root(blockRoot)]; ok {
+ cachedMatch := cachedMatches[blockRootsCachedId[idx]]
dbBlock.Status = dbtypes.Canonical
- if cachedMatches[blockRootsCachedId[idx]].orphaned {
+ if cachedMatch.orphaned {
dbBlock.Status = dbtypes.Orphaned
}
+ dbBlock.PayloadStatus = cachedMatch.payloadStatus
resBlocks[blockRootsIdx[idx]].Block = dbBlock
}
}
diff --git a/services/chainservice_builder.go b/services/chainservice_builder.go
new file mode 100644
index 000000000..49a4c9162
--- /dev/null
+++ b/services/chainservice_builder.go
@@ -0,0 +1,263 @@
+package services
+
+import (
+ "bytes"
+ "context"
+ "slices"
+ "sort"
+
+ "github.com/attestantio/go-eth2-client/spec/gloas"
+ "github.com/attestantio/go-eth2-client/spec/phase0"
+
+ "github.com/ethpandaops/dora/db"
+ "github.com/ethpandaops/dora/dbtypes"
+ "github.com/ethpandaops/dora/indexer/beacon"
+)
+
+// BuilderIndexFlag separates builder indices from validator indices
+// A validator/builder index with this flag set is a builder index
+const BuilderIndexFlag = beacon.BuilderIndexFlag
+
+type BuilderWithIndex struct {
+ Index gloas.BuilderIndex
+ Builder *gloas.Builder
+ Superseded bool
+}
+
+// GetFilteredBuilderSet returns builders matching the filter criteria
+func (bs *ChainService) GetFilteredBuilderSet(ctx context.Context, filter *dbtypes.BuilderFilter, withBalance bool) ([]BuilderWithIndex, uint64) {
+ var overrideForkId *beacon.ForkKey
+
+ canonicalHead := bs.beaconIndexer.GetCanonicalHead(overrideForkId)
+ if canonicalHead == nil {
+ return nil, 0
+ }
+
+ var balances []phase0.Gwei
+ if withBalance {
+ balances = bs.beaconIndexer.GetRecentBuilderBalances(overrideForkId)
+ }
+ currentEpoch := bs.consensusPool.GetChainState().CurrentEpoch()
+
+ cachedResults := make([]BuilderWithIndex, 0, 1000)
+ cachedIndexes := map[uint64]bool{}
+
+ // Get matching entries from cached builders
+ bs.beaconIndexer.StreamActiveBuilderDataForRoot(canonicalHead.Root, false, &currentEpoch, func(index gloas.BuilderIndex, flags uint16, activeData *beacon.BuilderData, builder *gloas.Builder) error {
+ if builder == nil {
+ return nil
+ }
+ if filter.MinIndex != nil && uint64(index) < *filter.MinIndex {
+ return nil
+ }
+ if filter.MaxIndex != nil && uint64(index) > *filter.MaxIndex {
+ return nil
+ }
+ if len(filter.PubKey) > 0 {
+ pubkeylen := min(len(filter.PubKey), 48)
+ if !bytes.Equal(builder.PublicKey[:pubkeylen], filter.PubKey) {
+ return nil
+ }
+ }
+ if len(filter.ExecutionAddress) > 0 {
+ if !bytes.Equal(builder.ExecutionAddress[:], filter.ExecutionAddress) {
+ return nil
+ }
+ }
+
+ if len(filter.Status) > 0 {
+ builderStatus := getBuilderStatus(builder, currentEpoch, false)
+ if !slices.Contains(filter.Status, builderStatus) {
+ return nil
+ }
+ }
+
+ cachedResults = append(cachedResults, BuilderWithIndex{
+ Index: index,
+ Builder: builder,
+ })
+ cachedIndexes[uint64(index)] = true
+
+ return nil
+ })
+
+ // Get matching entries from DB
+ dbIndexes, err := db.GetBuilderIndexesByFilter(ctx, *filter, uint64(currentEpoch))
+ if err != nil {
+ bs.logger.Warnf("error getting builder indexes by filter: %v", err)
+ return nil, 0
+ }
+
+ // Sort results
+ var sortFn func(builderA, builderB BuilderWithIndex) bool
+ switch filter.OrderBy {
+ case dbtypes.BuilderOrderIndexAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Index < builderB.Index
+ }
+ case dbtypes.BuilderOrderIndexDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Index > builderB.Index
+ }
+ case dbtypes.BuilderOrderPubKeyAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) < 0
+ }
+ case dbtypes.BuilderOrderPubKeyDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return bytes.Compare(builderA.Builder.PublicKey[:], builderB.Builder.PublicKey[:]) > 0
+ }
+ case dbtypes.BuilderOrderBalanceAsc:
+ if balances == nil {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.Balance < builderB.Builder.Balance
+ }
+ } else {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return balances[builderA.Index] < balances[builderB.Index]
+ }
+ sort.Slice(dbIndexes, func(i, j int) bool {
+ if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) {
+ return dbIndexes[i] < dbIndexes[j]
+ }
+ return balances[dbIndexes[i]] < balances[dbIndexes[j]]
+ })
+ }
+ case dbtypes.BuilderOrderBalanceDesc:
+ if balances == nil {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.Balance > builderB.Builder.Balance
+ }
+ } else {
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return balances[builderA.Index] > balances[builderB.Index]
+ }
+ sort.Slice(dbIndexes, func(i, j int) bool {
+ if dbIndexes[i] >= uint64(len(balances)) || dbIndexes[j] >= uint64(len(balances)) {
+ return dbIndexes[i] > dbIndexes[j]
+ }
+ return balances[dbIndexes[i]] > balances[dbIndexes[j]]
+ })
+ }
+ case dbtypes.BuilderOrderDepositEpochAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.DepositEpoch < builderB.Builder.DepositEpoch
+ }
+ case dbtypes.BuilderOrderDepositEpochDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.DepositEpoch > builderB.Builder.DepositEpoch
+ }
+ case dbtypes.BuilderOrderWithdrawableEpochAsc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.WithdrawableEpoch < builderB.Builder.WithdrawableEpoch
+ }
+ case dbtypes.BuilderOrderWithdrawableEpochDesc:
+ sortFn = func(builderA, builderB BuilderWithIndex) bool {
+ return builderA.Builder.WithdrawableEpoch > builderB.Builder.WithdrawableEpoch
+ }
+ }
+
+ sort.Slice(cachedResults, func(i, j int) bool {
+ return sortFn(cachedResults[i], cachedResults[j])
+ })
+
+ // Stream builder set from db and merge cached results
+ resCap := filter.Limit
+ if resCap == 0 {
+ resCap = uint64(len(cachedResults) + len(dbIndexes))
+ }
+ result := make([]BuilderWithIndex, 0, resCap)
+ cachedIndex := 0
+ matchingCount := uint64(0)
+ resultCount := uint64(0)
+ dbEntryCount := uint64(0)
+
+ db.StreamBuildersByIndexes(ctx, dbIndexes, func(dbBuilder *dbtypes.Builder) bool {
+ dbEntryCount++
+ builderWithIndex := BuilderWithIndex{
+ Index: gloas.BuilderIndex(dbBuilder.BuilderIndex),
+ Builder: beacon.UnwrapDbBuilder(dbBuilder),
+ Superseded: dbBuilder.Superseded,
+ }
+
+ for cachedIndex < len(cachedResults) && (cachedResults[cachedIndex].Index == builderWithIndex.Index || sortFn(cachedResults[cachedIndex], builderWithIndex)) {
+ if matchingCount >= filter.Offset {
+ resultBuilder := cachedResults[cachedIndex]
+ if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) {
+ resultBuilder.Builder.Balance = balances[resultBuilder.Index]
+ }
+ result = append(result, resultBuilder)
+ resultCount++
+ }
+ matchingCount++
+ cachedIndex++
+
+ if filter.Limit > 0 && resultCount >= filter.Limit {
+ return false // stop streaming
+ }
+ }
+
+ if cachedIndexes[dbBuilder.BuilderIndex] {
+ return true // skip this index, cache entry is newer
+ }
+
+ if matchingCount >= filter.Offset {
+ if !builderWithIndex.Superseded && balances != nil && dbBuilder.BuilderIndex < uint64(len(balances)) {
+ builderWithIndex.Builder.Balance = balances[dbBuilder.BuilderIndex]
+ }
+ result = append(result, builderWithIndex)
+ resultCount++
+ }
+ matchingCount++
+
+ if filter.Limit > 0 && resultCount >= filter.Limit {
+ return false // stop streaming
+ }
+
+ return true // get more from db
+ })
+
+ for cachedIndex < len(cachedResults) && (filter.Limit == 0 || resultCount < filter.Limit) {
+ if matchingCount >= filter.Offset {
+ resultBuilder := cachedResults[cachedIndex]
+ if balances != nil && uint64(resultBuilder.Index) < uint64(len(balances)) {
+ resultBuilder.Builder.Balance = balances[resultBuilder.Index]
+ }
+ result = append(result, resultBuilder)
+ resultCount++
+ }
+ matchingCount++
+ cachedIndex++
+ }
+
+ // Add remaining cached results
+ matchingCount += uint64(len(cachedResults) - cachedIndex)
+
+ // Add remaining db results
+ remainingDbCount := uint64(0)
+ for i := dbEntryCount; i < uint64(len(dbIndexes)); i++ {
+ if cachedIndexes[dbIndexes[i]] {
+ continue
+ }
+ remainingDbCount++
+ }
+ matchingCount += remainingDbCount
+
+ return result, matchingCount
+}
+
+// GetBuilderByIndex returns the builder by index
+func (bs *ChainService) GetBuilderByIndex(index gloas.BuilderIndex) *gloas.Builder {
+ return bs.beaconIndexer.GetBuilderByIndex(index, nil)
+}
+
+// getBuilderStatus determines the status of a builder
+func getBuilderStatus(builder *gloas.Builder, currentEpoch phase0.Epoch, superseded bool) dbtypes.BuilderStatus {
+ if superseded {
+ return dbtypes.BuilderStatusSupersededFilter
+ }
+ if builder.WithdrawableEpoch <= currentEpoch {
+ return dbtypes.BuilderStatusExitedFilter
+ }
+ return dbtypes.BuilderStatusActiveFilter
+}
diff --git a/services/chainservice_deposits.go b/services/chainservice_deposits.go
index 5491a2530..073039dc7 100644
--- a/services/chainservice_deposits.go
+++ b/services/chainservice_deposits.go
@@ -306,7 +306,8 @@ func (bs *ChainService) GetDepositOperationsByFilter(ctx context.Context, filter
if len(txFilter.WithdrawalAddress) > 0 {
wdcreds := depositWithTx.WithdrawalCredentials
- if wdcreds[0] != 0x01 && wdcreds[0] != 0x02 {
+ // 0x01 = ETH1, 0x02 = compounding, 0x03 = builder deposit
+ if wdcreds[0] != 0x01 && wdcreds[0] != 0x02 && wdcreds[0] != 0x03 {
continue
}
@@ -527,11 +528,11 @@ func (bs *ChainService) GetIndexedDepositQueue(ctx context.Context, headBlock *b
indexedQueue.QueueEstimation = queueEpoch
if lastNormalDeposit != nil && !bytes.Equal(lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:]) {
- // something is bad, return empty queue
- logrus.Warnf("ChainService.GetIndexedDepositQueue: last included deposit not found in queue, %x != %x", lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:])
- return &IndexedDepositQueue{
- Queue: []*IndexedDepositQueueEntry{},
- }
+ // Mismatch between queue and included deposits - this can happen if there are
+ // builder deposits (0x03) that skip the queue. Log warning but still return
+ // the queue to show useful information. The deposit indexes might not be perfectly
+ // matched but the queue itself is still valid.
+ logrus.Debugf("ChainService.GetIndexedDepositQueue: last included deposit not found in queue (possibly due to builder deposits), %x != %x", lastNormalDeposit.PendingDeposit.Pubkey[:], lastIncludedDeposit.PublicKey[:])
}
return indexedQueue
@@ -599,7 +600,17 @@ func (bs *ChainService) getLastIncludedDeposit(ctx context.Context, headRoot pha
}
if len(deposits) > 0 {
- lastDeposits = deposits
+ // Filter out builder deposits (0x03) as they skip the queue
+ filteredDeposits := make([]*dbtypes.Deposit, 0, len(deposits))
+ for _, deposit := range deposits {
+ if len(deposit.WithdrawalCredentials) > 0 && deposit.WithdrawalCredentials[0] == 0x03 {
+ continue // Skip builder deposits
+ }
+ filteredDeposits = append(filteredDeposits, deposit)
+ }
+ if len(filteredDeposits) > 0 {
+ lastDeposits = filteredDeposits
+ }
}
}
}
diff --git a/static/css/layout.css b/static/css/layout.css
index e0df61b79..665ee04d7 100644
--- a/static/css/layout.css
+++ b/static/css/layout.css
@@ -329,6 +329,26 @@ span.validator-label {
padding: 1px .25rem;
}
+.badge.split-warning {
+ background: linear-gradient(
+ 90deg,
+ rgba(255,255,255,0) 0%,
+ rgba(255,255,255,0) 50%,
+ rgba(255,193,7,1) 50%,
+ rgba(255,193,7,1) 100%
+ );
+}
+
+.badge.split-info {
+ background: linear-gradient(
+ 90deg,
+ rgba(255,255,255,0) 0%,
+ rgba(255,255,255,0) 50%,
+ rgba(13,202,240,1) 50%,
+ rgba(13,202,240,1) 100%
+ );
+}
+
.text-monospace {
font-family: var(--bs-font-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace) !important;
}
diff --git a/templates/builder/builder.html b/templates/builder/builder.html
new file mode 100644
index 000000000..3e4e17f50
--- /dev/null
+++ b/templates/builder/builder.html
@@ -0,0 +1,274 @@
+{{ define "page" }}
+
+
+
Builder {{ formatBuilderWithIndex .Index .Name }}
+
+
+ Home
+ Builders
+ Builder details
+
+
+
+
+
+
+
+
+
+ Status:
+
+
+
+
+
+
+
+
+ {{ if .ShowDeposit }}
+
+ {{ end }}
+
+
+
+
+
+
+
+ {{ if .ShowWithdrawable }}
+
+ {{ end }}
+
+
+
+
+
+
+
+
+
+
Index:
+
+ {{ formatBuilderWithIndex .Index .Name }}
+
+
+
+
+
Public Key:
+
+ 0x{{ printf "%x" .PublicKey }}
+
+
+
+
+
Execution Address:
+
+ {{ ethAddressLink .ExecutionAddress }}
+
+
+
+
Status:
+
+ {{ if eq .State "Pending" }}
+ Pending
+ {{ else if eq .State "Active" }}
+ Active
+ {{ else if eq .State "Exited" }}
+ Exited
+ {{ else if eq .State "Superseded" }}
+ Superseded
+ {{ else }}
+ {{ .State }}
+ {{ end }}
+
+
+
+
Balance:
+
+ {{ formatEthFromGwei .Balance }}
+
+
+
+
+
Version:
+
+ {{ .Version }}
+
+
+ {{ if .ShowDeposit }}
+
+ {{ end }}
+ {{ if .ShowWithdrawable }}
+
+
Withdrawable Epoch:
+
+
+ {{ end }}
+ {{ if .ExitReason }}
+
+
Exit Reason:
+
+ {{ if .ExitReasonVoluntaryExit }}
+ Builder submitted a voluntary exit request in
slot {{ .ExitReasonSlot }}
+ {{ else if .ExitReasonWithdrawal }}
+ Builder submitted a full withdrawal request in
slot {{ .ExitReasonSlot }}
+ {{ if .ExitReasonTxDetails }}
+
+
+ Transaction: {{ ethTransactionLink .ExitReasonTxHash 0 }}
+
+
+ {{ end }}
+ {{ else }}
+ {{ .ExitReason }}
+ {{ end }}
+
+
+ {{ end }}
+
+
+
+
+
+
+
+
+
+ {{ if eq .TabView "blocks" }}
+ {{ template "recentBlocks" . }}
+ {{ end }}
+
+
+ {{ if eq .TabView "bids" }}
+ {{ template "recentBids" . }}
+ {{ end }}
+
+
+ {{ if eq .TabView "deposits" }}
+ {{ template "recentDeposits" . }}
+ {{ end }}
+
+
+
+
+{{ end }}
+{{ define "lazyPage" }}
+ {{ if eq .TabView "blocks" }}
+ {{ template "recentBlocks" . }}
+ {{ else if eq .TabView "bids" }}
+ {{ template "recentBids" . }}
+ {{ else if eq .TabView "deposits" }}
+ {{ template "recentDeposits" . }}
+ {{ else }}
+ Unknown tab
+ {{ end }}
+{{ end }}
+{{ define "js" }}
+
+{{ end }}
+{{ define "css" }}
+
+{{ end }}
diff --git a/templates/builder/notfound.html b/templates/builder/notfound.html
new file mode 100644
index 000000000..b92472273
--- /dev/null
+++ b/templates/builder/notfound.html
@@ -0,0 +1,27 @@
+{{ define "js" }}
+{{ end }}
+
+{{ define "css" }}
+{{ end }}
+
+{{ define "page" }}
+
+
+
+
Builder not found
+
+
+ Home
+ Builders
+ Builder details
+
+
+
+
+
+
+
Sorry but we could not find the builder you are looking for. The builder may not exist or has not been indexed yet.
+
+
+
+{{ end }}
diff --git a/templates/builder/recentBids.html b/templates/builder/recentBids.html
new file mode 100644
index 000000000..d3cbddb7b
--- /dev/null
+++ b/templates/builder/recentBids.html
@@ -0,0 +1,57 @@
+{{ define "recentBids" }}
+
+
+
+
+
+
+ Slot
+ Time
+ Block Hash
+ Gas Limit
+ Value
+ EL Payment
+ Status
+
+
+
+ {{ if gt (len .RecentBids) 0 }}
+ {{ range $i, $bid := .RecentBids }}
+
+ {{ formatAddCommas $bid.Slot }}
+ {{ formatRecentTimeShort $bid.Ts }}
+
+
+ 0x{{ printf "%x" $bid.BlockHash }}
+
+
+
+ {{ formatAddCommas $bid.GasLimit }}
+ {{ formatEthFromGwei $bid.Value }}
+ {{ formatEthFromGwei $bid.ElPayment }}
+
+ {{ if $bid.IsWinning }}
+ Won
+ {{ else }}
+ -
+ {{ end }}
+
+
+ {{ end }}
+ {{ else }}
+
+
+
+
+ {{ template "timeline_svg" }}
+
+
+
+
+ {{ end }}
+
+
+
+
+
+{{ end }}
diff --git a/templates/builder/recentBlocks.html b/templates/builder/recentBlocks.html
new file mode 100644
index 000000000..c03749e4a
--- /dev/null
+++ b/templates/builder/recentBlocks.html
@@ -0,0 +1,58 @@
+{{ define "recentBlocks" }}
+
+
+
+
+
+
+ Epoch
+ Slot
+ Block Hash
+ Status
+ Time
+ Value
+
+
+
+ {{ if gt (len .RecentBlocks) 0 }}
+ {{ range $i, $block := .RecentBlocks }}
+
+ {{ formatAddCommas $block.Epoch }}
+ {{ formatAddCommas $block.Slot }}
+
+
+ 0x{{ printf "%x" $block.BlockHash }}
+
+
+
+ {{ if eq $block.Status 0 }}
+ Missing
+ {{ else if eq $block.Status 1 }}
+ Canonical
+ {{ else if eq $block.Status 2 }}
+ Orphaned
+ {{ else }}
+ Unknown
+ {{ end }}
+
+ {{ formatRecentTimeShort $block.Ts }}
+ {{ formatEthFromGwei $block.Value }}
+
+ {{ end }}
+ {{ else }}
+
+
+
+
+ {{ template "timeline_svg" }}
+
+
+
+
+ {{ end }}
+
+
+
+
+
+{{ end }}
diff --git a/templates/builder/recentDeposits.html b/templates/builder/recentDeposits.html
new file mode 100644
index 000000000..5d676a8a1
--- /dev/null
+++ b/templates/builder/recentDeposits.html
@@ -0,0 +1,54 @@
+{{ define "recentDeposits" }}
+
+
+
+
+
+
+ Type
+ Slot
+ Time
+ Status
+
+
+
+ {{ if gt (len .RecentDeposits) 0 }}
+ {{ range $i, $deposit := .RecentDeposits }}
+
+
+ {{ if eq $deposit.Type "exit" }}
+ Voluntary Exit
+ {{ else if eq $deposit.Type "deposit" }}
+ Deposit
+ {{ else }}
+ {{ $deposit.Type }}
+ {{ end }}
+
+ {{ formatAddCommas $deposit.SlotNumber }}
+ {{ formatRecentTimeShort $deposit.Time }}
+
+ {{ if $deposit.Orphaned }}
+ Orphaned
+ {{ else }}
+ Included
+ {{ end }}
+
+
+ {{ end }}
+ {{ else }}
+
+
+
+
+ {{ template "timeline_svg" }}
+
+
+
+
+ {{ end }}
+
+
+
+
+
+{{ end }}
diff --git a/templates/builders/builders.html b/templates/builders/builders.html
new file mode 100644
index 000000000..25869e1a2
--- /dev/null
+++ b/templates/builders/builders.html
@@ -0,0 +1,303 @@
+{{ define "page" }}
+
+
+
Builders Overview
+
+
+ Home
+ Builders
+ Overview
+
+
+
+
+
+
+
+
+
+
+ {{ if gt .TotalPages 1 }}
+
+ {{ end }}
+
+
+
+
+{{ end }}
+{{ define "js" }}
+
+
+{{ end }}
+{{ define "css" }}
+
+
+{{ end }}
diff --git a/templates/deposits/deposits.html b/templates/deposits/deposits.html
index 6945e6b2f..c95b3ba8b 100644
--- a/templates/deposits/deposits.html
+++ b/templates/deposits/deposits.html
@@ -162,7 +162,11 @@ This table displays the deposits received by the Beac
{{ if $deposit.ValidatorExists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ end }}
{{ else }}
0x{{ printf "%x" $deposit.PublicKey }}
{{ end }}
@@ -187,18 +191,18 @@ This table displays the deposits received by the Beac
{{ end }}
{{ if $deposit.IsQueued }}
-
Queued
{{ end }}
{{ if $deposit.InvalidSignature }}
-
@@ -295,7 +299,11 @@ This table displays the deposits made for validators
{{ if $deposit.ValidatorExists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ end }}
{{ else }}
0x{{ printf "%x" $deposit.PublicKey }}
{{ end }}
@@ -410,7 +418,11 @@ This table displays deposits waiting to be activated
{{ if $deposit.ValidatorExists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ end }}
{{ else }}
0x{{ printf "%x" $deposit.PublicKey }}
{{ end }}
diff --git a/templates/el_withdrawals/el_withdrawals.html b/templates/el_withdrawals/el_withdrawals.html
index bd0019c6b..f68ca0306 100644
--- a/templates/el_withdrawals/el_withdrawals.html
+++ b/templates/el_withdrawals/el_withdrawals.html
@@ -187,7 +187,11 @@
{{- if $request.ValidatorValid }}
- {{ formatValidatorWithIndex $request.ValidatorIndex $request.ValidatorName }}
+ {{- if $request.IsBuilder }}
+ {{ formatBuilderWithIndex $request.ValidatorIndex $request.ValidatorName }}
+ {{- else }}
+ {{ formatValidatorWithIndex $request.ValidatorIndex $request.ValidatorName }}
+ {{- end }}
{{- else }}
0x{{ printf "%x" $request.PublicKey }}
diff --git a/templates/epoch/epoch.html b/templates/epoch/epoch.html
index f047ee436..8f6a8b271 100644
--- a/templates/epoch/epoch.html
+++ b/templates/epoch/epoch.html
@@ -177,15 +177,15 @@
{{ formatRecentTimeShort $slot.Ts }}
diff --git a/templates/exits/exits.html b/templates/exits/exits.html
index e046b44fa..70f6015d9 100644
--- a/templates/exits/exits.html
+++ b/templates/exits/exits.html
@@ -138,7 +138,11 @@ This table displays the most recent voluntary exit re
{{ formatRecentTimeShort $exit.Time }}
- {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }}
+ {{ if $exit.IsBuilder }}
+ {{ formatBuilderWithIndex $exit.ValidatorIndex $exit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }}
+ {{ end }}
diff --git a/templates/included_deposits/included_deposits.html b/templates/included_deposits/included_deposits.html
index 85f8d5b59..9b25aaf09 100644
--- a/templates/included_deposits/included_deposits.html
+++ b/templates/included_deposits/included_deposits.html
@@ -194,7 +194,11 @@
{{ if $deposit.ValidatorExists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ end }}
{{ else }}
0x{{ printf "%x" $deposit.PublicKey }}
{{ end }}
diff --git a/templates/index/recentBlocks.html b/templates/index/recentBlocks.html
index 47f381bc1..172084371 100644
--- a/templates/index/recentBlocks.html
+++ b/templates/index/recentBlocks.html
@@ -41,9 +41,9 @@ Genesis
Missed
- Proposed
- Missed (Orphaned)
- Unknown
+ Proposed
+ Missed (Orphaned)
+ Unknown
@@ -74,11 +74,11 @@ Missed
{{ else if eq .Status 1 }}
- Proposed
+ Proposed
{{ else if eq .Status 2 }}
- Missed (Orphaned)
+ Missed (Orphaned)
{{ else }}
- Unknown
+ Unknown
{{ end }}
{{ formatRecentTimeShort $block.Ts }}
diff --git a/templates/index/recentSlots.html b/templates/index/recentSlots.html
index 8b74d6755..12b8066ff 100644
--- a/templates/index/recentSlots.html
+++ b/templates/index/recentSlots.html
@@ -42,9 +42,9 @@ Genesis
Missed
-
Proposed
-
Missed (Orphaned)
-
Unknown
+
Proposed
+
Missed (Orphaned)
+
Unknown
@@ -97,11 +97,11 @@ Missed
{{ else if eq .Status 1 }}
- Proposed
+ Proposed
{{ else if eq .Status 2 }}
- Orphaned
+ Missed (Orphaned)
{{ else }}
- Unknown
+ Unknown
{{ end }}
{{ formatRecentTimeShort $slot.Ts }}
diff --git a/templates/initiated_deposits/initiated_deposits.html b/templates/initiated_deposits/initiated_deposits.html
index 75950ac09..8a9b577b9 100644
--- a/templates/initiated_deposits/initiated_deposits.html
+++ b/templates/initiated_deposits/initiated_deposits.html
@@ -166,7 +166,11 @@
{{ if $deposit.ValidatorExists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ end }}
{{ else }}
0x{{ printf "%x" $deposit.PublicKey }}
{{ end }}
diff --git a/templates/queued_deposits/queued_deposits.html b/templates/queued_deposits/queued_deposits.html
index 424bf6370..da75a769c 100644
--- a/templates/queued_deposits/queued_deposits.html
+++ b/templates/queued_deposits/queued_deposits.html
@@ -135,7 +135,11 @@
{{ if $deposit.ValidatorExists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{ end }}
{{ else }}
0x{{ printf "%x" $deposit.PublicKey }}
{{ end }}
diff --git a/templates/slot/attestations.html b/templates/slot/attestations.html
index 17ad6eaae..09068b912 100644
--- a/templates/slot/attestations.html
+++ b/templates/slot/attestations.html
@@ -12,9 +12,25 @@
Slot:
-
+
+
+ {{ html "" }}
+ Payload Status:
+ {{ html "" }}
+
+
+ {{ html "" }}
+ FULL
+ {{ html "" }}
+ {{ html "" }}
+ EMPTY
+ {{ html "" }}
+ {{ html "" }}
+ UNKNOWN
+ {{ html "" }}
+
@@ -184,6 +200,7 @@
self.signature = base64ToBytes(data.signature);
self.validators = data.validators;
self.included_validators = data.included_validators;
+ self.payload_status = data.payload_status !== undefined ? data.payload_status : null;
self.showDetails = ko.observable(false);
diff --git a/templates/slot/bids.html b/templates/slot/bids.html
new file mode 100644
index 000000000..37f6184f8
--- /dev/null
+++ b/templates/slot/bids.html
@@ -0,0 +1,42 @@
+{{ define "block_bids" }}
+
+
+
+
+ Builder
+ Block Hash
+ Fee Recipient
+ Gas Limit
+ Value
+ EL Payment
+ Total
+
+
+
+ {{ range $i, $bid := .Block.Bids }}
+
+
+ {{ if $bid.IsSelfBuilt }}
+ Self-built
+ {{ else }}
+ {{ formatValidatorWithIndex $bid.BuilderIndex $bid.BuilderName }}
+ {{ end }}
+ {{ if $bid.IsWinning }}Winner {{ end }}
+
+
+
+ 0x{{ printf "%x" $bid.BlockHash }}
+
+
+
+ {{ ethAddressLink $bid.FeeRecipient }}
+ {{ formatAddCommas $bid.GasLimit }}
+ {{ formatEthFromGwei $bid.Value }}
+ {{ formatEthFromGwei $bid.ElPayment }}
+ {{ formatEthFromGwei $bid.TotalValue }}
+
+ {{ end }}
+
+
+
+{{ end }}
diff --git a/templates/slot/consolidation_requests.html b/templates/slot/consolidation_requests.html
index 61a1e6140..4f84782d7 100644
--- a/templates/slot/consolidation_requests.html
+++ b/templates/slot/consolidation_requests.html
@@ -28,7 +28,11 @@
{{- if $consolidationreq.SourceFound }}
- {{ formatValidatorWithIndex $consolidationreq.SourceIndex $consolidationreq.SourceName }}
+ {{- if $consolidationreq.SourceIsBuilder }}
+ {{ formatBuilderWithIndex $consolidationreq.SourceIndex $consolidationreq.SourceName }}
+ {{- else }}
+ {{ formatValidatorWithIndex $consolidationreq.SourceIndex $consolidationreq.SourceName }}
+ {{- end }}
{{- else }}
?
{{- end }}
@@ -41,7 +45,11 @@
{{- if $consolidationreq.TargetFound }}
- {{ formatValidatorWithIndex $consolidationreq.TargetIndex $consolidationreq.TargetName }}
+ {{- if $consolidationreq.TargetIsBuilder }}
+ {{ formatBuilderWithIndex $consolidationreq.TargetIndex $consolidationreq.TargetName }}
+ {{- else }}
+ {{ formatValidatorWithIndex $consolidationreq.TargetIndex $consolidationreq.TargetName }}
+ {{- end }}
{{- else }}
?
{{- end }}
diff --git a/templates/slot/deposit_requests.html b/templates/slot/deposit_requests.html
index bbbc6a8bd..dda82c410 100644
--- a/templates/slot/deposit_requests.html
+++ b/templates/slot/deposit_requests.html
@@ -16,13 +16,21 @@
{{ $deposit.Index }}
+ {{- if $deposit.IsBuilder }}
+
+ {{- else }}
+ {{- end }}
0x{{ printf "%x" $deposit.PublicKey }}
{{- if $deposit.Exists }}
- {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{- if $deposit.IsBuilder }}
+ {{ formatBuilderWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{- else }}
+ {{ formatValidatorWithIndex $deposit.ValidatorIndex $deposit.ValidatorName }}
+ {{- end }}
{{- else }}
?
{{- end }}
diff --git a/templates/slot/overview.html b/templates/slot/overview.html
index 40c031a5a..eb5505275 100644
--- a/templates/slot/overview.html
+++ b/templates/slot/overview.html
@@ -204,15 +204,27 @@
{{ end }}
- {{ if .Block.ExecutionData }}
+
+ {{ if .Block.PayloadHeader }}
{{ $block := .Block }}
- {{ with .Block.ExecutionData }}
+ {{ with .Block.PayloadHeader }}
-
Execution Payload:
+
Payload Header:
+
-
Block Number:
-
{{ ethBlockLink .BlockNumber }}
+
Payload Status:
+
+ {{ if eq .PayloadStatus 0 }}
+ Missing
+ {{ else if eq .PayloadStatus 1 }}
+ Revealed
+ {{ else if eq .PayloadStatus 2 }}
+ Orphaned
+ {{ else }}
+ Unknown
+ {{ end }}
+
@@ -226,11 +238,85 @@
Parent Hash:
- 0x{{ printf "%x" .ParentHash }}
-
+ {{ ethBlockHashLink .ParentBlockHash }}
+
+
+
+
+
+
Builder:
+
+ {{ formatBuilderWithIndex .BuilderIndex .BuilderName }}
+
+
Block Value:
+
+ {{ formatEthFromGwei .Value }}
+
+
+
+
+
Gas Limit:
+
+ {{ .GasLimit }}
+
+
+
+
+
Blob KZG Commitments:
+
+ {{ len .BlobKZGCommitments }} blob{{ if ne (len .BlobKZGCommitments) 1 }}s{{ end }}
+ {{ if .BlobKZGCommitments }}
+
+ Show commitments
+
+
+ {{ range $i, $c := .BlobKZGCommitments }}
+
+ {{ $i }}:
+ 0x{{ printf "%x" $c }}
+
+
+ {{ end }}
+
+ {{ end }}
+
+
+
+
+ {{ end }}
+ {{ end }}
+ {{ if .Block.ExecutionData }}
+ {{ $block := .Block }}
+ {{ with .Block.ExecutionData }}
+
+
Execution Payload:
+
+
+
Block Number:
+
{{ ethBlockLink .BlockNumber }}
+
+
+ {{ if not $block.PayloadHeader }}
+
+
Block Hash:
+
+ {{ ethBlockHashLink .BlockHash }}
+
+
+
+
+
+
Parent Hash:
+
+ 0x{{ printf "%x" .ParentHash }}
+
+
+
+ {{ end }}
+
{{ if .StateRoot }}
State Root:
@@ -281,10 +367,12 @@
-
-
Gas Limit:
-
{{ formatAddCommas .GasLimit }}
-
+ {{ if not $block.PayloadHeader }}
+
+
Gas Limit:
+
{{ formatAddCommas .GasLimit }}
+
+ {{ end }}
Base fee per gas:
diff --git a/templates/slot/ptc_votes.html b/templates/slot/ptc_votes.html
new file mode 100644
index 000000000..094ca327e
--- /dev/null
+++ b/templates/slot/ptc_votes.html
@@ -0,0 +1,108 @@
+{{ define "block_ptc_votes" }}
+
+ {{ if .Block.PtcVotes }}
+
+
+ PTC (Payload Timeliness Committee) votes included in this block are for
+
slot {{ .Block.PtcVotes.VotedSlot }} (the previous slot).
+ {{ if .Block.PtcVotes.VotedBlockRoot }}
+
Voted block root:
0x{{ printf "%x" .Block.PtcVotes.VotedBlockRoot }}
+ {{ end }}
+
+
+
+
+
+
+
{{ formatParticipation .Block.PtcVotes.Participation }}
+ Participation
+
+
+
+
+
+
+
{{ len .Block.PtcVotes.Aggregates }}
+ Aggregates
+
+
+
+
+
+
+
{{ .Block.PtcVotes.TotalPtcSize }}
+ Committee Size
+
+
+
+
+
+
Vote Aggregates
+ {{ range $i, $agg := .Block.PtcVotes.Aggregates }}
+
+ {{ end }}
+
+ {{ if gt .Block.PtcVotes.NonVoterCount 0 }}
+
+ {{ end }}
+ {{ else }}
+
No PTC vote data available.
+ {{ end }}
+
+{{ end }}
diff --git a/templates/slot/slot.html b/templates/slot/slot.html
index 74fe5c764..fc9bf9b7e 100644
--- a/templates/slot/slot.html
+++ b/templates/slot/slot.html
@@ -43,9 +43,19 @@
+ Bids {{ .Block.BidsCount }}
+
+ {{ end }}
Attestations {{ .Block.AttestationsCount }}
+ {{ if gt .Block.PtcVotesCount 0 }}
+
+ PTC Votes {{ .Block.PtcVotesCount }}
+
+ {{ end }}
{{ if gt .Block.DepositsCount 0 }}
Deposits {{ .Block.DepositsCount }}
@@ -258,6 +268,30 @@ Showing {{ .Block.ConsolidationRequestsC
{{ template "block_consolidation_requests" . }}
{{ end }}
+ {{ if gt .Block.BidsCount 0 }}
+
+
+
+
+
Showing {{ .Block.BidsCount }} Execution Payload Bids
+
+
+ {{ template "block_bids" . }}
+
+
+ {{ end }}
+ {{ if gt .Block.PtcVotesCount 0 }}
+
+
+
+
+
Showing {{ .Block.PtcVotesCount }} PTC Votes (for slot {{ .Block.PtcVotes.VotedSlot }})
+
+
+ {{ template "block_ptc_votes" . }}
+
+
+ {{ end }}
{{ if .Block }}
diff --git a/templates/slot/voluntary_exits.html b/templates/slot/voluntary_exits.html
index 0ffbc0333..187999d64 100644
--- a/templates/slot/voluntary_exits.html
+++ b/templates/slot/voluntary_exits.html
@@ -13,7 +13,13 @@
{{ range $i, $exit := .Block.VoluntaryExits }}
{{ $i }}
- {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }}
+
+ {{ if $exit.IsBuilder }}
+ {{ formatBuilderWithIndex $exit.ValidatorIndex $exit.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $exit.ValidatorIndex $exit.ValidatorName }}
+ {{ end }}
+
{{ $exit.Epoch }}
0x{{ printf "%x" $exit.Signature }}
diff --git a/templates/slot/withdrawal_requests.html b/templates/slot/withdrawal_requests.html
index 1e842771a..c73d111ef 100644
--- a/templates/slot/withdrawal_requests.html
+++ b/templates/slot/withdrawal_requests.html
@@ -20,7 +20,11 @@
{{- if $withdrawalreq.Exists }}
- {{ formatValidatorWithIndex $withdrawalreq.ValidatorIndex $withdrawalreq.ValidatorName }}
+ {{- if $withdrawalreq.IsBuilder }}
+ {{ formatBuilderWithIndex $withdrawalreq.ValidatorIndex $withdrawalreq.ValidatorName }}
+ {{- else }}
+ {{ formatValidatorWithIndex $withdrawalreq.ValidatorIndex $withdrawalreq.ValidatorName }}
+ {{- end }}
{{- else }}
?
{{- end }}
diff --git a/templates/slot/withdrawals.html b/templates/slot/withdrawals.html
index e07ec002f..e76b5bdfe 100644
--- a/templates/slot/withdrawals.html
+++ b/templates/slot/withdrawals.html
@@ -13,7 +13,13 @@
{{ range $i, $withdrawal := .Block.Withdrawals }}
{{ $withdrawal.Index }}
- {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }}
+
+ {{ if $withdrawal.IsBuilder }}
+ {{ formatBuilderWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }}
+ {{ else }}
+ {{ formatValidatorWithIndex $withdrawal.ValidatorIndex $withdrawal.ValidatorName }}
+ {{ end }}
+
{{ ethAddressLink $withdrawal.Address }}
{{ formatEthFromGwei $withdrawal.Amount }}
diff --git a/templates/slots/slots.html b/templates/slots/slots.html
index a90bafe82..6e97fbbfb 100644
--- a/templates/slots/slots.html
+++ b/templates/slots/slots.html
@@ -132,9 +132,9 @@
Slots
{{ if eq $slot.Slot 0 }}
Genesis
{{ else if eq $slot.Status 1 }}
-
Proposed
+
Proposed
{{ else if eq $slot.Status 2 }}
-
Orphaned
+
Missed (Orphaned)
{{ else if $slot.Scheduled }}
Scheduled
{{ else if not $slot.Synchronized }}
@@ -142,7 +142,7 @@
Slots
{{ else if eq $slot.Status 0 }}
Missed
{{ else }}
-
Unknown
+
Unknown
{{ end }}
{{ end }}
diff --git a/templates/slots_filtered/slots_filtered.html b/templates/slots_filtered/slots_filtered.html
index cd4af7a48..a901e2eda 100644
--- a/templates/slots_filtered/slots_filtered.html
+++ b/templates/slots_filtered/slots_filtered.html
@@ -97,25 +97,27 @@
Filtered Slots
- Missing Blocks
+ Block Status
-
-
- Hide missing
- Show all
- Missing only
+
+
+
+ Missing
+ Canonical
+ Orphaned
- Orphaned Blocks
+ Payload Status
-
-
- Hide orphaned
- Show all
- Orphaned only
+
+
+
+ Missing
+ Canonical
+ Orphaned
@@ -310,9 +312,9 @@
Filtered Slots
{{- if eq $slot.Slot 0 }}
Genesis
{{- else if eq $slot.Status 1 }}
-
Proposed
+
Proposed
{{- else if eq $slot.Status 2 }}
-
Orphaned
+
Missed (Orphaned)
{{- else if $slot.Scheduled }}
Scheduled
{{- else if not $slot.Synchronized }}
@@ -320,7 +322,7 @@
Filtered Slots
{{- else if eq $slot.Status 0 }}
Missed
{{- else }}
-
Unknown
+
Unknown
{{- end }}
{{- end }}
@@ -463,17 +465,25 @@
Filtered Slots