diff --git a/api-spec/openapi/swagger/ark/v1/indexer.openapi.json b/api-spec/openapi/swagger/ark/v1/indexer.openapi.json index e390b7b74..4cd552d57 100644 --- a/api-spec/openapi/swagger/ark/v1/indexer.openapi.json +++ b/api-spec/openapi/swagger/ark/v1/indexer.openapi.json @@ -623,6 +623,13 @@ "type": "string" } }, + { + "name": "pageToken", + "in": "query", + "schema": { + "type": "string" + } + }, { "name": "page.size", "in": "query", @@ -1052,6 +1059,9 @@ "page": { "$ref": "#/components/schemas/IndexerPageRequest" }, + "pageToken": { + "type": "string" + }, "token": { "type": "string", "description": "Valid auth_token can also be used if the ownership has already been proved.\nA valid token obtained from GetVirtualTxs rpc can be recycled for this request." @@ -1072,6 +1082,9 @@ "$ref": "#/components/schemas/IndexerChain" } }, + "nextPageToken": { + "type": "string" + }, "page": { "$ref": "#/components/schemas/IndexerPageResponse" } @@ -1461,6 +1474,10 @@ "type": "integer", "format": "int64" }, + "depth": { + "type": "integer", + "format": "uint32" + }, "expiresAt": { "type": "integer", "format": "int64" diff --git a/api-spec/openapi/swagger/ark/v1/service.openapi.json b/api-spec/openapi/swagger/ark/v1/service.openapi.json index 0782a48ee..acca45935 100644 --- a/api-spec/openapi/swagger/ark/v1/service.openapi.json +++ b/api-spec/openapi/swagger/ark/v1/service.openapi.json @@ -1534,6 +1534,10 @@ "type": "integer", "format": "int64" }, + "depth": { + "type": "integer", + "format": "uint32" + }, "expiresAt": { "type": "integer", "format": "int64" diff --git a/api-spec/openapi/swagger/ark/v1/types.openapi.json b/api-spec/openapi/swagger/ark/v1/types.openapi.json index 382760334..5641ec0f4 100644 --- a/api-spec/openapi/swagger/ark/v1/types.openapi.json +++ b/api-spec/openapi/swagger/ark/v1/types.openapi.json @@ -435,6 +435,10 @@ "type": "integer", "format": "int64" }, + "depth": { + "type": "integer", + "format": "uint32" + }, "expiresAt": { "type": "integer", 
"format": "int64" diff --git a/api-spec/protobuf/ark/v1/indexer.proto b/api-spec/protobuf/ark/v1/indexer.proto index 74435c0f9..3a3cc3930 100644 --- a/api-spec/protobuf/ark/v1/indexer.proto +++ b/api-spec/protobuf/ark/v1/indexer.proto @@ -217,12 +217,14 @@ message GetVtxoChainRequest { // A valid token obtained from GetVirtualTxs rpc can be recycled for this request. string token = 4; } + string page_token = 5; } message GetVtxoChainResponse { repeated IndexerChain chain = 1; IndexerPageResponse page = 2; // Auth token can be used for other rpcs related to this vtxo/tx that require proof of ownership. string auth_token = 3; + string next_page_token = 4; } message GetVirtualTxsRequest { @@ -302,6 +304,7 @@ message IndexerVtxo { string settled_by = 12; string ark_txid = 13; repeated IndexerAsset assets = 14; + uint32 depth = 15; } message IndexerAsset { diff --git a/api-spec/protobuf/ark/v1/types.proto b/api-spec/protobuf/ark/v1/types.proto index 01596acaa..4266bf812 100644 --- a/api-spec/protobuf/ark/v1/types.proto +++ b/api-spec/protobuf/ark/v1/types.proto @@ -29,6 +29,7 @@ message Vtxo { string settled_by = 12; string ark_txid = 13; repeated Asset assets = 14; + uint32 depth = 15; } message Asset { diff --git a/api-spec/protobuf/gen/ark/v1/indexer.pb.go b/api-spec/protobuf/gen/ark/v1/indexer.pb.go index afcdbc9fe..eac773145 100644 --- a/api-spec/protobuf/gen/ark/v1/indexer.pb.go +++ b/api-spec/protobuf/gen/ark/v1/indexer.pb.go @@ -860,6 +860,7 @@ type GetVtxoChainRequest struct { // *GetVtxoChainRequest_Intent // *GetVtxoChainRequest_Token Auth isGetVtxoChainRequest_Auth `protobuf_oneof:"auth"` + PageToken string `protobuf:"bytes,5,opt,name=page_token,json=pageToken,proto3" json:"page_token,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -933,6 +934,13 @@ func (x *GetVtxoChainRequest) GetToken() string { return "" } +func (x *GetVtxoChainRequest) GetPageToken() string { + if x != nil { + return x.PageToken + } + return "" +} + 
type isGetVtxoChainRequest_Auth interface { isGetVtxoChainRequest_Auth() } @@ -958,6 +966,7 @@ type GetVtxoChainResponse struct { Page *IndexerPageResponse `protobuf:"bytes,2,opt,name=page,proto3" json:"page,omitempty"` // Auth token can be used for other rpcs related to this vtxo/tx that require proof of ownership. AuthToken string `protobuf:"bytes,3,opt,name=auth_token,json=authToken,proto3" json:"auth_token,omitempty"` + NextPageToken string `protobuf:"bytes,4,opt,name=next_page_token,json=nextPageToken,proto3" json:"next_page_token,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1013,6 +1022,13 @@ func (x *GetVtxoChainResponse) GetAuthToken() string { return "" } +func (x *GetVtxoChainResponse) GetNextPageToken() string { + if x != nil { + return x.NextPageToken + } + return "" +} + type GetVirtualTxsRequest struct { state protoimpl.MessageState `protogen:"open.v1"` Txids []string `protobuf:"bytes,1,rep,name=txids,proto3" json:"txids,omitempty"` @@ -1618,6 +1634,7 @@ type IndexerVtxo struct { SettledBy string `protobuf:"bytes,12,opt,name=settled_by,json=settledBy,proto3" json:"settled_by,omitempty"` ArkTxid string `protobuf:"bytes,13,opt,name=ark_txid,json=arkTxid,proto3" json:"ark_txid,omitempty"` Assets []*IndexerAsset `protobuf:"bytes,14,rep,name=assets,proto3" json:"assets,omitempty"` + Depth uint32 `protobuf:"varint,15,opt,name=depth,proto3" json:"depth,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -1750,6 +1767,13 @@ func (x *IndexerVtxo) GetAssets() []*IndexerAsset { return nil } +func (x *IndexerVtxo) GetDepth() uint32 { + if x != nil { + return x.Depth + } + return 0 +} + type IndexerAsset struct { state protoimpl.MessageState `protogen:"open.v1"` AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` @@ -2707,18 +2731,21 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\x06before\x18\t \x01(\x03R\x06before\"n\n" + 
"\x10GetVtxosResponse\x12)\n" + "\x05vtxos\x18\x01 \x03(\v2\x13.ark.v1.IndexerVtxoR\x05vtxos\x12/\n" + - "\x04page\x18\x02 \x01(\v2\x1b.ark.v1.IndexerPageResponseR\x04page\"\xcb\x01\n" + + "\x04page\x18\x02 \x01(\v2\x1b.ark.v1.IndexerPageResponseR\x04page\"\xea\x01\n" + "\x13GetVtxoChainRequest\x123\n" + "\boutpoint\x18\x01 \x01(\v2\x17.ark.v1.IndexerOutpointR\boutpoint\x12.\n" + "\x04page\x18\x02 \x01(\v2\x1a.ark.v1.IndexerPageRequestR\x04page\x12/\n" + "\x06intent\x18\x03 \x01(\v2\x15.ark.v1.IndexerIntentH\x00R\x06intent\x12\x16\n" + - "\x05token\x18\x04 \x01(\tH\x00R\x05tokenB\x06\n" + - "\x04auth\"\x92\x01\n" + + "\x05token\x18\x04 \x01(\tH\x00R\x05token\x12\x1d\n" + + "\n" + + "page_token\x18\x05 \x01(\tR\tpageTokenB\x06\n" + + "\x04auth\"\xba\x01\n" + "\x14GetVtxoChainResponse\x12*\n" + "\x05chain\x18\x01 \x03(\v2\x14.ark.v1.IndexerChainR\x05chain\x12/\n" + "\x04page\x18\x02 \x01(\v2\x1b.ark.v1.IndexerPageResponseR\x04page\x12\x1d\n" + "\n" + - "auth_token\x18\x03 \x01(\tR\tauthToken\"\xad\x01\n" + + "auth_token\x18\x03 \x01(\tR\tauthToken\x12&\n" + + "\x0fnext_page_token\x18\x04 \x01(\tR\rnextPageToken\"\xad\x01\n" + "\x14GetVirtualTxsRequest\x12\x14\n" + "\x05txids\x18\x01 \x03(\tR\x05txids\x12.\n" + "\x04page\x18\x02 \x01(\v2\x1a.ark.v1.IndexerPageRequestR\x04page\x12/\n" + @@ -2758,7 +2785,7 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\bchildren\x18\x02 \x03(\v2!.ark.v1.IndexerNode.ChildrenEntryR\bchildren\x1a;\n" + "\rChildrenEntry\x12\x10\n" + "\x03key\x18\x01 \x01(\rR\x03key\x12\x14\n" + - "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xde\x03\n" + + "\x05value\x18\x02 \x01(\tR\x05value:\x028\x01\"\xf4\x03\n" + "\vIndexerVtxo\x123\n" + "\boutpoint\x18\x01 \x01(\v2\x17.ark.v1.IndexerOutpointR\boutpoint\x12\x1d\n" + "\n" + @@ -2778,7 +2805,8 @@ const file_ark_v1_indexer_proto_rawDesc = "" + "\n" + "settled_by\x18\f \x01(\tR\tsettledBy\x12\x19\n" + "\bark_txid\x18\r \x01(\tR\aarkTxid\x12,\n" + - "\x06assets\x18\x0e 
\x03(\v2\x14.ark.v1.IndexerAssetR\x06assets\"A\n" + + "\x06assets\x18\x0e \x03(\v2\x14.ark.v1.IndexerAssetR\x06assets\x12\x14\n" + + "\x05depth\x18\x0f \x01(\rR\x05depth\"A\n" + "\fIndexerAsset\x12\x19\n" + "\basset_id\x18\x01 \x01(\tR\aassetId\x12\x16\n" + "\x06amount\x18\x02 \x01(\x04R\x06amount\"\x8b\x01\n" + diff --git a/api-spec/protobuf/gen/ark/v1/types.pb.go b/api-spec/protobuf/gen/ark/v1/types.pb.go index f1a2c2615..99dac62b6 100644 --- a/api-spec/protobuf/gen/ark/v1/types.pb.go +++ b/api-spec/protobuf/gen/ark/v1/types.pb.go @@ -141,6 +141,7 @@ type Vtxo struct { SettledBy string `protobuf:"bytes,12,opt,name=settled_by,json=settledBy,proto3" json:"settled_by,omitempty"` ArkTxid string `protobuf:"bytes,13,opt,name=ark_txid,json=arkTxid,proto3" json:"ark_txid,omitempty"` Assets []*Asset `protobuf:"bytes,14,rep,name=assets,proto3" json:"assets,omitempty"` + Depth uint32 `protobuf:"varint,15,opt,name=depth,proto3" json:"depth,omitempty"` unknownFields protoimpl.UnknownFields sizeCache protoimpl.SizeCache } @@ -273,6 +274,13 @@ func (x *Vtxo) GetAssets() []*Asset { return nil } +func (x *Vtxo) GetDepth() uint32 { + if x != nil { + return x.Depth + } + return 0 +} + type Asset struct { state protoimpl.MessageState `protogen:"open.v1"` AssetId string `protobuf:"bytes,1,opt,name=asset_id,json=assetId,proto3" json:"asset_id,omitempty"` @@ -1580,7 +1588,7 @@ const file_ark_v1_types_proto_rawDesc = "" + "\x04vout\x18\x02 \x01(\rR\x04vout\"l\n" + "\x05Input\x12,\n" + "\boutpoint\x18\x01 \x01(\v2\x10.ark.v1.OutpointR\boutpoint\x125\n" + - "\ftaproot_tree\x18\x02 \x01(\v2\x12.ark.v1.TapscriptsR\vtaprootTree\"\xc9\x03\n" + + "\ftaproot_tree\x18\x02 \x01(\v2\x12.ark.v1.TapscriptsR\vtaprootTree\"\xdf\x03\n" + "\x04Vtxo\x12,\n" + "\boutpoint\x18\x01 \x01(\v2\x10.ark.v1.OutpointR\boutpoint\x12\x16\n" + "\x06amount\x18\x02 \x01(\x04R\x06amount\x12\x16\n" + @@ -1600,7 +1608,8 @@ const file_ark_v1_types_proto_rawDesc = "" + "\n" + "settled_by\x18\f \x01(\tR\tsettledBy\x12\x19\n" 
+ "\bark_txid\x18\r \x01(\tR\aarkTxid\x12%\n" + - "\x06assets\x18\x0e \x03(\v2\r.ark.v1.AssetR\x06assets\":\n" + + "\x06assets\x18\x0e \x03(\v2\r.ark.v1.AssetR\x06assets\x12\x14\n" + + "\x05depth\x18\x0f \x01(\rR\x05depth\":\n" + "\x05Asset\x12\x19\n" + "\basset_id\x18\x01 \x01(\tR\aassetId\x12\x16\n" + "\x06amount\x18\x02 \x01(\x04R\x06amount\",\n" + diff --git a/docker-compose.regtest.yml b/docker-compose.regtest.yml index 31a91a74c..907507c76 100644 --- a/docker-compose.regtest.yml +++ b/docker-compose.regtest.yml @@ -20,7 +20,7 @@ services: container_name: nbxplorer ports: - 32838:32838 - image: nicolasdorier/nbxplorer:2.5.30 + image: nicolasdorier/nbxplorer:2.5.30-1 environment: - NBXPLORER_NETWORK=regtest - NBXPLORER_CHAINS=btc diff --git a/internal/config/config.go b/internal/config/config.go index b1028536d..37500341b 100644 --- a/internal/config/config.go +++ b/internal/config/config.go @@ -779,8 +779,14 @@ func (c *Config) IndexerService() (application.IndexerService, error) { return nil, fmt.Errorf("failed to get server signing pubkey: %w", err) } + var offchainTxCache ports.OffChainTxStore + if c.liveStore != nil { + offchainTxCache = c.liveStore.OffchainTxs() + } + return application.NewIndexerService( c.repo, c.wallet, privkey, signerPubkey, c.IndexerExposure, c.IndexerAuthTokenExpiry, + offchainTxCache, ) } diff --git a/internal/core/application/admin.go b/internal/core/application/admin.go index 604b12516..707b821e8 100644 --- a/internal/core/application/admin.go +++ b/internal/core/application/admin.go @@ -705,11 +705,18 @@ func (a *adminService) saveBatchSweptEvents( } for _, leaf := range vtxosLeaves { - vtxo := domain.Outpoint{ - Txid: leaf.UnsignedTx.TxID(), - VOut: 0, + // The VTXO is the first non-anchor output; leaf txs can + // carry an anchor at vout 0, so the VTXO is not always at + // vout 0. extractVtxoOutpoint handles that. 
+ vtxo, err := extractVtxoOutpoint(leaf) + if err != nil { + log.WithError(err).Errorf( + "failed to extract vtxo outpoint from leaf %s", + leaf.UnsignedTx.TxID(), + ) + continue } - leafVtxos = append(leafVtxos, vtxo) + leafVtxos = append(leafVtxos, *vtxo) } } } @@ -729,7 +736,7 @@ func (a *adminService) saveBatchSweptEvents( } else { seen := make(map[string]struct{}) for _, leafVtxo := range leafVtxos { - children, err := vtxoRepo.GetAllChildrenVtxos(ctx, leafVtxo.Txid) + children, err := vtxoRepo.GetAllChildrenVtxos(ctx, leafVtxo) if err != nil { log.WithError(err).Error("error while getting children vtxos") continue diff --git a/internal/core/application/indexer.go b/internal/core/application/indexer.go index 993133743..e456ae584 100644 --- a/internal/core/application/indexer.go +++ b/internal/core/application/indexer.go @@ -3,10 +3,12 @@ package application import ( "bytes" "context" + "crypto/hmac" "crypto/sha256" "encoding/base64" "encoding/binary" "encoding/hex" + "encoding/json" "errors" "fmt" "math" @@ -25,6 +27,7 @@ import ( "github.com/btcsuite/btcd/btcutil/psbt" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" + log "github.com/sirupsen/logrus" ) const ( @@ -34,6 +37,11 @@ const ( maxPageSizeVtxoChain = 100 maxPageSizeVirtualTxs = 100 + // maxVtxoChainWalkSize is a hard upper bound applied when walking the full + // chain before paginating (GetVtxoChainByIntent). Prevents unbounded memory + // growth on pathologically deep chains. 
+ maxVtxoChainWalkSize = 50_000 + defaultAuthTokenTTL = 5 * time.Minute ) @@ -69,7 +77,7 @@ type IndexerService interface { ctx context.Context, outpoints []Outpoint, page *Page, ) (*GetVtxosResp, error) GetVtxoChain( - ctx context.Context, authToken string, vtxoKey Outpoint, page *Page, + ctx context.Context, authToken string, vtxoKey Outpoint, page *Page, pageToken string, ) (*VtxoChainResp, error) GetVtxoChainByIntent(ctx context.Context, intent Intent, page *Page) (*VtxoChainResp, error) GetVirtualTxs( @@ -83,13 +91,15 @@ type IndexerService interface { } type indexerService struct { - repoManager ports.RepoManager - wallet ports.WalletService - authPrvkey *btcec.PrivateKey // key used to sign auth tokens - signerPubkey *btcec.PublicKey // server's signing key, used for stripping signatures from txs - txExposure exposure - authTokenTTL time.Duration - tokenCache *tokenCache + repoManager ports.RepoManager + wallet ports.WalletService + authPrvkey *btcec.PrivateKey // key used to sign auth tokens + cursorHMACKey []byte // HMAC key for signing pagination cursors + signerPubkey *btcec.PublicKey // server's signing key, used for stripping signatures from txs + txExposure exposure + authTokenTTL time.Duration + tokenCache *tokenCache + offchainTxCache ports.OffChainTxStore } func NewIndexerService( @@ -99,6 +109,7 @@ func NewIndexerService( signerPubkey *btcec.PublicKey, txExposure string, authTokenExpirySec int64, + offchainTxCache ports.OffChainTxStore, ) (IndexerService, error) { // validate txExposure switch exposure(txExposure) { @@ -112,13 +123,23 @@ func NewIndexerService( ttl = time.Duration(authTokenExpirySec) * time.Second } + // Derive HMAC key for pagination cursors from the auth private key. + // This prevents clients from forging cursors with arbitrary outpoints. 
+ var cursorKey []byte + if privkey != nil { + h := sha256.Sum256(append(privkey.Serialize(), []byte("cursor-hmac")...)) + cursorKey = h[:] + } + svc := &indexerService{ - repoManager: repoManager, - wallet: wallet, - authPrvkey: privkey, - txExposure: exposure(txExposure), - authTokenTTL: ttl, - tokenCache: newTokenCache(ttl), + repoManager: repoManager, + wallet: wallet, + authPrvkey: privkey, + cursorHMACKey: cursorKey, + txExposure: exposure(txExposure), + authTokenTTL: ttl, + tokenCache: newTokenCache(ttl), + offchainTxCache: offchainTxCache, } if signerPubkey != nil { @@ -274,6 +295,20 @@ func (i *indexerService) GetVtxos( return nil, err } + // Mark vtxos that are pending-spent in the offchain tx cache. + // The DB projection updates asynchronously, so without this check + // clients can see stale spendable vtxos and build duplicate txs. + if i.offchainTxCache != nil { + for idx := range allVtxos { + if allVtxos[idx].Spent { + continue + } + if spent, _ := i.offchainTxCache.Includes(ctx, allVtxos[idx].Outpoint); spent { + allVtxos[idx].Spent = true + } + } + } + if spendableOnly { spendableVtxos := make([]domain.Vtxo, 0, len(allVtxos)) for _, vtxo := range allVtxos { @@ -326,7 +361,7 @@ func (i *indexerService) GetVtxosByOutpoint( } func (i *indexerService) GetVtxoChain( - ctx context.Context, authToken string, outpoint Outpoint, page *Page, + ctx context.Context, authToken string, vtxoKey Outpoint, page *Page, pageToken string, ) (*VtxoChainResp, error) { switch i.txExposure { case exposurePublic: @@ -334,39 +369,84 @@ func (i *indexerService) GetVtxoChain( case exposureWithheld: // Auth token is optional, validate it only if provided if authToken != "" { - hash, err := i.validateAuthToken(authToken) - if err != nil { + if err := i.validateChainAuth(authToken, vtxoKey, pageToken != ""); err != nil { return nil, err } - - outpoints, _, ok := i.tokenCache.getOutpoints(hash) - if !ok { - return nil, fmt.Errorf("auth token not found") - } - if _, ok := 
outpoints[outpoint.String()]; !ok { - return nil, fmt.Errorf("auth token is not for outpoint %s", outpoint) - } } case exposurePrivate: // Auth token is mandatory, always validate it - hash, err := i.validateAuthToken(authToken) - if err != nil { + if err := i.validateChainAuth(authToken, vtxoKey, pageToken != ""); err != nil { return nil, err } + } - outpoints, _, ok := i.tokenCache.getOutpoints(hash) - if !ok { - return nil, fmt.Errorf("auth token not found") + // Determine page size. + // Backward compat: nil page + empty token → return full chain (no pagination). + pageSize := math.MaxInt32 + if page != nil { + pageSize = int(page.PageSize) + if pageSize <= 0 { + pageSize = maxPageSizeVtxoChain } - if _, ok := outpoints[outpoint.String()]; !ok { - return nil, fmt.Errorf("auth token is not for outpoint %s", outpoint) + } else if pageToken != "" { + pageSize = maxPageSizeVtxoChain + } + + // Determine frontier: decode pageToken, or use [vtxoKey] for first page. + var frontier []domain.Outpoint + if pageToken != "" { + decoded, err := i.decodeChainCursor(pageToken) + if err != nil { + return nil, fmt.Errorf("invalid page_token: %w", err) } + frontier = decoded + } else { + frontier = []domain.Outpoint{vtxoKey} } - resp, _, err := i.getVtxoChain(ctx, outpoint, page) + + chain, _, nextToken, err := i.walkVtxoChain(ctx, frontier, pageSize) if err != nil { return nil, err } - return resp, nil + + return &VtxoChainResp{ + Chain: chain, + NextPageToken: nextToken, + }, nil +} + +// validateChainAuth validates the auth token for GetVtxoChain. On pagination +// continuations (isPaginating=true), if the signed timestamp has expired but +// the session is still active in the token cache (kept alive by touch on each +// page request), the token is accepted based on signature verification alone. 
+func (i *indexerService) validateChainAuth( + authToken string, vtxoKey Outpoint, isPaginating bool, +) error { + hash, err := i.validateAuthToken(authToken) + if err != nil && isPaginating { + // Token timestamp expired, but this is a pagination continuation. + // Verify signature only and check if the session is still live. + hash, err = i.verifyAuthTokenSignature(authToken) + if err != nil { + return err + } + if !i.tokenCache.isActive(hash) { + return fmt.Errorf("auth token expired") + } + } else if err != nil { + return err + } + + outpoints, _, ok := i.tokenCache.getOutpoints(hash) + if !ok { + return fmt.Errorf("auth token not found") + } + if _, ok := outpoints[vtxoKey.String()]; !ok { + return fmt.Errorf("auth token is not for outpoint %s", vtxoKey) + } + // Keep the session alive for pagination continuations. + i.tokenCache.touch(hash) + return nil } func (i *indexerService) GetVtxoChainByIntent( @@ -383,25 +463,48 @@ func (i *indexerService) GetVtxoChainByIntent( switch i.txExposure { case exposurePublic: - resp, _, err := i.getVtxoChain(ctx, outpoint, page) - return resp, err + chain, _, _, err := i.walkVtxoChain( + ctx, + []domain.Outpoint{outpoint}, + maxVtxoChainWalkSize+1, + ) + if err != nil { + return nil, err + } + if len(chain) > maxVtxoChainWalkSize { + return nil, fmt.Errorf("chain exceeds maximum size of %d", maxVtxoChainWalkSize) + } + txChain, pageResp := paginate(chain, page, maxPageSizeVtxoChain) + return &VtxoChainResp{Chain: txChain, Page: pageResp}, nil case exposureWithheld, exposurePrivate: if err := i.validateIntent(ctx, intent); err != nil { return nil, err } } - resp, allOutpoints, err := i.getVtxoChain(ctx, outpoint, page) + chain, allOutpoints, _, err := i.walkVtxoChain( + ctx, + []domain.Outpoint{outpoint}, + maxVtxoChainWalkSize+1, + ) if err != nil { return nil, err } + if len(chain) > maxVtxoChainWalkSize { + return nil, fmt.Errorf("chain exceeds maximum size of %d", maxVtxoChainWalkSize) + } token, err := 
i.createAuthToken(allOutpoints) if err != nil { return nil, fmt.Errorf("failed to create auth token: %w", err) } - resp.AuthToken = token - return resp, nil + + txChain, pageResp := paginate(chain, page, maxPageSizeVtxoChain) + return &VtxoChainResp{ + Chain: txChain, + Page: pageResp, + AuthToken: token, + }, nil } func (i *indexerService) GetVirtualTxs( @@ -508,37 +611,79 @@ func (i *indexerService) GetBatchSweepTxs( return txids, nil } -func (i *indexerService) getVtxoChain( - ctx context.Context, vtxoKey Outpoint, page *Page, -) (*VtxoChainResp, []Outpoint, error) { - chain, allOutpoints, err := i.buildVtxoChain(ctx, vtxoKey) - if err != nil { - return nil, nil, err - } - - txChain, pageResp := paginate(chain, page, maxPageSizeVtxoChain) - return &VtxoChainResp{ - Chain: txChain, - Page: pageResp, - }, allOutpoints, nil -} - -// buildVtxoChain builds the full chain of transactions for a given vtxo outpoint. -func (i *indexerService) buildVtxoChain( - ctx context.Context, outpoint Outpoint, -) ([]ChainTx, []Outpoint, error) { +// walkVtxoChain walks the VTXO chain from the given frontier outpoints, +// collecting chain transactions and all outpoints seen. +// If pageSize is reached, it returns early with a cursor token for the next page. +func (i *indexerService) walkVtxoChain( + ctx context.Context, frontier []domain.Outpoint, pageSize int, +) ([]ChainTx, []Outpoint, string, error) { chain := make([]ChainTx, 0) - nextVtxos := []domain.Outpoint{outpoint} + nextVtxos := frontier visited := make(map[string]bool) + offchainTxCache := make(map[string]*domain.OffchainTx) allOutpoints := make([]Outpoint, 0) - for len(nextVtxos) > 0 { - vtxos, err := i.repoManager.Vtxos().GetVtxos(ctx, nextVtxos) + // Lazy cache for VTXOs loaded during this page. + vtxoCache := make(map[string]domain.Vtxo) + loadedMarkers := make(map[string]bool) + + // Eagerly preload VTXOs and offchain txs by walking the marker DAG upward. 
+ // Failures in the marker-driven preload are treated as optimization misses: + // the per-hop walk loop below falls back to Vtxos().GetVtxos + ensureVtxosCached, + // so we log marker-repo errors here and continue instead of aborting. + if i.repoManager.Markers() != nil { + startVtxos, err := i.repoManager.Vtxos().GetVtxos(ctx, nextVtxos) if err != nil { - return nil, nil, err + return nil, nil, "", err + } + if err := i.preloadByMarkers(ctx, startVtxos, vtxoCache, offchainTxCache); err != nil { + log.WithError(err).Warnf( + "marker-driven preload failed for frontier of %d outpoints; "+ + "falling back to per-hop walk", len(nextVtxos), + ) + } + } + + for len(nextVtxos) > 0 { + if err := i.ensureVtxosCached(ctx, nextVtxos, vtxoCache, loadedMarkers); err != nil { + return nil, nil, "", err + } + + vtxos := make([]domain.Vtxo, 0, len(nextVtxos)) + for _, op := range nextVtxos { + if v, ok := vtxoCache[op.String()]; ok { + vtxos = append(vtxos, v) + } } if len(vtxos) == 0 { - return nil, nil, fmt.Errorf("vtxo not found for outpoint: %v", nextVtxos) + return nil, nil, "", fmt.Errorf("vtxo not found for outpoint: %v", nextVtxos) + } + + missingOffchainTxids := make(map[string]struct{}) + for _, vtxo := range vtxos { + if !vtxo.Preconfirmed { + continue + } + if _, ok := offchainTxCache[vtxo.Txid]; ok { + continue + } + missingOffchainTxids[vtxo.Txid] = struct{}{} + } + + if len(missingOffchainTxids) > 0 { + txids := make([]string, 0, len(missingOffchainTxids)) + for txid := range missingOffchainTxids { + txids = append(txids, txid) + } + + offchainTxs, err := i.repoManager.OffchainTxs().GetOffchainTxsByTxids(ctx, txids) + if err != nil { + return nil, nil, "", fmt.Errorf("failed to retrieve offchain txs: %s", err) + } + + for _, tx := range offchainTxs { + offchainTxCache[tx.ArkTxid] = tx + } } newNextVtxos := make([]domain.Outpoint, 0) @@ -547,6 +692,21 @@ func (i *indexerService) buildVtxoChain( if visited[key] { continue } + + // Early termination: save unprocessed 
VTXOs to frontier for next page. + // Check before marking visited so the current VTXO is included in the frontier. + if len(chain) >= pageSize { + remaining := make([]domain.Outpoint, 0) + for _, v := range vtxos { + if !visited[v.Outpoint.String()] { + remaining = append(remaining, v.Outpoint) + } + } + remaining = append(remaining, newNextVtxos...) + token := i.encodeChainCursor(remaining) + return chain, allOutpoints, token, nil + } + allOutpoints = append(allOutpoints, vtxo.Outpoint) visited[key] = true @@ -555,9 +715,14 @@ func (i *indexerService) buildVtxoChain( // also, we have to populate the newNextVtxos with the checkpoints inputs // in order to continue the chain in the next iteration if vtxo.Preconfirmed { - offchainTx, err := i.repoManager.OffchainTxs().GetOffchainTx(ctx, vtxo.Txid) - if err != nil { - return nil, nil, fmt.Errorf("failed to retrieve offchain tx: %s", err) + offchainTx, ok := offchainTxCache[vtxo.Txid] + if !ok { + var err error + offchainTx, err = i.repoManager.OffchainTxs().GetOffchainTx(ctx, vtxo.Txid) + if err != nil { + return nil, nil, "", fmt.Errorf("failed to retrieve offchain tx: %s", err) + } + offchainTxCache[vtxo.Txid] = offchainTx } chainTx := ChainTx{ @@ -570,7 +735,10 @@ func (i *indexerService) buildVtxoChain( for _, b64 := range offchainTx.CheckpointTxs { ptx, err := psbt.NewFromRawBytes(strings.NewReader(b64), true) if err != nil { - return nil, nil, fmt.Errorf("failed to deserialize checkpoint tx: %s", err) + return nil, nil, "", fmt.Errorf( + "failed to deserialize checkpoint tx: %s", + err, + ) } txid := ptx.UnsignedTx.TxID() @@ -581,6 +749,7 @@ func (i *indexerService) buildVtxoChain( Spends: []string{ptx.UnsignedTx.TxIn[0].PreviousOutPoint.String()}, }) + allOutpoints = append(allOutpoints, Outpoint{Txid: txid, VOut: 0}) chainTx.Spends = append(chainTx.Spends, txid) // populate newNextVtxos with checkpoints inputs @@ -605,16 +774,16 @@ func (i *indexerService) buildVtxoChain( Txid: vtxo.RootCommitmentTxid, VOut: 
0, }, nil) if err != nil { - return nil, nil, err + return nil, nil, "", err } vtxoTree, err := tree.NewTxTree(flatVtxoTree.Txs) if err != nil { - return nil, nil, err + return nil, nil, "", err } branch, err := vtxoTree.SubTree([]string{vtxo.Txid}) if err != nil { - return nil, nil, err + return nil, nil, "", err } fromRootToVtxo := make([]string, 0) @@ -622,7 +791,7 @@ func (i *indexerService) buildVtxoChain( fromRootToVtxo = append(fromRootToVtxo, tx.Root.UnsignedTx.TxID()) return true, nil }); err != nil { - return nil, nil, err + return nil, nil, "", err } // reverse fromRootToVtxo @@ -663,7 +832,209 @@ func (i *indexerService) buildVtxoChain( nextVtxos = newNextVtxos } - return chain, allOutpoints, nil + return chain, allOutpoints, "", nil +} + +// encodeChainCursor encodes a frontier of outpoints into an HMAC-signed opaque +// page token. The HMAC prevents clients from forging cursors with arbitrary +// outpoints, which would bypass auth validation in exposurePrivate mode. +func (i *indexerService) encodeChainCursor(frontier []domain.Outpoint) string { + if len(frontier) == 0 { + return "" + } + cur := vtxoChainCursor{Frontier: make([]Outpoint, len(frontier))} + for idx, op := range frontier { + cur.Frontier[idx] = Outpoint(op) + } + payload, _ := json.Marshal(cur) + + if len(i.cursorHMACKey) > 0 { + mac := hmac.New(sha256.New, i.cursorHMACKey) + mac.Write(payload) + payload = append(payload, mac.Sum(nil)...) + } + return base64.RawURLEncoding.EncodeToString(payload) +} + +// decodeChainCursor decodes and verifies an HMAC-signed page token. 
+func (i *indexerService) decodeChainCursor(token string) ([]domain.Outpoint, error) { + raw, err := base64.RawURLEncoding.DecodeString(token) + if err != nil { + return nil, fmt.Errorf("invalid base64: %w", err) + } + + payload := raw + if len(i.cursorHMACKey) > 0 { + if len(raw) < sha256.Size { + return nil, fmt.Errorf("invalid cursor: too short") + } + payload = raw[:len(raw)-sha256.Size] + sig := raw[len(raw)-sha256.Size:] + + mac := hmac.New(sha256.New, i.cursorHMACKey) + mac.Write(payload) + if !hmac.Equal(sig, mac.Sum(nil)) { + return nil, fmt.Errorf("invalid cursor: signature mismatch") + } + } + + var cur vtxoChainCursor + if err := json.Unmarshal(payload, &cur); err != nil { + return nil, fmt.Errorf("invalid JSON: %w", err) + } + outpoints := make([]domain.Outpoint, len(cur.Frontier)) + for idx, op := range cur.Frontier { + outpoints[idx] = domain.Outpoint(op) + } + return outpoints, nil +} + +// preloadByMarkers bulk-fetches VTXOs and their offchain txs by walking the +// marker DAG upward from the markers of startVtxos. This reduces DB round-trips +// from O(chain_length) to O(chain_length / MarkerInterval) for both layers. +func (i *indexerService) preloadByMarkers( + ctx context.Context, + startVtxos []domain.Vtxo, + vtxoCache map[string]domain.Vtxo, + offchainTxCache map[string]*domain.OffchainTx, +) error { + markerRepo := i.repoManager.Markers() + offchainTxRepo := i.repoManager.OffchainTxs() + + // Seed cache and collect initial marker IDs. + currentMarkerIDs := make(map[string]bool) + for _, v := range startVtxos { + vtxoCache[v.Outpoint.String()] = v + for _, mid := range v.MarkerIDs { + currentMarkerIDs[mid] = true + } + } + + visited := make(map[string]bool) + + for len(currentMarkerIDs) > 0 { + ids := make([]string, 0, len(currentMarkerIDs)) + for id := range currentMarkerIDs { + ids = append(ids, id) + visited[id] = true + } + + // Bulk-fetch all VTXOs tagged with these markers. 
+ vtxos, err := markerRepo.GetVtxoChainByMarkers(ctx, ids) + if err != nil { + return err + } + for _, v := range vtxos { + if _, ok := vtxoCache[v.Outpoint.String()]; !ok { + vtxoCache[v.Outpoint.String()] = v + } + } + + // Piggyback: bulk-fetch the offchain txs for the preconfirmed VTXOs + // in this window, so the walk loop never has to hit the DB per-hop. + missingTxids := make([]string, 0, len(vtxos)) + seen := make(map[string]bool, len(vtxos)) + for _, v := range vtxos { + if !v.Preconfirmed { + continue + } + if seen[v.Txid] { + continue + } + seen[v.Txid] = true + if _, ok := offchainTxCache[v.Txid]; ok { + continue + } + missingTxids = append(missingTxids, v.Txid) + } + // offchainTxRepo may be nil in test helpers that do not wire up the + // offchain-tx repo. Skip the piggyback in that case — the walk loop + // will fall back to its own in-loop bulk fetch for any cache misses. + if len(missingTxids) > 0 && offchainTxRepo != nil { + offchainTxs, err := offchainTxRepo.GetOffchainTxsByTxids(ctx, missingTxids) + if err != nil { + return err + } + for _, tx := range offchainTxs { + offchainTxCache[tx.ArkTxid] = tx + } + } + + // Get marker objects to find parent markers. + markers, err := markerRepo.GetMarkersByIds(ctx, ids) + if err != nil { + return err + } + + nextMarkerIDs := make(map[string]bool) + for _, m := range markers { + for _, pid := range m.ParentMarkerIDs { + if !visited[pid] { + nextMarkerIDs[pid] = true + } + } + } + currentMarkerIDs = nextMarkerIDs + } + + return nil +} + +// ensureVtxosCached loads the given outpoints into the cache if not already present. +// For each fetched VTXO, it also loads its marker window into the cache to prefetch +// nearby VTXOs that will likely be needed in subsequent iterations. +func (i *indexerService) ensureVtxosCached( + ctx context.Context, + outpoints []domain.Outpoint, + cache map[string]domain.Vtxo, + loadedMarkers map[string]bool, +) error { + // Collect cache misses. 
+ missingOutpoints := make([]domain.Outpoint, 0) + for _, op := range outpoints { + if _, ok := cache[op.String()]; !ok { + missingOutpoints = append(missingOutpoints, op) + } + } + if len(missingOutpoints) == 0 { + return nil + } + + // Fetch misses from DB. + dbVtxos, err := i.repoManager.Vtxos().GetVtxos(ctx, missingOutpoints) + if err != nil { + return err + } + for _, v := range dbVtxos { + cache[v.Outpoint.String()] = v + } + + // For each fetched VTXO, load its marker window(s) into cache. + if i.repoManager.Markers() == nil { + return nil + } + for _, v := range dbVtxos { + for _, markerID := range v.MarkerIDs { + if loadedMarkers[markerID] { + continue + } + loadedMarkers[markerID] = true + + windowVtxos, err := i.repoManager.Markers().GetVtxosByMarker(ctx, markerID) + if err != nil { + log.WithError(err). + Warnf("failed to load marker window %s, falling back to per-VTXO lookups", markerID) + continue + } + for _, wv := range windowVtxos { + if _, ok := cache[wv.Outpoint.String()]; !ok { + cache[wv.Outpoint.String()] = wv + } + } + } + } + + return nil } func (i *indexerService) getVirtualTxs( @@ -954,6 +1325,39 @@ func (i *indexerService) validateAuthToken(authToken string) (string, error) { return hex.EncodeToString(msg[:32]), nil } +// verifyAuthTokenSignature validates the auth token signature without checking +// the embedded timestamp. Used for pagination continuations where the session +// is kept alive via tokenCache.touch instead of the signed timestamp. 
+func (i *indexerService) verifyAuthTokenSignature(authToken string) (string, error) { + if authToken == "" { + return "", fmt.Errorf("missing auth") + } + + tokenBytes, err := base64.StdEncoding.DecodeString(authToken) + if err != nil { + return "", fmt.Errorf("invalid auth token format, must be base64") + } + + if len(tokenBytes) != 40+64 { + return "", fmt.Errorf("invalid auth token length") + } + + msg := tokenBytes[0:40] + sigBytes := tokenBytes[40:] + + msgHash := chainhash.HashB(msg) + sig, err := schnorr.ParseSignature(sigBytes) + if err != nil { + return "", fmt.Errorf("failed to parse auth token signature: %w", err) + } + + if !sig.Verify(msgHash, i.authPrvkey.PubKey()) { + return "", fmt.Errorf("signature verification failed") + } + + return hex.EncodeToString(msg[:32]), nil +} + // extractTokenHash decodes an auth token and returns the outpoints hash // without checking expiry. Signature is still verified. func (i *indexerService) extractTokenHash(authToken string) (string, error) { diff --git a/internal/core/application/indexer_bench_test.go b/internal/core/application/indexer_bench_test.go new file mode 100644 index 000000000..2d51f2fea --- /dev/null +++ b/internal/core/application/indexer_bench_test.go @@ -0,0 +1,879 @@ +package application + +import ( + "context" + "fmt" + "sort" + "strings" + "sync" + "sync/atomic" + "testing" + "time" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/btcsuite/btcd/btcutil/psbt" + "github.com/btcsuite/btcd/chaincfg/chainhash" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/require" +) + +// Lightweight fake repos for benchmarks — no testify/mock overhead. +// Unused interface methods are satisfied by the embedded nil interface +// and will panic if called unexpectedly. 
+ +type benchVtxoRepo struct { + domain.VtxoRepository + vtxos map[string]domain.Vtxo +} + +func (r *benchVtxoRepo) GetVtxos( + _ context.Context, outpoints []domain.Outpoint, +) ([]domain.Vtxo, error) { + result := make([]domain.Vtxo, 0, len(outpoints)) + for _, op := range outpoints { + if v, ok := r.vtxos[op.String()]; ok { + result = append(result, v) + } + } + return result, nil +} + +func (r *benchVtxoRepo) Close() {} + +type benchMarkerRepo struct { + domain.MarkerRepository + markers map[string]domain.Marker + vtxosByMarker map[string][]domain.Vtxo +} + +func (r *benchMarkerRepo) GetVtxoChainByMarkers( + _ context.Context, markerIDs []string, +) ([]domain.Vtxo, error) { + seen := make(map[string]bool) + var result []domain.Vtxo + for _, mid := range markerIDs { + for _, v := range r.vtxosByMarker[mid] { + key := v.Outpoint.String() + if !seen[key] { + seen[key] = true + result = append(result, v) + } + } + } + return result, nil +} + +func (r *benchMarkerRepo) GetMarkersByIds( + _ context.Context, ids []string, +) ([]domain.Marker, error) { + result := make([]domain.Marker, 0, len(ids)) + for _, id := range ids { + if m, ok := r.markers[id]; ok { + result = append(result, m) + } + } + return result, nil +} + +func (r *benchMarkerRepo) GetVtxosByMarker( + _ context.Context, markerID string, +) ([]domain.Vtxo, error) { + return r.vtxosByMarker[markerID], nil +} + +func (r *benchMarkerRepo) Close() {} + +type benchOffchainTxRepo struct { + domain.OffchainTxRepository + txs map[string]*domain.OffchainTx +} + +func (r *benchOffchainTxRepo) GetOffchainTx( + _ context.Context, txid string, +) (*domain.OffchainTx, error) { + if tx, ok := r.txs[txid]; ok { + return tx, nil + } + return &domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil +} + +func (r *benchOffchainTxRepo) GetOffchainTxsByTxids( + _ context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + result := make([]*domain.OffchainTx, 0, len(txids)) + for _, txid := range txids { + if tx, 
ok := r.txs[txid]; ok { + result = append(result, tx) + } + } + return result, nil +} + +func (r *benchOffchainTxRepo) Close() {} + +type benchRepoManager struct { + vtxoRepo *benchVtxoRepo + markerRepo *benchMarkerRepo + offchainRepo domain.OffchainTxRepository +} + +func (m *benchRepoManager) Events() domain.EventRepository { return nil } +func (m *benchRepoManager) Rounds() domain.RoundRepository { return nil } +func (m *benchRepoManager) Vtxos() domain.VtxoRepository { return m.vtxoRepo } +func (m *benchRepoManager) Markers() domain.MarkerRepository { + if m.markerRepo == nil { + return nil + } + return m.markerRepo +} +func (m *benchRepoManager) ScheduledSession() domain.ScheduledSessionRepo { return nil } +func (m *benchRepoManager) OffchainTxs() domain.OffchainTxRepository { + if m.offchainRepo == nil { + return nil + } + return m.offchainRepo +} +func (m *benchRepoManager) Convictions() domain.ConvictionRepository { return nil } +func (m *benchRepoManager) Assets() domain.AssetRepository { return nil } +func (m *benchRepoManager) Fees() domain.FeeRepository { return nil } +func (m *benchRepoManager) RegisterBatchUpdateHandler(func(data domain.Round)) {} +func (m *benchRepoManager) RegisterOffchainTxUpdateHandler(func(domain.OffchainTx)) {} +func (m *benchRepoManager) Close() {} + +// benchTxid returns a deterministic 64-char hex txid for index i. +func benchTxid(i int) string { + return fmt.Sprintf("%064x", i) +} + +// benchCheckpointPSBT creates a base64-encoded PSBT with a single input. 
+func benchCheckpointPSBT(inputTxid string, inputVout uint32) string { + prevHash, err := chainhash.NewHashFromStr(inputTxid) + if err != nil { + panic(fmt.Sprintf("benchCheckpointPSBT: bad txid %q: %v", inputTxid, err)) + } + p, err := psbt.New( + []*wire.OutPoint{wire.NewOutPoint(prevHash, inputVout)}, + []*wire.TxOut{wire.NewTxOut(1000, []byte{0x51})}, + 2, 0, + []uint32{wire.MaxTxInSequenceNum}, + ) + if err != nil { + panic(err) + } + b64, err := p.B64Encode() + if err != nil { + panic(err) + } + return b64 +} + +// buildLinearChain creates a linear preconfirmed chain: +// +// V0 -> cp0 -> V1 -> cp1 -> V2 -> ... -> V{n-1} (terminal) +func buildLinearChain(n int, withMarkers bool) (*indexerService, domain.Outpoint) { + vtxoRepo := &benchVtxoRepo{vtxos: make(map[string]domain.Vtxo, n)} + offchainRepo := &benchOffchainTxRepo{txs: make(map[string]*domain.OffchainTx, n)} + + vtxos := make([]domain.Vtxo, n) + for i := 0; i < n; i++ { + tid := benchTxid(i) + var markerIDs []string + if withMarkers { + markerIDs = []string{fmt.Sprintf("m-%d", i/int(domain.MarkerInterval))} + } + vtxos[i] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: tid, VOut: 0}, + Preconfirmed: true, + ExpiresAt: int64(1000 + i), + MarkerIDs: markerIDs, + } + vtxoRepo.vtxos[vtxos[i].Outpoint.String()] = vtxos[i] + + if i < n-1 { + offchainRepo.txs[tid] = &domain.OffchainTx{ + ArkTxid: tid, + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-%d", i): benchCheckpointPSBT(benchTxid(i+1), 0), + }, + } + } else { + offchainRepo.txs[tid] = &domain.OffchainTx{ + ArkTxid: tid, + CheckpointTxs: map[string]string{}, + } + } + } + + var markerRepo *benchMarkerRepo + if withMarkers { + markerRepo = &benchMarkerRepo{ + markers: make(map[string]domain.Marker), + vtxosByMarker: make(map[string][]domain.Vtxo), + } + interval := int(domain.MarkerInterval) + markersCount := (n + interval - 1) / interval + for m := 0; m < markersCount; m++ { + mid := fmt.Sprintf("m-%d", m) + start := m * interval + end := start + 
interval + if end > n { + end = n + } + markerRepo.vtxosByMarker[mid] = vtxos[start:end] + + var parentIDs []string + if m+1 < markersCount { + parentIDs = []string{fmt.Sprintf("m-%d", m+1)} + } + markerRepo.markers[mid] = domain.Marker{ + ID: mid, + Depth: uint32(m * interval), + ParentMarkerIDs: parentIDs, + } + } + } + + svc := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, markerRepo: markerRepo, offchainRepo: offchainRepo, + }} + return svc, domain.Outpoint{Txid: benchTxid(0), VOut: 0} +} + +// buildFanoutTree creates a binary-tree shaped chain where each VTXO has +// 2 checkpoints pointing to 2 children. Depth d produces 2^(d+1)-1 VTXOs. +// +// V0 +// / \ +// V1 V2 +// / \ / \ +// V3 V4 V5 V6 +// ... +func buildFanoutTree(depth int) (*indexerService, domain.Outpoint, int) { + n := (1 << (depth + 1)) - 1 + vtxoRepo := &benchVtxoRepo{vtxos: make(map[string]domain.Vtxo, n)} + offchainRepo := &benchOffchainTxRepo{txs: make(map[string]*domain.OffchainTx, n)} + + for i := 0; i < n; i++ { + tid := benchTxid(i) + vtxoRepo.vtxos[fmt.Sprintf("%s:0", tid)] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: tid, VOut: 0}, + Preconfirmed: true, + ExpiresAt: int64(1000 + i), + } + + left := 2*i + 1 + right := 2*i + 2 + if left < n && right < n { + offchainRepo.txs[tid] = &domain.OffchainTx{ + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-l-%d", i): benchCheckpointPSBT(benchTxid(left), 0), + fmt.Sprintf("cp-r-%d", i): benchCheckpointPSBT(benchTxid(right), 0), + }, + } + } else { + offchainRepo.txs[tid] = &domain.OffchainTx{CheckpointTxs: map[string]string{}} + } + } + + svc := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, offchainRepo: offchainRepo, + }} + return svc, domain.Outpoint{Txid: benchTxid(0), VOut: 0}, n +} + +// buildDiamondChain creates a chain of diamond patterns where paths diverge +// and reconverge, stressing the visited-set deduplication: +// +// V0 --(2 checkpoints)--> V1, V2 +// V1 --(1 checkpoint)---> 
V3 +// V2 --(1 checkpoint)---> V3 (same V3 = convergence) +// V3 --(2 checkpoints)--> V4, V5 +// V4 --(1 checkpoint)---> V6 +// V5 --(1 checkpoint)---> V6 +// ... +// +// Each diamond uses 3 node indices; the convergence node is the next diamond's +// fan-out. Total unique VTXOs = 3*diamonds + 1. +func buildDiamondChain(diamonds int) (*indexerService, domain.Outpoint, int) { + n := 3*diamonds + 1 + vtxoRepo := &benchVtxoRepo{vtxos: make(map[string]domain.Vtxo, n)} + offchainRepo := &benchOffchainTxRepo{txs: make(map[string]*domain.OffchainTx, n)} + + for i := 0; i < n; i++ { + tid := benchTxid(i) + vtxoRepo.vtxos[fmt.Sprintf("%s:0", tid)] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: tid, VOut: 0}, + Preconfirmed: true, + ExpiresAt: int64(1000 + i), + } + } + + for d := 0; d < diamonds; d++ { + fanOut := 3 * d + midA := 3*d + 1 + midB := 3*d + 2 + converge := 3 * (d + 1) + + // Fan-out: 2 checkpoints -> midA, midB + offchainRepo.txs[benchTxid(fanOut)] = &domain.OffchainTx{ + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-a-%d", d): benchCheckpointPSBT(benchTxid(midA), 0), + fmt.Sprintf("cp-b-%d", d): benchCheckpointPSBT(benchTxid(midB), 0), + }, + } + // Mid A -> converge + offchainRepo.txs[benchTxid(midA)] = &domain.OffchainTx{ + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-ca-%d", d): benchCheckpointPSBT(benchTxid(converge), 0), + }, + } + // Mid B -> converge (same target) + offchainRepo.txs[benchTxid(midB)] = &domain.OffchainTx{ + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-cb-%d", d): benchCheckpointPSBT(benchTxid(converge), 0), + }, + } + } + // Terminal + offchainRepo.txs[benchTxid(3*diamonds)] = &domain.OffchainTx{ + CheckpointTxs: map[string]string{}, + } + + svc := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, offchainRepo: offchainRepo, + }} + return svc, domain.Outpoint{Txid: benchTxid(0), VOut: 0}, n +} + +func BenchmarkGetVtxoChain(b *testing.B) { + ctx := context.Background() + + for _, size := range 
[]int{1000, 5000} { + b.Run(fmt.Sprintf("linear/%d/with_markers", size), func(b *testing.B) { + svc, start := buildLinearChain(size, true) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + resp, err := svc.GetVtxoChain(ctx, "", start, nil, "") + if err != nil { + b.Fatal(err) + } + // Sanity: (n-1)*2 + 1 = 2n-1 items (ark + checkpoint per non-terminal, ark for terminal). + if len(resp.Chain) != 2*size-1 { + b.Fatalf("expected %d chain items, got %d", 2*size-1, len(resp.Chain)) + } + } + }) + + b.Run(fmt.Sprintf("linear/%d/without_markers", size), func(b *testing.B) { + svc, start := buildLinearChain(size, false) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + resp, err := svc.GetVtxoChain(ctx, "", start, nil, "") + if err != nil { + b.Fatal(err) + } + if len(resp.Chain) != 2*size-1 { + b.Fatalf("expected %d chain items, got %d", 2*size-1, len(resp.Chain)) + } + } + }) + } + + b.Run("fanout/depth10_2047_vtxos", func(b *testing.B) { + svc, start, n := buildFanoutTree(10) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + resp, err := svc.GetVtxoChain(ctx, "", start, nil, "") + if err != nil { + b.Fatal(err) + } + // Internal nodes: 2^depth - 1 = 1023, each emits ark + 2 checkpoints = 3 items. + // Leaves: 2^depth = 1024, each emits 1 ark item. + // Total: 1023*3 + 1024 = 4093. + internalNodes := (1 << 10) - 1 + leaves := 1 << 10 + expected := internalNodes*3 + leaves + if len(resp.Chain) != expected { + b.Fatalf("expected %d chain items, got %d (n=%d)", expected, len(resp.Chain), n) + } + } + }) + + b.Run("diamond/500_pairs", func(b *testing.B) { + svc, start, _ := buildDiamondChain(500) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + resp, err := svc.GetVtxoChain(ctx, "", start, nil, "") + if err != nil { + b.Fatal(err) + } + // Each diamond's fan-out: ark + 2 checkpoints = 3 items. + // Each mid node: ark + 1 checkpoint = 2 items. + // Terminal: 1 ark item. + // Per diamond: 3 + 2 + 2 = 7 items. 
+ // Total: 7*diamonds + 1. + expected := 7*500 + 1 + if len(resp.Chain) != expected { + b.Fatalf("expected %d chain items, got %d", expected, len(resp.Chain)) + } + } + }) +} + +// BenchmarkCheckpointPSBTParse measures the raw cost of PSBT base64 +// decode + parse, which dominates GetVtxoChain runtime. +func BenchmarkCheckpointPSBTParse(b *testing.B) { + encoded := benchCheckpointPSBT(benchTxid(1), 0) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := psbt.NewFromRawBytes(strings.NewReader(encoded), true) + if err != nil { + b.Fatal(err) + } + } +} + +// countingOffchainTxRepo wraps benchOffchainTxRepo and counts calls. +type countingOffchainTxRepo struct { + inner *benchOffchainTxRepo + singleCalls atomic.Int64 + bulkCalls atomic.Int64 + latencyPerCall time.Duration +} + +func (r *countingOffchainTxRepo) GetOffchainTx( + ctx context.Context, txid string, +) (*domain.OffchainTx, error) { + r.singleCalls.Add(1) + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetOffchainTx(ctx, txid) +} + +func (r *countingOffchainTxRepo) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + r.bulkCalls.Add(1) + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) // one round-trip regardless of batch size + } + return r.inner.GetOffchainTxsByTxids(ctx, txids) +} + +func (r *countingOffchainTxRepo) AddOrUpdateOffchainTx( + _ context.Context, _ *domain.OffchainTx, +) error { + return nil +} + +func (r *countingOffchainTxRepo) Close() {} + +func (r *countingOffchainTxRepo) reset() { + r.singleCalls.Store(0) + r.bulkCalls.Store(0) +} + +// noBulkOffchainTxRepo is like benchOffchainTxRepo but GetOffchainTxsByTxids +// always returns empty, forcing the fallback to individual GetOffchainTx calls. +// This simulates the pre-optimization behavior. 
+type noBulkOffchainTxRepo struct { + countingOffchainTxRepo +} + +func (r *noBulkOffchainTxRepo) GetOffchainTxsByTxids( + _ context.Context, _ []string, +) ([]*domain.OffchainTx, error) { + r.bulkCalls.Add(1) + return []*domain.OffchainTx{}, nil +} + +// TestBulkOffchainTxReducesDBCalls verifies that the bulk prefetch reduces the +// number of DB round-trips. Uses a fanout tree where each iteration processes +// multiple VTXOs — bulk fetches all offchain txs in one call per iteration +// instead of one call per VTXO. +func TestBulkOffchainTxReducesDBCalls(t *testing.T) { + const depth = 8 // 2^9 - 1 = 511 VTXOs + ctx := context.Background() + + // Build fanout tree data (reuse the helper's repo setup). + n := (1 << (depth + 1)) - 1 + vtxoRepo := &benchVtxoRepo{vtxos: make(map[string]domain.Vtxo, n)} + innerRepo := &benchOffchainTxRepo{txs: make(map[string]*domain.OffchainTx, n)} + + for i := 0; i < n; i++ { + tid := benchTxid(i) + vtxoRepo.vtxos[fmt.Sprintf("%s:0", tid)] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: tid, VOut: 0}, + Preconfirmed: true, + ExpiresAt: int64(1000 + i), + } + left := 2*i + 1 + right := 2*i + 2 + if left < n && right < n { + innerRepo.txs[tid] = &domain.OffchainTx{ + ArkTxid: tid, + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-l-%d", i): benchCheckpointPSBT(benchTxid(left), 0), + fmt.Sprintf("cp-r-%d", i): benchCheckpointPSBT(benchTxid(right), 0), + }, + } + } else { + innerRepo.txs[tid] = &domain.OffchainTx{ + ArkTxid: tid, + CheckpointTxs: map[string]string{}, + } + } + } + + start := Outpoint{Txid: benchTxid(0), VOut: 0} + + // With bulk fetch (current behavior). 
+ bulkRepo := &countingOffchainTxRepo{inner: innerRepo} + svc := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, offchainRepo: bulkRepo, + }} + resp, err := svc.GetVtxoChain(ctx, "", start, nil, "") + require.NoError(t, err) + + bulkSingle := bulkRepo.singleCalls.Load() + bulkBulk := bulkRepo.bulkCalls.Load() + + // Without bulk fetch (simulated pre-optimization: bulk returns empty). + noBulkRepo := &noBulkOffchainTxRepo{countingOffchainTxRepo{inner: innerRepo}} + svc2 := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, offchainRepo: noBulkRepo, + }} + resp2, err := svc2.GetVtxoChain(ctx, "", start, nil, "") + require.NoError(t, err) + require.Equal(t, len(resp.Chain), len(resp2.Chain)) + + noBulkSingle := noBulkRepo.singleCalls.Load() + + t.Logf("fanout tree: depth=%d, %d VTXOs", depth, n) + t.Logf("WITH bulk: %d bulk calls, %d individual calls (total round-trips: %d)", + bulkBulk, bulkSingle, bulkBulk+bulkSingle) + t.Logf("WITHOUT bulk: %d individual calls (total round-trips: %d)", + noBulkSingle, noBulkSingle) + + // With bulk fetch, individual calls should be 0 (all served from cache). + require.Zero(t, bulkSingle, "bulk prefetch should eliminate individual GetOffchainTx calls") + // Bulk calls = depth+1 iterations (one per tree level), much fewer than N VTXOs. + require.LessOrEqual(t, bulkBulk, int64(depth+1), + "bulk calls should equal tree depth (one per iteration)") + // Without bulk, individual calls == N (one per preconfirmed VTXO). + require.Equal(t, int64(n), noBulkSingle, + "without bulk, every VTXO triggers an individual call") +} + +// BenchmarkOffchainTxBulkVsSingle compares chain traversal with and without +// the bulk offchain tx prefetch, using simulated DB latency to make the +// round-trip reduction visible in wall-clock time. 
Uses a fanout tree +// (depth 8, 511 VTXOs) where each iteration processes an exponentially +// growing number of VTXOs — the bulk path does 9 round-trips vs 511 +// individual calls without it. +func BenchmarkOffchainTxBulkVsSingle(b *testing.B) { + const depth = 8 + const simulatedLatency = 50 * time.Microsecond + + n := (1 << (depth + 1)) - 1 + vtxoRepo := &benchVtxoRepo{vtxos: make(map[string]domain.Vtxo, n)} + innerRepo := &benchOffchainTxRepo{txs: make(map[string]*domain.OffchainTx, n)} + + for i := 0; i < n; i++ { + tid := benchTxid(i) + vtxoRepo.vtxos[fmt.Sprintf("%s:0", tid)] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: tid, VOut: 0}, + Preconfirmed: true, + ExpiresAt: int64(1000 + i), + } + left := 2*i + 1 + right := 2*i + 2 + if left < n && right < n { + innerRepo.txs[tid] = &domain.OffchainTx{ + ArkTxid: tid, + CheckpointTxs: map[string]string{ + fmt.Sprintf("cp-l-%d", i): benchCheckpointPSBT(benchTxid(left), 0), + fmt.Sprintf("cp-r-%d", i): benchCheckpointPSBT(benchTxid(right), 0), + }, + } + } else { + innerRepo.txs[tid] = &domain.OffchainTx{ + ArkTxid: tid, + CheckpointTxs: map[string]string{}, + } + } + } + + start := Outpoint{Txid: benchTxid(0), VOut: 0} + ctx := context.Background() + + b.Run(fmt.Sprintf("bulk_prefetch/%d_vtxos", n), func(b *testing.B) { + repo := &countingOffchainTxRepo{inner: innerRepo, latencyPerCall: simulatedLatency} + svc := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, offchainRepo: repo, + }} + b.ReportAllocs() + repo.reset() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := svc.GetVtxoChain(ctx, "", start, nil, "") + if err != nil { + b.Fatal(err) + } + } + b.StopTimer() + b.ReportMetric(float64(repo.bulkCalls.Load())/float64(b.N), "bulk_calls/op") + b.ReportMetric(float64(repo.singleCalls.Load())/float64(b.N), "single_calls/op") + }) + + b.Run(fmt.Sprintf("no_bulk_fallback/%d_vtxos", n), func(b *testing.B) { + repo := &noBulkOffchainTxRepo{countingOffchainTxRepo{inner: innerRepo, 
latencyPerCall: simulatedLatency}} + svc := &indexerService{repoManager: &benchRepoManager{ + vtxoRepo: vtxoRepo, offchainRepo: repo, + }} + b.ReportAllocs() + repo.reset() + b.ResetTimer() + for i := 0; i < b.N; i++ { + _, err := svc.GetVtxoChain(ctx, "", start, nil, "") + if err != nil { + b.Fatal(err) + } + } + b.StopTimer() + b.ReportMetric(float64(repo.bulkCalls.Load())/float64(b.N), "bulk_calls/op") + b.ReportMetric(float64(repo.singleCalls.Load())/float64(b.N), "single_calls/op") + }) +} + +// phaseTimings accumulates per-phase wall-clock time and call counts across +// the wrapped repo methods. Safe for concurrent recording. +type phaseTimings struct { + mu sync.Mutex + totals map[string]time.Duration + counts map[string]int +} + +func newPhaseTimings() *phaseTimings { + return &phaseTimings{ + totals: make(map[string]time.Duration), + counts: make(map[string]int), + } +} + +func (p *phaseTimings) record(phase string, d time.Duration) { + p.mu.Lock() + p.totals[phase] += d + p.counts[phase]++ + p.mu.Unlock() +} + +func (p *phaseTimings) log(t *testing.T, header string, wall time.Duration) { + t.Helper() + p.mu.Lock() + defer p.mu.Unlock() + + phases := make([]string, 0, len(p.totals)) + var repoTotal time.Duration + for name, d := range p.totals { + phases = append(phases, name) + repoTotal += d + } + sort.Strings(phases) + + t.Logf("%s", header) + t.Logf(" %-32s %12s", "wall clock (GetVtxoChain)", wall) + for _, name := range phases { + t.Logf(" %-32s %12s (%d calls)", name, p.totals[name], p.counts[name]) + } + t.Logf(" %-32s %12s", "sum of repo phases", repoTotal) + t.Logf(" %-32s %12s", "other (psbt parse + overhead)", wall-repoTotal) +} + +// timingVtxoRepo wraps a VtxoRepository and records per-call latency into a +// shared phaseTimings. An optional per-call latency simulates DB round-trip +// cost so the relative phase times are visible when running against fakes. 
+type timingVtxoRepo struct { + domain.VtxoRepository + inner domain.VtxoRepository + t *phaseTimings + latencyPerCall time.Duration +} + +func (r *timingVtxoRepo) GetVtxos( + ctx context.Context, outpoints []domain.Outpoint, +) ([]domain.Vtxo, error) { + start := time.Now() + defer func() { r.t.record("Vtxos.GetVtxos", time.Since(start)) }() + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetVtxos(ctx, outpoints) +} + +func (r *timingVtxoRepo) Close() {} + +type timingMarkerRepo struct { + domain.MarkerRepository + inner domain.MarkerRepository + t *phaseTimings + latencyPerCall time.Duration +} + +func (r *timingMarkerRepo) GetVtxoChainByMarkers( + ctx context.Context, markerIDs []string, +) ([]domain.Vtxo, error) { + start := time.Now() + defer func() { r.t.record("Markers.GetVtxoChainByMarkers", time.Since(start)) }() + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetVtxoChainByMarkers(ctx, markerIDs) +} + +func (r *timingMarkerRepo) GetMarkersByIds( + ctx context.Context, ids []string, +) ([]domain.Marker, error) { + start := time.Now() + defer func() { r.t.record("Markers.GetMarkersByIds", time.Since(start)) }() + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetMarkersByIds(ctx, ids) +} + +func (r *timingMarkerRepo) GetVtxosByMarker( + ctx context.Context, markerID string, +) ([]domain.Vtxo, error) { + start := time.Now() + defer func() { r.t.record("Markers.GetVtxosByMarker", time.Since(start)) }() + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetVtxosByMarker(ctx, markerID) +} + +func (r *timingMarkerRepo) Close() {} + +type timingOffchainTxRepo struct { + domain.OffchainTxRepository + inner domain.OffchainTxRepository + t *phaseTimings + latencyPerCall time.Duration +} + +func (r *timingOffchainTxRepo) GetOffchainTx( + ctx context.Context, txid string, +) (*domain.OffchainTx, error) { + start := time.Now() + defer func() { 
r.t.record("OffchainTxs.GetOffchainTx", time.Since(start)) }() + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetOffchainTx(ctx, txid) +} + +func (r *timingOffchainTxRepo) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + start := time.Now() + defer func() { r.t.record("OffchainTxs.GetOffchainTxsByTxids", time.Since(start)) }() + if r.latencyPerCall > 0 { + time.Sleep(r.latencyPerCall) + } + return r.inner.GetOffchainTxsByTxids(ctx, txids) +} + +func (r *timingOffchainTxRepo) AddOrUpdateOffchainTx( + _ context.Context, _ *domain.OffchainTx, +) error { + return nil +} + +func (r *timingOffchainTxRepo) Close() {} + +// TestVtxoChainTimingBreakdown builds a deep linear chain and runs +// GetVtxoChain against it with timing-decorated repos, logging a per-phase +// wall-clock breakdown. This is the in-process replacement for the server-side +// timing log that previously lived in walkVtxoChain. +// +// The repos use an in-memory backing store and inject a fixed per-call +// simulatedLatency via time.Sleep, so the absolute numbers in the breakdown +// do NOT reflect real DB cost — they are only meaningful as relative phase +// proportions under a uniform latency assumption. +// +// Run with: +// +// go test -v -run TestVtxoChainTimingBreakdown ./internal/core/application/... +func TestVtxoChainTimingBreakdown(t *testing.T) { + const ( + chainLen = 10000 + simulatedLatency = 50 * time.Microsecond + ) + + ctx := context.Background() + + // Reuse buildLinearChain to get the same data layout the perf test produces, + // then swap its repo manager for a timing-decorated one. 
+ svc, start := buildLinearChain(chainLen, true) + inner := svc.repoManager.(*benchRepoManager) + + timings := newPhaseTimings() + svc.repoManager = &wrappedRepoManager{ + vtxos: &timingVtxoRepo{ + inner: inner.vtxoRepo, t: timings, latencyPerCall: simulatedLatency, + }, + markers: &timingMarkerRepo{ + inner: inner.markerRepo, t: timings, latencyPerCall: simulatedLatency, + }, + offchainTxs: &timingOffchainTxRepo{ + inner: inner.offchainRepo, t: timings, latencyPerCall: simulatedLatency, + }, + } + + wallStart := time.Now() + resp, err := svc.GetVtxoChain(ctx, "", start, nil, "") + wall := time.Since(wallStart) + require.NoError(t, err) + require.Equal(t, 2*chainLen-1, len(resp.Chain)) + + timings.log(t, fmt.Sprintf( + "GetVtxoChain timing breakdown: linear chain n=%d, simulated repo latency=%s", + chainLen, simulatedLatency, + ), wall) +} + +// wrappedRepoManager is a minimal RepoManager that exposes only the repos +// walkVtxoChain touches. Unwired accessors panic with a descriptive message +// instead of returning nil, so an accidental dependency on one of them +// surfaces as a clear failure rather than a nil-pointer dereference. 
+type wrappedRepoManager struct { + vtxos domain.VtxoRepository + markers domain.MarkerRepository + offchainTxs domain.OffchainTxRepository +} + +func (m *wrappedRepoManager) Events() domain.EventRepository { panic("Events: not wired") } +func (m *wrappedRepoManager) Rounds() domain.RoundRepository { panic("Rounds: not wired") } +func (m *wrappedRepoManager) Vtxos() domain.VtxoRepository { return m.vtxos } +func (m *wrappedRepoManager) Markers() domain.MarkerRepository { + return m.markers +} +func (m *wrappedRepoManager) ScheduledSession() domain.ScheduledSessionRepo { + panic("ScheduledSession: not wired") +} +func (m *wrappedRepoManager) OffchainTxs() domain.OffchainTxRepository { return m.offchainTxs } +func (m *wrappedRepoManager) Convictions() domain.ConvictionRepository { + panic("Convictions: not wired") +} +func (m *wrappedRepoManager) Assets() domain.AssetRepository { panic("Assets: not wired") } +func (m *wrappedRepoManager) Fees() domain.FeeRepository { panic("Fees: not wired") } +func (m *wrappedRepoManager) RegisterBatchUpdateHandler(func(data domain.Round)) {} +func (m *wrappedRepoManager) RegisterOffchainTxUpdateHandler(func(domain.OffchainTx)) {} +func (m *wrappedRepoManager) Close() {} diff --git a/internal/core/application/indexer_exposure_test.go b/internal/core/application/indexer_exposure_test.go index 6c1390109..e9204c574 100644 --- a/internal/core/application/indexer_exposure_test.go +++ b/internal/core/application/indexer_exposure_test.go @@ -7,6 +7,7 @@ import ( "encoding/binary" "encoding/hex" "fmt" + "math" "slices" "strings" "testing" @@ -530,7 +531,7 @@ func TestGetVtxoChain(t *testing.T) { t.Run("valid", func(t *testing.T) { t.Run("private, token covers tree tx outpoints", func(t *testing.T) { // Build a 2-node PSBT tree: root_tx → vtxo_tx (leaf). 
- // buildVtxoChain collects allOutpoints = [vtxoOutpoint, vtxoOutpoint (dup), + // walkVtxoChain collects allOutpoints = [vtxoOutpoint, vtxoOutpoint (dup), // rootTxid:0, vtxoTxid:0] so the auth token covers both the vtxo and the tree tx. rootTxid, vtxoTxid, flatTree := buildTestTreeTxs(t) @@ -553,7 +554,7 @@ func TestGetVtxoChain(t *testing.T) { // Build chain first to collect allOutpoints. // allOutpoints includes vtxoOutpoint and the tree tx outpoints. - _, allOutpoints, err := indexer.buildVtxoChain(t.Context(), vtxoOutpoint) + _, allOutpoints, _, err := indexer.walkVtxoChain(t.Context(), []domain.Outpoint{vtxoOutpoint}, math.MaxInt32) require.NoError(t, err) // Verify allOutpoints covers both the vtxo and the root tree tx. @@ -569,7 +570,7 @@ func TestGetVtxoChain(t *testing.T) { require.NoError(t, err) // GetVtxoChain with the token — auth check passes and chain is returned. - resp, err := indexer.GetVtxoChain(t.Context(), token, vtxoOutpoint, nil) + resp, err := indexer.GetVtxoChain(t.Context(), token, vtxoOutpoint, nil, "") require.NoError(t, err) require.NotEmpty(t, resp.Chain) @@ -584,6 +585,71 @@ func TestGetVtxoChain(t *testing.T) { rounds.AssertExpectations(t) vtxos.AssertExpectations(t) }) + + t.Run("preconfirmed chain bulk-loads offchain txs", func(t *testing.T) { + vtxoOutpoint := Outpoint{Txid: testTxids[0], VOut: 0} + offchainTxid := vtxoOutpoint.Txid + checkpointB64 := buildCheckpointTxSpending(t, vtxoOutpoint.Txid, vtxoOutpoint.VOut) + + vtxos := &mockedVtxoRepo{} + vtxos.On("GetVtxos", mock.Anything, []domain.Outpoint{vtxoOutpoint}). + Return([]domain.Vtxo{{ + Outpoint: domain.Outpoint{Txid: vtxoOutpoint.Txid, VOut: vtxoOutpoint.VOut}, + Preconfirmed: true, + }}, nil) + + offchainRepo := &mockedOffchainTxRepo{} + offchainRepo.On("GetOffchainTxsByTxids", mock.Anything, []string{offchainTxid}). 
+ Return([]*domain.OffchainTx{{ + ArkTxid: offchainTxid, + CheckpointTxs: map[string]string{ + "cp": checkpointB64, + }, + }}, nil) + + indexer := newTestIndexer(t, privkey, exposurePrivate, nil, vtxos, nil, offchainRepo) + + chain, _, _, err := indexer.walkVtxoChain(t.Context(), []domain.Outpoint{vtxoOutpoint}, 1000) + require.NoError(t, err) + require.NotEmpty(t, chain) + + offchainRepo.AssertNotCalled(t, "GetOffchainTx", mock.Anything, offchainTxid) + offchainRepo.AssertExpectations(t) + vtxos.AssertExpectations(t) + }) + + t.Run("preconfirmed chain falls back to single fetch on cache miss", func(t *testing.T) { + vtxoOutpoint := Outpoint{Txid: testTxids[0], VOut: 0} + offchainTxid := vtxoOutpoint.Txid + checkpointB64 := buildCheckpointTxSpending(t, vtxoOutpoint.Txid, vtxoOutpoint.VOut) + + vtxos := &mockedVtxoRepo{} + vtxos.On("GetVtxos", mock.Anything, []domain.Outpoint{vtxoOutpoint}). + Return([]domain.Vtxo{{ + Outpoint: domain.Outpoint{Txid: vtxoOutpoint.Txid, VOut: vtxoOutpoint.VOut}, + Preconfirmed: true, + }}, nil) + + offchainRepo := &mockedOffchainTxRepo{} + offchainRepo.On("GetOffchainTxsByTxids", mock.Anything, []string{offchainTxid}). + Return([]*domain.OffchainTx{}, nil) + offchainRepo.On("GetOffchainTx", mock.Anything, offchainTxid). 
+ Return(&domain.OffchainTx{ + ArkTxid: offchainTxid, + CheckpointTxs: map[string]string{ + "cp": checkpointB64, + }, + }, nil) + + indexer := newTestIndexer(t, privkey, exposurePrivate, nil, vtxos, nil, offchainRepo) + + chain, _, _, err := indexer.walkVtxoChain(t.Context(), []domain.Outpoint{vtxoOutpoint}, 1000) + require.NoError(t, err) + require.NotEmpty(t, chain) + + offchainRepo.AssertExpectations(t) + vtxos.AssertExpectations(t) + }) }) t.Run("invalid", func(t *testing.T) { @@ -657,7 +723,7 @@ func TestGetVtxoChain(t *testing.T) { indexer := newTestIndexer(t, privkey, tc.exposure, nil, nil, nil) token := tc.makeToken(t, indexer) - _, err := indexer.GetVtxoChain(t.Context(), token, tc.outpoint, nil) + _, err := indexer.GetVtxoChain(t.Context(), token, tc.outpoint, nil, "") require.Error(t, err) require.Contains(t, err.Error(), tc.errContains) }) @@ -695,7 +761,7 @@ func TestGetVtxoChainByIntent(t *testing.T) { rounds := &mockedRoundRepo{} vtxos := &mockedVtxoRepo{} - // GetVtxos is called twice: validateIntent + buildVtxoChain. + // GetVtxos is called twice: validateIntent + walkVtxoChain. vtxos.On("GetVtxos", mock.Anything, []domain.Outpoint{{Txid: leafTxid, VOut: 0}}). Return([]domain.Vtxo{vtxoData}, nil) rounds.On("GetRoundVtxoTree", mock.Anything, commitmentTxid). 
@@ -1122,6 +1188,7 @@ func TestRevokeTokens(t *testing.T) { func newTestIndexer( t *testing.T, privkey *btcec.PrivateKey, exposure exposure, rounds *mockedRoundRepo, vtxos *mockedVtxoRepo, wallet *mockedWallet, + offchainRepos ...*mockedOffchainTxRepo, ) *indexerService { t.Helper() @@ -1135,6 +1202,9 @@ func newTestIndexer( if vtxos != nil { repo.On("Vtxos").Return(vtxos) } + if len(offchainRepos) > 0 && offchainRepos[0] != nil { + repo.On("OffchainTxs").Return(offchainRepos[0]) + } cache := newTokenCache(defaultAuthTokenTTL) t.Cleanup(cache.close) @@ -1190,6 +1260,25 @@ func buildTestTreeTxs(t *testing.T) (rootTxid, leafTxid string, flatTree arktree return } +func buildCheckpointTxSpending(t *testing.T, prevTxid string, prevVout uint32) string { + t.Helper() + + prevHash, err := chainhash.NewHashFromStr(prevTxid) + require.NoError(t, err) + + ptx, err := psbt.New( + []*wire.OutPoint{{Hash: *prevHash, Index: prevVout}}, + []*wire.TxOut{{Value: 1000, PkScript: []byte{txscript.OP_TRUE}}}, + 2, 0, []uint32{wire.MaxTxInSequenceNum}, + ) + require.NoError(t, err) + + b64, err := ptx.B64Encode() + require.NoError(t, err) + + return b64 +} + // buildTestIntent creates a valid signed intent proof that passes intent.Verify. 
// It builds a MultisigClosure with vtxoKey, derives the taproot output key from // that closure, signs input 1, and returns the intent plus the taproot key (so @@ -1336,6 +1425,10 @@ func (m *mockedRepoManager) Rounds() domain.RoundRepository { return nil } +func (m *mockedRepoManager) Markers() domain.MarkerRepository { + return nil +} + func (m *mockedRepoManager) Vtxos() domain.VtxoRepository { if v := m.Called().Get(0); v != nil { return v.(domain.VtxoRepository) @@ -1343,6 +1436,36 @@ func (m *mockedRepoManager) Vtxos() domain.VtxoRepository { return nil } +func (m *mockedRepoManager) OffchainTxs() domain.OffchainTxRepository { + if v := m.Called().Get(0); v != nil { + return v.(domain.OffchainTxRepository) + } + return nil +} + +type mockedOffchainTxRepo struct { + mock.Mock + domain.OffchainTxRepository // unimplemented methods panic on call +} + +func (m *mockedOffchainTxRepo) GetOffchainTx(ctx context.Context, txid string) (*domain.OffchainTx, error) { + args := m.Called(ctx, txid) + if v := args.Get(0); v != nil { + return v.(*domain.OffchainTx), args.Error(1) + } + return nil, args.Error(1) +} + +func (m *mockedOffchainTxRepo) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + args := m.Called(ctx, txids) + if v := args.Get(0); v != nil { + return v.([]*domain.OffchainTx), args.Error(1) + } + return nil, args.Error(1) +} + type mockedWallet struct { mock.Mock ports.WalletService // unimplemented methods panic on call diff --git a/internal/core/application/indexer_test.go b/internal/core/application/indexer_test.go new file mode 100644 index 000000000..ea6d7b24e --- /dev/null +++ b/internal/core/application/indexer_test.go @@ -0,0 +1,1621 @@ +package application + +import ( + "context" + "encoding/base64" + "fmt" + "sort" + "strings" + "testing" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/btcsuite/btcd/btcutil/psbt" + "github.com/btcsuite/btcd/chaincfg/chainhash" + 
"github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// Mock implementations for indexer tests + +type mockVtxoRepoForIndexer struct { + mock.Mock +} + +func (m *mockVtxoRepoForIndexer) GetVtxos( + ctx context.Context, + outpoints []domain.Outpoint, +) ([]domain.Vtxo, error) { + args := m.Called(ctx, outpoints) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]domain.Vtxo), args.Error(1) +} + +// Stub implementations for unused VtxoRepository methods +func (m *mockVtxoRepoForIndexer) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) error { + return nil +} + +func (m *mockVtxoRepoForIndexer) SettleVtxos( + ctx context.Context, + spentVtxos map[domain.Outpoint]string, + commitmentTxid string, +) error { + return nil +} + +func (m *mockVtxoRepoForIndexer) SpendVtxos( + ctx context.Context, + spentVtxos map[domain.Outpoint]string, + arkTxid string, +) error { + return nil +} + +func (m *mockVtxoRepoForIndexer) UnrollVtxos( + ctx context.Context, + outpoints []domain.Outpoint, +) error { + return nil +} + +func (m *mockVtxoRepoForIndexer) GetAllNonUnrolledVtxos( + ctx context.Context, + pubkey string, +) ([]domain.Vtxo, []domain.Vtxo, error) { + return nil, nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetAllSweepableUnrolledVtxos( + ctx context.Context, +) ([]domain.Vtxo, error) { + return nil, nil +} +func (m *mockVtxoRepoForIndexer) GetAllVtxos(ctx context.Context) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetAllVtxosWithPubKeys( + ctx context.Context, + pubkeys []string, + after, before int64, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetExpiringLiquidity( + ctx context.Context, + after, before int64, +) (uint64, error) { + return 0, nil +} +func (m *mockVtxoRepoForIndexer) GetRecoverableLiquidity(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (m 
*mockVtxoRepoForIndexer) UpdateVtxosExpiration( + ctx context.Context, + outpoints []domain.Outpoint, + expiresAt int64, +) error { + return nil +} + +func (m *mockVtxoRepoForIndexer) GetLeafVtxosForBatch( + ctx context.Context, + txid string, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetSweepableVtxosByCommitmentTxid( + ctx context.Context, + commitmentTxid string, +) ([]domain.Outpoint, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetAllChildrenVtxos( + ctx context.Context, + outpoint domain.Outpoint, +) ([]domain.Outpoint, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetVtxoPubKeysByCommitmentTxid( + ctx context.Context, + commitmentTxid string, + withMinimumAmount uint64, +) ([]string, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetPendingSpentVtxosWithPubKeys( + ctx context.Context, + pubkeys []string, + after, before int64, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepoForIndexer) GetPendingSpentVtxosWithOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, +) ([]domain.Vtxo, error) { + return nil, nil +} +func (m *mockVtxoRepoForIndexer) Close() {} + +type mockMarkerRepoForIndexer struct { + mock.Mock +} + +func (m *mockMarkerRepoForIndexer) GetMarker( + ctx context.Context, + id string, +) (*domain.Marker, error) { + args := m.Called(ctx, id) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*domain.Marker), args.Error(1) +} + +func (m *mockMarkerRepoForIndexer) GetVtxoChainByMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.Vtxo, error) { + args := m.Called(ctx, markerIDs) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]domain.Vtxo), args.Error(1) +} + +// Stub implementations for unused MarkerRepository methods +func (m *mockMarkerRepoForIndexer) AddMarker(ctx context.Context, marker domain.Marker) error { + return nil +} + +func (m 
*mockMarkerRepoForIndexer) GetMarkersByDepth( + ctx context.Context, + depth uint32, +) ([]domain.Marker, error) { + return nil, nil +} + +func (m *mockMarkerRepoForIndexer) GetMarkersByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Marker, error) { + return nil, nil +} + +func (m *mockMarkerRepoForIndexer) GetMarkersByIds( + ctx context.Context, + ids []string, +) ([]domain.Marker, error) { + args := m.Called(ctx, ids) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]domain.Marker), args.Error(1) +} + +func (m *mockMarkerRepoForIndexer) SweepMarker( + ctx context.Context, + markerID string, + sweptAt int64, +) error { + return nil +} + +func (m *mockMarkerRepoForIndexer) BulkSweepMarkers( + ctx context.Context, + markerIDs []string, + sweptAt int64, +) error { + return nil +} + +func (m *mockMarkerRepoForIndexer) SweepMarkerWithDescendants( + ctx context.Context, + markerID string, + sweptAt int64, +) (int64, error) { + return 0, nil +} + +func (m *mockMarkerRepoForIndexer) IsMarkerSwept( + ctx context.Context, + markerID string, +) (bool, error) { + return false, nil +} + +func (m *mockMarkerRepoForIndexer) GetSweptMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.SweptMarker, error) { + return nil, nil +} + +func (m *mockMarkerRepoForIndexer) UpdateVtxoMarkers( + ctx context.Context, + outpoint domain.Outpoint, + markerIDs []string, +) error { + return nil +} + +func (m *mockMarkerRepoForIndexer) GetVtxosByMarker( + ctx context.Context, + markerID string, +) ([]domain.Vtxo, error) { + args := m.Called(ctx, markerID) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]domain.Vtxo), args.Error(1) +} + +func (m *mockMarkerRepoForIndexer) SweepVtxosByMarker( + ctx context.Context, + markerID string, +) (int64, error) { + return 0, nil +} + +func (m *mockMarkerRepoForIndexer) CreateRootMarkersForVtxos( + ctx context.Context, + vtxos []domain.Vtxo, +) error { 
+ return nil +} + +func (m *mockMarkerRepoForIndexer) GetVtxosByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockMarkerRepoForIndexer) GetVtxosByArkTxid( + ctx context.Context, + arkTxid string, +) ([]domain.Vtxo, error) { + return nil, nil +} +func (m *mockMarkerRepoForIndexer) SweepVtxoOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, + sweptAt int64, +) error { + return nil +} + +func (m *mockMarkerRepoForIndexer) Close() {} + +type mockOffchainTxRepoForIndexer struct { + mock.Mock +} + +func (m *mockOffchainTxRepoForIndexer) GetOffchainTx( + ctx context.Context, txid string, +) (*domain.OffchainTx, error) { + args := m.Called(ctx, txid) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).(*domain.OffchainTx), args.Error(1) +} + +func (m *mockOffchainTxRepoForIndexer) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + args := m.Called(ctx, txids) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]*domain.OffchainTx), args.Error(1) +} + +func (m *mockOffchainTxRepoForIndexer) AddOrUpdateOffchainTx( + ctx context.Context, offchainTx *domain.OffchainTx, +) error { + return nil +} + +func (m *mockOffchainTxRepoForIndexer) Close() {} + +type mockRepoManagerForIndexer struct { + vtxos *mockVtxoRepoForIndexer + markers *mockMarkerRepoForIndexer + offchainTxs *mockOffchainTxRepoForIndexer +} + +func (m *mockRepoManagerForIndexer) Events() domain.EventRepository { return nil } +func (m *mockRepoManagerForIndexer) Rounds() domain.RoundRepository { return nil } +func (m *mockRepoManagerForIndexer) Vtxos() domain.VtxoRepository { return m.vtxos } +func (m *mockRepoManagerForIndexer) Markers() domain.MarkerRepository { + if m.markers == nil { + return nil + } + return m.markers +} +func (m *mockRepoManagerForIndexer) ScheduledSession() domain.ScheduledSessionRepo { return nil } 
+func (m *mockRepoManagerForIndexer) OffchainTxs() domain.OffchainTxRepository {
+	if m.offchainTxs == nil {
+		return nil
+	}
+	return m.offchainTxs
+}
+func (m *mockRepoManagerForIndexer) Convictions() domain.ConvictionRepository { return nil }
+func (m *mockRepoManagerForIndexer) Assets() domain.AssetRepository { return nil }
+func (m *mockRepoManagerForIndexer) Fees() domain.FeeRepository { return nil }
+func (m *mockRepoManagerForIndexer) RegisterBatchUpdateHandler(func(data domain.Round)) {}
+func (m *mockRepoManagerForIndexer) RegisterOffchainTxUpdateHandler(func(domain.OffchainTx)) {}
+func (m *mockRepoManagerForIndexer) Close() {}
+
+// newChainTestIndexer creates a fresh set of mock repos and an indexerService for testing.
+func newChainTestIndexer() (
+	*mockVtxoRepoForIndexer,
+	*mockMarkerRepoForIndexer,
+	*indexerService,
+) {
+	vtxoRepo := &mockVtxoRepoForIndexer{}
+	markerRepo := &mockMarkerRepoForIndexer{}
+	repoManager := &mockRepoManagerForIndexer{vtxos: vtxoRepo, markers: markerRepo}
+	indexer := &indexerService{repoManager: repoManager}
+	return vtxoRepo, markerRepo, indexer
+}
+
+// newChainTestIndexerWithOffchain creates mock repos including offchain tx repo.
+func newChainTestIndexerWithOffchain() (
+	*mockVtxoRepoForIndexer,
+	*mockMarkerRepoForIndexer,
+	*mockOffchainTxRepoForIndexer,
+	*indexerService,
+) {
+	vtxoRepo := &mockVtxoRepoForIndexer{}
+	markerRepo := &mockMarkerRepoForIndexer{}
+	offchainTxRepo := &mockOffchainTxRepoForIndexer{}
+	// Default: bulk fetch returns empty so the fallback to GetOffchainTx is used.
+	// Tests that want to verify bulk behavior can override with a more specific expectation.
+	offchainTxRepo.On("GetOffchainTxsByTxids", mock.Anything, mock.Anything).
+ Return([]*domain.OffchainTx{}, nil).Maybe() + repoManager := &mockRepoManagerForIndexer{ + vtxos: vtxoRepo, markers: markerRepo, offchainTxs: offchainTxRepo, + } + indexer := &indexerService{repoManager: repoManager} + return vtxoRepo, markerRepo, offchainTxRepo, indexer +} + +// makeCheckpointPSBT creates a base64-encoded PSBT with a single input from +// the given previous outpoint. Used to build test checkpoint transactions. +func makeCheckpointPSBT(t *testing.T, inputTxid string, inputVout uint32) string { + t.Helper() + prevHash, err := chainhash.NewHashFromStr(inputTxid) + require.NoError(t, err) + + outPoint := wire.NewOutPoint(prevHash, inputVout) + output := wire.NewTxOut(1000, []byte{0x51}) // OP_TRUE + + p, err := psbt.New( + []*wire.OutPoint{outPoint}, + []*wire.TxOut{output}, + 2, 0, + []uint32{wire.MaxTxInSequenceNum}, + ) + require.NoError(t, err) + + b64, err := p.B64Encode() + require.NoError(t, err) + return b64 +} + +// TestEncodeDecodeChainCursor_RoundTrip verifies that encoding then decoding +// a frontier of outpoints returns the same outpoints. +func TestEncodeDecodeChainCursor_RoundTrip(t *testing.T) { + svc := &indexerService{} + frontier := []domain.Outpoint{ + {Txid: "abc123", VOut: 0}, + {Txid: "def456", VOut: 2}, + {Txid: "ghi789", VOut: 1}, + } + + token := svc.encodeChainCursor(frontier) + require.NotEmpty(t, token) + + decoded, err := svc.decodeChainCursor(token) + require.NoError(t, err) + require.Equal(t, frontier, decoded) +} + +// TestEncodeDecodeChainCursor_EmptyFrontier verifies that an empty frontier +// encodes to an empty string. +func TestEncodeDecodeChainCursor_EmptyFrontier(t *testing.T) { + svc := &indexerService{} + token := svc.encodeChainCursor(nil) + require.Empty(t, token) + + token = svc.encodeChainCursor([]domain.Outpoint{}) + require.Empty(t, token) +} + +// TestDecodeChainCursor_InvalidBase64 verifies that invalid base64 returns an error. 
+func TestDecodeChainCursor_InvalidBase64(t *testing.T) { + svc := &indexerService{} + _, err := svc.decodeChainCursor("not-valid-base64!!!") + require.Error(t, err) + require.Contains(t, err.Error(), "invalid base64") +} + +// TestDecodeChainCursor_InvalidJSON verifies that valid base64 but invalid JSON +// returns an error. +func TestDecodeChainCursor_InvalidJSON(t *testing.T) { + svc := &indexerService{} + // Encode something that is not valid JSON + token := "bm90LWpzb24" // base64url of "not-json" + _, err := svc.decodeChainCursor(token) + require.Error(t, err) +} + +// TestDecodeChainCursor_HMACRejectsForgery verifies that a cursor signed with +// one key is rejected by a service with a different key. +func TestDecodeChainCursor_HMACRejectsForgery(t *testing.T) { + svc := &indexerService{cursorHMACKey: []byte("server-secret-key")} + frontier := []domain.Outpoint{{Txid: "abc123", VOut: 0}} + + token := svc.encodeChainCursor(frontier) + require.NotEmpty(t, token) + + // Valid decode with same key works. + decoded, err := svc.decodeChainCursor(token) + require.NoError(t, err) + require.Equal(t, frontier, decoded) + + // Forge a cursor with a different key — should be rejected. + forger := &indexerService{cursorHMACKey: []byte("attacker-key")} + forgedToken := forger.encodeChainCursor([]domain.Outpoint{{Txid: "victim-vtxo", VOut: 0}}) + _, err = svc.decodeChainCursor(forgedToken) + require.Error(t, err) + require.Contains(t, err.Error(), "signature mismatch") + + // Tampered cursor — modify one byte of a valid token. 
+ rawToken, _ := base64.RawURLEncoding.DecodeString(token) + rawToken[0] ^= 0xff + tampered := base64.RawURLEncoding.EncodeToString(rawToken) + _, err = svc.decodeChainCursor(tampered) + require.Error(t, err) +} + +// TestDecodeChainCursor_HMACEdgeCases covers malicious and accidental misuse of +// the cursor field: truncated tokens, empty strings, unsigned cursors sent to a +// signing server, replaying a valid cursor after the HMAC portion is stripped, etc. +func TestDecodeChainCursor_HMACEdgeCases(t *testing.T) { + svc := &indexerService{cursorHMACKey: []byte("server-secret-key")} + frontier := []domain.Outpoint{{Txid: "abc123", VOut: 0}} + validToken := svc.encodeChainCursor(frontier) + + t.Run("empty string", func(t *testing.T) { + _, err := svc.decodeChainCursor("") + require.Error(t, err) + }) + + t.Run("truncated token missing HMAC bytes", func(t *testing.T) { + raw, err := base64.RawURLEncoding.DecodeString(validToken) + require.NoError(t, err) + // Strip the 32-byte HMAC, leaving only the JSON payload. + truncated := base64.RawURLEncoding.EncodeToString(raw[:len(raw)-32]) + _, err = svc.decodeChainCursor(truncated) + require.Error(t, err) + }) + + t.Run("unsigned cursor rejected by signing server", func(t *testing.T) { + // A server with no HMAC key produces unsigned cursors. + noKey := &indexerService{} + unsigned := noKey.encodeChainCursor(frontier) + // A server WITH an HMAC key must reject it. + _, err := svc.decodeChainCursor(unsigned) + require.Error(t, err) + }) + + t.Run("hand-crafted JSON without HMAC", func(t *testing.T) { + // Attacker builds raw JSON and base64-encodes it, no HMAC. 
+ raw := []byte(`{"frontier":[{"txid":"victim","vout":0}]}`) + crafted := base64.RawURLEncoding.EncodeToString(raw) + _, err := svc.decodeChainCursor(crafted) + require.Error(t, err) + }) + + t.Run("cursor from restarted server with new key", func(t *testing.T) { + oldServer := &indexerService{cursorHMACKey: []byte("old-key")} + oldToken := oldServer.encodeChainCursor(frontier) + newServer := &indexerService{cursorHMACKey: []byte("new-key-after-restart")} + _, err := newServer.decodeChainCursor(oldToken) + require.Error(t, err) + require.Contains(t, err.Error(), "signature mismatch") + }) + + t.Run("swapped payload same length", func(t *testing.T) { + // Take a valid token, replace the JSON payload but keep the + // original HMAC — should fail because HMAC won't match. + raw, err := base64.RawURLEncoding.DecodeString(validToken) + require.NoError(t, err) + origHMAC := raw[len(raw)-32:] + newPayload := []byte(`{"frontier":[{"txid":"other","vout":0}]}`) + tampered := append(newPayload, origHMAC...) + token := base64.RawURLEncoding.EncodeToString(tampered) + _, err = svc.decodeChainCursor(token) + require.Error(t, err) + }) +} + +// TestEnsureVtxosCached_AllCacheHits verifies that when all outpoints are already +// in the cache, no DB call is made. 
+func TestEnsureVtxosCached_AllCacheHits(t *testing.T) { + vtxoRepo, _, indexer := newChainTestIndexer() + + ctx := context.Background() + cache := map[string]domain.Vtxo{ + "vtxo-1:0": {Outpoint: domain.Outpoint{Txid: "vtxo-1", VOut: 0}, Amount: 100}, + "vtxo-2:0": {Outpoint: domain.Outpoint{Txid: "vtxo-2", VOut: 0}, Amount: 200}, + } + loadedMarkers := make(map[string]bool) + + outpoints := []domain.Outpoint{ + {Txid: "vtxo-1", VOut: 0}, + {Txid: "vtxo-2", VOut: 0}, + } + + err := indexer.ensureVtxosCached(ctx, outpoints, cache, loadedMarkers) + require.NoError(t, err) + + // No DB call should be made + vtxoRepo.AssertNotCalled(t, "GetVtxos", mock.Anything, mock.Anything) +} + +// TestEnsureVtxosCached_CacheMissLoadsFromDBAndMarkerWindow verifies that cache +// misses trigger a DB lookup and marker window prefetch. +func TestEnsureVtxosCached_CacheMissLoadsFromDBAndMarkerWindow(t *testing.T) { + vtxoRepo, markerRepo, indexer := newChainTestIndexer() + + ctx := context.Background() + cache := make(map[string]domain.Vtxo) + loadedMarkers := make(map[string]bool) + + outpoints := []domain.Outpoint{{Txid: "vtxo-miss", VOut: 0}} + + // DB returns VTXO with a marker + dbVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-miss", VOut: 0}, + Amount: 500, + MarkerIDs: []string{"marker-100"}, + } + vtxoRepo.On("GetVtxos", ctx, outpoints).Return([]domain.Vtxo{dbVtxo}, nil) + + // Marker window returns additional VTXOs + windowVtxos := []domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "window-vtxo-1", VOut: 0}, Amount: 300}, + {Outpoint: domain.Outpoint{Txid: "window-vtxo-2", VOut: 0}, Amount: 400}, + } + markerRepo.On("GetVtxosByMarker", ctx, "marker-100").Return(windowVtxos, nil) + + err := indexer.ensureVtxosCached(ctx, outpoints, cache, loadedMarkers) + require.NoError(t, err) + + // Cache should contain the original VTXO plus window VTXOs + require.Contains(t, cache, "vtxo-miss:0") + require.Contains(t, cache, "window-vtxo-1:0") + require.Contains(t, cache, 
"window-vtxo-2:0") + + // Marker should be marked as loaded + require.True(t, loadedMarkers["marker-100"]) +} + +// TestEnsureVtxosCached_NilMarkerRepo verifies that when the marker repository +// is nil, ensureVtxosCached falls back to direct DB lookup without window prefetch. +func TestEnsureVtxosCached_NilMarkerRepo(t *testing.T) { + vtxoRepo := &mockVtxoRepoForIndexer{} + repoManager := &mockRepoManagerForIndexer{vtxos: vtxoRepo, markers: nil} + indexer := &indexerService{repoManager: repoManager} + + ctx := context.Background() + cache := make(map[string]domain.Vtxo) + loadedMarkers := make(map[string]bool) + + outpoints := []domain.Outpoint{{Txid: "vtxo-no-markers", VOut: 0}} + dbVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-no-markers", VOut: 0}, + Amount: 100, + MarkerIDs: []string{"marker-X"}, + } + vtxoRepo.On("GetVtxos", ctx, outpoints).Return([]domain.Vtxo{dbVtxo}, nil) + + err := indexer.ensureVtxosCached(ctx, outpoints, cache, loadedMarkers) + require.NoError(t, err) + + // VTXO should be cached even without marker window loading + require.Contains(t, cache, "vtxo-no-markers:0") +} + +// TestEnsureVtxosCached_DBErrorPropagated verifies that database errors +// are properly propagated. +func TestEnsureVtxosCached_DBErrorPropagated(t *testing.T) { + vtxoRepo, _, indexer := newChainTestIndexer() + + ctx := context.Background() + cache := make(map[string]domain.Vtxo) + loadedMarkers := make(map[string]bool) + + outpoints := []domain.Outpoint{{Txid: "vtxo-err", VOut: 0}} + vtxoRepo.On("GetVtxos", ctx, outpoints). + Return(nil, fmt.Errorf("database error")) + + err := indexer.ensureVtxosCached(ctx, outpoints, cache, loadedMarkers) + require.Error(t, err) + require.Contains(t, err.Error(), "database error") +} + +// TestEnsureVtxosCached_MarkerDedupAvoidsDuplicateLoad verifies that +// loadedMarkers prevents redundant GetVtxosByMarker calls when the same +// marker is encountered across multiple ensureVtxosCached invocations. 
+func TestEnsureVtxosCached_MarkerDedupAvoidsDuplicateLoad(t *testing.T) { + vtxoRepo, markerRepo, indexer := newChainTestIndexer() + + ctx := context.Background() + cache := make(map[string]domain.Vtxo) + loadedMarkers := make(map[string]bool) + + // First call: vtxo-1 has marker-A + vtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-1", VOut: 0}, + Amount: 100, + MarkerIDs: []string{"marker-A"}, + } + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: "vtxo-1", VOut: 0}}). + Return([]domain.Vtxo{vtxo1}, nil) + markerRepo.On("GetVtxosByMarker", ctx, "marker-A"). + Return([]domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "window-1", VOut: 0}, Amount: 200}, + }, nil).Once() // Expect exactly one call + + err := indexer.ensureVtxosCached( + ctx, + []domain.Outpoint{{Txid: "vtxo-1", VOut: 0}}, + cache, + loadedMarkers, + ) + require.NoError(t, err) + require.True(t, loadedMarkers["marker-A"]) + + // Second call: vtxo-2 also has marker-A + vtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-2", VOut: 0}, + Amount: 300, + MarkerIDs: []string{"marker-A"}, + } + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: "vtxo-2", VOut: 0}}). + Return([]domain.Vtxo{vtxo2}, nil) + + err = indexer.ensureVtxosCached( + ctx, + []domain.Outpoint{{Txid: "vtxo-2", VOut: 0}}, + cache, + loadedMarkers, + ) + require.NoError(t, err) + + // GetVtxosByMarker for marker-A should have been called only once + markerRepo.AssertNumberOfCalls(t, "GetVtxosByMarker", 1) +} + +// TestEnsureVtxosCached_GetVtxosByMarkerErrorSwallowed verifies that an error +// from GetVtxosByMarker is gracefully swallowed — the VTXO itself is still +// cached and the function returns no error. 
+func TestEnsureVtxosCached_GetVtxosByMarkerErrorSwallowed(t *testing.T) { + vtxoRepo, markerRepo, indexer := newChainTestIndexer() + + ctx := context.Background() + cache := make(map[string]domain.Vtxo) + loadedMarkers := make(map[string]bool) + + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-ok", VOut: 0}, + Amount: 500, + MarkerIDs: []string{"marker-bad"}, + } + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: "vtxo-ok", VOut: 0}}). + Return([]domain.Vtxo{vtxo}, nil) + markerRepo.On("GetVtxosByMarker", ctx, "marker-bad"). + Return(nil, fmt.Errorf("marker window load failed")) + + err := indexer.ensureVtxosCached( + ctx, + []domain.Outpoint{{Txid: "vtxo-ok", VOut: 0}}, + cache, + loadedMarkers, + ) + + // No error propagated + require.NoError(t, err) + // The VTXO itself is still in cache + require.Contains(t, cache, "vtxo-ok:0") + // Marker is marked as loaded (won't retry) + require.True(t, loadedMarkers["marker-bad"]) +} + +// TestGetVtxoChain_DefaultPageSizeWithTokenOnly verifies that when page is nil +// but a pageToken is provided, the default page size (maxPageSizeVtxoChain=100) +// is used instead of returning the full chain. +func TestGetVtxoChain_DefaultPageSizeWithTokenOnly(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + vtxoKey := setupPreconfirmedChain(t, ctx, vtxoRepo, markerRepo, offchainTxRepo) + + // Get the first page with an explicit page size to obtain a token + page := &Page{PageSize: 2} + resp1, err := indexer.GetVtxoChain(ctx, "", vtxoKey, page, "") + require.NoError(t, err) + require.NotEmpty(t, resp1.NextPageToken) + + // Resume with token but nil page — should use default page size (100), + // which is large enough to return the remaining chain in one shot. 
+	resp2, err := indexer.GetVtxoChain(ctx, "", vtxoKey, nil, resp1.NextPageToken)
+	require.NoError(t, err)
+	// Remaining chain: B(ark+cp) + C(ark) = 3 items, all fit in default page
+	require.Equal(t, 3, len(resp2.Chain))
+	require.Empty(t, resp2.NextPageToken)
+}
+
+// TestGetVtxoChain_InvalidPageToken verifies that an invalid page_token
+// returns an error.
+func TestGetVtxoChain_InvalidPageToken(t *testing.T) {
+	_, _, indexer := newChainTestIndexer()
+
+	ctx := context.Background()
+	vtxoKey := Outpoint{Txid: "abc123", VOut: 0}
+
+	_, err := indexer.GetVtxoChain(ctx, "", vtxoKey, nil, "invalid-token!!!")
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "invalid page_token")
+}
+
+// TestGetVtxoChain_BackwardCompat_NilPageEmptyToken verifies that when
+// page is nil and pageToken is empty, the VTXO not found error comes from
+// the DB lookup (not from pagination parsing), confirming backward compat.
+func TestGetVtxoChain_BackwardCompat_NilPageEmptyToken(t *testing.T) {
+	vtxoRepo, markerRepo, indexer := newChainTestIndexer()
+
+	ctx := context.Background()
+	vtxoKey := Outpoint{Txid: "root-vtxo", VOut: 0}
+
+	// Return no VTXOs so the chain walk fails with "vtxo not found"
+	vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{vtxoKey}).
+		Return([]domain.Vtxo{}, nil)
+	markerRepo.On("GetVtxosByMarker", ctx, mock.Anything).
+		Return([]domain.Vtxo{}, nil).Maybe()
+
+	_, err := indexer.GetVtxoChain(ctx, "", vtxoKey, nil, "")
+
+	// Error should be from the chain walk, not from pagination setup
+	require.Error(t, err)
+	require.Contains(t, err.Error(), "vtxo not found")
+	require.NotContains(t, err.Error(), "invalid page_token")
+}
+
+// setupPreconfirmedChain sets up a chain of preconfirmed VTXOs for pagination tests.
+// Returns the starting outpoint and configures all mock expectations on the given repos.
+// Chain: vtxo-A -> checkpoint(input=vtxo-B) -> vtxo-B -> checkpoint(input=vtxo-C) -> vtxo-C (terminal) +func setupPreconfirmedChain( + t *testing.T, + ctx context.Context, + vtxoRepo *mockVtxoRepoForIndexer, + markerRepo *mockMarkerRepoForIndexer, + offchainTxRepo *mockOffchainTxRepoForIndexer, +) Outpoint { + t.Helper() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + txidC := strings.Repeat("c", 64) + + vtxoA := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 1000, + } + vtxoB := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 2000, + } + vtxoC := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidC, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 3000, + } + + // VTXOs returned from DB + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidA, VOut: 0}}). + Return([]domain.Vtxo{vtxoA}, nil) + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidB, VOut: 0}}). + Return([]domain.Vtxo{vtxoB}, nil) + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidC, VOut: 0}}). + Return([]domain.Vtxo{vtxoC}, nil) + + // Marker repo won't be used (no markers on these VTXOs) + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // Checkpoint PSBTs: A's checkpoint points to B, B's checkpoint points to C + cpA := makeCheckpointPSBT(t, txidB, 0) + cpB := makeCheckpointPSBT(t, txidC, 0) + + offchainTxA := &domain.OffchainTx{ArkTxid: txidA, CheckpointTxs: map[string]string{"cp-a": cpA}} + offchainTxB := &domain.OffchainTx{ArkTxid: txidB, CheckpointTxs: map[string]string{"cp-b": cpB}} + offchainTxC := &domain.OffchainTx{ArkTxid: txidC, CheckpointTxs: map[string]string{}} + + offchainTxRepo.On("GetOffchainTxsByTxids", ctx, []string{txidA}). + Return([]*domain.OffchainTx{offchainTxA}, nil).Maybe() + offchainTxRepo.On("GetOffchainTxsByTxids", ctx, []string{txidB}). 
+ Return([]*domain.OffchainTx{offchainTxB}, nil).Maybe() + offchainTxRepo.On("GetOffchainTxsByTxids", ctx, []string{txidC}). + Return([]*domain.OffchainTx{offchainTxC}, nil).Maybe() + + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(offchainTxA, nil).Maybe() + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(offchainTxB, nil).Maybe() + offchainTxRepo.On("GetOffchainTx", ctx, txidC). + Return(offchainTxC, nil).Maybe() + + return Outpoint{Txid: txidA, VOut: 0} +} + +// TestGetVtxoChain_PaginationFirstPage verifies that the first page returns +// the expected number of items and a non-empty next_page_token when the chain +// exceeds the page size. +func TestGetVtxoChain_PaginationFirstPage(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + vtxoKey := setupPreconfirmedChain(t, ctx, vtxoRepo, markerRepo, offchainTxRepo) + + // Page size 2: vtxo-A produces 2 chain items (ark + checkpoint), + // then vtxo-B triggers early termination. + page := &Page{PageSize: 2} + resp, err := indexer.GetVtxoChain(ctx, "", vtxoKey, page, "") + + require.NoError(t, err) + require.Len(t, resp.Chain, 2) + require.Equal(t, IndexerChainedTxTypeArk, resp.Chain[0].Type) + require.Equal(t, IndexerChainedTxTypeCheckpoint, resp.Chain[1].Type) + require.NotEmpty(t, resp.NextPageToken, "should have next page token") +} + +// TestGetVtxoChain_PaginationResumeWithToken verifies that resuming with a +// page token continues the chain from where the previous page left off, +// eventually exhausting the chain with an empty token. 
+func TestGetVtxoChain_PaginationResumeWithToken(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + vtxoKey := setupPreconfirmedChain(t, ctx, vtxoRepo, markerRepo, offchainTxRepo) + + // Chain: A(ark+cp) -> B(ark+cp) -> C(ark) = 5 items total + // Page size 2: page1=2, page2=2, page3=1 + page := &Page{PageSize: 2} + + // Page 1 + resp1, err := indexer.GetVtxoChain(ctx, "", vtxoKey, page, "") + require.NoError(t, err) + require.Len(t, resp1.Chain, 2) + require.NotEmpty(t, resp1.NextPageToken) + + // Page 2: resume with token from page 1 + resp2, err := indexer.GetVtxoChain(ctx, "", vtxoKey, page, resp1.NextPageToken) + require.NoError(t, err) + require.Len(t, resp2.Chain, 2) + require.NotEmpty(t, resp2.NextPageToken) + + // Page 3: resume with token from page 2 + resp3, err := indexer.GetVtxoChain(ctx, "", vtxoKey, page, resp2.NextPageToken) + require.NoError(t, err) + require.Len(t, resp3.Chain, 1) + require.Empty(t, resp3.NextPageToken, "last page should have empty token") + + // Verify total items across all pages + totalItems := len(resp1.Chain) + len(resp2.Chain) + len(resp3.Chain) + require.Equal(t, 5, totalItems) + + // Verify chain types: each vtxo with checkpoints produces ark+checkpoint, + // terminal vtxo (C) produces only ark. + require.Equal(t, IndexerChainedTxTypeArk, resp3.Chain[0].Type) +} + +// TestGetVtxoChain_ShortChainNoToken verifies that when the chain is shorter +// than the page size, all items are returned with an empty next_page_token. 
+func TestGetVtxoChain_ShortChainNoToken(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + + // Single terminal preconfirmed VTXO (no checkpoints) + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 1000, + } + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidA, VOut: 0}}). + Return([]domain.Vtxo{vtxo}, nil) + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + offchainTxA := &domain.OffchainTx{ArkTxid: txidA, CheckpointTxs: map[string]string{}} + offchainTxRepo.On("GetOffchainTxsByTxids", ctx, []string{txidA}). + Return([]*domain.OffchainTx{offchainTxA}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(offchainTxA, nil).Maybe() + + // Page size larger than chain + page := &Page{PageSize: 100} + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, page, "") + + require.NoError(t, err) + require.Len(t, resp.Chain, 1) // Just the ark tx + require.Empty(t, resp.NextPageToken, "short chain should have empty token") + require.Equal(t, IndexerChainedTxTypeArk, resp.Chain[0].Type) +} + +// TestGetVtxoChain_PageSizeRespected verifies that each page never exceeds the +// page size (with allowance for grouped items from a single VTXO). +func TestGetVtxoChain_PageSizeRespected(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + vtxoKey := setupPreconfirmedChain(t, ctx, vtxoRepo, markerRepo, offchainTxRepo) + + // Use page size 1 — each VTXO produces 2 items (ark+checkpoint) for A and B, + // so pages will slightly overflow since items for one VTXO are emitted together. 
+ page := &Page{PageSize: 1} + + resp, err := indexer.GetVtxoChain(ctx, "", vtxoKey, page, "") + require.NoError(t, err) + + // vtxo-A emits 2 items (ark + checkpoint) even though pageSize=1, + // because all items for a VTXO are emitted together. + require.Equal(t, 2, len(resp.Chain)) + require.NotEmpty(t, resp.NextPageToken) +} + +// matchOutpoints returns a mock.MatchedBy matcher that matches a []domain.Outpoint +// argument containing exactly the given outpoints, regardless of order. +func matchOutpoints(expected ...domain.Outpoint) interface{} { + sorted := make([]string, len(expected)) + for i, op := range expected { + sorted[i] = op.String() + } + sort.Strings(sorted) + return mock.MatchedBy(func(ops []domain.Outpoint) bool { + if len(ops) != len(sorted) { + return false + } + cp := make([]string, len(ops)) + for i, op := range ops { + cp[i] = op.String() + } + sort.Strings(cp) + for i := range cp { + if cp[i] != sorted[i] { + return false + } + } + return true + }) +} + +// matchIDs returns a mock.MatchedBy matcher that matches a []string argument +// containing exactly the given IDs, regardless of order. This avoids flakes from +// non-deterministic map iteration in preloadByMarkers. +func matchIDs(expected ...string) interface{} { + sorted := make([]string, len(expected)) + copy(sorted, expected) + sort.Strings(sorted) + return mock.MatchedBy(func(ids []string) bool { + if len(ids) != len(sorted) { + return false + } + cp := make([]string, len(ids)) + copy(cp, ids) + sort.Strings(cp) + for i := range cp { + if cp[i] != sorted[i] { + return false + } + } + return true + }) +} + +// TestPreloadVtxosByMarkers_WalksMarkerChain verifies that preloadByMarkers +// follows the marker DAG upward and populates the cache with all discovered VTXOs. 
+func TestPreloadVtxosByMarkers_WalksMarkerChain(t *testing.T) { + _, markerRepo, indexer := newChainTestIndexer() + ctx := context.Background() + + // Chain: vtxo-leaf has marker-200, which has parent marker-100, which has parent marker-0. + vtxoLeaf := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-leaf", VOut: 0}, + Amount: 100, + MarkerIDs: []string{"marker-200"}, + } + + // GetVtxoChainByMarkers returns VTXOs for each marker level. + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-200")). + Return([]domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "vtxo-200a", VOut: 0}, Amount: 200}, + {Outpoint: domain.Outpoint{Txid: "vtxo-200b", VOut: 0}, Amount: 201}, + }, nil) + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-100")). + Return([]domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "vtxo-100a", VOut: 0}, Amount: 300}, + }, nil) + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-0")). + Return([]domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "vtxo-0a", VOut: 0}, Amount: 400}, + }, nil) + + // GetMarkersByIds returns marker objects with parent pointers. + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-200")). + Return([]domain.Marker{ + {ID: "marker-200", Depth: 200, ParentMarkerIDs: []string{"marker-100"}}, + }, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-100")). + Return([]domain.Marker{ + {ID: "marker-100", Depth: 100, ParentMarkerIDs: []string{"marker-0"}}, + }, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-0")). + Return([]domain.Marker{ + {ID: "marker-0", Depth: 0, ParentMarkerIDs: nil}, + }, nil) + + cache := make(map[string]domain.Vtxo) + offchainCache := make(map[string]*domain.OffchainTx) + err := indexer.preloadByMarkers(ctx, []domain.Vtxo{vtxoLeaf}, cache, offchainCache) + require.NoError(t, err) + + // Cache should contain the seed vtxo plus all vtxos from all marker levels. 
+ require.Contains(t, cache, "vtxo-leaf:0") + require.Contains(t, cache, "vtxo-200a:0") + require.Contains(t, cache, "vtxo-200b:0") + require.Contains(t, cache, "vtxo-100a:0") + require.Contains(t, cache, "vtxo-0a:0") + require.Len(t, cache, 5) + + markerRepo.AssertNumberOfCalls(t, "GetVtxoChainByMarkers", 3) + markerRepo.AssertNumberOfCalls(t, "GetMarkersByIds", 3) +} + +// TestPreloadVtxosByMarkers_NoCycleLoop verifies that the visited set prevents +// infinite loops when markers form a cycle. +func TestPreloadVtxosByMarkers_NoCycleLoop(t *testing.T) { + _, markerRepo, indexer := newChainTestIndexer() + ctx := context.Background() + + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo-cycle", VOut: 0}, + Amount: 100, + MarkerIDs: []string{"marker-A"}, + } + + // marker-A -> marker-B -> marker-A (cycle) + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-A")). + Return([]domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "vtxo-a", VOut: 0}, Amount: 100}, + }, nil) + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-B")). + Return([]domain.Vtxo{ + {Outpoint: domain.Outpoint{Txid: "vtxo-b", VOut: 0}, Amount: 200}, + }, nil) + + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-A")). + Return([]domain.Marker{ + {ID: "marker-A", Depth: 0, ParentMarkerIDs: []string{"marker-B"}}, + }, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-B")). + Return([]domain.Marker{ + {ID: "marker-B", Depth: 0, ParentMarkerIDs: []string{"marker-A"}}, + }, nil) + + cache := make(map[string]domain.Vtxo) + offchainCache := make(map[string]*domain.OffchainTx) + err := indexer.preloadByMarkers(ctx, []domain.Vtxo{vtxo}, cache, offchainCache) + require.NoError(t, err) + + // Should terminate without looping forever. + require.Contains(t, cache, "vtxo-cycle:0") + require.Contains(t, cache, "vtxo-a:0") + require.Contains(t, cache, "vtxo-b:0") + + // Each marker queried exactly once. 
+ markerRepo.AssertNumberOfCalls(t, "GetVtxoChainByMarkers", 2) + markerRepo.AssertNumberOfCalls(t, "GetMarkersByIds", 2) +} + +// TestGetVtxoChain_WithMarkers_UsesPreload verifies that GetVtxoChain uses +// preloadByMarkers when VTXOs have markers, and that the main loop +// hits the cache instead of making additional DB calls. +func TestGetVtxoChain_WithMarkers_UsesPreload(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + txidC := strings.Repeat("c", 64) + + vtxoA := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 1000, + MarkerIDs: []string{"marker-200"}, + } + vtxoB := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 2000, + MarkerIDs: []string{"marker-100"}, + } + vtxoC := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidC, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 3000, + } + + // Initial GetVtxos call for preload (frontier = [vtxoA]). + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidA, VOut: 0}}). + Return([]domain.Vtxo{vtxoA}, nil) + + // Preload via marker chain: marker-200 -> marker-100 -> marker-0 (no parent). + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-200")). + Return([]domain.Vtxo{vtxoA, vtxoB}, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-200")). + Return([]domain.Marker{ + {ID: "marker-200", Depth: 200, ParentMarkerIDs: []string{"marker-100"}}, + }, nil) + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-100")). + Return([]domain.Vtxo{vtxoB, vtxoC}, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("marker-100")). + Return([]domain.Marker{ + {ID: "marker-100", Depth: 100, ParentMarkerIDs: nil}, + }, nil) + + // ensureVtxosCached will find cache hits for B and C (preloaded), + // so no additional GetVtxos calls for them. 
+ // Marker window loading via GetVtxosByMarker is still called for markers on + // cache misses, but since everything is preloaded there are no misses. + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // Offchain tx setup for preconfirmed chain. + cpA := makeCheckpointPSBT(t, txidB, 0) + cpB := makeCheckpointPSBT(t, txidC, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-a": cpA}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-b": cpB}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidC). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, nil, "") + require.NoError(t, err) + require.Equal(t, 5, len(resp.Chain)) // A(ark+cp) + B(ark+cp) + C(ark) + + // GetVtxoChainByMarkers should have been called (preload path used). + markerRepo.AssertCalled(t, "GetVtxoChainByMarkers", ctx, matchIDs("marker-200")) + markerRepo.AssertCalled(t, "GetVtxoChainByMarkers", ctx, matchIDs("marker-100")) + + // GetVtxos should only be called once (for the initial preload fetch), + // not for B or C individually — they were already in the cache. + vtxoRepo.AssertNumberOfCalls(t, "GetVtxos", 1) +} + +// TestGetVtxoChain_PreloadReducesDBCalls builds a 500-VTXO preconfirmed chain +// with markers every 100 VTXOs and verifies that preloading reduces GetVtxos +// calls from ~500 (one per VTXO) to 1 (the initial frontier fetch). +func TestGetVtxoChain_PreloadReducesDBCalls(t *testing.T) { + const chainLen = 500 + const markersCount = chainLen / int(domain.MarkerInterval) // 5 + + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + // Generate txids and VTXOs grouped by marker bucket. 
+ txids := make([]string, chainLen) + vtxos := make([]domain.Vtxo, chainLen) + for i := 0; i < chainLen; i++ { + txids[i] = fmt.Sprintf("%064x", i) + markerID := fmt.Sprintf("m-%d", i/int(domain.MarkerInterval)) + vtxos[i] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txids[i], VOut: 0}, + Preconfirmed: true, + ExpiresAt: int64(1000 + i), + MarkerIDs: []string{markerID}, + } + } + + // Preload: GetVtxos for frontier (single call). + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txids[0], VOut: 0}}). + Return([]domain.Vtxo{vtxos[0]}, nil) + + // Preload: marker chain m-0 → m-1 → m-2 → m-3 → m-4. + for m := 0; m < markersCount; m++ { + mid := fmt.Sprintf("m-%d", m) + batch := vtxos[m*int(domain.MarkerInterval) : (m+1)*int(domain.MarkerInterval)] + + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs(mid)). + Return(batch, nil) + + var parentIDs []string + if m+1 < markersCount { + parentIDs = []string{fmt.Sprintf("m-%d", m+1)} + } + markerRepo.On("GetMarkersByIds", ctx, matchIDs(mid)). + Return([]domain.Marker{ + {ID: mid, Depth: uint32(m * int(domain.MarkerInterval)), ParentMarkerIDs: parentIDs}, + }, nil) + } + + // Marker window (won't be called — all cache hits from preload). + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // Offchain tx: each vtxo_i has a checkpoint pointing to vtxo_{i+1}. + for i := 0; i < chainLen-1; i++ { + cp := makeCheckpointPSBT(t, txids[i+1], 0) + offchainTxRepo.On("GetOffchainTx", ctx, txids[i]). + Return(&domain.OffchainTx{ + CheckpointTxs: map[string]string{fmt.Sprintf("cp-%d", i): cp}, + }, nil) + } + // Terminal VTXO (no checkpoints). + offchainTxRepo.On("GetOffchainTx", ctx, txids[chainLen-1]). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txids[0], VOut: 0}, nil, "") + require.NoError(t, err) + + // Each non-terminal VTXO produces 2 items (ark + checkpoint), terminal produces 1. 
+ expectedItems := (chainLen-1)*2 + 1 + require.Equal(t, expectedItems, len(resp.Chain)) + + // Key assertion: GetVtxos called only 1 time (preload frontier fetch). + // Without preloading this would be ~500 individual DB calls. + vtxoRepo.AssertNumberOfCalls(t, "GetVtxos", 1) + + // Marker-based preload: 5 bulk fetches + 5 marker lookups = 10 total queries. + markerRepo.AssertNumberOfCalls(t, "GetVtxoChainByMarkers", markersCount) + markerRepo.AssertNumberOfCalls(t, "GetMarkersByIds", markersCount) +} + +// TestGetVtxoChain_PreloadMarkerErrorFallback verifies that when the marker +// repo errors during preloadByMarkers, GetVtxoChain still returns the correct +// chain via the per-hop GetVtxos + ensureVtxosCached fallback, rather than +// aborting the request entirely. +func TestGetVtxoChain_PreloadMarkerErrorFallback(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + txidC := strings.Repeat("c", 64) + + vtxoA := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 1000, + MarkerIDs: []string{"marker-A"}, + } + vtxoB := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 2000, + MarkerIDs: []string{"marker-B"}, + } + vtxoC := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidC, VOut: 0}, + Preconfirmed: true, + ExpiresAt: 3000, + MarkerIDs: []string{"marker-C"}, + } + + // Initial preload fetch for the frontier. + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidA, VOut: 0}}). + Return([]domain.Vtxo{vtxoA}, nil) + + // Preload's first marker lookup fails — this is the fault we're injecting. + // Per-hop fallback should take over from here. + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("marker-A")). 
+ Return(nil, fmt.Errorf("transient marker repo failure")) + + // ensureVtxosCached fetches B and C on cache miss. The fix lets these run + // even though preload aborted partway through. + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidB, VOut: 0}}). + Return([]domain.Vtxo{vtxoB}, nil) + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{{Txid: txidC, VOut: 0}}). + Return([]domain.Vtxo{vtxoC}, nil) + + // Marker window loading during the walk — can either succeed empty or + // error; ensureVtxosCached logs and continues either way. + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // Offchain tx for the preconfirmed chain: A → B → C. + cpA := makeCheckpointPSBT(t, txidB, 0) + cpB := makeCheckpointPSBT(t, txidC, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-a": cpA}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-b": cpB}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidC). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, nil, "") + require.NoError(t, err, "marker preload failure must not abort GetVtxoChain") + require.Equal(t, 5, len(resp.Chain)) // A(ark+cp) + B(ark+cp) + C(ark) + + // The preload GetVtxoChainByMarkers was attempted (and failed). + markerRepo.AssertCalled(t, "GetVtxoChainByMarkers", ctx, matchIDs("marker-A")) + // And the fallback did per-hop GetVtxos for B and C (plus the initial A). + vtxoRepo.AssertNumberOfCalls(t, "GetVtxos", 3) +} + +// TestGetVtxoChain_Fanout verifies that a VTXO with 2 checkpoints pointing +// to different parents correctly traverses both branches. 
+// +// A --(cp1)--> B +// A --(cp2)--> C +func TestGetVtxoChain_Fanout(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + txidC := strings.Repeat("c", 64) + + vtxoA := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, Preconfirmed: true, ExpiresAt: 1000} + vtxoB := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, Preconfirmed: true, ExpiresAt: 2000} + vtxoC := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidC, VOut: 0}, Preconfirmed: true, ExpiresAt: 3000} + + // Preload frontier fetch + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{vtxoA.Outpoint}). + Return([]domain.Vtxo{vtxoA}, nil) + // ensureVtxosCached for B and C (order-independent) + vtxoRepo.On("GetVtxos", ctx, matchOutpoints(vtxoB.Outpoint, vtxoC.Outpoint)). + Return([]domain.Vtxo{vtxoB, vtxoC}, nil) + + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // A has 2 checkpoints: one to B, one to C + cpB := makeCheckpointPSBT(t, txidB, 0) + cpC := makeCheckpointPSBT(t, txidC, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-b": cpB, "cp-c": cpC}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidC). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, nil, "") + require.NoError(t, err) + + // A: ark + 2 checkpoints = 3. B: ark = 1. C: ark = 1. Total: 5. 
+ require.Equal(t, 5, len(resp.Chain)) + + // A is always the first ark tx + require.Equal(t, txidA, resp.Chain[0].Txid) + require.Equal(t, IndexerChainedTxTypeArk, resp.Chain[0].Type) + require.Len(t, resp.Chain[0].Spends, 2) + + // Count chain item types + arkCount, cpCount := 0, 0 + for _, item := range resp.Chain { + switch item.Type { + case IndexerChainedTxTypeArk: + arkCount++ + case IndexerChainedTxTypeCheckpoint: + cpCount++ + } + } + require.Equal(t, 3, arkCount) + require.Equal(t, 2, cpCount) +} + +// TestGetVtxoChain_Diamond verifies that two paths converging on the same +// ancestor VTXO only process that ancestor once. +// +// A --(cp1)--> B --(cp)--> D +// A --(cp2)--> C --(cp)--> D (same D) +func TestGetVtxoChain_Diamond(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + txidC := strings.Repeat("c", 64) + txidD := strings.Repeat("d", 64) + + vtxoA := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, Preconfirmed: true, ExpiresAt: 1000} + vtxoB := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, Preconfirmed: true, ExpiresAt: 2000} + vtxoC := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidC, VOut: 0}, Preconfirmed: true, ExpiresAt: 3000} + vtxoD := domain.Vtxo{Outpoint: domain.Outpoint{Txid: txidD, VOut: 0}, Preconfirmed: true, ExpiresAt: 4000} + + // Preload frontier + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{vtxoA.Outpoint}). + Return([]domain.Vtxo{vtxoA}, nil) + // B and C fetched together (order varies due to map iteration) + vtxoRepo.On("GetVtxos", ctx, matchOutpoints(vtxoB.Outpoint, vtxoC.Outpoint)). + Return([]domain.Vtxo{vtxoB, vtxoC}, nil) + // D appears as [D, D] because both B and C point to it before D is visited. 
+ vtxoRepo.On("GetVtxos", ctx, mock.MatchedBy(func(ops []domain.Outpoint) bool { + for _, op := range ops { + if op.String() != vtxoD.Outpoint.String() { + return false + } + } + return len(ops) > 0 + })).Return([]domain.Vtxo{vtxoD}, nil) + + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // A fans out to B and C + cpB := makeCheckpointPSBT(t, txidB, 0) + cpC := makeCheckpointPSBT(t, txidC, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-b": cpB, "cp-c": cpC}}, nil) + + // B converges to D + cpBD := makeCheckpointPSBT(t, txidD, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-bd": cpBD}}, nil) + + // C converges to same D + cpCD := makeCheckpointPSBT(t, txidD, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidC). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-cd": cpCD}}, nil) + + // D is terminal + offchainTxRepo.On("GetOffchainTx", ctx, txidD). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, nil, "") + require.NoError(t, err) + + // A: ark + 2cp = 3. B: ark + 1cp = 2. C: ark + 1cp = 2. D: ark = 1. Total: 8. + require.Equal(t, 8, len(resp.Chain)) + + // D must appear exactly once despite convergence from B and C. + dCount := 0 + for _, item := range resp.Chain { + if item.Txid == txidD { + dCount++ + } + } + require.Equal(t, 1, dCount, "converged VTXO D should appear exactly once") +} + +// TestGetVtxoChain_MarkerBoundaryStart verifies that a chain starting exactly +// at marker boundary depth 0 preloads correctly (no parents to walk). 
+func TestGetVtxoChain_MarkerBoundaryStart(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + + vtxoA := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, Preconfirmed: true, + ExpiresAt: 1000, MarkerIDs: []string{"m-0"}, + } + vtxoB := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, Preconfirmed: true, + ExpiresAt: 2000, MarkerIDs: []string{"m-0"}, + } + + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{vtxoA.Outpoint}). + Return([]domain.Vtxo{vtxoA}, nil) + + // Preload: marker m-0 at depth 0 with no parents. + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("m-0")). + Return([]domain.Vtxo{vtxoA, vtxoB}, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("m-0")). + Return([]domain.Marker{ + {ID: "m-0", Depth: 0, ParentMarkerIDs: nil}, + }, nil) + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + cpB := makeCheckpointPSBT(t, txidB, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-b": cpB}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, nil, "") + require.NoError(t, err) + require.Equal(t, 3, len(resp.Chain)) // A(ark) + cp + B(ark) + + // Both VTXOs were preloaded via marker — only the frontier fetch needed. + vtxoRepo.AssertNumberOfCalls(t, "GetVtxos", 1) +} + +// TestGetVtxoChain_OverlappingMarkers verifies correct deduplication when a +// VTXO has multiple markers and one marker is both directly attached AND +// a parent of another marker. +// +// A (markers: m-a, m-b) -> B (marker: m-b) -> C (no markers) +// m-a has parent m-b, so m-b is already visited when discovered as parent. 
+func TestGetVtxoChain_OverlappingMarkers(t *testing.T) { + vtxoRepo, markerRepo, offchainTxRepo, indexer := newChainTestIndexerWithOffchain() + ctx := context.Background() + + txidA := strings.Repeat("a", 64) + txidB := strings.Repeat("b", 64) + txidC := strings.Repeat("c", 64) + + vtxoA := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, Preconfirmed: true, + ExpiresAt: 1000, MarkerIDs: []string{"m-a", "m-b"}, + } + vtxoB := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, Preconfirmed: true, + ExpiresAt: 2000, MarkerIDs: []string{"m-b"}, + } + vtxoC := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: txidC, VOut: 0}, Preconfirmed: true, + ExpiresAt: 3000, + } + + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{vtxoA.Outpoint}). + Return([]domain.Vtxo{vtxoA}, nil) + + // Preload: m-a and m-b fetched together. m-a's parent m-b is already visited. + markerRepo.On("GetVtxoChainByMarkers", ctx, matchIDs("m-a", "m-b")). + Return([]domain.Vtxo{vtxoA, vtxoB}, nil) + markerRepo.On("GetMarkersByIds", ctx, matchIDs("m-a", "m-b")). + Return([]domain.Marker{ + {ID: "m-a", Depth: 200, ParentMarkerIDs: []string{"m-b"}}, + {ID: "m-b", Depth: 100, ParentMarkerIDs: nil}, + }, nil) + markerRepo.On("GetVtxosByMarker", ctx, mock.Anything). + Return([]domain.Vtxo{}, nil).Maybe() + + // C not in any marker group — cache miss triggers DB fetch. + vtxoRepo.On("GetVtxos", ctx, []domain.Outpoint{vtxoC.Outpoint}). + Return([]domain.Vtxo{vtxoC}, nil) + + cpB := makeCheckpointPSBT(t, txidB, 0) + cpC := makeCheckpointPSBT(t, txidC, 0) + offchainTxRepo.On("GetOffchainTx", ctx, txidA). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-b": cpB}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidB). + Return(&domain.OffchainTx{CheckpointTxs: map[string]string{"cp-c": cpC}}, nil) + offchainTxRepo.On("GetOffchainTx", ctx, txidC). 
+ Return(&domain.OffchainTx{CheckpointTxs: map[string]string{}}, nil) + + resp, err := indexer.GetVtxoChain(ctx, "", Outpoint{Txid: txidA, VOut: 0}, nil, "") + require.NoError(t, err) + require.Equal(t, 5, len(resp.Chain)) + + // 1 preload frontier + 1 for C (cache miss). A and B were preloaded via markers. + vtxoRepo.AssertNumberOfCalls(t, "GetVtxos", 2) + // Only 1 batch of marker fetches (m-a + m-b together; m-a's parent m-b already visited). + markerRepo.AssertNumberOfCalls(t, "GetVtxoChainByMarkers", 1) + markerRepo.AssertNumberOfCalls(t, "GetMarkersByIds", 1) +} diff --git a/internal/core/application/service.go b/internal/core/application/service.go index 7bb8b29a3..bfaaacbe8 100644 --- a/internal/core/application/service.go +++ b/internal/core/application/service.go @@ -8,6 +8,7 @@ import ( "math" "runtime" "slices" + "sort" "strings" "sync" "sync/atomic" @@ -371,6 +372,30 @@ func (s *service) registerEventHandlers() { return } + if len(spentVtxos) != len(spentVtxoKeys) { + // Partial parent read: this means the offchain tx's finalization + // event references spent vtxos that we can no longer resolve from + // the DB. Drop propagation rather than emit a half-populated event; + // log at Error level so this inconsistency is surfaced for investigation. + log.Errorf( + "incomplete parent read: got %d of %d spent vtxos for tx %s; "+ + "dropping TransactionEvent propagation", + len(spentVtxos), len(spentVtxoKeys), txid, + ) + return + } + + // Calculate depth for new vtxos: max(parent depths) + 1 + var maxDepth uint32 + for _, v := range spentVtxos { + if v.Depth > maxDepth { + maxDepth = v.Depth + } + } + for i := range newVtxos { + newVtxos[i].Depth = maxDepth + 1 + } + + // Make sure to mark new vtxos as swept if any of the spent inputs is swept as well or + // expired.
sweptIns := false @@ -1080,9 +1105,33 @@ func (s *service) SubmitOffchainTx( signedCheckpointTxsMap[rebuiltCheckpointTx.UnsignedTx.TxID()] = signedCheckpointTx } + // Compute depth and parent markers from spent VTXOs for the accepted event. + var maxDepth uint32 + parentMarkerSet := make(map[string]struct{}) + for _, v := range spentVtxos { + if v.Depth > maxDepth { + maxDepth = v.Depth + } + for _, markerID := range v.MarkerIDs { + if markerID != "" { + parentMarkerSet[markerID] = struct{}{} + } + } + } + var newDepth uint32 + if len(spentVtxos) > 0 { + newDepth = maxDepth + 1 + } + parentMarkerIDs := make([]string, 0, len(parentMarkerSet)) + for id := range parentMarkerSet { + parentMarkerIDs = append(parentMarkerIDs, id) + } + sort.Strings(parentMarkerIDs) + change, err := offchainTx.Accept( fullySignedArkTx, signedCheckpointTxsMap, commitmentTxsByCheckpointTxid, rootCommitmentTxid, expiration, + newDepth, parentMarkerIDs, ) if err != nil { return nil, errors.INTERNAL_ERROR.New("failed to accept offchain tx: %w", err). diff --git a/internal/core/application/sweeper.go b/internal/core/application/sweeper.go index 20e51ffb7..bb1ab55c6 100644 --- a/internal/core/application/sweeper.go +++ b/internal/core/application/sweeper.go @@ -549,12 +549,19 @@ func (s *sweeper) createBatchSweepTask(commitmentTxid, vtxoTreeRootTxid string) } for _, leaf := range vtxosLeaves { - vtxo := domain.Outpoint{ - Txid: leaf.UnsignedTx.TxID(), - VOut: 0, + // The VTXO is the first non-anchor output; leaf txs can + // carry an anchor at vout 0, so the VTXO is not always + // at vout 0. extractVtxoOutpoint handles that. 
+ vtxo, err := extractVtxoOutpoint(leaf) + if err != nil { + log.WithError(err).Errorf( + "failed to extract vtxo outpoint from leaf %s", + leaf.UnsignedTx.TxID(), + ) + continue } - sweepableVtxos = append(sweepableVtxos, vtxo) + sweepableVtxos = append(sweepableVtxos, *vtxo) } if len(sweepableVtxos) <= 0 { @@ -703,7 +710,7 @@ func (s *sweeper) createBatchSweepTask(commitmentTxid, vtxoTreeRootTxid string) // get all vtxos related to the leaf swept seen := make(map[string]struct{}) for _, leafVtxo := range leafVtxoKeys { - children, childErr := vtxoRepo.GetAllChildrenVtxos(ctx, leafVtxo.Txid) + children, childErr := vtxoRepo.GetAllChildrenVtxos(ctx, leafVtxo) if childErr != nil { log.WithError(childErr).Error("error while getting children vtxos") continue @@ -766,15 +773,29 @@ func (s *sweeper) createCheckpointSweepTask( log.Debugf("sweeper: checkpoint %s swept by: %s", checkpointTxid, txid) } - // mark all vtxos linked to the unrolled vtxo as swept - childrenVtxos, err := s.repoManager.Vtxos().GetAllChildrenVtxos(ctx, vtxo.Txid) + // Mark all vtxos linked to the unrolled vtxo as swept. + // Use per-outpoint sweeping instead of marker-based sweeping here + // because markers can be shared across independent subtrees when + // offchain txs consolidate inputs from different lineages. Sweeping + // by marker would over-reach and incorrectly mark unrelated VTXOs. + childrenVtxos, err := s.repoManager.Vtxos().GetAllChildrenVtxos(ctx, vtxo) if err != nil { return err } - _, err = s.repoManager.Vtxos().SweepVtxos(ctx, childrenVtxos) - log.Debugf("swept %d vtxos", len(childrenVtxos)) - return err + if len(childrenVtxos) == 0 { + return nil + } + + sweptAt := time.Now().UnixMilli() + if err := s.repoManager.Markers(). 
+ SweepVtxoOutpoints(ctx, childrenVtxos, sweptAt); err != nil { + log.WithError(err).Warn("failed to sweep vtxo outpoints") + return err + } + + log.Debugf("swept %d vtxo outpoints for checkpoint %s", len(childrenVtxos), checkpointTxid) + return nil } } diff --git a/internal/core/application/sweeper_test.go b/internal/core/application/sweeper_test.go new file mode 100644 index 000000000..1b0513a0c --- /dev/null +++ b/internal/core/application/sweeper_test.go @@ -0,0 +1,721 @@ +package application + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/internal/core/ports" + arklib "github.com/arkade-os/arkd/pkg/ark-lib" + "github.com/arkade-os/arkd/pkg/ark-lib/tree" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcutil/psbt" + "github.com/btcsuite/btcd/wire" + "github.com/stretchr/testify/mock" + "github.com/stretchr/testify/require" +) + +// Mock implementations for sweeper tests + +type mockWalletService struct { + mock.Mock +} + +func (m *mockWalletService) BroadcastTransaction( + ctx context.Context, + txs ...string, +) (string, error) { + args := m.Called(ctx, txs) + return args.String(0), args.Error(1) +} + +func (m *mockWalletService) GetTransaction(ctx context.Context, txid string) (string, error) { + args := m.Called(ctx, txid) + return args.String(0), args.Error(1) +} + +// Stub implementations for unused WalletService methods +func (m *mockWalletService) GetReadyUpdate(ctx context.Context) (<-chan bool, error) { + return nil, nil +} +func (m *mockWalletService) GenSeed(ctx context.Context) (string, error) { return "", nil } +func (m *mockWalletService) Create(ctx context.Context, seed, password string) error { + return nil +} +func (m *mockWalletService) Restore(ctx context.Context, seed, password string) error { + return nil +} +func (m *mockWalletService) Unlock(ctx context.Context, password string) error { return nil } +func (m *mockWalletService) 
Lock(ctx context.Context) error { return nil } +func (m *mockWalletService) Status(ctx context.Context) (ports.WalletStatus, error) { + return nil, nil +} +func (m *mockWalletService) GetNetwork(ctx context.Context) (*arklib.Network, error) { + return nil, nil +} +func (m *mockWalletService) GetForfeitPubkey(ctx context.Context) (*btcec.PublicKey, error) { + return nil, nil +} +func (m *mockWalletService) DeriveConnectorAddress(ctx context.Context) (string, error) { + return "", nil +} +func (m *mockWalletService) DeriveAddresses(ctx context.Context, num int) ([]string, error) { + return nil, nil +} + +func (m *mockWalletService) SignTransaction( + ctx context.Context, + tx string, + extractRawTx bool, +) (string, error) { + return "", nil +} + +func (m *mockWalletService) SignTransactionTapscript( + ctx context.Context, + tx string, + inputIndexes []int, +) (string, error) { + return "", nil +} + +func (m *mockWalletService) SelectUtxos( + ctx context.Context, + asset string, + amount uint64, + confirmedOnly bool, +) ([]ports.TxInput, uint64, error) { + return nil, 0, nil +} +func (m *mockWalletService) EstimateFees(ctx context.Context, pset string) (uint64, error) { + return 0, nil +} +func (m *mockWalletService) FeeRate(ctx context.Context) (uint64, error) { return 0, nil } + +func (m *mockWalletService) ListConnectorUtxos( + ctx context.Context, + addr string, +) ([]ports.TxInput, error) { + return nil, nil +} +func (m *mockWalletService) MainAccountBalance(ctx context.Context) (uint64, uint64, error) { + return 0, 0, nil +} +func (m *mockWalletService) ConnectorsAccountBalance(ctx context.Context) (uint64, uint64, error) { + return 0, 0, nil +} +func (m *mockWalletService) LockConnectorUtxos(ctx context.Context, utxos []domain.Outpoint) error { + return nil +} +func (m *mockWalletService) GetDustAmount(ctx context.Context) (uint64, error) { return 0, nil } + +func (m *mockWalletService) GetOutpointStatus( + ctx context.Context, + outpoint domain.Outpoint, +) 
(bool, error) { + return false, nil +} + +func (m *mockWalletService) GetCurrentBlockTime( + ctx context.Context, +) (*ports.BlockTimestamp, error) { + return nil, nil +} + +func (m *mockWalletService) Withdraw( + ctx context.Context, + address string, + amount uint64, + all bool, +) (string, error) { + return "", nil +} +func (m *mockWalletService) LoadSignerKey(ctx context.Context, prvkey string) error { return nil } +func (m *mockWalletService) Close() {} +func (m *mockWalletService) WatchScripts(ctx context.Context, scripts []string) error { + return nil +} +func (m *mockWalletService) UnwatchScripts(ctx context.Context, scripts []string) error { + return nil +} + +func (m *mockWalletService) GetNotificationChannel( + ctx context.Context, +) <-chan map[string][]ports.VtxoWithValue { + return nil +} + +func (m *mockWalletService) IsTransactionConfirmed( + ctx context.Context, + txid string, +) (bool, *ports.BlockTimestamp, error) { + return false, nil, nil +} +func (m *mockWalletService) RescanUtxos(ctx context.Context, outpoints []wire.OutPoint) error { + return nil +} + +type mockVtxoRepository struct { + mock.Mock +} + +func (m *mockVtxoRepository) GetAllChildrenVtxos( + ctx context.Context, + outpoint domain.Outpoint, +) ([]domain.Outpoint, error) { + args := m.Called(ctx, outpoint) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]domain.Outpoint), args.Error(1) +} + +func (m *mockVtxoRepository) GetVtxos( + ctx context.Context, + outpoints []domain.Outpoint, +) ([]domain.Vtxo, error) { + args := m.Called(ctx, outpoints) + if args.Get(0) == nil { + return nil, args.Error(1) + } + return args.Get(0).([]domain.Vtxo), args.Error(1) +} + +// Stub implementations for unused VtxoRepository methods +func (m *mockVtxoRepository) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) error { return nil } + +func (m *mockVtxoRepository) SettleVtxos( + ctx context.Context, + spentVtxos map[domain.Outpoint]string, + commitmentTxid string, +) 
error { + return nil +} + +func (m *mockVtxoRepository) SpendVtxos( + ctx context.Context, + spentVtxos map[domain.Outpoint]string, + arkTxid string, +) error { + return nil +} +func (m *mockVtxoRepository) UnrollVtxos(ctx context.Context, outpoints []domain.Outpoint) error { + return nil +} + +func (m *mockVtxoRepository) GetAllNonUnrolledVtxos( + ctx context.Context, + pubkey string, +) ([]domain.Vtxo, []domain.Vtxo, error) { + return nil, nil, nil +} + +func (m *mockVtxoRepository) GetAllSweepableUnrolledVtxos( + ctx context.Context, +) ([]domain.Vtxo, error) { + return nil, nil +} +func (m *mockVtxoRepository) GetAllVtxos(ctx context.Context) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepository) GetAllVtxosWithPubKeys( + ctx context.Context, + pubkeys []string, + after, before int64, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepository) GetExpiringLiquidity( + ctx context.Context, + after, before int64, +) (uint64, error) { + return 0, nil +} +func (m *mockVtxoRepository) GetRecoverableLiquidity(ctx context.Context) (uint64, error) { + return 0, nil +} + +func (m *mockVtxoRepository) UpdateVtxosExpiration( + ctx context.Context, + outpoints []domain.Outpoint, + expiresAt int64, +) error { + return nil +} + +func (m *mockVtxoRepository) GetLeafVtxosForBatch( + ctx context.Context, + txid string, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepository) GetSweepableVtxosByCommitmentTxid( + ctx context.Context, + commitmentTxid string, +) ([]domain.Outpoint, error) { + return nil, nil +} + +func (m *mockVtxoRepository) GetVtxoPubKeysByCommitmentTxid( + ctx context.Context, + commitmentTxid string, + withMinimumAmount uint64, +) ([]string, error) { + return nil, nil +} + +func (m *mockVtxoRepository) GetPendingSpentVtxosWithPubKeys( + ctx context.Context, + pubkeys []string, + after, before int64, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockVtxoRepository) 
GetPendingSpentVtxosWithOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, +) ([]domain.Vtxo, error) { + return nil, nil +} +func (m *mockVtxoRepository) Close() {} + +type mockMarkerRepository struct { + mock.Mock +} + +func (m *mockMarkerRepository) BulkSweepMarkers( + ctx context.Context, + markerIDs []string, + sweptAt int64, +) error { + args := m.Called(ctx, markerIDs, sweptAt) + return args.Error(0) +} + +// Stub implementations for unused MarkerRepository methods +func (m *mockMarkerRepository) AddMarker(ctx context.Context, marker domain.Marker) error { + return nil +} +func (m *mockMarkerRepository) GetMarker(ctx context.Context, id string) (*domain.Marker, error) { + return nil, nil +} + +func (m *mockMarkerRepository) GetMarkersByDepth( + ctx context.Context, + depth uint32, +) ([]domain.Marker, error) { + return nil, nil +} + +func (m *mockMarkerRepository) GetMarkersByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Marker, error) { + return nil, nil +} + +func (m *mockMarkerRepository) GetMarkersByIds( + ctx context.Context, + ids []string, +) ([]domain.Marker, error) { + return nil, nil +} + +func (m *mockMarkerRepository) SweepMarker( + ctx context.Context, + markerID string, + sweptAt int64, +) error { + return nil +} + +func (m *mockMarkerRepository) SweepMarkerWithDescendants( + ctx context.Context, + markerID string, + sweptAt int64, +) (int64, error) { + return 0, nil +} +func (m *mockMarkerRepository) IsMarkerSwept(ctx context.Context, markerID string) (bool, error) { + return false, nil +} + +func (m *mockMarkerRepository) GetSweptMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.SweptMarker, error) { + return nil, nil +} + +func (m *mockMarkerRepository) UpdateVtxoMarkers( + ctx context.Context, + outpoint domain.Outpoint, + markerIDs []string, +) error { + return nil +} + +func (m *mockMarkerRepository) GetVtxosByMarker( + ctx context.Context, + markerID string, +) ([]domain.Vtxo, 
error) { + return nil, nil +} + +func (m *mockMarkerRepository) SweepVtxosByMarker( + ctx context.Context, + markerID string, +) (int64, error) { + return 0, nil +} + +func (m *mockMarkerRepository) CreateRootMarkersForVtxos( + ctx context.Context, + vtxos []domain.Vtxo, +) error { + return nil +} + +func (m *mockMarkerRepository) GetVtxosByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockMarkerRepository) GetVtxosByArkTxid( + ctx context.Context, + arkTxid string, +) ([]domain.Vtxo, error) { + return nil, nil +} + +func (m *mockMarkerRepository) GetVtxoChainByMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.Vtxo, error) { + return nil, nil +} +func (m *mockMarkerRepository) SweepVtxoOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, + sweptAt int64, +) error { + args := m.Called(ctx, outpoints, sweptAt) + return args.Error(0) +} + +func (m *mockMarkerRepository) Close() {} + +type mockTxBuilder struct { + mock.Mock +} + +func (m *mockTxBuilder) BuildSweepTx(inputs []ports.TxInput) (string, string, error) { + args := m.Called(inputs) + return args.String(0), args.String(1), args.Error(2) +} + +// Stub implementations for unused TxBuilder methods +func (m *mockTxBuilder) BuildCommitmentTx( + signerPubkey *btcec.PublicKey, intents domain.Intents, + boardingInputs []ports.BoardingInput, cosigners [][]string, +) (string, *tree.TxTree, string, *tree.TxTree, error) { + return "", nil, "", nil, nil +} + +func (m *mockTxBuilder) VerifyForfeitTxs( + vtxos []domain.Vtxo, + connectors tree.FlatTxTree, + txs []string, +) (map[domain.Outpoint]ports.ValidForfeitTx, error) { + return nil, nil +} + +func (m *mockTxBuilder) GetSweepableBatchOutputs( + vtxoTree *tree.TxTree, +) (*arklib.RelativeLocktime, *ports.TxInput, error) { + return nil, nil, nil +} +func (m *mockTxBuilder) FinalizeAndExtract(tx string) (string, error) { return "", nil } + +func (m *mockTxBuilder) 
VerifyVtxoTapscriptSigs( + tx string, + mustIncludeSignerSig bool, +) (bool, *psbt.Packet, error) { + return false, nil, nil +} + +func (m *mockTxBuilder) VerifyBoardingTapscriptSigs( + signedTx string, + commitmentTx string, +) (map[uint32]ports.SignedBoardingInput, error) { + return nil, nil +} + +type mockRepoManager struct { + vtxos *mockVtxoRepository + markers *mockMarkerRepository +} + +func (m *mockRepoManager) Events() domain.EventRepository { return nil } +func (m *mockRepoManager) Rounds() domain.RoundRepository { return nil } +func (m *mockRepoManager) Vtxos() domain.VtxoRepository { return m.vtxos } +func (m *mockRepoManager) Markers() domain.MarkerRepository { return m.markers } +func (m *mockRepoManager) ScheduledSession() domain.ScheduledSessionRepo { return nil } +func (m *mockRepoManager) OffchainTxs() domain.OffchainTxRepository { return nil } +func (m *mockRepoManager) Convictions() domain.ConvictionRepository { return nil } +func (m *mockRepoManager) Assets() domain.AssetRepository { return nil } +func (m *mockRepoManager) Fees() domain.FeeRepository { return nil } +func (m *mockRepoManager) RegisterBatchUpdateHandler(func(data domain.Round)) {} +func (m *mockRepoManager) RegisterOffchainTxUpdateHandler(func(domain.OffchainTx)) {} +func (m *mockRepoManager) Close() {} + +type mockScheduler struct{} + +func (m *mockScheduler) Start() {} +func (m *mockScheduler) Stop() {} +func (m *mockScheduler) Unit() ports.TimeUnit { return ports.UnixTime } +func (m *mockScheduler) AfterNow(expiry int64) bool { return false } +func (m *mockScheduler) ScheduleTaskOnce(at int64, task func()) error { return nil } + +// newTestSweeper creates a fresh set of mocks and a sweeper instance for testing. 
+func newTestSweeper() ( + *mockWalletService, + *mockVtxoRepository, + *mockMarkerRepository, + *mockTxBuilder, + *sweeper, +) { + wallet := &mockWalletService{} + vtxoRepo := &mockVtxoRepository{} + markerRepo := &mockMarkerRepository{} + repoManager := &mockRepoManager{vtxos: vtxoRepo, markers: markerRepo} + builder := &mockTxBuilder{} + scheduler := &mockScheduler{} + s := newSweeper(wallet, repoManager, builder, scheduler, "") + return wallet, vtxoRepo, markerRepo, builder, s +} + +// TestCreateCheckpointSweepTask_SweepsVtxoOutpoints verifies that checkpoint +// sweeps use per-outpoint sweeping (SweepVtxoOutpoints) instead of marker-based +// sweeping. This prevents over-reach when markers are shared across independent +// subtrees due to offchain tx consolidation. +func TestCreateCheckpointSweepTask_SweepsVtxoOutpoints(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint123" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo123", VOut: 0} + + childOutpoints := []domain.Outpoint{ + {Txid: "child1", VOut: 0}, + {Txid: "child2", VOut: 0}, + {Txid: "child3", VOut: 0}, + } + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 10000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("sweeptxid123", "sweeptx_hex", nil) + + wallet.On("BroadcastTransaction", mock.Anything, []string{"sweeptx_hex"}). + Return("sweeptxid123", nil) + + vtxoRepo.On("GetAllChildrenVtxos", mock.Anything, vtxoOutpoint). + Return(childOutpoints, nil) + + // SweepVtxoOutpoints should be called with the exact child outpoints + markerRepo.On("SweepVtxoOutpoints", mock.Anything, childOutpoints, mock.AnythingOfType("int64")). 
+ Return(nil) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + + require.NoError(t, err) + wallet.AssertExpectations(t) + vtxoRepo.AssertExpectations(t) + markerRepo.AssertExpectations(t) + builder.AssertExpectations(t) + // BulkSweepMarkers should NOT be called — checkpoint sweeps use per-outpoint + markerRepo.AssertNotCalled(t, "BulkSweepMarkers", mock.Anything, mock.Anything, mock.Anything) +} + +// TestCreateCheckpointSweepTask_SweptAtTimestamp verifies that the sweptAt +// timestamp passed to SweepVtxoOutpoints is accurate. +func TestCreateCheckpointSweepTask_SweptAtTimestamp(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint_timestamp" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo_timestamp", VOut: 0} + + childOutpoints := []domain.Outpoint{{Txid: "child_ts", VOut: 0}} + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 1000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("sweeptxid_ts", "sweeptx_hex", nil) + + wallet.On("BroadcastTransaction", mock.Anything, []string{"sweeptx_hex"}). + Return("sweeptxid_ts", nil) + + vtxoRepo.On("GetAllChildrenVtxos", mock.Anything, vtxoOutpoint). + Return(childOutpoints, nil) + + beforeExec := time.Now().UnixMilli() + var capturedSweptAt int64 + + markerRepo.On("SweepVtxoOutpoints", mock.Anything, childOutpoints, mock.MatchedBy(func(sweptAt int64) bool { + capturedSweptAt = sweptAt + return true + })).Return(nil) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + afterExec := time.Now().UnixMilli() + + require.NoError(t, err) + require.GreaterOrEqual(t, capturedSweptAt, beforeExec) + require.LessOrEqual(t, capturedSweptAt, afterExec) +} + +// TestCreateCheckpointSweepTask_SweepVtxoOutpointsError verifies error propagation. 
+func TestCreateCheckpointSweepTask_SweepVtxoOutpointsError(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint_error" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo_error", VOut: 0} + + childOutpoints := []domain.Outpoint{{Txid: "child_err", VOut: 0}} + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 1000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("sweeptxid_err", "sweeptx_hex", nil) + + wallet.On("BroadcastTransaction", mock.Anything, []string{"sweeptx_hex"}). + Return("sweeptxid_err", nil) + + vtxoRepo.On("GetAllChildrenVtxos", mock.Anything, vtxoOutpoint). + Return(childOutpoints, nil) + + dbError := fmt.Errorf("database connection failed") + markerRepo.On("SweepVtxoOutpoints", mock.Anything, childOutpoints, mock.AnythingOfType("int64")). + Return(dbError) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + + require.Error(t, err) + require.Contains(t, err.Error(), "database connection failed") +} + +// TestCreateCheckpointSweepTask_GetAllChildrenVtxosError verifies that when +// GetAllChildrenVtxos fails, the error is propagated. +func TestCreateCheckpointSweepTask_GetAllChildrenVtxosError(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint_children_err" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo_children_err", VOut: 0} + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 1000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("sweeptxid_children_err", "sweeptx_hex", nil) + + wallet.On("BroadcastTransaction", mock.Anything, []string{"sweeptx_hex"}). + Return("sweeptxid_children_err", nil) + + vtxoRepo.On("GetAllChildrenVtxos", mock.Anything, vtxoOutpoint). 
+ Return(nil, fmt.Errorf("failed to query children vtxos")) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + + require.Error(t, err) + require.Contains(t, err.Error(), "failed to query children") + markerRepo.AssertNotCalled(t, "SweepVtxoOutpoints", mock.Anything, mock.Anything, mock.Anything) +} + +// TestCreateCheckpointSweepTask_BuildSweepTxError verifies that when BuildSweepTx +// fails, no sweep operations are attempted. +func TestCreateCheckpointSweepTask_BuildSweepTxError(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint_build_err" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo_build_err", VOut: 0} + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 1000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("", "", fmt.Errorf("insufficient funds for sweep")) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + + require.Error(t, err) + require.Contains(t, err.Error(), "insufficient funds") + + wallet.AssertNotCalled(t, "BroadcastTransaction", mock.Anything, mock.Anything) + vtxoRepo.AssertNotCalled(t, "GetAllChildrenVtxos", mock.Anything, mock.Anything) + markerRepo.AssertNotCalled(t, "SweepVtxoOutpoints", mock.Anything, mock.Anything, mock.Anything) +} + +// TestCreateCheckpointSweepTask_BroadcastError verifies that when broadcast +// fails, VTXOs are not marked as swept. +func TestCreateCheckpointSweepTask_BroadcastError(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint_broadcast_err" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo_broadcast_err", VOut: 0} + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 1000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("sweeptxid_broadcast_err", "sweeptx_hex", nil) + + wallet.On("BroadcastTransaction", mock.Anything, []string{"sweeptx_hex"}). 
+ Return("", fmt.Errorf("network timeout")) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + + require.Error(t, err) + require.Contains(t, err.Error(), "network timeout") + + vtxoRepo.AssertNotCalled(t, "GetAllChildrenVtxos", mock.Anything, mock.Anything) + markerRepo.AssertNotCalled(t, "SweepVtxoOutpoints", mock.Anything, mock.Anything, mock.Anything) +} + +// TestCreateCheckpointSweepTask_NoChildrenVtxos verifies that an empty +// children list results in no sweep operations. +func TestCreateCheckpointSweepTask_NoChildrenVtxos(t *testing.T) { + wallet, vtxoRepo, markerRepo, builder, s := newTestSweeper() + + checkpointTxid := "checkpoint_no_children" + vtxoOutpoint := domain.Outpoint{Txid: "vtxo_no_children", VOut: 0} + + toSweep := ports.TxInput{Txid: checkpointTxid, Index: 0, Value: 5000} + + builder.On("BuildSweepTx", []ports.TxInput{toSweep}). + Return("sweeptxid_nc", "sweeptx_hex", nil) + + wallet.On("BroadcastTransaction", mock.Anything, []string{"sweeptx_hex"}). + Return("sweeptxid_nc", nil) + + vtxoRepo.On("GetAllChildrenVtxos", mock.Anything, vtxoOutpoint). + Return([]domain.Outpoint{}, nil) + + task := s.createCheckpointSweepTask(toSweep, vtxoOutpoint) + err := task() + + require.NoError(t, err) + wallet.AssertExpectations(t) + vtxoRepo.AssertExpectations(t) + markerRepo.AssertNotCalled(t, "SweepVtxoOutpoints", mock.Anything, mock.Anything, mock.Anything) +} diff --git a/internal/core/application/token_cache.go b/internal/core/application/token_cache.go index c35385d3d..ced1c1dd8 100644 --- a/internal/core/application/token_cache.go +++ b/internal/core/application/token_cache.go @@ -61,6 +61,47 @@ func (c *tokenCache) close() { close(c.stop) } +// touch extends the expiry of an existing cache entry by invalidationDuration +// from now. Auth tokens embed a signed timestamp that expires after authTokenTTL +// (5 min), but paginating a long VTXO chain can span many requests over a longer +// period. 
Each successful GetVtxoChain page calls touch so the cache entry stays +// live; validateChainAuth then accepts expired-timestamp tokens as long as the +// cache entry is still active, proving the session was recently used. +func (c *tokenCache) touch(hash string) { + c.mu.Lock() + defer c.mu.Unlock() + + outpoints, ok := c.outpointsByHash[hash] + if !ok { + return + } + newExpiry := time.Now().Add(c.invalidationDuration) + for op := range outpoints { + outpoints[op] = newExpiry + } +} + +// isActive returns true if the hash has any non-expired cache entry. In +// practice touch/add set every outpoint under a hash to the same expiry, so +// any single entry would answer the question; scanning all entries removes +// reliance on that invariant and on Go's non-deterministic map iteration. +func (c *tokenCache) isActive(hash string) bool { + c.mu.RLock() + defer c.mu.RUnlock() + + outpoints, ok := c.outpointsByHash[hash] + if !ok { + return false + } + now := time.Now() + for _, expiresAt := range outpoints { + if now.Before(expiresAt) { + return true + } + } + return false +} + func (c *tokenCache) add(hash string, outpoints []Outpoint, now time.Time) { c.mu.Lock() defer c.mu.Unlock() diff --git a/internal/core/application/types.go b/internal/core/application/types.go index 561bfcff7..2cd471997 100644 --- a/internal/core/application/types.go +++ b/internal/core/application/types.go @@ -141,9 +141,14 @@ type TeleportAsset struct { } type VtxoChainResp struct { - Chain []ChainTx - Page PageResp - AuthToken string + Chain []ChainTx + Page PageResp + NextPageToken string + AuthToken string +} + +type vtxoChainCursor struct { + Frontier []Outpoint `json:"frontier"` } type VOut int diff --git a/internal/core/application/utils.go b/internal/core/application/utils.go index ba0cfbffb..a905bd372 100644 --- a/internal/core/application/utils.go +++ b/internal/core/application/utils.go @@ -279,14 +279,17 @@ func getNewVtxosFromRound(round domain.Round) []domain.Vtxo { } 
vtxoPubkey := hex.EncodeToString(schnorr.SerializePubKey(vtxoTapKey)) + outpoint := domain.Outpoint{Txid: tx.UnsignedTx.TxID(), VOut: uint32(i)} vtxos = append(vtxos, domain.Vtxo{ - Outpoint: domain.Outpoint{Txid: tx.UnsignedTx.TxID(), VOut: uint32(i)}, + Outpoint: outpoint, PubKey: vtxoPubkey, Amount: uint64(out.Value), CommitmentTxids: []string{round.CommitmentTxid}, RootCommitmentTxid: round.CommitmentTxid, CreatedAt: createdAt, ExpiresAt: expireAt, + Depth: 0, + MarkerIDs: []string{outpoint.String()}, Assets: assets[uint32(i)], }) } diff --git a/internal/core/application/utils_test.go b/internal/core/application/utils_test.go index 45b0b05b4..c9c07dedb 100644 --- a/internal/core/application/utils_test.go +++ b/internal/core/application/utils_test.go @@ -5,12 +5,176 @@ import ( "testing" "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/pkg/ark-lib/tree" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/btcsuite/btcd/btcec/v2/schnorr" "github.com/btcsuite/btcd/btcutil/psbt" "github.com/btcsuite/btcd/chaincfg/chainhash" "github.com/btcsuite/btcd/wire" "github.com/stretchr/testify/require" ) +// makeP2TRLeafTx creates a valid base64-encoded PSBT with P2TR outputs +// for the given schnorr public keys and amounts. 
+func makeP2TRLeafTx(t *testing.T, outputs []struct { + pubkey *btcec.PublicKey + amount int64 +}) string { + t.Helper() + hash, err := chainhash.NewHashFromStr( + "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa", + ) + require.NoError(t, err) + + txOuts := make([]*wire.TxOut, 0, len(outputs)) + for _, out := range outputs { + pkScript := make([]byte, 34) + pkScript[0] = 0x51 // OP_1 + pkScript[1] = 0x20 // 32-byte push + copy(pkScript[2:], schnorr.SerializePubKey(out.pubkey)) + + txOuts = append(txOuts, &wire.TxOut{ + Value: out.amount, + PkScript: pkScript, + }) + } + + ptx, err := psbt.New( + []*wire.OutPoint{{Hash: *hash, Index: 0}}, + txOuts, + 3, + 0, + []uint32{wire.MaxTxInSequenceNum}, + ) + require.NoError(t, err) + + b64, err := ptx.B64Encode() + require.NoError(t, err) + return b64 +} + +// TestGetNewVtxosFromRound_MarkerIDsAndDepth verifies that getNewVtxosFromRound +// correctly assigns Depth=0 and MarkerIDs=[outpoint.String()] to every leaf VTXO +// produced from a round's VTXO tree. Also checks that commitment references, amounts, +// pubkeys, and sequential VOut indices are set correctly for multi-output leaf transactions. 
+func TestGetNewVtxosFromRound_MarkerIDsAndDepth(t *testing.T) { + // Generate two distinct keys for two outputs + privKey1, err := btcec.NewPrivateKey() + require.NoError(t, err) + privKey2, err := btcec.NewPrivateKey() + require.NoError(t, err) + + pub1 := privKey1.PubKey() + pub2 := privKey2.PubKey() + + leafTx := makeP2TRLeafTx(t, []struct { + pubkey *btcec.PublicKey + amount int64 + }{ + {pubkey: pub1, amount: 50000}, + {pubkey: pub2, amount: 30000}, + }) + + round := &domain.Round{ + CommitmentTxid: "test-commitment-txid", + VtxoTreeExpiration: 3600, + EndingTimestamp: 1700000000, + Stage: domain.Stage{Code: int(domain.RoundFinalizationStage), Ended: true}, + VtxoTree: tree.FlatTxTree{ + { + Txid: "leaf-tx-id", + Tx: leafTx, + Children: nil, // leaf node + }, + }, + } + + vtxos := getNewVtxosFromRound(*round) + + require.Len(t, vtxos, 2) + + for i, vtxo := range vtxos { + // All batch VTXOs must have Depth = 0 + require.Equal(t, uint32(0), vtxo.Depth, "vtxo %d should have depth 0", i) + + // MarkerIDs must be exactly []string{outpoint.String()} + expectedMarkerID := vtxo.Outpoint.String() + require.Equal(t, []string{expectedMarkerID}, vtxo.MarkerIDs, + "vtxo %d MarkerIDs should be [outpoint.String()]", i) + + // CommitmentTxids should reference the round's commitment + require.Equal(t, []string{"test-commitment-txid"}, vtxo.CommitmentTxids) + require.Equal(t, "test-commitment-txid", vtxo.RootCommitmentTxid) + + // Amount must match + if i == 0 { + require.Equal(t, uint64(50000), vtxo.Amount) + } else { + require.Equal(t, uint64(30000), vtxo.Amount) + } + + // PubKey must be non-empty + require.NotEmpty(t, vtxo.PubKey) + } + + // VOut should be sequential (0, 1) + require.Equal(t, uint32(0), vtxos[0].VOut) + require.Equal(t, uint32(1), vtxos[1].VOut) + + // Both should have the same txid (from the same PSBT) + require.Equal(t, vtxos[0].Txid, vtxos[1].Txid) +} + +// TestGetNewVtxosFromRound_EmptyVtxoTree verifies that a round with a nil VTXO tree +// returns 
nil, handling the edge case where no leaf transactions exist. +func TestGetNewVtxosFromRound_EmptyVtxoTree(t *testing.T) { + round := &domain.Round{ + CommitmentTxid: "empty-round", + VtxoTree: nil, + } + + vtxos := getNewVtxosFromRound(*round) + require.Nil(t, vtxos) +} + +// TestGetNewVtxosFromRound_SingleOutput verifies that a leaf transaction with a single +// P2TR output produces exactly one VTXO with Depth=0, the correct self-referencing +// MarkerID, the expected amount, and VOut=0. +func TestGetNewVtxosFromRound_SingleOutput(t *testing.T) { + privKey, err := btcec.NewPrivateKey() + require.NoError(t, err) + + leafTx := makeP2TRLeafTx(t, []struct { + pubkey *btcec.PublicKey + amount int64 + }{ + {pubkey: privKey.PubKey(), amount: 100000}, + }) + + round := &domain.Round{ + CommitmentTxid: "single-output-commitment", + VtxoTreeExpiration: 7200, + EndingTimestamp: 1700000000, + Stage: domain.Stage{Code: int(domain.RoundFinalizationStage), Ended: true}, + VtxoTree: tree.FlatTxTree{ + { + Txid: "single-leaf", + Tx: leafTx, + Children: nil, + }, + }, + } + + vtxos := getNewVtxosFromRound(*round) + require.Len(t, vtxos, 1) + + vtxo := vtxos[0] + require.Equal(t, uint32(0), vtxo.Depth) + require.Equal(t, []string{vtxo.Outpoint.String()}, vtxo.MarkerIDs) + require.Equal(t, uint64(100000), vtxo.Amount) + require.Equal(t, uint32(0), vtxo.VOut) +} + const bitcoinBlockWeight = 4_000_000 func TestMaxAssetsPerVtxo(t *testing.T) { diff --git a/internal/core/domain/marker.go b/internal/core/domain/marker.go new file mode 100644 index 000000000..5d70be1ab --- /dev/null +++ b/internal/core/domain/marker.go @@ -0,0 +1,53 @@ +package domain + +import "fmt" + +// MarkerInterval is the depth interval at which markers are created. +// VTXOs at depth 0, 100, 200, etc. create new markers. +const MarkerInterval = 100 + +// Marker represents a DAG traversal checkpoint created at regular depth intervals. 
// MarkerInterval is the depth interval at which markers are created.
// VTXOs at depth 0, 100, 200, etc. create new markers.
const MarkerInterval = 100

// Marker represents a DAG traversal checkpoint created at regular depth intervals.
// Markers enable compressed traversal of the VTXO chain by allowing jumps of
// MarkerInterval depths instead of traversing each VTXO individually.
type Marker struct {
	// ID is the unique identifier for this marker. Root markers created for
	// batch VTXOs use the VTXO outpoint as ID, while markers minted by
	// NewMarker use the "<txid>:marker:<depth>" form.
	ID string
	// Depth is the chain depth at which this marker exists (0, 100, 200, ...).
	Depth uint32
	// ParentMarkerIDs is a list of marker IDs that this marker descends from.
	ParentMarkerIDs []string
}

// isAtMarkerBoundary returns true if the given depth is at a marker boundary,
// i.e. an exact multiple of MarkerInterval (0 included).
func isAtMarkerBoundary(depth uint32) bool {
	return depth%MarkerInterval == 0
}

// NewMarker computes marker information for a new offchain transaction.
//
// If depth is at a marker boundary, it returns a new Marker and the marker IDs
// to assign to the child VTXOs (just the new marker ID). Otherwise, it returns
// nil and the inherited parent marker IDs.
//
// The returned/stored slices never alias parentMarkerIDs: the caller may keep
// mutating its own slice without corrupting the marker or the child IDs.
func NewMarker(txid string, depth uint32, parentMarkerIDs []string) (*Marker, []string) {
	// Defensive, nil-preserving copy. Preserving nil vs empty matters to
	// callers/tests that assert a nil ParentMarkerIDs for root markers.
	var parents []string
	if parentMarkerIDs != nil {
		parents = make([]string, len(parentMarkerIDs))
		copy(parents, parentMarkerIDs)
	}

	if isAtMarkerBoundary(depth) {
		id := fmt.Sprintf("%s:marker:%d", txid, depth)
		marker := &Marker{
			ID:              id,
			Depth:           depth,
			ParentMarkerIDs: parents,
		}
		return marker, []string{id}
	}
	if len(parents) > 0 {
		return nil, parents
	}
	return nil, nil
}

// SweptMarker records when a marker (and all VTXOs it covers) was swept.
// This is an append-only table that enables efficient bulk sweep operations.
type SweptMarker struct {
	// MarkerID is the ID of the marker that was swept.
	MarkerID string
	// SweptAt is the Unix timestamp (milliseconds) when the marker was swept.
	SweptAt int64
}
// MarkerRepository persists markers, swept-marker records, and the VTXO/marker
// associations used to traverse and sweep the VTXO DAG in bulk.
type MarkerRepository interface {
	// AddMarker creates or updates a marker.
	AddMarker(ctx context.Context, marker Marker) error
	// GetMarker retrieves a marker by ID.
	GetMarker(ctx context.Context, id string) (*Marker, error)
	// GetMarkersByDepth retrieves all markers at a specific depth.
	GetMarkersByDepth(ctx context.Context, depth uint32) ([]Marker, error)
	// GetMarkersByDepthRange retrieves all markers within a depth range
	// (bounds inclusive — see implementations).
	GetMarkersByDepthRange(ctx context.Context, minDepth, maxDepth uint32) ([]Marker, error)
	// GetMarkersByIds retrieves markers by their IDs.
	GetMarkersByIds(ctx context.Context, ids []string) ([]Marker, error)

	// SweepMarker marks a marker as swept at the given timestamp.
	SweepMarker(ctx context.Context, markerID string, sweptAt int64) error
	// BulkSweepMarkers marks multiple markers as swept in a single operation.
	BulkSweepMarkers(ctx context.Context, markerIDs []string, sweptAt int64) error
	// SweepMarkerWithDescendants marks a marker and all its descendants as swept.
	// Returns the number of markers swept (including descendants).
	SweepMarkerWithDescendants(ctx context.Context, markerID string, sweptAt int64) (int64, error)
	// IsMarkerSwept checks if a marker has been swept.
	IsMarkerSwept(ctx context.Context, markerID string) (bool, error)
	// GetSweptMarkers retrieves swept marker records for the given marker IDs.
	GetSweptMarkers(ctx context.Context, markerIDs []string) ([]SweptMarker, error)

	// UpdateVtxoMarkers updates the markers array for a VTXO.
	UpdateVtxoMarkers(ctx context.Context, outpoint Outpoint, markerIDs []string) error
	// GetVtxosByMarker retrieves all VTXOs associated with a marker.
	GetVtxosByMarker(ctx context.Context, markerID string) ([]Vtxo, error)
	// SweepVtxosByMarker inserts the marker into swept_marker table.
	// Returns the number of VTXOs that will now be considered swept.
	SweepVtxosByMarker(ctx context.Context, markerID string) (int64, error)

	// CreateRootMarkersForVtxos creates root markers for batch VTXOs and updates their marker references
	// in a single transaction. Each VTXO gets a marker with ID equal to its outpoint string.
	CreateRootMarkersForVtxos(ctx context.Context, vtxos []Vtxo) error

	// SweepVtxoOutpoints marks specific VTXO outpoints as swept in the swept_vtxo
	// table. Used by checkpoint sweeps where marker-based sweeping would over-reach
	// across independent subtrees that share inherited markers.
	SweepVtxoOutpoints(ctx context.Context, outpoints []Outpoint, sweptAt int64) error

	// Chain traversal methods for GetVtxoChain optimization.
	// GetVtxosByDepthRange retrieves VTXOs within a depth range.
	GetVtxosByDepthRange(ctx context.Context, minDepth, maxDepth uint32) ([]Vtxo, error)
	// GetVtxosByArkTxid retrieves VTXOs created by a specific ark tx.
	GetVtxosByArkTxid(ctx context.Context, arkTxid string) ([]Vtxo, error)
	// GetVtxoChainByMarkers retrieves VTXOs that have markers in the given list.
	GetVtxoChainByMarkers(ctx context.Context, markerIDs []string) ([]Vtxo, error)

	// Close releases the repository's underlying storage resources.
	Close()
}
// TestIsAtMarkerBoundary exercises the boundary predicate at, around, and far
// from multiples of MarkerInterval.
func TestIsAtMarkerBoundary(t *testing.T) {
	tests := []struct {
		depth    uint32
		expected bool
	}{
		{0, true}, // First marker boundary
		{1, false},
		{50, false},
		{99, false},
		{100, true}, // Second marker boundary
		{101, false},
		{150, false},
		{199, false},
		{200, true}, // Third marker boundary
		{201, false},
		{300, true},
		{1000, true},
		{1001, false},
		{10000, true},
	}

	for _, tt := range tests {
		result := isAtMarkerBoundary(tt.depth)
		require.Equal(t, tt.expected, result,
			"isAtMarkerBoundary(%d) should be %v", tt.depth, tt.expected)
	}
}

// TestMarkerInterval pins the interval constant so an accidental change is caught.
func TestMarkerInterval(t *testing.T) {
	require.Equal(t, uint32(100), uint32(MarkerInterval))
}

// TestMarkerStruct is a plain field round-trip check on the Marker struct.
func TestMarkerStruct(t *testing.T) {
	marker := Marker{
		ID:              "test-marker-id",
		Depth:           100,
		ParentMarkerIDs: []string{"parent-marker-1", "parent-marker-2"},
	}

	require.Equal(t, "test-marker-id", marker.ID)
	require.Equal(t, uint32(100), marker.Depth)
	require.Len(t, marker.ParentMarkerIDs, 2)
	require.Contains(t, marker.ParentMarkerIDs, "parent-marker-1")
	require.Contains(t, marker.ParentMarkerIDs, "parent-marker-2")
}

// TestSweptMarkerStruct is a plain field round-trip check on SweptMarker.
func TestSweptMarkerStruct(t *testing.T) {
	sweptMarker := SweptMarker{
		MarkerID: "swept-marker-id",
		SweptAt:  1234567890,
	}

	require.Equal(t, "swept-marker-id", sweptMarker.MarkerID)
	require.Equal(t, int64(1234567890), sweptMarker.SweptAt)
}

// TestRootMarkerHasNoParents documents the root-marker invariant: depth 0 is a
// boundary and root markers carry no parent marker IDs.
func TestRootMarkerHasNoParents(t *testing.T) {
	rootMarker := Marker{
		ID:              "root-marker",
		Depth:           0,
		ParentMarkerIDs: nil,
	}

	require.True(t, isAtMarkerBoundary(rootMarker.Depth))
	require.Nil(t, rootMarker.ParentMarkerIDs)
}

// TestNewMarker covers both NewMarker branches: marker creation at boundaries
// (including depth 0) and inheritance/nil passthrough off-boundary.
func TestNewMarker(t *testing.T) {
	t.Run("at boundary creates marker", func(t *testing.T) {
		parentIDs := []string{"parent-A", "parent-B"}
		marker, markerIDs := NewMarker("txid123", 100, parentIDs)

		require.NotNil(t, marker)
		require.Equal(t, "txid123:marker:100", marker.ID)
		require.Equal(t, uint32(100), marker.Depth)
		require.Equal(t, parentIDs, marker.ParentMarkerIDs)
		require.Equal(t, []string{"txid123:marker:100"}, markerIDs)
	})

	t.Run("at depth 0 creates root marker", func(t *testing.T) {
		marker, markerIDs := NewMarker("txid-root", 0, nil)

		require.NotNil(t, marker)
		require.Equal(t, "txid-root:marker:0", marker.ID)
		require.Equal(t, uint32(0), marker.Depth)
		require.Nil(t, marker.ParentMarkerIDs)
		require.Equal(t, []string{"txid-root:marker:0"}, markerIDs)
	})

	t.Run("non-boundary inherits parent markers", func(t *testing.T) {
		parentIDs := []string{"marker-A", "marker-B"}
		marker, markerIDs := NewMarker("txid456", 51, parentIDs)

		require.Nil(t, marker)
		require.Equal(t, parentIDs, markerIDs)
	})

	t.Run("non-boundary no parents returns nil", func(t *testing.T) {
		marker, markerIDs := NewMarker("txid789", 5, nil)

		require.Nil(t, marker)
		require.Nil(t, markerIDs)
	})

	t.Run("at depth 200 with parents", func(t *testing.T) {
		parentIDs := []string{"marker-100"}
		marker, markerIDs := NewMarker("deep-tx", 200, parentIDs)

		require.NotNil(t, marker)
		require.Equal(t, "deep-tx:marker:200", marker.ID)
		require.Equal(t, uint32(200), marker.Depth)
		require.Equal(t, parentIDs, marker.ParentMarkerIDs)
		require.Len(t, markerIDs, 1)
		require.Equal(t, marker.ID, markerIDs[0])
	})
}

// calculateMaxDepth returns the maximum depth from a set of spent VTXOs.
// NOTE(review): test-local helper mirroring production depth logic — keep in
// sync with the service-side implementation.
func calculateMaxDepth(spentVtxos []Vtxo) uint32 {
	var maxDepth uint32
	for _, v := range spentVtxos {
		if v.Depth > maxDepth {
			maxDepth = v.Depth
		}
	}
	return maxDepth
}

// collectParentMarkers collects all unique, non-empty marker IDs from spent VTXOs.
// Order of the result is unspecified (map iteration); callers sort before comparing.
func collectParentMarkers(spentVtxos []Vtxo) []string {
	parentMarkerSet := make(map[string]struct{})
	for _, v := range spentVtxos {
		for _, markerID := range v.MarkerIDs {
			if markerID != "" {
				parentMarkerSet[markerID] = struct{}{}
			}
		}
	}
	result := make([]string, 0, len(parentMarkerSet))
	for id := range parentMarkerSet {
		result = append(result, id)
	}
	return result
}

// TestDepthCalculation verifies child depth = max(parent depths) + 1, with the
// empty-input case pinned to depth 0.
func TestDepthCalculation(t *testing.T) {
	testCases := []struct {
		name          string
		spentVtxos    []Vtxo
		expectedDepth uint32
		description   string
	}{
		{
			name:          "single batch vtxo at depth 0",
			spentVtxos:    []Vtxo{{Depth: 0}},
			expectedDepth: 1,
			description:   "spending a batch vtxo creates vtxo at depth 1",
		},
		{
			name:          "single vtxo at depth 50",
			spentVtxos:    []Vtxo{{Depth: 50}},
			expectedDepth: 51,
			description:   "spending a chained vtxo increments depth",
		},
		{
			name: "multiple vtxos with same depth",
			spentVtxos: []Vtxo{
				{Depth: 10},
				{Depth: 10},
				{Depth: 10},
			},
			expectedDepth: 11,
			description:   "combining vtxos at same depth increments once",
		},
		{
			name: "multiple vtxos with different depths",
			spentVtxos: []Vtxo{
				{Depth: 5},
				{Depth: 25},
				{Depth: 15},
			},
			expectedDepth: 26,
			description:   "uses max depth from inputs",
		},
		{
			name: "vtxos spanning marker boundary",
			spentVtxos: []Vtxo{
				{Depth: 95},
				{Depth: 105},
			},
			expectedDepth: 106,
			description:   "handles depths across marker boundaries",
		},
		{
			name: "deep chain near marker boundary",
			spentVtxos: []Vtxo{
				{Depth: 99},
			},
			expectedDepth: 100,
			description:   "result at marker boundary (100)",
		},
		{
			name: "very deep chain",
			spentVtxos: []Vtxo{
				{Depth: 500},
			},
			expectedDepth: 501,
			description:   "handles deep chains beyond multiple marker intervals",
		},
		{
			name:          "no spent vtxos (empty)",
			spentVtxos:    []Vtxo{},
			expectedDepth: 0,
			description:   "empty input results in depth 0 (no spent vtxos means maxDepth stays 0, newDepth = 0)",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			maxDepth := calculateMaxDepth(tc.spentVtxos)
			var newDepth uint32
			if len(tc.spentVtxos) > 0 {
				newDepth = maxDepth + 1
			}
			require.Equal(t, tc.expectedDepth, newDepth, tc.description)
		})
	}
}

// TestDepthAtMarkerBoundary re-checks the boundary predicate in table form.
func TestDepthAtMarkerBoundary(t *testing.T) {
	testCases := []struct {
		depth        uint32
		isAtBoundary bool
		description  string
	}{
		{0, true, "depth 0 is at marker boundary"},
		{1, false, "depth 1 is not at boundary"},
		{50, false, "depth 50 is not at boundary"},
		{99, false, "depth 99 is not at boundary"},
		{100, true, "depth 100 is at marker boundary"},
		{101, false, "depth 101 is not at boundary"},
		{200, true, "depth 200 is at marker boundary"},
		{500, true, "depth 500 is at marker boundary"},
		{1000, true, "depth 1000 is at marker boundary"},
	}

	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			require.Equal(t, tc.isAtBoundary, isAtMarkerBoundary(tc.depth))
		})
	}
}

// TestDepthIncrementCreatesMarkerAtBoundary verifies that a marker is minted
// exactly when the incremented depth lands on a boundary.
func TestDepthIncrementCreatesMarkerAtBoundary(t *testing.T) {
	testCases := []struct {
		parentDepth        uint32
		newDepth           uint32
		shouldCreateMarker bool
	}{
		{99, 100, true},   // crossing into boundary
		{100, 101, false}, // leaving boundary
		{199, 200, true},  // crossing into next boundary
		{0, 1, false},     // moving away from initial boundary
		{98, 99, false},   // approaching but not at boundary
	}

	for _, tc := range testCases {
		t.Run("", func(t *testing.T) {
			spentVtxos := []Vtxo{{Depth: tc.parentDepth}}
			maxDepth := calculateMaxDepth(spentVtxos)
			newDepth := maxDepth + 1

			require.Equal(t, tc.newDepth, newDepth)
			marker, _ := NewMarker("test-txid", newDepth, nil)
			require.Equal(t, tc.shouldCreateMarker, marker != nil)
		})
	}
}

// TestParentMarkerCollectionFromMultipleParents verifies the deduplicated
// union of marker IDs across several spent parents.
func TestParentMarkerCollectionFromMultipleParents(t *testing.T) {
	testCases := []struct {
		name            string
		spentVtxos      []Vtxo
		expectedMarkers []string
	}{
		{
			name: "single parent with one marker",
			spentVtxos: []Vtxo{
				{MarkerIDs: []string{"marker-A"}},
			},
			expectedMarkers: []string{"marker-A"},
		},
		{
			name: "two parents with distinct markers",
			spentVtxos: []Vtxo{
				{MarkerIDs: []string{"marker-A"}},
				{MarkerIDs: []string{"marker-B"}},
			},
			expectedMarkers: []string{"marker-A", "marker-B"},
		},
		{
			name: "three parents with overlapping markers",
			spentVtxos: []Vtxo{
				{MarkerIDs: []string{"marker-A", "marker-B"}},
				{MarkerIDs: []string{"marker-B", "marker-C"}},
				{MarkerIDs: []string{"marker-A", "marker-C"}},
			},
			expectedMarkers: []string{"marker-A", "marker-B", "marker-C"},
		},
		{
			name: "all parents share the same marker",
			spentVtxos: []Vtxo{
				{MarkerIDs: []string{"root-marker"}},
				{MarkerIDs: []string{"root-marker"}},
				{MarkerIDs: []string{"root-marker"}},
			},
			expectedMarkers: []string{"root-marker"},
		},
		{
			name:            "no parents",
			spentVtxos:      []Vtxo{},
			expectedMarkers: []string{},
		},
		{
			name: "parent with no markers",
			spentVtxos: []Vtxo{
				{MarkerIDs: []string{}},
			},
			expectedMarkers: []string{},
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			result := collectParentMarkers(tc.spentVtxos)
			// collectParentMarkers has unspecified order — sort both sides.
			sort.Strings(result)
			sort.Strings(tc.expectedMarkers)
			require.Equal(t, tc.expectedMarkers, result)
		})
	}
}

// TestParentMarkerCollectionSkipsEmptyMarkerIDs verifies empty-string marker
// IDs are dropped from the union.
func TestParentMarkerCollectionSkipsEmptyMarkerIDs(t *testing.T) {
	spentVtxos := []Vtxo{
		{MarkerIDs: []string{"marker-A", "", "marker-B"}},
		{MarkerIDs: []string{"", ""}},
		{MarkerIDs: []string{"marker-C", ""}},
	}

	result := collectParentMarkers(spentVtxos)
	sort.Strings(result)
	require.Equal(t, []string{"marker-A", "marker-B", "marker-C"}, result)
}

// TestMarkerInheritanceAtNonBoundary: off-boundary children inherit the union
// of their parents' markers and no new marker is created.
func TestMarkerInheritanceAtNonBoundary(t *testing.T) {
	testCases := []struct {
		name             string
		parentDepths     []uint32
		parentMarkerSets [][]string
		expectedDepth    uint32
		expectedMarkers  []string
		description      string
	}{
		{
			name:             "single parent at depth 0, child at depth 1",
			parentDepths:     []uint32{0},
			parentMarkerSets: [][]string{{"root-marker-1"}},
			expectedDepth:    1,
			expectedMarkers:  []string{"root-marker-1"},
			description:      "child inherits single parent marker",
		},
		{
			name:             "single parent at depth 50, child at depth 51",
			parentDepths:     []uint32{50},
			parentMarkerSets: [][]string{{"marker-A", "marker-B"}},
			expectedDepth:    51,
			expectedMarkers:  []string{"marker-A", "marker-B"},
			description:      "child inherits multiple parent markers",
		},
		{
			name:             "two parents at different depths, child not at boundary",
			parentDepths:     []uint32{30, 40},
			parentMarkerSets: [][]string{{"marker-X"}, {"marker-Y"}},
			expectedDepth:    41,
			expectedMarkers:  []string{"marker-X", "marker-Y"},
			description:      "child inherits union of all parent markers",
		},
		{
			name:             "three parents with overlapping markers",
			parentDepths:     []uint32{10, 20, 15},
			parentMarkerSets: [][]string{{"m1", "m2"}, {"m2", "m3"}, {"m1"}},
			expectedDepth:    21,
			expectedMarkers:  []string{"m1", "m2", "m3"},
			description:      "child inherits deduplicated union",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			spentVtxos := make([]Vtxo, len(tc.parentDepths))
			for i, depth := range tc.parentDepths {
				spentVtxos[i] = Vtxo{
					Depth:     depth,
					MarkerIDs: tc.parentMarkerSets[i],
				}
			}

			maxDepth := calculateMaxDepth(spentVtxos)
			newDepth := maxDepth + 1
			require.Equal(t, tc.expectedDepth, newDepth)

			require.False(t, isAtMarkerBoundary(newDepth),
				"depth %d should not be at marker boundary for this test", newDepth)

			parentMarkers := collectParentMarkers(spentVtxos)
			marker, markerIDs := NewMarker("some-txid", newDepth, parentMarkers)

			require.Nil(t, marker, tc.description)
			sort.Strings(markerIDs)
			sort.Strings(tc.expectedMarkers)
			require.Equal(t, tc.expectedMarkers, markerIDs, tc.description)
		})
	}
}

// TestMarkerCreationAtBoundary: boundary children get one freshly minted
// marker whose parents are the collected parent markers.
func TestMarkerCreationAtBoundary(t *testing.T) {
	testCases := []struct {
		name             string
		parentDepths     []uint32
		parentMarkerSets [][]string
		expectedDepth    uint32
		description      string
	}{
		{
			name:             "parent at depth 99, child at depth 100",
			parentDepths:     []uint32{99},
			parentMarkerSets: [][]string{{"root-marker"}},
			expectedDepth:    100,
			description:      "first non-root boundary",
		},
		{
			name:             "parent at depth 199, child at depth 200",
			parentDepths:     []uint32{199},
			parentMarkerSets: [][]string{{"marker-100", "root-marker"}},
			expectedDepth:    200,
			description:      "second boundary with two parent markers",
		},
		{
			name:             "multiple parents converging at boundary",
			parentDepths:     []uint32{95, 99},
			parentMarkerSets: [][]string{{"marker-A"}, {"marker-B"}},
			expectedDepth:    100,
			description:      "boundary with multiple parent VTXOs",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			spentVtxos := make([]Vtxo, len(tc.parentDepths))
			for i, depth := range tc.parentDepths {
				spentVtxos[i] = Vtxo{
					Depth:     depth,
					MarkerIDs: tc.parentMarkerSets[i],
				}
			}

			maxDepth := calculateMaxDepth(spentVtxos)
			newDepth := maxDepth + 1
			require.Equal(t, tc.expectedDepth, newDepth)

			require.True(t, isAtMarkerBoundary(newDepth),
				"depth %d should be at marker boundary for this test", newDepth)

			parentMarkers := collectParentMarkers(spentVtxos)
			createdMarker, markerIDs := NewMarker("test-txid", newDepth, parentMarkers)

			require.NotNil(t, createdMarker, tc.description)
			require.Equal(t, newDepth, createdMarker.Depth)
			sort.Strings(createdMarker.ParentMarkerIDs)
			sort.Strings(parentMarkers)
			require.Equal(t, parentMarkers, createdMarker.ParentMarkerIDs)
			require.Len(t, markerIDs, 1)
			require.Equal(t, createdMarker.ID, markerIDs[0])
		})
	}
}

// TestAllNewVtxosGetSameDepth: every output of one spend gets the same depth
// and the same marker set, regardless of output count.
func TestAllNewVtxosGetSameDepth(t *testing.T) {
	testCases := []struct {
		name              string
		parentDepths      []uint32
		parentMarkerSets  [][]string
		numOutputVtxos    int
		expectedDepth     uint32
		expectedMarkerLen int
		description       string
	}{
		{
			name:              "3 outputs from single parent at depth 0",
			parentDepths:      []uint32{0},
			parentMarkerSets:  [][]string{{"root-marker-1"}},
			numOutputVtxos:    3,
			expectedDepth:     1,
			expectedMarkerLen: 1,
			description:       "all 3 outputs get depth 1 and inherit root marker",
		},
		{
			name:              "5 outputs from two parents at different depths",
			parentDepths:      []uint32{30, 50},
			parentMarkerSets:  [][]string{{"marker-A"}, {"marker-B", "marker-C"}},
			numOutputVtxos:    5,
			expectedDepth:     51,
			expectedMarkerLen: 3,
			description:       "all 5 outputs get depth 51 (max+1) and inherit union of markers",
		},
		{
			name:              "2 outputs at marker boundary",
			parentDepths:      []uint32{99},
			parentMarkerSets:  [][]string{{"root-marker"}},
			numOutputVtxos:    2,
			expectedDepth:     100,
			expectedMarkerLen: 1,
			description:       "both outputs get depth 100 and the same new marker",
		},
	}

	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			spentVtxos := make([]Vtxo, len(tc.parentDepths))
			for i, depth := range tc.parentDepths {
				spentVtxos[i] = Vtxo{
					Depth:     depth,
					MarkerIDs: tc.parentMarkerSets[i],
				}
			}

			maxDepth := calculateMaxDepth(spentVtxos)
			newDepth := maxDepth + 1
			require.Equal(t, tc.expectedDepth, newDepth)

			parentMarkers := collectParentMarkers(spentVtxos)
			_, markerIDs := NewMarker("tx-with-multiple-outputs", newDepth, parentMarkers)

			// Simulate creating multiple output VTXOs — each gets the same depth and markers
			outputs := make([]Vtxo, tc.numOutputVtxos)
			for i := 0; i < tc.numOutputVtxos; i++ {
				outputs[i] = Vtxo{
					Outpoint:  Outpoint{Txid: "tx-with-multiple-outputs", VOut: uint32(i)},
					Depth:     newDepth,
					MarkerIDs: markerIDs,
				}
			}

			for i, v := range outputs {
				require.Equal(t, tc.expectedDepth, v.Depth,
					"output %d has wrong depth", i)
			}

			for i := 1; i < len(outputs); i++ {
				sort.Strings(outputs[0].MarkerIDs)
				sort.Strings(outputs[i].MarkerIDs)
				require.Equal(t, outputs[0].MarkerIDs, outputs[i].MarkerIDs,
					"output %d has different markers than output 0", i)
			}

			require.Len(t, outputs[0].MarkerIDs, tc.expectedMarkerLen, tc.description)
		})
	}
}

// TestDepth20k_MarkerBoundaryAndInheritance stress-checks the marker logic
// around the ~20k-depth target: boundary crossing, inheritance, and merging
// large marker sets from deep parents.
func TestDepth20k_MarkerBoundaryAndInheritance(t *testing.T) {
	t.Run("depth 19999 inherits markers, depth 20000 creates new marker", func(t *testing.T) {
		parent := Vtxo{Depth: 19999, MarkerIDs: []string{"marker-19900"}}
		parentMarkers := collectParentMarkers([]Vtxo{parent})

		newDepth := calculateMaxDepth([]Vtxo{parent}) + 1
		require.Equal(t, uint32(20000), newDepth)
		require.True(t, isAtMarkerBoundary(newDepth))

		createdMarker, markerIDs := NewMarker("tx-at-20k", newDepth, parentMarkers)
		require.NotNil(t, createdMarker, "marker should be created at depth 20000")
		require.Equal(t, uint32(20000), createdMarker.Depth)
		require.Equal(t, []string{"marker-19900"}, createdMarker.ParentMarkerIDs)
		require.Len(t, markerIDs, 1)
		require.Equal(t, createdMarker.ID, markerIDs[0])
	})

	t.Run("depth 20001 inherits markers from boundary parent", func(t *testing.T) {
		parent := Vtxo{Depth: 20000, MarkerIDs: []string{"marker-20000"}}
		parentMarkers := collectParentMarkers([]Vtxo{parent})

		newDepth := calculateMaxDepth([]Vtxo{parent}) + 1
		require.Equal(t, uint32(20001), newDepth)
		require.False(t, isAtMarkerBoundary(newDepth))

		createdMarker, markerIDs := NewMarker("tx-at-20001", newDepth, parentMarkers)
		require.Nil(t, createdMarker, "no marker at non-boundary depth")
		require.Equal(t, []string{"marker-20000"}, markerIDs)
	})

	t.Run("VTXO with 200 inherited markers from deep chain", func(t *testing.T) {
		markers := make([]string, 200)
		for i := range markers {
			markers[i] = fmt.Sprintf("marker-%d", i*100)
		}

		parent := Vtxo{Depth: 19950, MarkerIDs: markers}
		collected := collectParentMarkers([]Vtxo{parent})
		sort.Strings(collected)
		sort.Strings(markers)
		require.Equal(t, markers, collected, "all 200 markers should be collected")
	})

	t.Run("multiple deep parents merge 200+ markers correctly", func(t *testing.T) {
		markersA := make([]string, 100)
		markersB := make([]string, 150)
		for i := range markersA {
			markersA[i] = fmt.Sprintf("marker-%d", i*100)
		}
		for i := range markersB {
			markersB[i] = fmt.Sprintf("marker-%d", i*100)
		}

		parents := []Vtxo{
			{Depth: 10000, MarkerIDs: markersA},
			{Depth: 15000, MarkerIDs: markersB},
		}
		collected := collectParentMarkers(parents)

		// markersA is a prefix of markersB, so the union has 150 entries.
		require.Len(t, collected, 150)

		newDepth := calculateMaxDepth(parents) + 1
		require.Equal(t, uint32(15001), newDepth)
		require.False(t, isAtMarkerBoundary(newDepth))

		createdMarker, markerIDs := NewMarker("merge-tx", newDepth, collected)
		require.Nil(t, createdMarker)
		require.Len(t, markerIDs, 150, "child inherits all 150 unique markers")
	})

	t.Run("depth beyond 20k target remains valid", func(t *testing.T) {
		parent := Vtxo{Depth: 20000, MarkerIDs: []string{"marker-20000"}}
		newDepth := calculateMaxDepth([]Vtxo{parent}) + 1
		require.Equal(t, uint32(20001), newDepth)

		require.True(t, isAtMarkerBoundary(20100))
		require.True(t, isAtMarkerBoundary(20200))
		require.False(t, isAtMarkerBoundary(20001))
		require.False(t, isAtMarkerBoundary(20099))
	})
}
150) + for i := range markersA { + markersA[i] = fmt.Sprintf("marker-%d", i*100) + } + for i := range markersB { + markersB[i] = fmt.Sprintf("marker-%d", i*100) + } + + parents := []Vtxo{ + {Depth: 10000, MarkerIDs: markersA}, + {Depth: 15000, MarkerIDs: markersB}, + } + collected := collectParentMarkers(parents) + + require.Len(t, collected, 150) + + newDepth := calculateMaxDepth(parents) + 1 + require.Equal(t, uint32(15001), newDepth) + require.False(t, isAtMarkerBoundary(newDepth)) + + createdMarker, markerIDs := NewMarker("merge-tx", newDepth, collected) + require.Nil(t, createdMarker) + require.Len(t, markerIDs, 150, "child inherits all 150 unique markers") + }) + + t.Run("depth beyond 20k target remains valid", func(t *testing.T) { + parent := Vtxo{Depth: 20000, MarkerIDs: []string{"marker-20000"}} + newDepth := calculateMaxDepth([]Vtxo{parent}) + 1 + require.Equal(t, uint32(20001), newDepth) + + require.True(t, isAtMarkerBoundary(20100)) + require.True(t, isAtMarkerBoundary(20200)) + require.False(t, isAtMarkerBoundary(20001)) + require.False(t, isAtMarkerBoundary(20099)) + }) +} diff --git a/internal/core/domain/offchain_tx.go b/internal/core/domain/offchain_tx.go index 37da1b13d..79e76c4c7 100644 --- a/internal/core/domain/offchain_tx.go +++ b/internal/core/domain/offchain_tx.go @@ -42,6 +42,8 @@ type OffchainTx struct { CommitmentTxids map[string]string RootCommitmentTxId string ExpiryTimestamp int64 + Depth uint32 + ParentMarkerIDs []string FailReason string Version uint changes []Event @@ -97,6 +99,7 @@ func (s *OffchainTx) Request( func (s *OffchainTx) Accept( finalArkTx string, signedCheckpointTxs map[string]string, commitmentTxsByCheckpointTxid map[string]string, rootCommitmentTx string, expiryTimestamp int64, + depth uint32, parentMarkerIDs []string, ) (Event, error) { if finalArkTx == "" { return nil, fmt.Errorf("missing final ark tx") @@ -132,6 +135,8 @@ func (s *OffchainTx) Accept( CommitmentTxids: commitmentTxsByCheckpointTxid, 
RootCommitmentTxid: rootCommitmentTx, ExpiryTimestamp: expiryTimestamp, + Depth: depth, + ParentMarkerIDs: parentMarkerIDs, } s.raise(event) return event, nil @@ -245,6 +250,8 @@ func (s *OffchainTx) on(event Event, replayed bool) { s.CommitmentTxids = e.CommitmentTxids s.RootCommitmentTxId = e.RootCommitmentTxid s.ExpiryTimestamp = e.ExpiryTimestamp + s.Depth = e.Depth + s.ParentMarkerIDs = e.ParentMarkerIDs case OffchainTxFinalized: if s.Stage.Code != int(OffchainTxAcceptedStage) { return diff --git a/internal/core/domain/offchain_tx_event.go b/internal/core/domain/offchain_tx_event.go index b49dea04c..b057288c4 100644 --- a/internal/core/domain/offchain_tx_event.go +++ b/internal/core/domain/offchain_tx_event.go @@ -24,6 +24,8 @@ type OffchainTxAccepted struct { FinalArkTx string SignedCheckpointTxs map[string]string ExpiryTimestamp int64 + Depth uint32 + ParentMarkerIDs []string } type OffchainTxFinalized struct { diff --git a/internal/core/domain/offchain_tx_repo.go b/internal/core/domain/offchain_tx_repo.go index 87615d7a1..094bad62c 100644 --- a/internal/core/domain/offchain_tx_repo.go +++ b/internal/core/domain/offchain_tx_repo.go @@ -5,5 +5,6 @@ import "context" type OffchainTxRepository interface { AddOrUpdateOffchainTx(ctx context.Context, offchainTx *OffchainTx) error GetOffchainTx(ctx context.Context, txid string) (*OffchainTx, error) + GetOffchainTxsByTxids(ctx context.Context, txids []string) ([]*OffchainTx, error) Close() } diff --git a/internal/core/domain/offchain_tx_test.go b/internal/core/domain/offchain_tx_test.go index 86736a2f3..e7c129692 100644 --- a/internal/core/domain/offchain_tx_test.go +++ b/internal/core/domain/offchain_tx_test.go @@ -144,6 +144,7 @@ func testAcceptOffchainTx(t *testing.T) { commitmentTxsByCheckpointTxid, rootCommitmentTxid, expiryTimestamp, + 1, []string{"parent-marker"}, ) require.NoError(t, err) require.NotNil(t, event) @@ -156,6 +157,8 @@ func testAcceptOffchainTx(t *testing.T) { require.Equal(t, 
signedCheckpointTxs, offchainTx.CheckpointTxs) require.Equal(t, commitmentTxsByCheckpointTxid, offchainTx.CommitmentTxids) require.Equal(t, rootCommitmentTxid, offchainTx.RootCommitmentTxId) + require.Equal(t, uint32(1), offchainTx.Depth) + require.Equal(t, []string{"parent-marker"}, offchainTx.ParentMarkerIDs) events := offchainTx.Events() require.Len(t, events, 2) @@ -251,6 +254,7 @@ func testAcceptOffchainTx(t *testing.T) { event, err := f.offchainTx.Accept( f.finalArkTx, f.signedCheckpointTxs, f.commitmentTxids, rootCommitmentTxid, f.expiryTimestamp, + 0, nil, ) require.EqualError(t, err, f.expectedErr) require.Nil(t, event) @@ -270,6 +274,7 @@ func testFinalizeOffchainTx(t *testing.T) { event, err = offchainTx.Accept( finalArkTx, signedCheckpointTxs, commitmentTxsByCheckpointTxid, rootCommitmentTxid, expiryTimestamp, + 0, nil, ) require.NoError(t, err) require.NotNil(t, event) @@ -349,6 +354,7 @@ func testFailOffchainTx(t *testing.T) { event, err = offchainTx.Accept( finalArkTx, signedCheckpointTxs, commitmentTxsByCheckpointTxid, rootCommitmentTxid, expiryTimestamp, + 0, nil, ) require.NoError(t, err) require.NotNil(t, event) diff --git a/internal/core/domain/vtxo.go b/internal/core/domain/vtxo.go index 47aaf74db..d03013031 100644 --- a/internal/core/domain/vtxo.go +++ b/internal/core/domain/vtxo.go @@ -62,6 +62,8 @@ type Vtxo struct { Preconfirmed bool ExpiresAt int64 CreatedAt int64 + Depth uint32 // chain depth: 0 for vtxos from batch, increments on each chain + MarkerIDs []string // marker IDs for DAG traversal optimization (supports multiple parent markers) Assets []AssetDenomination } diff --git a/internal/core/domain/vtxo_repo.go b/internal/core/domain/vtxo_repo.go index 793ff2d74..e25ba627e 100644 --- a/internal/core/domain/vtxo_repo.go +++ b/internal/core/domain/vtxo_repo.go @@ -7,7 +7,6 @@ type VtxoRepository interface { SettleVtxos(ctx context.Context, spentVtxos map[Outpoint]string, commitmentTxid string) error SpendVtxos(ctx context.Context, 
spentVtxos map[Outpoint]string, arkTxid string) error UnrollVtxos(ctx context.Context, outpoints []Outpoint) error - SweepVtxos(ctx context.Context, outpoints []Outpoint) (int, error) GetVtxos(ctx context.Context, outpoints []Outpoint) ([]Vtxo, error) GetAllNonUnrolledVtxos(ctx context.Context, pubkey string) ([]Vtxo, []Vtxo, error) GetAllSweepableUnrolledVtxos(ctx context.Context) ([]Vtxo, error) @@ -24,7 +23,7 @@ type VtxoRepository interface { GetSweepableVtxosByCommitmentTxid( ctx context.Context, commitmentTxid string, ) ([]Outpoint, error) - GetAllChildrenVtxos(ctx context.Context, txid string) ([]Outpoint, error) + GetAllChildrenVtxos(ctx context.Context, outpoint Outpoint) ([]Outpoint, error) GetVtxoPubKeysByCommitmentTxid( ctx context.Context, commitmentTxid string, withMinimumAmount uint64, ) ( diff --git a/internal/core/ports/repo_manager.go b/internal/core/ports/repo_manager.go index 653f4c373..e5c324702 100644 --- a/internal/core/ports/repo_manager.go +++ b/internal/core/ports/repo_manager.go @@ -6,6 +6,7 @@ type RepoManager interface { Events() domain.EventRepository Rounds() domain.RoundRepository Vtxos() domain.VtxoRepository + Markers() domain.MarkerRepository ScheduledSession() domain.ScheduledSessionRepo OffchainTxs() domain.OffchainTxRepository Convictions() domain.ConvictionRepository diff --git a/internal/infrastructure/db/badger/ark_repo.go b/internal/infrastructure/db/badger/ark_repo.go index baa1928d2..03223ad79 100644 --- a/internal/infrastructure/db/badger/ark_repo.go +++ b/internal/infrastructure/db/badger/ark_repo.go @@ -240,6 +240,28 @@ func (r *arkRepository) GetOffchainTx( return r.getOffchainTx(ctx, txid) } +func (r *arkRepository) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + if len(txids) == 0 { + return []*domain.OffchainTx{}, nil + } + + txs := make([]*domain.OffchainTx, 0, len(txids)) + for _, txid := range txids { + tx, err := r.getOffchainTx(ctx, txid) + if err != nil { + 
// GetOffchainTxsByTxids returns the offchain txs matching the given txids.
// Missing txids are silently skipped (not an error), so the result may be
// shorter than the input; an empty input yields an empty, non-nil slice.
func (r *arkRepository) GetOffchainTxsByTxids(
	ctx context.Context, txids []string,
) ([]*domain.OffchainTx, error) {
	if len(txids) == 0 {
		return []*domain.OffchainTx{}, nil
	}

	txs := make([]*domain.OffchainTx, 0, len(txids))
	for _, txid := range txids {
		tx, err := r.getOffchainTx(ctx, txid)
		if err != nil {
			// getOffchainTx wraps badgerhold.ErrNotFound; a not-found txid is
			// tolerated in this bulk getter, any other error aborts.
			if errors.Is(err, badgerhold.ErrNotFound) {
				continue
			}
			return nil, err
		}
		txs = append(txs, tx)
	}

	return txs, nil
}

// Directory names for the badger stores backing the marker repository.
const (
	markerStoreDir      = "markers"
	sweptMarkerStoreDir = "swept_markers"
)

// markerRepository is the badgerhold-backed implementation of
// domain.MarkerRepository. It keeps markers and swept-marker records in
// dedicated stores and reads VTXOs from a (possibly shared) vtxo store.
type markerRepository struct {
	markerStore      *badgerhold.Store
	sweptMarkerStore *badgerhold.Store
	vtxoStore        *badgerhold.Store
	ownsVtxoStore    bool // whether this repo owns the vtxo store (for Close())
}

// markerDTO is the storage representation of domain.Marker.
type markerDTO struct {
	ID              string
	Depth           uint32
	ParentMarkerIDs []string
}

// sweptMarkerDTO is the storage representation of domain.SweptMarker.
type sweptMarkerDTO struct {
	MarkerID string
	SweptAt  int64
}
+// Config can be: +// - [baseDir string, logger badger.Logger] - creates its own vtxo store +// - [baseDir string, logger badger.Logger, vtxoStore *badgerhold.Store] - uses shared vtxo store +func NewMarkerRepository(config ...interface{}) (domain.MarkerRepository, error) { + if len(config) < 2 { + return nil, fmt.Errorf("invalid config: need at least baseDir and logger") + } + baseDir, ok := config[0].(string) + if !ok { + return nil, fmt.Errorf("invalid base directory") + } + var logger badger.Logger + if config[1] != nil { + logger, ok = config[1].(badger.Logger) + if !ok { + return nil, fmt.Errorf("invalid logger") + } + } + + var markerDir, sweptMarkerDir string + if len(baseDir) > 0 { + markerDir = filepath.Join(baseDir, markerStoreDir) + sweptMarkerDir = filepath.Join(baseDir, sweptMarkerStoreDir) + } + + markerStore, err := createDB(markerDir, logger) + if err != nil { + return nil, fmt.Errorf("failed to open marker store: %s", err) + } + + sweptMarkerStore, err := createDB(sweptMarkerDir, logger) + if err != nil { + _ = markerStore.Close() + return nil, fmt.Errorf("failed to open swept marker store: %s", err) + } + + // Check if a shared vtxo store was provided + var vtxoStore *badgerhold.Store + ownsVtxoStore := false + if len(config) >= 3 && config[2] != nil { + vtxoStore, ok = config[2].(*badgerhold.Store) + if !ok { + _ = markerStore.Close() + _ = sweptMarkerStore.Close() + return nil, fmt.Errorf("invalid vtxo store") + } + } else { + // Create our own vtxo store + var vtxoDir string + if len(baseDir) > 0 { + vtxoDir = filepath.Join(baseDir, vtxoStoreDir) + } + vtxoStore, err = createDB(vtxoDir, logger) + if err != nil { + _ = markerStore.Close() + _ = sweptMarkerStore.Close() + return nil, fmt.Errorf("failed to open vtxo store for marker repo: %s", err) + } + ownsVtxoStore = true + } + + return &markerRepository{ + markerStore: markerStore, + sweptMarkerStore: sweptMarkerStore, + vtxoStore: vtxoStore, + ownsVtxoStore: ownsVtxoStore, + }, nil +} + 
+func (r *markerRepository) Close() { + _ = r.markerStore.Close() + _ = r.sweptMarkerStore.Close() + if r.ownsVtxoStore { + _ = r.vtxoStore.Close() + } +} + +func (r *markerRepository) AddMarker(ctx context.Context, marker domain.Marker) error { + dto := markerDTO{ + ID: marker.ID, + Depth: marker.Depth, + ParentMarkerIDs: marker.ParentMarkerIDs, + } + + err := r.markerStore.Upsert(marker.ID, dto) + if err != nil { + if errors.Is(err, badger.ErrConflict) { + for attempts := 1; attempts <= maxRetries; attempts++ { + time.Sleep(100 * time.Millisecond) + err = r.markerStore.Upsert(marker.ID, dto) + if err == nil { + break + } + } + } + } + return err +} + +func (r *markerRepository) GetMarker(ctx context.Context, id string) (*domain.Marker, error) { + var dto markerDTO + err := r.markerStore.Get(id, &dto) + if err != nil { + if err == badgerhold.ErrNotFound { + return nil, nil + } + return nil, err + } + + return &domain.Marker{ + ID: dto.ID, + Depth: dto.Depth, + ParentMarkerIDs: dto.ParentMarkerIDs, + }, nil +} + +func (r *markerRepository) GetMarkersByDepth( + ctx context.Context, + depth uint32, +) ([]domain.Marker, error) { + var dtos []markerDTO + err := r.markerStore.Find(&dtos, badgerhold.Where("Depth").Eq(depth)) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(dtos)) + for _, dto := range dtos { + markers = append(markers, domain.Marker{ + ID: dto.ID, + Depth: dto.Depth, + ParentMarkerIDs: dto.ParentMarkerIDs, + }) + } + return markers, nil +} + +func (r *markerRepository) GetMarkersByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Marker, error) { + var dtos []markerDTO + err := r.markerStore.Find(&dtos, + badgerhold.Where("Depth").Ge(minDepth).And("Depth").Le(maxDepth)) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(dtos)) + for _, dto := range dtos { + markers = append(markers, domain.Marker{ + ID: dto.ID, + Depth: dto.Depth, + ParentMarkerIDs: 
dto.ParentMarkerIDs, + }) + } + return markers, nil +} + +func (r *markerRepository) GetMarkersByIds( + ctx context.Context, + ids []string, +) ([]domain.Marker, error) { + if len(ids) == 0 { + return nil, nil + } + + markers := make([]domain.Marker, 0, len(ids)) + for _, id := range ids { + marker, err := r.GetMarker(ctx, id) + if err != nil { + return nil, err + } + if marker != nil { + markers = append(markers, *marker) + } + } + return markers, nil +} + +func (r *markerRepository) SweepMarker(ctx context.Context, markerID string, sweptAt int64) error { + // Check if already swept - if so, preserve original swept_at (ON CONFLICT DO NOTHING behavior) + var existing sweptMarkerDTO + err := r.sweptMarkerStore.Get(markerID, &existing) + if err == nil { + // Already swept, don't update + return nil + } + if err != badgerhold.ErrNotFound { + return err + } + + dto := sweptMarkerDTO{ + MarkerID: markerID, + SweptAt: sweptAt, + } + + err = r.sweptMarkerStore.Insert(markerID, dto) + if err != nil { + if errors.Is(err, badgerhold.ErrKeyExists) { + // Already exists (race condition), that's fine + return nil + } + if errors.Is(err, badger.ErrConflict) { + for attempts := 1; attempts <= maxRetries; attempts++ { + time.Sleep(100 * time.Millisecond) + err = r.sweptMarkerStore.Insert(markerID, dto) + if err == nil || errors.Is(err, badgerhold.ErrKeyExists) { + break + } + } + if err != nil && !errors.Is(err, badgerhold.ErrKeyExists) { + return err + } + } else { + return err + } + } + + // Update Swept field on VTXOs that contain this marker. + // This keeps the stored Swept field in sync for query compatibility. + // Errors here are non-fatal since swept_marker is already recorded. 
+ var filteredDtos []vtxoDTO + if err := r.vtxoStore.Find( + &filteredDtos, + badgerhold.Where("MarkerIDs").Contains(markerID), + ); err != nil { + return nil + } + + for _, dto := range filteredDtos { + if !dto.Swept { + dto.Swept = true + dto.UpdatedAt = time.Now().UnixMilli() + if err := r.vtxoStore.Update(dto.Outpoint.String(), dto); err != nil { + if errors.Is(err, badger.ErrConflict) { + for attempts := 1; attempts <= maxRetries; attempts++ { + time.Sleep(100 * time.Millisecond) + if err = r.vtxoStore.Update(dto.Outpoint.String(), dto); err == nil { + break + } + } + } + } + } + } + + return nil +} + +func (r *markerRepository) BulkSweepMarkers( + ctx context.Context, + markerIDs []string, + sweptAt int64, +) error { + for _, markerID := range markerIDs { + if err := r.SweepMarker(ctx, markerID, sweptAt); err != nil { + return err + } + } + return nil +} + +func (r *markerRepository) SweepVtxoOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, + sweptAt int64, +) error { + for _, op := range outpoints { + var dto vtxoDTO + if err := r.vtxoStore.Get(op.String(), &dto); err != nil { + if err == badgerhold.ErrNotFound { + continue + } + return err + } + dto.Swept = true + if err := r.vtxoStore.Update(op.String(), dto); err != nil { + return err + } + } + return nil +} + +func (r *markerRepository) SweepMarkerWithDescendants( + ctx context.Context, + markerID string, + sweptAt int64, +) (int64, error) { + // Find all descendant markers using BFS + descendantIDs, err := r.getDescendantMarkerIds(ctx, markerID) + if err != nil { + return 0, fmt.Errorf("failed to get descendant markers: %w", err) + } + + var count int64 + for _, id := range descendantIDs { + // Check if already swept + isSwept, err := r.IsMarkerSwept(ctx, id) + if err != nil { + return count, err + } + if isSwept { + continue + } + + if err := r.SweepMarker(ctx, id, sweptAt); err != nil { + return count, fmt.Errorf("failed to sweep marker %s: %w", id, err) + } + count++ + } + + return 
count, nil +} + +// getDescendantMarkerIds finds all markers that descend from the given marker ID +// using BFS traversal of the parent_marker_ids relationship. +// Returns empty slice if the root marker doesn't exist. +func (r *markerRepository) getDescendantMarkerIds( + ctx context.Context, + rootMarkerID string, +) ([]string, error) { + // First check if the root marker exists + var rootDTO markerDTO + err := r.markerStore.Get(rootMarkerID, &rootDTO) + if err != nil { + if err == badgerhold.ErrNotFound { + return []string{}, nil // Root doesn't exist, return empty + } + return nil, err + } + + descendantIDs := []string{rootMarkerID} + visited := map[string]bool{rootMarkerID: true} + queue := []string{rootMarkerID} + + for len(queue) > 0 { + currentID := queue[0] + queue = queue[1:] + + // Find all markers that have currentID in their ParentMarkerIDs + var dtos []markerDTO + err := r.markerStore.Find(&dtos, + badgerhold.Where("ParentMarkerIDs").Contains(currentID)) + if err != nil { + return nil, err + } + + for _, dto := range dtos { + if !visited[dto.ID] { + visited[dto.ID] = true + descendantIDs = append(descendantIDs, dto.ID) + queue = append(queue, dto.ID) + } + } + } + + return descendantIDs, nil +} + +func (r *markerRepository) IsMarkerSwept(ctx context.Context, markerID string) (bool, error) { + var dto sweptMarkerDTO + err := r.sweptMarkerStore.Get(markerID, &dto) + if err != nil { + if err == badgerhold.ErrNotFound { + return false, nil + } + return false, err + } + return true, nil +} + +func (r *markerRepository) GetSweptMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.SweptMarker, error) { + if len(markerIDs) == 0 { + return nil, nil + } + + sweptMarkers := make([]domain.SweptMarker, 0, len(markerIDs)) + for _, id := range markerIDs { + var dto sweptMarkerDTO + err := r.sweptMarkerStore.Get(id, &dto) + if err != nil { + if err == badgerhold.ErrNotFound { + continue + } + return nil, err + } + sweptMarkers = append(sweptMarkers, 
domain.SweptMarker{ + MarkerID: dto.MarkerID, + SweptAt: dto.SweptAt, + }) + } + return sweptMarkers, nil +} + +func (r *markerRepository) UpdateVtxoMarkers( + ctx context.Context, + outpoint domain.Outpoint, + markerIDs []string, +) error { + var dto vtxoDTO + err := r.vtxoStore.Get(outpoint.String(), &dto) + if err != nil { + if err == badgerhold.ErrNotFound { + return nil // VTXO not found, nothing to update + } + return err + } + + dto.MarkerIDs = markerIDs + dto.UpdatedAt = time.Now().UnixMilli() + + err = r.vtxoStore.Update(outpoint.String(), dto) + if err != nil { + if errors.Is(err, badger.ErrConflict) { + for attempts := 1; attempts <= maxRetries; attempts++ { + time.Sleep(100 * time.Millisecond) + err = r.vtxoStore.Update(outpoint.String(), dto) + if err == nil { + break + } + } + } + } + return err +} + +func (r *markerRepository) GetVtxosByMarker( + ctx context.Context, + markerID string, +) ([]domain.Vtxo, error) { + var dtos []vtxoDTO + err := r.vtxoStore.Find(&dtos, badgerhold.Where("MarkerIDs").Contains(markerID)) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(dtos)) + for _, dto := range dtos { + vtxos = append(vtxos, dto.Vtxo) + } + return vtxos, nil +} + +func (r *markerRepository) SweepVtxosByMarker(ctx context.Context, markerID string) (int64, error) { + // Count unswept VTXOs before marking to match Postgres/SQLite behaviour. 
+ var dtos []vtxoDTO + if err := r.vtxoStore.Find(&dtos, + badgerhold.Where("MarkerIDs").Contains(markerID)); err != nil { + return 0, err + } + var unsweptCount int64 + for _, dto := range dtos { + if !dto.Swept { + unsweptCount++ + } + } + + if err := r.SweepMarker(ctx, markerID, time.Now().UnixMilli()); err != nil { + return 0, err + } + + return unsweptCount, nil +} + +func (r *markerRepository) CreateRootMarkersForVtxos( + ctx context.Context, + vtxos []domain.Vtxo, +) error { + if len(vtxos) == 0 { + return nil + } + + for _, vtxo := range vtxos { + markerID := vtxo.Outpoint.String() + + // Create the root marker (depth 0, no parents) + // Note: vtxo.MarkerIDs should already be set before AddVtxos is called + if err := r.AddMarker(ctx, domain.Marker{ + ID: markerID, + Depth: 0, + ParentMarkerIDs: nil, + }); err != nil { + return fmt.Errorf("failed to create marker for vtxo %s: %w", markerID, err) + } + } + + return nil +} + +func (r *markerRepository) GetVtxosByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Vtxo, error) { + var dtos []vtxoDTO + err := r.vtxoStore.Find(&dtos, + badgerhold.Where("Depth").Ge(minDepth).And("Depth").Le(maxDepth)) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(dtos)) + for _, dto := range dtos { + vtxos = append(vtxos, dto.Vtxo) + } + return vtxos, nil +} + +func (r *markerRepository) GetVtxosByArkTxid( + ctx context.Context, + arkTxid string, +) ([]domain.Vtxo, error) { + var dtos []vtxoDTO + err := r.vtxoStore.Find(&dtos, badgerhold.Where("ArkTxid").Eq(arkTxid)) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(dtos)) + for _, dto := range dtos { + vtxos = append(vtxos, dto.Vtxo) + } + return vtxos, nil +} + +func (r *markerRepository) GetVtxoChainByMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.Vtxo, error) { + if len(markerIDs) == 0 { + return nil, nil + } + + seen := make(map[string]bool) + vtxos := 
make([]domain.Vtxo, 0) + + for _, markerID := range markerIDs { + var dtos []vtxoDTO + err := r.vtxoStore.Find(&dtos, + badgerhold.Where("MarkerIDs").Contains(markerID)) + if err != nil { + return nil, err + } + for _, dto := range dtos { + key := dto.Outpoint.String() + if !seen[key] { + seen[key] = true + vtxos = append(vtxos, dto.Vtxo) + } + } + } + return vtxos, nil +} diff --git a/internal/infrastructure/db/badger/vtxo_repo.go b/internal/infrastructure/db/badger/vtxo_repo.go index 6efd61089..88278a66b 100644 --- a/internal/infrastructure/db/badger/vtxo_repo.go +++ b/internal/infrastructure/db/badger/vtxo_repo.go @@ -16,10 +16,15 @@ import ( const vtxoStoreDir = "vtxos" -type vtxoRepository struct { +type VtxoRepository struct { store *badgerhold.Store } +// GetStore returns the underlying badgerhold store for use by marker repository +func (r *VtxoRepository) GetStore() *badgerhold.Store { + return r.store +} + type vtxoDTO struct { domain.Vtxo UpdatedAt int64 @@ -50,16 +55,16 @@ func NewVtxoRepository(config ...interface{}) (domain.VtxoRepository, error) { return nil, fmt.Errorf("failed to open round events store: %s", err) } - return &vtxoRepository{store}, nil + return &VtxoRepository{store}, nil } -func (r *vtxoRepository) AddVtxos( +func (r *VtxoRepository) AddVtxos( ctx context.Context, vtxos []domain.Vtxo, ) error { return r.addVtxos(ctx, vtxos) } -func (r *vtxoRepository) SettleVtxos( +func (r *VtxoRepository) SettleVtxos( ctx context.Context, spentVtxos map[domain.Outpoint]string, commitmentTxid string, ) error { for outpoint, spentBy := range spentVtxos { @@ -70,7 +75,7 @@ func (r *vtxoRepository) SettleVtxos( return nil } -func (r *vtxoRepository) SpendVtxos( +func (r *VtxoRepository) SpendVtxos( ctx context.Context, spentVtxos map[domain.Outpoint]string, arkTxid string, ) error { for outpoint, spentBy := range spentVtxos { @@ -81,7 +86,7 @@ func (r *vtxoRepository) SpendVtxos( return nil } -func (r *vtxoRepository) UnrollVtxos( +func (r 
*VtxoRepository) UnrollVtxos( ctx context.Context, outpoints []domain.Outpoint, ) error { for _, outpoint := range outpoints { @@ -93,7 +98,7 @@ func (r *vtxoRepository) UnrollVtxos( return nil } -func (r *vtxoRepository) GetVtxos( +func (r *VtxoRepository) GetVtxos( ctx context.Context, outpoints []domain.Outpoint, ) ([]domain.Vtxo, error) { vtxos := make([]domain.Vtxo, 0, len(outpoints)) @@ -113,14 +118,14 @@ func (r *vtxoRepository) GetVtxos( return vtxos, nil } -func (r *vtxoRepository) GetLeafVtxosForBatch( +func (r *VtxoRepository) GetLeafVtxosForBatch( ctx context.Context, txid string, ) ([]domain.Vtxo, error) { query := badgerhold.Where("RootCommitmentTxid").Eq(txid).And("Preconfirmed").Eq(false) return r.findVtxos(ctx, query) } -func (r *vtxoRepository) GetAllNonUnrolledVtxos( +func (r *VtxoRepository) GetAllNonUnrolledVtxos( ctx context.Context, pubkey string, ) ([]domain.Vtxo, []domain.Vtxo, error) { query := badgerhold.Where("Unrolled").Eq(false) @@ -144,7 +149,7 @@ func (r *vtxoRepository) GetAllNonUnrolledVtxos( return unspentVtxos, spentVtxos, nil } -func (r *vtxoRepository) GetAllSweepableUnrolledVtxos( +func (r *VtxoRepository) GetAllSweepableUnrolledVtxos( ctx context.Context, ) ([]domain.Vtxo, error) { query := badgerhold.Where("Unrolled"). 
@@ -158,34 +163,11 @@ func (r *vtxoRepository) GetAllSweepableUnrolledVtxos( return r.findVtxos(ctx, query) } -func (r *vtxoRepository) GetAllVtxos(ctx context.Context) ([]domain.Vtxo, error) { +func (r *VtxoRepository) GetAllVtxos(ctx context.Context) ([]domain.Vtxo, error) { return r.findVtxos(ctx, &badgerhold.Query{}) } -func (r *vtxoRepository) SweepVtxos( - ctx context.Context, outpoints []domain.Outpoint, -) (int, error) { - sweptCount := 0 - for _, outpoint := range outpoints { - vtxo, err := r.getVtxo(ctx, outpoint) - if err != nil { - return -1, err - } - if vtxo.Swept { - continue // Skip already swept vtxos - } - - // Mark as swept - vtxo.Swept = true - if err := r.updateVtxo(ctx, vtxo); err != nil { - return -1, err - } - sweptCount++ - } - return sweptCount, nil -} - -func (r *vtxoRepository) UpdateVtxosExpiration( +func (r *VtxoRepository) UpdateVtxosExpiration( ctx context.Context, vtxos []domain.Outpoint, expiresAt int64, ) error { var err error @@ -229,7 +211,7 @@ func (r *vtxoRepository) UpdateVtxosExpiration( return err } -func (r *vtxoRepository) GetAllVtxosWithPubKeys( +func (r *VtxoRepository) GetAllVtxosWithPubKeys( ctx context.Context, pubkeys []string, after, before int64, ) ([]domain.Vtxo, error) { if err := validateTimeRange(after, before); err != nil { @@ -254,7 +236,7 @@ func (r *vtxoRepository) GetAllVtxosWithPubKeys( return allVtxos, nil } -func (r *vtxoRepository) GetExpiringLiquidity( +func (r *VtxoRepository) GetExpiringLiquidity( ctx context.Context, after, before int64, ) (uint64, error) { query := badgerhold.Where("Swept").Eq(false). 
@@ -278,7 +260,7 @@ func (r *vtxoRepository) GetExpiringLiquidity( return sum, nil } -func (r *vtxoRepository) GetRecoverableLiquidity(ctx context.Context) (uint64, error) { +func (r *VtxoRepository) GetRecoverableLiquidity(ctx context.Context) (uint64, error) { query := badgerhold.Where("Swept").Eq(true).And("Spent").Eq(false) vtxos, err := r.findVtxos(ctx, query) if err != nil { @@ -292,7 +274,7 @@ func (r *vtxoRepository) GetRecoverableLiquidity(ctx context.Context) (uint64, e return sum, nil } -func (r *vtxoRepository) GetVtxoPubKeysByCommitmentTxid( +func (r *VtxoRepository) GetVtxoPubKeysByCommitmentTxid( ctx context.Context, commitmentTxid string, amountFilter uint64, ) ([]string, error) { if commitmentTxid == "" { @@ -339,7 +321,7 @@ func (r *vtxoRepository) GetVtxoPubKeysByCommitmentTxid( return taprootKeys, nil } -func (r *vtxoRepository) GetPendingSpentVtxosWithPubKeys( +func (r *VtxoRepository) GetPendingSpentVtxosWithPubKeys( ctx context.Context, pubkeys []string, after, before int64, ) ([]domain.Vtxo, error) { if err := validateTimeRange(after, before); err != nil { @@ -389,7 +371,7 @@ func (r *vtxoRepository) GetPendingSpentVtxosWithPubKeys( return vtxos, nil } -func (r *vtxoRepository) GetPendingSpentVtxosWithOutpoints( +func (r *VtxoRepository) GetPendingSpentVtxosWithOutpoints( ctx context.Context, outpoints []domain.Outpoint, ) ([]domain.Vtxo, error) { // Get all candidates @@ -431,12 +413,12 @@ func (r *vtxoRepository) GetPendingSpentVtxosWithOutpoints( return vtxos, nil } -func (r *vtxoRepository) Close() { +func (r *VtxoRepository) Close() { // nolint:all r.store.Close() } -func (r *vtxoRepository) addVtxos( +func (r *VtxoRepository) addVtxos( ctx context.Context, vtxos []domain.Vtxo, ) error { for _, vtxo := range vtxos { @@ -474,7 +456,7 @@ func (r *vtxoRepository) addVtxos( return nil } -func (r *vtxoRepository) getVtxo( +func (r *VtxoRepository) getVtxo( ctx context.Context, outpoint domain.Outpoint, ) (*domain.Vtxo, error) { var dto 
vtxoDTO @@ -495,7 +477,7 @@ func (r *vtxoRepository) getVtxo( return &dto.Vtxo, nil } -func (r *vtxoRepository) settleVtxo( +func (r *VtxoRepository) settleVtxo( ctx context.Context, outpoint domain.Outpoint, spentBy, settledBy string, ) error { vtxo, err := r.getVtxo(ctx, outpoint) @@ -516,7 +498,7 @@ func (r *vtxoRepository) settleVtxo( return r.updateVtxo(ctx, vtxo) } -func (r *vtxoRepository) spendVtxo( +func (r *VtxoRepository) spendVtxo( ctx context.Context, outpoint domain.Outpoint, spentBy, arkTxid string, ) error { vtxo, err := r.getVtxo(ctx, outpoint) @@ -537,7 +519,7 @@ func (r *vtxoRepository) spendVtxo( return r.updateVtxo(ctx, vtxo) } -func (r *vtxoRepository) unrollVtxo( +func (r *VtxoRepository) unrollVtxo( ctx context.Context, outpoint domain.Outpoint, ) (*domain.Vtxo, error) { vtxo, err := r.getVtxo(ctx, outpoint) @@ -559,7 +541,7 @@ func (r *vtxoRepository) unrollVtxo( return vtxo, nil } -func (r *vtxoRepository) findVtxos( +func (r *VtxoRepository) findVtxos( ctx context.Context, query *badgerhold.Query, ) ([]domain.Vtxo, error) { vtxos := make([]domain.Vtxo, 0) @@ -579,7 +561,7 @@ func (r *vtxoRepository) findVtxos( return vtxos, err } -func (r *vtxoRepository) updateVtxo(ctx context.Context, vtxo *domain.Vtxo) error { +func (r *VtxoRepository) updateVtxo(ctx context.Context, vtxo *domain.Vtxo) error { dto := vtxoDTO{ Vtxo: *vtxo, UpdatedAt: time.Now().UnixMilli(), @@ -610,7 +592,7 @@ func (r *vtxoRepository) updateVtxo(ctx context.Context, vtxo *domain.Vtxo) erro return nil } -func (r *vtxoRepository) GetSweepableVtxosByCommitmentTxid( +func (r *VtxoRepository) GetSweepableVtxosByCommitmentTxid( ctx context.Context, txid string, ) ([]domain.Outpoint, error) { @@ -653,15 +635,34 @@ func (r *vtxoRepository) GetSweepableVtxosByCommitmentTxid( return outpoints, nil } -func (r *vtxoRepository) GetAllChildrenVtxos( +func (r *VtxoRepository) GetAllChildrenVtxos( ctx context.Context, - txid string, + outpoint domain.Outpoint, ) ([]domain.Outpoint, 
error) { + // Seed with the specific outpoint, not all vouts of the txid, so that + // sibling outputs (which belong to independent lineages) are not included. + seedQuery := badgerhold.Where("Txid").Eq(outpoint.Txid). + And("VOut").Eq(outpoint.VOut) + seedVtxos, err := r.findVtxos(ctx, seedQuery) + if err != nil { + return nil, fmt.Errorf("failed to find seed vtxo %s: %w", outpoint, err) + } + visited := make(map[string]bool) visitedTxids := make(map[string]bool) var outpoints []domain.Outpoint - - queue := []string{txid} + queue := make([]string, 0, len(seedVtxos)) + + for _, vtxo := range seedVtxos { + outpointKey := vtxo.Outpoint.String() + if !visited[outpointKey] { + visited[outpointKey] = true + outpoints = append(outpoints, vtxo.Outpoint) + if vtxo.ArkTxid != "" { + queue = append(queue, vtxo.ArkTxid) + } + } + } for len(queue) > 0 { currentTxid := queue[0] diff --git a/internal/infrastructure/db/postgres/marker_repo.go b/internal/infrastructure/db/postgres/marker_repo.go new file mode 100644 index 000000000..9be837af5 --- /dev/null +++ b/internal/infrastructure/db/postgres/marker_repo.go @@ -0,0 +1,489 @@ +package pgdb + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "time" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/internal/infrastructure/db/postgres/sqlc/queries" + log "github.com/sirupsen/logrus" + "github.com/sqlc-dev/pqtype" +) + +type markerRepository struct { + db *sql.DB + querier *queries.Queries +} + +func NewMarkerRepository(config ...interface{}) (domain.MarkerRepository, error) { + if len(config) != 1 { + return nil, fmt.Errorf("invalid config") + } + db, ok := config[0].(*sql.DB) + if !ok { + return nil, fmt.Errorf("cannot open marker repository: invalid config") + } + + return &markerRepository{ + db: db, + querier: queries.New(db), + }, nil +} + +func (m *markerRepository) Close() { + _ = m.db.Close() +} + +func (m *markerRepository) AddMarker(ctx context.Context, marker 
domain.Marker) error { + parentMarkerIDs := marker.ParentMarkerIDs + if parentMarkerIDs == nil { + parentMarkerIDs = []string{} + } + parentMarkersJSON, err := json.Marshal(parentMarkerIDs) + if err != nil { + return fmt.Errorf("failed to marshal parent markers: %w", err) + } + + return m.querier.UpsertMarker(ctx, queries.UpsertMarkerParams{ + ID: marker.ID, + Depth: int32(marker.Depth), + ParentMarkers: pqtype.NullRawMessage{ + RawMessage: parentMarkersJSON, + Valid: true, + }, + }) +} + +func (m *markerRepository) GetMarker(ctx context.Context, id string) (*domain.Marker, error) { + row, err := m.querier.SelectMarker(ctx, id) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + return &marker, nil +} + +func (m *markerRepository) GetMarkersByDepth( + ctx context.Context, + depth uint32, +) ([]domain.Marker, error) { + rows, err := m.querier.SelectMarkersByDepth(ctx, int32(depth)) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(rows)) + for _, row := range rows { + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + markers = append(markers, marker) + } + return markers, nil +} + +func (m *markerRepository) GetMarkersByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Marker, error) { + rows, err := m.querier.SelectMarkersByDepthRange(ctx, queries.SelectMarkersByDepthRangeParams{ + MinDepth: int32(minDepth), + MaxDepth: int32(maxDepth), + }) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(rows)) + for _, row := range rows { + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + markers = append(markers, marker) + } + return markers, nil +} + +func (m *markerRepository) GetMarkersByIds( + ctx context.Context, + ids []string, +) ([]domain.Marker, error) { + if len(ids) == 0 { + return nil, nil + } + + rows, err := 
m.querier.SelectMarkersByIds(ctx, ids) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(rows)) + for _, row := range rows { + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + markers = append(markers, marker) + } + return markers, nil +} + +func (m *markerRepository) SweepMarker(ctx context.Context, markerID string, sweptAt int64) error { + return m.querier.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: markerID, + SweptAt: sweptAt, + }) +} + +func (m *markerRepository) BulkSweepMarkers( + ctx context.Context, + markerIDs []string, + sweptAt int64, +) error { + if len(markerIDs) == 0 { + return nil + } + return m.querier.BulkInsertSweptMarkers(ctx, queries.BulkInsertSweptMarkersParams{ + MarkerIds: markerIDs, + SweptAt: sweptAt, + }) +} + +func (m *markerRepository) SweepVtxoOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, + sweptAt int64, +) error { + if len(outpoints) == 0 { + return nil + } + txids := make([]string, len(outpoints)) + vouts := make([]int32, len(outpoints)) + for i, op := range outpoints { + txids[i] = op.Txid + vouts[i] = int32(op.VOut) + } + return m.querier.BulkInsertSweptVtxos(ctx, queries.BulkInsertSweptVtxosParams{ + Txids: txids, + Vouts: vouts, + SweptAt: sweptAt, + }) +} + +func (m *markerRepository) SweepMarkerWithDescendants( + ctx context.Context, + markerID string, + sweptAt int64, +) (int64, error) { + tx, err := m.db.BeginTx(ctx, nil) + if err != nil { + return 0, fmt.Errorf("failed to begin transaction: %w", err) + } + defer tx.Rollback() //nolint:errcheck + + txQuerier := m.querier.WithTx(tx) + + // Get all descendant marker IDs (including the root marker) that are not already swept + descendantIDs, err := txQuerier.GetDescendantMarkerIds(ctx, markerID) + if err != nil { + return 0, fmt.Errorf("failed to get descendant markers: %w", err) + } + + // Insert each descendant into swept_marker + var count int64 + for _, id := range 
descendantIDs { + err := txQuerier.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: id, + SweptAt: sweptAt, + }) + if err != nil { + return 0, fmt.Errorf("failed to sweep marker %s: %w", id, err) + } + count++ + } + + if err := tx.Commit(); err != nil { + return 0, fmt.Errorf("failed to commit transaction: %w", err) + } + + return count, nil +} + +func (m *markerRepository) IsMarkerSwept(ctx context.Context, markerID string) (bool, error) { + result, err := m.querier.IsMarkerSwept(ctx, markerID) + if err != nil { + return false, err + } + return result, nil +} + +func (m *markerRepository) GetSweptMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.SweptMarker, error) { + if len(markerIDs) == 0 { + return nil, nil + } + + rows, err := m.querier.SelectSweptMarkersByIds(ctx, markerIDs) + if err != nil { + return nil, err + } + + sweptMarkers := make([]domain.SweptMarker, 0, len(rows)) + for _, row := range rows { + sweptMarkers = append(sweptMarkers, domain.SweptMarker{ + MarkerID: row.MarkerID, + SweptAt: row.SweptAt, + }) + } + return sweptMarkers, nil +} + +func (m *markerRepository) UpdateVtxoMarkers( + ctx context.Context, + outpoint domain.Outpoint, + markerIDs []string, +) error { + if markerIDs == nil { + markerIDs = []string{} + } + markersJSON, err := json.Marshal(markerIDs) + if err != nil { + return fmt.Errorf("failed to marshal markers: %w", err) + } + return m.querier.UpdateVtxoMarkers(ctx, queries.UpdateVtxoMarkersParams{ + Markers: markersJSON, + Txid: outpoint.Txid, + Vout: int32(outpoint.VOut), + }) +} + +func (m *markerRepository) GetVtxosByMarker( + ctx context.Context, + markerID string, +) ([]domain.Vtxo, error) { + rows, err := m.querier.SelectVtxosByMarkerId(ctx, markerID) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromMarkerQuery(row)) + } + return vtxos, nil +} + +func (m *markerRepository) 
SweepVtxosByMarker(ctx context.Context, markerID string) (int64, error) { + var count int64 + txBody := func(qtx *queries.Queries) error { + // First check if the marker exists (foreign key constraint on swept_marker) + if _, err := qtx.SelectMarker(ctx, markerID); err != nil { + if err == sql.ErrNoRows { + return nil // Marker doesn't exist, nothing to sweep + } + return fmt.Errorf("failed to check marker existence: %w", err) + } + + // Count unswept VTXOs with this marker before inserting to swept_marker + c, err := qtx.CountUnsweptVtxosByMarkerId(ctx, markerID) + if err != nil { + return fmt.Errorf("failed to count unswept vtxos: %w", err) + } + + // Insert the marker into swept_marker (sweep state is computed via view) + if err := qtx.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: markerID, + SweptAt: time.Now().UnixMilli(), + }); err != nil { + return fmt.Errorf("failed to insert swept marker: %w", err) + } + count = c + return nil + } + if err := execTx(ctx, m.db, txBody); err != nil { + return 0, err + } + return count, nil +} + +func (m *markerRepository) CreateRootMarkersForVtxos( + ctx context.Context, + vtxos []domain.Vtxo, +) error { + if len(vtxos) == 0 { + return nil + } + + txBody := func(querierWithTx *queries.Queries) error { + for _, vtxo := range vtxos { + markerID := vtxo.Outpoint.String() + + // Create the root marker (depth 0, no parents) + // Note: vtxo.MarkerIDs should already be set before AddVtxos is called + if err := querierWithTx.UpsertMarker(ctx, queries.UpsertMarkerParams{ + ID: markerID, + Depth: 0, + ParentMarkers: pqtype.NullRawMessage{ + RawMessage: []byte("[]"), + Valid: true, + }, + }); err != nil { + return fmt.Errorf("failed to create marker for vtxo %s: %w", markerID, err) + } + } + return nil + } + + return execTx(ctx, m.db, txBody) +} + +func (m *markerRepository) GetVtxosByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Vtxo, error) { + rows, err := 
m.querier.SelectVtxosByDepthRange(ctx, queries.SelectVtxosByDepthRangeParams{ + MinDepth: int32(minDepth), + MaxDepth: int32(maxDepth), + }) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromVtxoVw(row)) + } + return vtxos, nil +} + +func (m *markerRepository) GetVtxosByArkTxid( + ctx context.Context, + arkTxid string, +) ([]domain.Vtxo, error) { + rows, err := m.querier.SelectVtxosByArkTxid( + ctx, + sql.NullString{String: arkTxid, Valid: arkTxid != ""}, + ) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromVtxoVw(row)) + } + return vtxos, nil +} + +func (m *markerRepository) GetVtxoChainByMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.Vtxo, error) { + if len(markerIDs) == 0 { + return nil, nil + } + + rows, err := m.querier.SelectVtxoChainByMarker(ctx, markerIDs) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromVtxoVw(row)) + } + return vtxos, nil +} + +// rowToVtxoFromVtxoVw converts a VtxoVw (used in multiple query results) to domain.Vtxo +func rowToVtxoFromVtxoVw(row queries.VtxoVw) domain.Vtxo { + return domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: row.Txid, + VOut: uint32(row.Vout), + }, + Amount: uint64(row.Amount), + PubKey: row.Pubkey, + RootCommitmentTxid: row.CommitmentTxid, + CommitmentTxids: parseCommitments(row.Commitments, []byte(",")), + SettledBy: row.SettledBy.String, + ArkTxid: row.ArkTxid.String, + SpentBy: row.SpentBy.String, + Spent: row.Spent, + Unrolled: row.Unrolled, + Swept: row.Swept.Bool, + Preconfirmed: row.Preconfirmed, + ExpiresAt: row.ExpiresAt, + CreatedAt: row.CreatedAt, + Depth: uint32(row.Depth), + MarkerIDs: parseMarkersJSONB(row.Markers), + } +} + +func rowToMarker(row queries.Marker) 
(domain.Marker, error) { + var parentMarkerIDs []string + if row.ParentMarkers.Valid && len(row.ParentMarkers.RawMessage) > 0 { + if err := json.Unmarshal(row.ParentMarkers.RawMessage, &parentMarkerIDs); err != nil { + return domain.Marker{}, fmt.Errorf("failed to unmarshal parent markers: %w", err) + } + } + + return domain.Marker{ + ID: row.ID, + Depth: uint32(row.Depth), + ParentMarkerIDs: parentMarkerIDs, + }, nil +} + +func rowToVtxoFromMarkerQuery(row queries.SelectVtxosByMarkerIdRow) domain.Vtxo { + return domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: row.VtxoVw.Txid, + VOut: uint32(row.VtxoVw.Vout), + }, + Amount: uint64(row.VtxoVw.Amount), + PubKey: row.VtxoVw.Pubkey, + RootCommitmentTxid: row.VtxoVw.CommitmentTxid, + CommitmentTxids: parseCommitments(row.VtxoVw.Commitments, []byte(",")), + SettledBy: row.VtxoVw.SettledBy.String, + ArkTxid: row.VtxoVw.ArkTxid.String, + SpentBy: row.VtxoVw.SpentBy.String, + Spent: row.VtxoVw.Spent, + Unrolled: row.VtxoVw.Unrolled, + Swept: row.VtxoVw.Swept.Bool, + Preconfirmed: row.VtxoVw.Preconfirmed, + ExpiresAt: row.VtxoVw.ExpiresAt, + CreatedAt: row.VtxoVw.CreatedAt, + Depth: uint32(row.VtxoVw.Depth), + MarkerIDs: parseMarkersJSONB(row.VtxoVw.Markers), + } +} + +// parseMarkersJSONB parses a JSONB array into a slice of strings. +// Logs and returns nil if the JSON is malformed so that corrupt markers are +// surfaced instead of silently treated as empty. 
+func parseMarkersJSONB(markers json.RawMessage) []string { + if len(markers) == 0 { + return nil + } + var markerIDs []string + if err := json.Unmarshal(markers, &markerIDs); err != nil { + log.WithError(err).Warnf("failed to parse markers JSONB: %q", string(markers)) + return nil + } + return markerIDs +} diff --git a/internal/infrastructure/db/postgres/migration/20260210100000_add_depth_and_markers.down.sql b/internal/infrastructure/db/postgres/migration/20260210100000_add_depth_and_markers.down.sql new file mode 100644 index 000000000..9db38b334 --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20260210100000_add_depth_and_markers.down.sql @@ -0,0 +1,44 @@ +-- Drop views first (they depend on vtxo columns via v.*) +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +-- Restore the swept column that the up migration dropped. Backfill from +-- swept_marker (joined via the markers JSON array) before dropping the marker +-- tables, otherwise the rollback silently loses sweep state — VTXOs that +-- were swept via swept_marker would reappear as unswept. 
+ALTER TABLE vtxo ADD COLUMN swept BOOLEAN NOT NULL DEFAULT false; +UPDATE vtxo v +SET swept = true +WHERE EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) +); + +-- Drop markers index and column from vtxo +DROP INDEX IF EXISTS idx_vtxo_markers; +ALTER TABLE vtxo DROP COLUMN IF EXISTS markers; + +-- Drop depth column from vtxo +ALTER TABLE vtxo DROP COLUMN IF EXISTS depth; + +-- Drop marker tables (indexes are dropped automatically with the table) +DROP TABLE IF EXISTS swept_marker; +DROP TABLE IF EXISTS marker; + +-- Recreate views without depth and markers columns +CREATE VIEW vtxo_vw AS +SELECT v.*, string_agg(vc.commitment_txid, ',') AS commitments +FROM vtxo v +LEFT JOIN vtxo_commitment_txid vc +ON v.txid = vc.vtxo_txid AND v.vout = vc.vtxo_vout +GROUP BY v.txid, v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, + intent.id, + intent.round_id, + intent.proof, + intent.message +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; diff --git a/internal/infrastructure/db/postgres/migration/20260210100000_add_depth_and_markers.up.sql b/internal/infrastructure/db/postgres/migration/20260210100000_add_depth_and_markers.up.sql new file mode 100644 index 000000000..4cac60754 --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20260210100000_add_depth_and_markers.up.sql @@ -0,0 +1,110 @@ +-- Add depth and markers columns to vtxo +ALTER TABLE vtxo + ADD COLUMN IF NOT EXISTS depth INTEGER NOT NULL DEFAULT 0, + ADD COLUMN IF NOT EXISTS markers JSONB NOT NULL DEFAULT '[]'::jsonb; +CREATE INDEX IF NOT EXISTS idx_vtxo_markers ON vtxo USING GIN (markers); + +-- Create marker table +CREATE TABLE IF NOT EXISTS marker ( + id TEXT PRIMARY KEY, + depth INTEGER NOT NULL, + parent_markers JSONB -- JSON array of parent marker IDs +); +CREATE INDEX IF NOT EXISTS idx_marker_depth ON marker(depth); +CREATE INDEX IF NOT EXISTS idx_marker_parent_markers ON marker USING GIN (parent_markers); + 
+-- Create swept_marker table (append-only) +CREATE TABLE IF NOT EXISTS swept_marker ( + marker_id TEXT PRIMARY KEY REFERENCES marker(id), + swept_at BIGINT NOT NULL +); + +-- Recreate views to include the new columns +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +CREATE VIEW vtxo_vw AS +SELECT + v.*, + COALESCE(vc.commitments, '') AS commitments, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN LATERAL ( + SELECT string_agg(commitment_txid, ',') AS commitments + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout +) vc ON true +LEFT JOIN ( + SELECT txid, vout, asset_id, amount + FROM asset_projection + GROUP BY txid, vout, asset_id, amount +) ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, intent.id, intent.round_id, intent.proof, intent.message, intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; + +-- Backfill: Create a marker for every existing VTXO using its outpoint as marker ID +-- This ensures every VTXO has at least 1 marker +-- NOTE: this INSERT and the UPDATE below run over all VTXOs and will hold locks. +-- On large production DBs (millions of rows) expect 10-60 seconds; plan a maintenance window. 
+INSERT INTO marker (id, depth, parent_markers) +SELECT + v.txid || ':' || v.vout, + v.depth, + '[]'::jsonb +FROM vtxo v; + +-- Assign the marker to every VTXO +UPDATE vtxo SET markers = jsonb_build_array(txid || ':' || vout); + +-- Migrate existing swept VTXOs to swept_marker table before dropping column +-- Insert the VTXO's marker into swept_marker +INSERT INTO swept_marker (marker_id, swept_at) +SELECT + v.txid || ':' || v.vout, + (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT +FROM vtxo v +WHERE v.swept = true +ON CONFLICT (marker_id) DO NOTHING; + +-- Drop views before dropping the swept column (views depend on it via v.*) +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +-- Drop swept column from vtxo table (swept state now computed via markers) +ALTER TABLE vtxo DROP COLUMN IF EXISTS swept; + +-- Recreate views to compute swept status dynamically + +CREATE VIEW vtxo_vw AS +SELECT v.*, + COALESCE(vc.commitments, '') AS commitments, + EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN LATERAL ( + SELECT string_agg(commitment_txid, ',') AS commitments + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout +) vc ON true +LEFT JOIN ( + SELECT txid, vout, asset_id, amount + FROM asset_projection + GROUP BY txid, vout, asset_id, amount +) ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, intent.id, intent.round_id, intent.proof, intent.message, intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; diff --git a/internal/infrastructure/db/postgres/migration/20260219082956_fix_vtxo_vw_perf.up.sql b/internal/infrastructure/db/postgres/migration/20260219082956_fix_vtxo_vw_perf.up.sql index ddaaa2854..e0cd5783b 100644 --- 
a/internal/infrastructure/db/postgres/migration/20260219082956_fix_vtxo_vw_perf.up.sql +++ b/internal/infrastructure/db/postgres/migration/20260219082956_fix_vtxo_vw_perf.up.sql @@ -12,13 +12,18 @@ SELECT v.spent_by, v.spent, v.unrolled, - v.swept, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, + v.depth, + v.markers, COALESCE(vc.commitments, '') AS commitments, + EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) AS swept, COALESCE(ap.asset_id, '') AS asset_id, COALESCE(ap.amount, 0) AS asset_amount, intent.id, @@ -40,4 +45,3 @@ LEFT JOIN ( ) ap ON ap.txid = v.txid AND ap.vout = v.vout; CREATE INDEX IF NOT EXISTS idx_asset_projection_vtxo ON asset_projection(txid, vout); - diff --git a/internal/infrastructure/db/postgres/migration/20260409140000_checkpoint_tx_offchain_txid_index.down.sql b/internal/infrastructure/db/postgres/migration/20260409140000_checkpoint_tx_offchain_txid_index.down.sql new file mode 100644 index 000000000..3bf97317f --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20260409140000_checkpoint_tx_offchain_txid_index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS idx_checkpoint_tx_offchain_txid; diff --git a/internal/infrastructure/db/postgres/migration/20260409140000_checkpoint_tx_offchain_txid_index.up.sql b/internal/infrastructure/db/postgres/migration/20260409140000_checkpoint_tx_offchain_txid_index.up.sql new file mode 100644 index 000000000..4fddcadc2 --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20260409140000_checkpoint_tx_offchain_txid_index.up.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS idx_checkpoint_tx_offchain_txid + ON checkpoint_tx (offchain_txid); diff --git a/internal/infrastructure/db/postgres/migration/20260416120000_add_swept_vtxo.down.sql b/internal/infrastructure/db/postgres/migration/20260416120000_add_swept_vtxo.down.sql new file mode 100644 index 000000000..a23aa8ee7 --- /dev/null +++ 
b/internal/infrastructure/db/postgres/migration/20260416120000_add_swept_vtxo.down.sql @@ -0,0 +1,71 @@ +-- Guard against silently resurrecting swept VTXOs. +-- +-- swept_vtxo holds per-outpoint sweep state for the checkpoint-sweep path. +-- Dropping the table would make vtxo_vw.swept flip back to false for every +-- outpoint tracked only here (marker-based sweeps still survive via +-- swept_marker). When the table has data, fail loudly rather than silently +-- discard it. When the table is empty, the rollback is safe — drop the +-- table and restore the pre-swept_vtxo view shape. +DO $$ +BEGIN + IF EXISTS (SELECT 1 FROM swept_vtxo) THEN + RAISE EXCEPTION 'irreversible migration: swept_vtxo contains % entries; rolling back would resurrect swept VTXOs. Truncate swept_vtxo manually if you accept the data loss, then re-run.', + (SELECT count(*) FROM swept_vtxo); + END IF; +END +$$; + +DROP TABLE IF EXISTS swept_vtxo; + +-- Restore views without swept_vtxo check +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +CREATE VIEW vtxo_vw AS +SELECT v.*, + COALESCE(vc.commitments, '') AS commitments, + EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN LATERAL ( + SELECT string_agg(commitment_txid, ',') AS commitments + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout +) vc ON true +LEFT JOIN ( + SELECT txid, vout, asset_id, amount + FROM asset_projection + GROUP BY txid, vout, asset_id, amount +) ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT + v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, + v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.preconfirmed, + v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.depth, v.markers, + COALESCE(vc.commitments, '') AS commitments, + EXISTS ( + SELECT 1 
FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount, + intent.id, intent.round_id, intent.proof, intent.message, + intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo v ON intent.id = v.intent_id +LEFT JOIN LATERAL ( + SELECT string_agg(commitment_txid, ',') AS commitments + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout +) vc ON true +LEFT JOIN ( + SELECT txid, vout, asset_id, amount + FROM asset_projection + GROUP BY txid, vout, asset_id, amount +) ap ON ap.txid = v.txid AND ap.vout = v.vout; diff --git a/internal/infrastructure/db/postgres/migration/20260416120000_add_swept_vtxo.up.sql b/internal/infrastructure/db/postgres/migration/20260416120000_add_swept_vtxo.up.sql new file mode 100644 index 000000000..d453506f2 --- /dev/null +++ b/internal/infrastructure/db/postgres/migration/20260416120000_add_swept_vtxo.up.sql @@ -0,0 +1,104 @@ +-- Per-outpoint sweep tracking for checkpoint sweeps. +-- Markers can be shared across independent subtrees when offchain txs +-- consolidate inputs from different lineages. BulkSweepMarkers is safe +-- for batch sweeps (entire round) but over-reaches for checkpoint sweeps +-- (single subtree). This table tracks per-outpoint sweep status for the +-- checkpoint path. +CREATE TABLE IF NOT EXISTS swept_vtxo ( + txid TEXT NOT NULL, + vout INTEGER NOT NULL, + swept_at BIGINT NOT NULL, + PRIMARY KEY (txid, vout) +); + +-- Rebuild vtxo_vw: swept if marker in swept_marker OR outpoint in swept_vtxo +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +-- swept is OR'd across two sources on purpose: +-- * swept_marker — populated by batch/round sweeps. 
Coarse-grained: a single +-- marker can cover many VTXOs, so marker-based sweeping is efficient for +-- whole-round sweeps but would over-reach if applied to checkpoint sweeps +-- (markers are shared across independent subtrees). +-- * swept_vtxo — populated by checkpoint sweeps. Fine-grained: one row per +-- (txid, vout), so it safely scopes to a single outpoint's lineage. +-- New sweep code paths must pick the right table; maintainers adding a third +-- sweep path should extend this OR rather than re-overloading one of them. +CREATE VIEW vtxo_vw AS +SELECT v.*, + COALESCE(vc.commitments, '') AS commitments, + ( + EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) + OR EXISTS ( + SELECT 1 FROM swept_vtxo sv + WHERE sv.txid = v.txid AND sv.vout = v.vout + ) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN LATERAL ( + SELECT string_agg(commitment_txid, ',') AS commitments + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout +) vc ON true +LEFT JOIN ( + SELECT txid, vout, asset_id, amount + FROM asset_projection + GROUP BY txid, vout, asset_id, amount +) ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +-- Rebuild intent_with_inputs_vw +CREATE VIEW intent_with_inputs_vw AS +SELECT + v.txid, + v.vout, + v.pubkey, + v.amount, + v.expires_at, + v.created_at, + v.commitment_txid, + v.spent_by, + v.spent, + v.unrolled, + v.preconfirmed, + v.settled_by, + v.ark_txid, + v.intent_id, + v.updated_at, + v.depth, + v.markers, + COALESCE(vc.commitments, '') AS commitments, + ( + EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) + OR EXISTS ( + SELECT 1 FROM swept_vtxo sv + WHERE sv.txid = v.txid AND sv.vout = v.vout + ) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount, + intent.id, + intent.round_id, + intent.proof, + intent.message, + 
intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo v ON intent.id = v.intent_id +LEFT JOIN LATERAL ( + SELECT string_agg(commitment_txid, ',') AS commitments + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout +) vc ON true +LEFT JOIN ( + SELECT txid, vout, asset_id, amount + FROM asset_projection + GROUP BY txid, vout, asset_id, amount +) ap ON ap.txid = v.txid AND ap.vout = v.vout; diff --git a/internal/infrastructure/db/postgres/offchain_tx_repo.go b/internal/infrastructure/db/postgres/offchain_tx_repo.go index 9f41ea76c..a45be1dbc 100644 --- a/internal/infrastructure/db/postgres/offchain_tx_repo.go +++ b/internal/infrastructure/db/postgres/offchain_tx_repo.go @@ -114,6 +114,62 @@ func (v *offchainTxRepository) GetOffchainTx( }, nil } +func (v *offchainTxRepository) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + if len(txids) == 0 { + return []*domain.OffchainTx{}, nil + } + + rows, err := v.querier.SelectOffchainTxsByTxids(ctx, txids) + if err != nil { + return nil, err + } + + grouped := make(map[string][]queries.OffchainTxVw) + for _, row := range rows { + grouped[row.OffchainTxVw.Txid] = append(grouped[row.OffchainTxVw.Txid], row.OffchainTxVw) + } + + txs := make([]*domain.OffchainTx, 0, len(grouped)) + for _, vws := range grouped { + vt := vws[0] + checkpointTxs := make(map[string]string) + commitmentTxids := make(map[string]string) + rootCommitmentTxId := "" + for _, vw := range vws { + if vw.CheckpointTxid.Valid && vw.CheckpointTx.Valid { + checkpointTxs[vw.CheckpointTxid.String] = vw.CheckpointTx.String + commitmentTxids[vw.CheckpointTxid.String] = vw.CommitmentTxid.String + if vw.IsRootCommitmentTxid.Valid && vw.IsRootCommitmentTxid.Bool { + rootCommitmentTxId = vw.CommitmentTxid.String + } + } + } + stage := domain.Stage{Code: int(vt.StageCode)} + if vt.FailReason.String != "" { + stage.Failed = true + } + if domain.OffchainTxStage(vt.StageCode) == 
domain.OffchainTxFinalizedStage { + stage.Ended = true + } + txs = append(txs, &domain.OffchainTx{ + ArkTxid: vt.Txid, + ArkTx: vt.Tx, + StartingTimestamp: vt.StartingTimestamp, + EndingTimestamp: vt.EndingTimestamp, + ExpiryTimestamp: vt.ExpiryTimestamp, + FailReason: vt.FailReason.String, + Stage: stage, + CheckpointTxs: checkpointTxs, + CommitmentTxids: commitmentTxids, + RootCommitmentTxId: rootCommitmentTxId, + }) + } + + return txs, nil +} + func (v *offchainTxRepository) Close() { _ = v.db.Close() } diff --git a/internal/infrastructure/db/postgres/sqlc/queries/models.go b/internal/infrastructure/db/postgres/sqlc/queries/models.go index d8dd5c036..e113291de 100644 --- a/internal/infrastructure/db/postgres/sqlc/queries/models.go +++ b/internal/infrastructure/db/postgres/sqlc/queries/models.go @@ -6,6 +6,7 @@ package queries import ( "database/sql" + "encoding/json" "github.com/sqlc-dev/pqtype" ) @@ -73,13 +74,15 @@ type IntentWithInputsVw struct { SpentBy sql.NullString Spent sql.NullBool Unrolled sql.NullBool - Swept sql.NullBool Preconfirmed sql.NullBool SettledBy sql.NullString ArkTxid sql.NullString IntentID sql.NullString UpdatedAt sql.NullInt64 + Depth sql.NullInt32 + Markers pqtype.NullRawMessage Commitments []byte + Swept sql.NullBool AssetID sql.NullString AssetAmount sql.NullString ID sql.NullString @@ -101,6 +104,12 @@ type IntentWithReceiversVw struct { Txid sql.NullString } +type Marker struct { + ID string + Depth int32 + ParentMarkers pqtype.NullRawMessage +} + type MarketHour struct { ID int32 StartTime int64 @@ -206,6 +215,17 @@ type ScheduledSession struct { UpdatedAt int64 } +type SweptMarker struct { + MarkerID string + SweptAt int64 +} + +type SweptVtxo struct { + Txid string + Vout int32 + SweptAt int64 +} + type Tx struct { Txid string Tx string @@ -226,12 +246,13 @@ type Vtxo struct { SpentBy sql.NullString Spent bool Unrolled bool - Swept bool Preconfirmed bool SettledBy sql.NullString ArkTxid sql.NullString IntentID sql.NullString 
UpdatedAt int64 + Depth int32 + Markers json.RawMessage } type VtxoCommitmentTxid struct { @@ -251,13 +272,15 @@ type VtxoVw struct { SpentBy sql.NullString Spent bool Unrolled bool - Swept bool Preconfirmed bool SettledBy sql.NullString ArkTxid sql.NullString IntentID sql.NullString UpdatedAt int64 + Depth int32 + Markers json.RawMessage Commitments []byte + Swept sql.NullBool AssetID string AssetAmount string } diff --git a/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go b/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go index a59cab93d..b7ce2005b 100644 --- a/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go +++ b/internal/infrastructure/db/postgres/sqlc/queries/query.sql.go @@ -8,6 +8,7 @@ package queries import ( "context" "database/sql" + "encoding/json" "github.com/lib/pq" "github.com/sqlc-dev/pqtype" @@ -62,6 +63,39 @@ func (q *Queries) AddIntentFees(ctx context.Context, arg AddIntentFeesParams) er return err } +const bulkInsertSweptMarkers = `-- name: BulkInsertSweptMarkers :exec +INSERT INTO swept_marker (marker_id, swept_at) +SELECT unnest($1::text[]), $2 +ON CONFLICT(marker_id) DO NOTHING +` + +type BulkInsertSweptMarkersParams struct { + MarkerIds []string + SweptAt int64 +} + +func (q *Queries) BulkInsertSweptMarkers(ctx context.Context, arg BulkInsertSweptMarkersParams) error { + _, err := q.db.ExecContext(ctx, bulkInsertSweptMarkers, pq.Array(arg.MarkerIds), arg.SweptAt) + return err +} + +const bulkInsertSweptVtxos = `-- name: BulkInsertSweptVtxos :exec +INSERT INTO swept_vtxo (txid, vout, swept_at) +SELECT unnest($1::text[]), unnest($2::integer[]), $3 +ON CONFLICT(txid, vout) DO NOTHING +` + +type BulkInsertSweptVtxosParams struct { + Txids []string + Vouts []int32 + SweptAt int64 +} + +func (q *Queries) BulkInsertSweptVtxos(ctx context.Context, arg BulkInsertSweptVtxosParams) error { + _, err := q.db.ExecContext(ctx, bulkInsertSweptVtxos, pq.Array(arg.Txids), pq.Array(arg.Vouts), arg.SweptAt) + return err +} 
+ const clearIntentFees = `-- name: ClearIntentFees :exec INSERT INTO intent_fees ( offchain_input_fee_program, @@ -86,6 +120,60 @@ func (q *Queries) ClearScheduledSession(ctx context.Context) error { return err } +const countUnsweptVtxosByMarkerId = `-- name: CountUnsweptVtxosByMarkerId :one +SELECT COUNT(DISTINCT (txid, vout)) FROM vtxo_vw WHERE markers @> jsonb_build_array($1::TEXT) AND swept = false +` + +// Count VTXOs whose markers JSONB array contains the given marker_id and are not swept +func (q *Queries) CountUnsweptVtxosByMarkerId(ctx context.Context, markerID string) (int64, error) { + row := q.db.QueryRowContext(ctx, countUnsweptVtxosByMarkerId, markerID) + var count int64 + err := row.Scan(&count) + return count, err +} + +const getDescendantMarkerIds = `-- name: GetDescendantMarkerIds :many +WITH RECURSIVE descendant_markers(id) AS ( + -- Base case: the marker being swept + SELECT marker.id FROM marker WHERE marker.id = $1 + UNION + -- Recursive case: find markers whose parent_markers jsonb array contains any descendant + SELECT m.id FROM marker m + INNER JOIN descendant_markers dm ON ( + m.parent_markers @> jsonb_build_array(dm.id) + ) +) +SELECT descendant_markers.id AS marker_id FROM descendant_markers +WHERE descendant_markers.id NOT IN (SELECT sm.marker_id FROM swept_marker sm) +` + +// Recursively get a marker and all its descendants (markers whose parent_markers contain it). +// Uses UNION (set semantics, not UNION ALL) so rows already produced are filtered, +// which makes this cycle-safe. Do not convert to UNION ALL: cycles in parent_markers +// would cause the recursion to run unbounded. 
+func (q *Queries) GetDescendantMarkerIds(ctx context.Context, rootMarkerID string) ([]string, error) { + rows, err := q.db.QueryContext(ctx, getDescendantMarkerIds, rootMarkerID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var marker_id string + if err := rows.Scan(&marker_id); err != nil { + return nil, err + } + items = append(items, marker_id) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const insertAsset = `-- name: InsertAsset :exec INSERT INTO asset (id, is_immutable, metadata_hash, metadata, control_asset_id) VALUES ($1, $2, $3, $4, $5) @@ -110,6 +198,39 @@ func (q *Queries) InsertAsset(ctx context.Context, arg InsertAssetParams) error return err } +const insertSweptMarker = `-- name: InsertSweptMarker :exec +INSERT INTO swept_marker (marker_id, swept_at) +VALUES ($1, $2) +ON CONFLICT(marker_id) DO NOTHING +` + +type InsertSweptMarkerParams struct { + MarkerID string + SweptAt int64 +} + +func (q *Queries) InsertSweptMarker(ctx context.Context, arg InsertSweptMarkerParams) error { + _, err := q.db.ExecContext(ctx, insertSweptMarker, arg.MarkerID, arg.SweptAt) + return err +} + +const insertSweptVtxo = `-- name: InsertSweptVtxo :exec +INSERT INTO swept_vtxo (txid, vout, swept_at) +VALUES ($1, $2, $3) +ON CONFLICT(txid, vout) DO NOTHING +` + +type InsertSweptVtxoParams struct { + Txid string + Vout int32 + SweptAt int64 +} + +func (q *Queries) InsertSweptVtxo(ctx context.Context, arg InsertSweptVtxoParams) error { + _, err := q.db.ExecContext(ctx, insertSweptVtxo, arg.Txid, arg.Vout, arg.SweptAt) + return err +} + const insertVtxoAssetProjection = `-- name: InsertVtxoAssetProjection :exec INSERT INTO asset_projection (asset_id, txid, vout, amount) VALUES ($1, $2, $3, $4) @@ -148,6 +269,17 @@ func (q *Queries) InsertVtxoCommitmentTxid(ctx context.Context, arg InsertVtxoCo return err } +const 
isMarkerSwept = `-- name: IsMarkerSwept :one +SELECT EXISTS(SELECT 1 FROM swept_marker WHERE marker_id = $1) AS is_swept +` + +func (q *Queries) IsMarkerSwept(ctx context.Context, markerID string) (bool, error) { + row := q.db.QueryRowContext(ctx, isMarkerSwept, markerID) + var is_swept bool + err := row.Scan(&is_swept) + return is_swept, err +} + const selectActiveScriptConvictions = `-- name: SelectActiveScriptConvictions :many SELECT id, type, created_at, expires_at, crime_type, crime_round_id, crime_reason, pardoned, script FROM conviction WHERE script = $1 @@ -222,7 +354,7 @@ func (q *Queries) SelectAllRoundIds(ctx context.Context) ([]string, error) { } const selectAllVtxos = `-- name: SelectAllVtxos :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw ` type SelectAllVtxosRow struct { @@ -249,13 +381,15 @@ func (q *Queries) SelectAllVtxos(ctx context.Context) ([]SelectAllVtxosRow, erro &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -446,13 +580,16 @@ func (q *Queries) 
SelectConvictionsInTimeRange(ctx context.Context, arg SelectCo } const selectExpiringLiquidityAmount = `-- name: SelectExpiringLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0)::bigint AS amount -FROM vtxo -WHERE swept = false - AND spent = false - AND unrolled = false - AND expires_at > $1 - AND ($2 <= 0 OR expires_at < $2) +SELECT COALESCE(SUM(v.amount), 0)::bigint AS amount +FROM vtxo v +WHERE NOT EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) + AND v.spent = false + AND v.unrolled = false + AND v.expires_at > $1 + AND ($2 <= 0 OR v.expires_at < $2) ` type SelectExpiringLiquidityAmountParams struct { @@ -529,8 +666,105 @@ func (q *Queries) SelectLatestScheduledSession(ctx context.Context) (ScheduledSe return i, err } +const selectMarker = `-- name: SelectMarker :one +SELECT id, depth, parent_markers FROM marker WHERE id = $1 +` + +func (q *Queries) SelectMarker(ctx context.Context, id string) (Marker, error) { + row := q.db.QueryRowContext(ctx, selectMarker, id) + var i Marker + err := row.Scan(&i.ID, &i.Depth, &i.ParentMarkers) + return i, err +} + +const selectMarkersByDepth = `-- name: SelectMarkersByDepth :many +SELECT id, depth, parent_markers FROM marker WHERE depth = $1 +` + +func (q *Queries) SelectMarkersByDepth(ctx context.Context, depth int32) ([]Marker, error) { + rows, err := q.db.QueryContext(ctx, selectMarkersByDepth, depth) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Marker + for rows.Next() { + var i Marker + if err := rows.Scan(&i.ID, &i.Depth, &i.ParentMarkers); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectMarkersByDepthRange = `-- name: SelectMarkersByDepthRange :many +SELECT id, depth, parent_markers FROM marker WHERE depth >= $1 AND depth <= $2 ORDER BY depth +` + +type 
SelectMarkersByDepthRangeParams struct { + MinDepth int32 + MaxDepth int32 +} + +func (q *Queries) SelectMarkersByDepthRange(ctx context.Context, arg SelectMarkersByDepthRangeParams) ([]Marker, error) { + rows, err := q.db.QueryContext(ctx, selectMarkersByDepthRange, arg.MinDepth, arg.MaxDepth) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Marker + for rows.Next() { + var i Marker + if err := rows.Scan(&i.ID, &i.Depth, &i.ParentMarkers); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectMarkersByIds = `-- name: SelectMarkersByIds :many +SELECT id, depth, parent_markers FROM marker WHERE id = ANY($1::text[]) +` + +func (q *Queries) SelectMarkersByIds(ctx context.Context, ids []string) ([]Marker, error) { + rows, err := q.db.QueryContext(ctx, selectMarkersByIds, pq.Array(ids)) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Marker + for rows.Next() { + var i Marker + if err := rows.Scan(&i.ID, &i.Depth, &i.ParentMarkers); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectNotUnrolledVtxos = `-- name: SelectNotUnrolledVtxos :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, 
vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false ` type SelectNotUnrolledVtxosRow struct { @@ -557,13 +791,15 @@ func (q *Queries) SelectNotUnrolledVtxos(ctx context.Context) ([]SelectNotUnroll &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -581,7 +817,7 @@ func (q *Queries) SelectNotUnrolledVtxos(ctx context.Context) ([]SelectNotUnroll } const selectNotUnrolledVtxosWithPubkey = `-- name: SelectNotUnrolledVtxosWithPubkey :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false AND pubkey = $1 +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false AND pubkey = $1 ` type SelectNotUnrolledVtxosWithPubkeyRow struct { @@ -608,13 +844,15 @@ func (q *Queries) SelectNotUnrolledVtxosWithPubkey(ctx context.Context, pubkey s &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - 
&i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -675,8 +913,52 @@ func (q *Queries) SelectOffchainTx(ctx context.Context, txid string) ([]SelectOf return items, nil } +const selectOffchainTxsByTxids = `-- name: SelectOffchainTxsByTxids :many +SELECT offchain_tx_vw.txid, offchain_tx_vw.tx, offchain_tx_vw.starting_timestamp, offchain_tx_vw.ending_timestamp, offchain_tx_vw.expiry_timestamp, offchain_tx_vw.fail_reason, offchain_tx_vw.stage_code, offchain_tx_vw.checkpoint_txid, offchain_tx_vw.checkpoint_tx, offchain_tx_vw.commitment_txid, offchain_tx_vw.is_root_commitment_txid, offchain_tx_vw.offchain_txid FROM offchain_tx_vw WHERE txid = ANY($1::varchar[]) AND COALESCE(fail_reason, '') = '' +` + +type SelectOffchainTxsByTxidsRow struct { + OffchainTxVw OffchainTxVw +} + +func (q *Queries) SelectOffchainTxsByTxids(ctx context.Context, txids []string) ([]SelectOffchainTxsByTxidsRow, error) { + rows, err := q.db.QueryContext(ctx, selectOffchainTxsByTxids, pq.Array(txids)) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectOffchainTxsByTxidsRow + for rows.Next() { + var i SelectOffchainTxsByTxidsRow + if err := rows.Scan( + &i.OffchainTxVw.Txid, + &i.OffchainTxVw.Tx, + &i.OffchainTxVw.StartingTimestamp, + &i.OffchainTxVw.EndingTimestamp, + &i.OffchainTxVw.ExpiryTimestamp, + &i.OffchainTxVw.FailReason, + &i.OffchainTxVw.StageCode, + &i.OffchainTxVw.CheckpointTxid, + &i.OffchainTxVw.CheckpointTx, + &i.OffchainTxVw.CommitmentTxid, + &i.OffchainTxVw.IsRootCommitmentTxid, + &i.OffchainTxVw.OffchainTxid, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const 
selectPendingSpentVtxo = `-- name: SelectPendingSpentVtxo :many -SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.swept, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.commitments, v.asset_id, v.asset_amount +SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.depth, v.markers, v.commitments, v.swept, v.asset_id, v.asset_amount FROM vtxo_vw v WHERE v.txid = $1 AND v.vout = $2 AND v.spent = TRUE AND v.unrolled = FALSE and COALESCE(v.settled_by, '') = '' @@ -710,13 +992,15 @@ func (q *Queries) SelectPendingSpentVtxo(ctx context.Context, arg SelectPendingS &i.SpentBy, &i.Spent, &i.Unrolled, - &i.Swept, &i.Preconfirmed, &i.SettledBy, &i.ArkTxid, &i.IntentID, &i.UpdatedAt, + &i.Depth, + &i.Markers, &i.Commitments, + &i.Swept, &i.AssetID, &i.AssetAmount, ); err != nil { @@ -734,7 +1018,7 @@ func (q *Queries) SelectPendingSpentVtxo(ctx context.Context, arg SelectPendingS } const selectPendingSpentVtxosWithPubkeys = `-- name: SelectPendingSpentVtxosWithPubkeys :many -SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.swept, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.commitments, v.asset_id, v.asset_amount +SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.depth, v.markers, v.commitments, v.swept, v.asset_id, v.asset_amount FROM vtxo_vw v WHERE v.spent = TRUE AND v.unrolled = FALSE and COALESCE(v.settled_by, '') = '' AND v.pubkey = ANY($1::varchar[]) @@ -771,13 +1055,15 @@ func (q *Queries) SelectPendingSpentVtxosWithPubkeys(ctx context.Context, arg Se &i.SpentBy, &i.Spent, &i.Unrolled, - 
&i.Swept, &i.Preconfirmed, &i.SettledBy, &i.ArkTxid, &i.IntentID, &i.UpdatedAt, + &i.Depth, + &i.Markers, &i.Commitments, + &i.Swept, &i.AssetID, &i.AssetAmount, ); err != nil { @@ -795,10 +1081,13 @@ func (q *Queries) SelectPendingSpentVtxosWithPubkeys(ctx context.Context, arg Se } const selectRecoverableLiquidityAmount = `-- name: SelectRecoverableLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0)::bigint AS amount -FROM vtxo -WHERE swept = true - AND spent = false +SELECT COALESCE(SUM(v.amount), 0)::bigint AS amount +FROM vtxo v +WHERE EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) + AND v.spent = false ` func (q *Queries) SelectRecoverableLiquidityAmount(ctx context.Context) (int64, error) { @@ -1112,7 +1401,7 @@ func (q *Queries) SelectRoundVtxoTree(ctx context.Context, txid string) ([]Tx, e } const selectRoundVtxoTreeLeaves = `-- name: SelectRoundVtxoTreeLeaves :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE commitment_txid = $1 AND preconfirmed = false +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE commitment_txid = $1 AND preconfirmed = false ` type SelectRoundVtxoTreeLeavesRow struct { @@ -1139,13 +1428,15 @@ func (q *Queries) SelectRoundVtxoTreeLeaves(ctx context.Context, commitmentTxid &i.VtxoVw.SpentBy, 
&i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1167,7 +1458,7 @@ SELECT round.id, round.starting_timestamp, round.ending_timestamp, round.ended, round_intents_vw.id, round_intents_vw.round_id, round_intents_vw.proof, round_intents_vw.message, round_intents_vw.txid, round_txs_vw.txid, round_txs_vw.tx, round_txs_vw.round_id, round_txs_vw.type, round_txs_vw.position, round_txs_vw.children, intent_with_receivers_vw.intent_id, intent_with_receivers_vw.pubkey, intent_with_receivers_vw.onchain_address, intent_with_receivers_vw.amount, intent_with_receivers_vw.id, intent_with_receivers_vw.round_id, intent_with_receivers_vw.proof, intent_with_receivers_vw.message, intent_with_receivers_vw.txid, - intent_with_inputs_vw.txid, intent_with_inputs_vw.vout, intent_with_inputs_vw.pubkey, intent_with_inputs_vw.amount, intent_with_inputs_vw.expires_at, intent_with_inputs_vw.created_at, intent_with_inputs_vw.commitment_txid, intent_with_inputs_vw.spent_by, intent_with_inputs_vw.spent, intent_with_inputs_vw.unrolled, intent_with_inputs_vw.swept, intent_with_inputs_vw.preconfirmed, intent_with_inputs_vw.settled_by, intent_with_inputs_vw.ark_txid, intent_with_inputs_vw.intent_id, intent_with_inputs_vw.updated_at, intent_with_inputs_vw.commitments, intent_with_inputs_vw.asset_id, intent_with_inputs_vw.asset_amount, intent_with_inputs_vw.id, intent_with_inputs_vw.round_id, intent_with_inputs_vw.proof, intent_with_inputs_vw.message, intent_with_inputs_vw.intent_txid + intent_with_inputs_vw.txid, intent_with_inputs_vw.vout, intent_with_inputs_vw.pubkey, intent_with_inputs_vw.amount, intent_with_inputs_vw.expires_at, intent_with_inputs_vw.created_at, intent_with_inputs_vw.commitment_txid, intent_with_inputs_vw.spent_by, 
intent_with_inputs_vw.spent, intent_with_inputs_vw.unrolled, intent_with_inputs_vw.preconfirmed, intent_with_inputs_vw.settled_by, intent_with_inputs_vw.ark_txid, intent_with_inputs_vw.intent_id, intent_with_inputs_vw.updated_at, intent_with_inputs_vw.depth, intent_with_inputs_vw.markers, intent_with_inputs_vw.commitments, intent_with_inputs_vw.swept, intent_with_inputs_vw.asset_id, intent_with_inputs_vw.asset_amount, intent_with_inputs_vw.id, intent_with_inputs_vw.round_id, intent_with_inputs_vw.proof, intent_with_inputs_vw.message, intent_with_inputs_vw.intent_txid FROM round LEFT OUTER JOIN round_intents_vw ON round.id=round_intents_vw.round_id LEFT OUTER JOIN round_txs_vw ON round.id=round_txs_vw.round_id @@ -1235,13 +1526,15 @@ func (q *Queries) SelectRoundWithId(ctx context.Context, id string) ([]SelectRou &i.IntentWithInputsVw.SpentBy, &i.IntentWithInputsVw.Spent, &i.IntentWithInputsVw.Unrolled, - &i.IntentWithInputsVw.Swept, &i.IntentWithInputsVw.Preconfirmed, &i.IntentWithInputsVw.SettledBy, &i.IntentWithInputsVw.ArkTxid, &i.IntentWithInputsVw.IntentID, &i.IntentWithInputsVw.UpdatedAt, + &i.IntentWithInputsVw.Depth, + &i.IntentWithInputsVw.Markers, &i.IntentWithInputsVw.Commitments, + &i.IntentWithInputsVw.Swept, &i.IntentWithInputsVw.AssetID, &i.IntentWithInputsVw.AssetAmount, &i.IntentWithInputsVw.ID, @@ -1268,7 +1561,7 @@ SELECT round.id, round.starting_timestamp, round.ending_timestamp, round.ended, round_intents_vw.id, round_intents_vw.round_id, round_intents_vw.proof, round_intents_vw.message, round_intents_vw.txid, round_txs_vw.txid, round_txs_vw.tx, round_txs_vw.round_id, round_txs_vw.type, round_txs_vw.position, round_txs_vw.children, intent_with_receivers_vw.intent_id, intent_with_receivers_vw.pubkey, intent_with_receivers_vw.onchain_address, intent_with_receivers_vw.amount, intent_with_receivers_vw.id, intent_with_receivers_vw.round_id, intent_with_receivers_vw.proof, intent_with_receivers_vw.message, intent_with_receivers_vw.txid, - 
intent_with_inputs_vw.txid, intent_with_inputs_vw.vout, intent_with_inputs_vw.pubkey, intent_with_inputs_vw.amount, intent_with_inputs_vw.expires_at, intent_with_inputs_vw.created_at, intent_with_inputs_vw.commitment_txid, intent_with_inputs_vw.spent_by, intent_with_inputs_vw.spent, intent_with_inputs_vw.unrolled, intent_with_inputs_vw.swept, intent_with_inputs_vw.preconfirmed, intent_with_inputs_vw.settled_by, intent_with_inputs_vw.ark_txid, intent_with_inputs_vw.intent_id, intent_with_inputs_vw.updated_at, intent_with_inputs_vw.commitments, intent_with_inputs_vw.asset_id, intent_with_inputs_vw.asset_amount, intent_with_inputs_vw.id, intent_with_inputs_vw.round_id, intent_with_inputs_vw.proof, intent_with_inputs_vw.message, intent_with_inputs_vw.intent_txid + intent_with_inputs_vw.txid, intent_with_inputs_vw.vout, intent_with_inputs_vw.pubkey, intent_with_inputs_vw.amount, intent_with_inputs_vw.expires_at, intent_with_inputs_vw.created_at, intent_with_inputs_vw.commitment_txid, intent_with_inputs_vw.spent_by, intent_with_inputs_vw.spent, intent_with_inputs_vw.unrolled, intent_with_inputs_vw.preconfirmed, intent_with_inputs_vw.settled_by, intent_with_inputs_vw.ark_txid, intent_with_inputs_vw.intent_id, intent_with_inputs_vw.updated_at, intent_with_inputs_vw.depth, intent_with_inputs_vw.markers, intent_with_inputs_vw.commitments, intent_with_inputs_vw.swept, intent_with_inputs_vw.asset_id, intent_with_inputs_vw.asset_amount, intent_with_inputs_vw.id, intent_with_inputs_vw.round_id, intent_with_inputs_vw.proof, intent_with_inputs_vw.message, intent_with_inputs_vw.intent_txid FROM round LEFT OUTER JOIN round_intents_vw ON round.id=round_intents_vw.round_id LEFT OUTER JOIN round_txs_vw ON round.id=round_txs_vw.round_id @@ -1338,13 +1631,15 @@ func (q *Queries) SelectRoundWithTxid(ctx context.Context, txid string) ([]Selec &i.IntentWithInputsVw.SpentBy, &i.IntentWithInputsVw.Spent, &i.IntentWithInputsVw.Unrolled, - &i.IntentWithInputsVw.Swept, 
&i.IntentWithInputsVw.Preconfirmed, &i.IntentWithInputsVw.SettledBy, &i.IntentWithInputsVw.ArkTxid, &i.IntentWithInputsVw.IntentID, &i.IntentWithInputsVw.UpdatedAt, + &i.IntentWithInputsVw.Depth, + &i.IntentWithInputsVw.Markers, &i.IntentWithInputsVw.Commitments, + &i.IntentWithInputsVw.Swept, &i.IntentWithInputsVw.AssetID, &i.IntentWithInputsVw.AssetAmount, &i.IntentWithInputsVw.ID, @@ -1426,7 +1721,7 @@ func (q *Queries) SelectSweepableRounds(ctx context.Context) ([]string, error) { } const selectSweepableUnrolledVtxos = `-- name: SelectSweepableUnrolledVtxos :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE spent = true AND unrolled = true AND swept = false AND COALESCE(settled_by, '') = '' +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE spent = true AND unrolled = true AND swept = false AND COALESCE(settled_by, '') = '' ` type SelectSweepableUnrolledVtxosRow struct { @@ -1453,13 +1748,15 @@ func (q *Queries) SelectSweepableUnrolledVtxos(ctx context.Context) ([]SelectSwe &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != 
nil { @@ -1512,6 +1809,44 @@ func (q *Queries) SelectSweepableVtxoOutpointsByCommitmentTxid(ctx context.Conte return items, nil } +const selectSweptMarker = `-- name: SelectSweptMarker :one +SELECT marker_id, swept_at FROM swept_marker WHERE marker_id = $1 +` + +func (q *Queries) SelectSweptMarker(ctx context.Context, markerID string) (SweptMarker, error) { + row := q.db.QueryRowContext(ctx, selectSweptMarker, markerID) + var i SweptMarker + err := row.Scan(&i.MarkerID, &i.SweptAt) + return i, err +} + +const selectSweptMarkersByIds = `-- name: SelectSweptMarkersByIds :many +SELECT marker_id, swept_at FROM swept_marker WHERE marker_id = ANY($1::text[]) +` + +func (q *Queries) SelectSweptMarkersByIds(ctx context.Context, markerIds []string) ([]SweptMarker, error) { + rows, err := q.db.QueryContext(ctx, selectSweptMarkersByIds, pq.Array(markerIds)) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SweptMarker + for rows.Next() { + var i SweptMarker + if err := rows.Scan(&i.MarkerID, &i.SweptAt); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectSweptRoundsConnectorAddress = `-- name: SelectSweptRoundsConnectorAddress :many SELECT round.connector_address FROM round WHERE round.swept = true AND round.failed = false AND round.ended = true AND round.connector_address <> '' @@ -1577,7 +1912,7 @@ func (q *Queries) SelectTxs(ctx context.Context, dollar_1 []string) ([]SelectTxs } const selectVtxo = `-- name: SelectVtxo :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM 
vtxo_vw WHERE txid = $1 AND vout = $2 +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE txid = $1 AND vout = $2 ` type SelectVtxoParams struct { @@ -1609,13 +1944,15 @@ func (q *Queries) SelectVtxo(ctx context.Context, arg SelectVtxoParams) ([]Selec &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1632,6 +1969,58 @@ func (q *Queries) SelectVtxo(ctx context.Context, arg SelectVtxoParams) ([]Selec return items, nil } +const selectVtxoChainByMarker = `-- name: SelectVtxoChainByMarker :many +SELECT txid, vout, pubkey, amount, expires_at, created_at, commitment_txid, spent_by, spent, unrolled, preconfirmed, settled_by, ark_txid, intent_id, updated_at, depth, markers, commitments, swept, asset_id, asset_amount FROM vtxo_vw +WHERE markers ?| $1::TEXT[] +ORDER BY depth DESC +` + +// Get VTXOs whose markers JSONB array contains any of the given marker IDs +func (q *Queries) SelectVtxoChainByMarker(ctx context.Context, markerIds []string) ([]VtxoVw, error) { + rows, err := q.db.QueryContext(ctx, selectVtxoChainByMarker, pq.Array(markerIds)) + if err != nil { + return nil, err + } + defer rows.Close() + var items []VtxoVw + for rows.Next() { + var i VtxoVw + if err := rows.Scan( + &i.Txid, + &i.Vout, + &i.Pubkey, + &i.Amount, + &i.ExpiresAt, + &i.CreatedAt, + &i.CommitmentTxid, + &i.SpentBy, + &i.Spent, + &i.Unrolled, + &i.Preconfirmed, + &i.SettledBy, + 
&i.ArkTxid, + &i.IntentID, + &i.UpdatedAt, + &i.Depth, + &i.Markers, + &i.Commitments, + &i.Swept, + &i.AssetID, + &i.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectVtxoPubKeysByCommitmentTxid = `-- name: SelectVtxoPubKeysByCommitmentTxid :many SELECT DISTINCT v.pubkey FROM vtxo_vw v @@ -1668,14 +2057,177 @@ func (q *Queries) SelectVtxoPubKeysByCommitmentTxid(ctx context.Context, arg Sel return items, nil } +const selectVtxosByArkTxid = `-- name: SelectVtxosByArkTxid :many +SELECT txid, vout, pubkey, amount, expires_at, created_at, commitment_txid, spent_by, spent, unrolled, preconfirmed, settled_by, ark_txid, intent_id, updated_at, depth, markers, commitments, swept, asset_id, asset_amount FROM vtxo_vw WHERE ark_txid = $1 +` + +// Get all VTXOs created by a specific ark tx (offchain tx) +func (q *Queries) SelectVtxosByArkTxid(ctx context.Context, arkTxid sql.NullString) ([]VtxoVw, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosByArkTxid, arkTxid) + if err != nil { + return nil, err + } + defer rows.Close() + var items []VtxoVw + for rows.Next() { + var i VtxoVw + if err := rows.Scan( + &i.Txid, + &i.Vout, + &i.Pubkey, + &i.Amount, + &i.ExpiresAt, + &i.CreatedAt, + &i.CommitmentTxid, + &i.SpentBy, + &i.Spent, + &i.Unrolled, + &i.Preconfirmed, + &i.SettledBy, + &i.ArkTxid, + &i.IntentID, + &i.UpdatedAt, + &i.Depth, + &i.Markers, + &i.Commitments, + &i.Swept, + &i.AssetID, + &i.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectVtxosByDepthRange = `-- name: SelectVtxosByDepthRange :many + +SELECT txid, vout, pubkey, amount, expires_at, created_at, commitment_txid, spent_by, 
spent, unrolled, preconfirmed, settled_by, ark_txid, intent_id, updated_at, depth, markers, commitments, swept, asset_id, asset_amount FROM vtxo_vw +WHERE depth >= $1 AND depth <= $2 +ORDER BY depth DESC +` + +type SelectVtxosByDepthRangeParams struct { + MinDepth int32 + MaxDepth int32 +} + +// Chain traversal queries for GetVtxoChain optimization +// Get all VTXOs within a depth range, useful for filling gaps between markers +func (q *Queries) SelectVtxosByDepthRange(ctx context.Context, arg SelectVtxosByDepthRangeParams) ([]VtxoVw, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosByDepthRange, arg.MinDepth, arg.MaxDepth) + if err != nil { + return nil, err + } + defer rows.Close() + var items []VtxoVw + for rows.Next() { + var i VtxoVw + if err := rows.Scan( + &i.Txid, + &i.Vout, + &i.Pubkey, + &i.Amount, + &i.ExpiresAt, + &i.CreatedAt, + &i.CommitmentTxid, + &i.SpentBy, + &i.Spent, + &i.Unrolled, + &i.Preconfirmed, + &i.SettledBy, + &i.ArkTxid, + &i.IntentID, + &i.UpdatedAt, + &i.Depth, + &i.Markers, + &i.Commitments, + &i.Swept, + &i.AssetID, + &i.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectVtxosByMarkerId = `-- name: SelectVtxosByMarkerId :many +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE markers @> jsonb_build_array($1::TEXT) +` + +type SelectVtxosByMarkerIdRow struct { + VtxoVw VtxoVw +} + +// Find VTXOs whose markers JSONB array contains the given marker_id +func (q *Queries) 
SelectVtxosByMarkerId(ctx context.Context, markerID string) ([]SelectVtxosByMarkerIdRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosByMarkerId, markerID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectVtxosByMarkerIdRow + for rows.Next() { + var i SelectVtxosByMarkerIdRow + if err := rows.Scan( + &i.VtxoVw.Txid, + &i.VtxoVw.Vout, + &i.VtxoVw.Pubkey, + &i.VtxoVw.Amount, + &i.VtxoVw.ExpiresAt, + &i.VtxoVw.CreatedAt, + &i.VtxoVw.CommitmentTxid, + &i.VtxoVw.SpentBy, + &i.VtxoVw.Spent, + &i.VtxoVw.Unrolled, + &i.VtxoVw.Preconfirmed, + &i.VtxoVw.SettledBy, + &i.VtxoVw.ArkTxid, + &i.VtxoVw.IntentID, + &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, + &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, + &i.VtxoVw.AssetID, + &i.VtxoVw.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectVtxosOutpointsByArkTxidRecursive = `-- name: SelectVtxosOutpointsByArkTxidRecursive :many WITH RECURSIVE descendants_chain AS ( - -- seed + -- seed: only the specific outpoint, not all vouts of the txid SELECT v.txid, v.vout, v.preconfirmed, v.ark_txid, v.spent_by, 0 AS depth, ARRAY[(v.txid||':'||v.vout)]::text[] AS visited FROM vtxo v - WHERE v.txid = $1 + WHERE v.txid = $1 AND v.vout = $2 UNION ALL @@ -1700,14 +2252,23 @@ FROM nodes ORDER BY depth, txid, vout ` +type SelectVtxosOutpointsByArkTxidRecursiveParams struct { + Txid string + Vout int32 +} + type SelectVtxosOutpointsByArkTxidRecursiveRow struct { Txid string Vout int32 } +// Returns the seed outpoint (txid, vout) and all VTXOs descending from it +// via ark_txid links. Scoped to a single outpoint (not the whole txid) so that +// sibling outputs of the seed tx, which belong to independent lineages, are +// not included. 
// keep one row per node at its MIN depth (layers) -func (q *Queries) SelectVtxosOutpointsByArkTxidRecursive(ctx context.Context, txid string) ([]SelectVtxosOutpointsByArkTxidRecursiveRow, error) { - rows, err := q.db.QueryContext(ctx, selectVtxosOutpointsByArkTxidRecursive, txid) +func (q *Queries) SelectVtxosOutpointsByArkTxidRecursive(ctx context.Context, arg SelectVtxosOutpointsByArkTxidRecursiveParams) ([]SelectVtxosOutpointsByArkTxidRecursiveRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosOutpointsByArkTxidRecursive, arg.Txid, arg.Vout) if err != nil { return nil, err } @@ -1730,7 +2291,7 @@ func (q *Queries) SelectVtxosOutpointsByArkTxidRecursive(ctx context.Context, tx } const selectVtxosWithPubkeys = `-- name: SelectVtxosWithPubkeys :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE vtxo_vw.pubkey = ANY($1::varchar[]) AND vtxo_vw.updated_at >= $2::bigint AND ($3::bigint = 0 OR vtxo_vw.updated_at <= $3::bigint) @@ -1766,13 +2327,15 @@ func (q *Queries) SelectVtxosWithPubkeys(ctx context.Context, arg SelectVtxosWit &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + 
&i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1828,6 +2391,21 @@ func (q *Queries) UpdateVtxoIntentId(ctx context.Context, arg UpdateVtxoIntentId return err } +const updateVtxoMarkers = `-- name: UpdateVtxoMarkers :exec +UPDATE vtxo SET markers = $1::jsonb WHERE txid = $2 AND vout = $3 +` + +type UpdateVtxoMarkersParams struct { + Markers json.RawMessage + Txid string + Vout int32 +} + +func (q *Queries) UpdateVtxoMarkers(ctx context.Context, arg UpdateVtxoMarkersParams) error { + _, err := q.db.ExecContext(ctx, updateVtxoMarkers, arg.Markers, arg.Txid, arg.Vout) + return err +} + const updateVtxoSettled = `-- name: UpdateVtxoSettled :exec UPDATE vtxo SET spent = true, spent_by = $1, settled_by = $2, updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT WHERE txid = $3 AND vout = $4 @@ -1872,23 +2450,6 @@ func (q *Queries) UpdateVtxoSpent(ctx context.Context, arg UpdateVtxoSpentParams return err } -const updateVtxoSweptIfNotSwept = `-- name: UpdateVtxoSweptIfNotSwept :execrows -UPDATE vtxo SET swept = true, updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT WHERE txid = $1 AND vout = $2 AND swept = false -` - -type UpdateVtxoSweptIfNotSweptParams struct { - Txid string - Vout int32 -} - -func (q *Queries) UpdateVtxoSweptIfNotSwept(ctx context.Context, arg UpdateVtxoSweptIfNotSweptParams) (int64, error) { - result, err := q.db.ExecContext(ctx, updateVtxoSweptIfNotSwept, arg.Txid, arg.Vout) - if err != nil { - return 0, err - } - return result.RowsAffected() -} - const updateVtxoUnrolled = `-- name: UpdateVtxoUnrolled :exec UPDATE vtxo SET unrolled = true, updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT WHERE txid = $1 AND vout = $2 ` @@ -1997,6 +2558,27 @@ func (q *Queries) UpsertIntent(ctx context.Context, arg UpsertIntentParams) erro return err } +const upsertMarker = `-- name: UpsertMarker :exec + +INSERT INTO marker (id, depth, parent_markers) +VALUES ($1, $2, $3) +ON 
CONFLICT(id) DO UPDATE SET + depth = EXCLUDED.depth, + parent_markers = EXCLUDED.parent_markers +` + +type UpsertMarkerParams struct { + ID string + Depth int32 + ParentMarkers pqtype.NullRawMessage +} + +// Marker queries +func (q *Queries) UpsertMarker(ctx context.Context, arg UpsertMarkerParams) error { + _, err := q.db.ExecContext(ctx, upsertMarker, arg.ID, arg.Depth, arg.ParentMarkers) + return err +} + const upsertOffchainTx = `-- name: UpsertOffchainTx :exec INSERT INTO offchain_tx (txid, tx, starting_timestamp, ending_timestamp, expiry_timestamp, fail_reason, stage_code) VALUES ($1, $2, $3, $4, $5, $6, $7) @@ -2184,11 +2766,11 @@ func (q *Queries) UpsertTx(ctx context.Context, arg UpsertTxParams) error { const upsertVtxo = `-- name: UpsertVtxo :exec INSERT INTO vtxo ( txid, vout, pubkey, amount, commitment_txid, settled_by, ark_txid, - spent_by, spent, unrolled, swept, preconfirmed, expires_at, created_at, updated_at + spent_by, spent, unrolled, preconfirmed, expires_at, created_at, updated_at, depth, markers ) VALUES ( $1, $2, $3, $4, $5, $6, $7, - $8, $9, $10, $11, $12, $13, $14, (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT + $8, $9, $10, $11, $12, $13, (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT, $14, $15::jsonb ) ON CONFLICT(txid, vout) DO UPDATE SET pubkey = EXCLUDED.pubkey, amount = EXCLUDED.amount, @@ -2198,11 +2780,12 @@ VALUES ( spent_by = EXCLUDED.spent_by, spent = EXCLUDED.spent, unrolled = EXCLUDED.unrolled, - swept = EXCLUDED.swept, preconfirmed = EXCLUDED.preconfirmed, expires_at = EXCLUDED.expires_at, created_at = EXCLUDED.created_at, - updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT + updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT, + depth = EXCLUDED.depth, + markers = EXCLUDED.markers ` type UpsertVtxoParams struct { @@ -2216,10 +2799,11 @@ type UpsertVtxoParams struct { SpentBy sql.NullString Spent bool Unrolled bool - Swept bool Preconfirmed bool ExpiresAt int64 CreatedAt int64 + Depth int32 + Markers json.RawMessage } func 
(q *Queries) UpsertVtxo(ctx context.Context, arg UpsertVtxoParams) error { @@ -2234,10 +2818,11 @@ func (q *Queries) UpsertVtxo(ctx context.Context, arg UpsertVtxoParams) error { arg.SpentBy, arg.Spent, arg.Unrolled, - arg.Swept, arg.Preconfirmed, arg.ExpiresAt, arg.CreatedAt, + arg.Depth, + arg.Markers, ) return err } diff --git a/internal/infrastructure/db/postgres/sqlc/query.sql b/internal/infrastructure/db/postgres/sqlc/query.sql index f24c4f701..5806708ec 100644 --- a/internal/infrastructure/db/postgres/sqlc/query.sql +++ b/internal/infrastructure/db/postgres/sqlc/query.sql @@ -48,11 +48,11 @@ ON CONFLICT(intent_id, pubkey, onchain_address) DO UPDATE SET -- name: UpsertVtxo :exec INSERT INTO vtxo ( txid, vout, pubkey, amount, commitment_txid, settled_by, ark_txid, - spent_by, spent, unrolled, swept, preconfirmed, expires_at, created_at, updated_at + spent_by, spent, unrolled, preconfirmed, expires_at, created_at, updated_at, depth, markers ) VALUES ( @txid, @vout, @pubkey, @amount, @commitment_txid, @settled_by, @ark_txid, - @spent_by, @spent, @unrolled, @swept, @preconfirmed, @expires_at, @created_at, (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT + @spent_by, @spent, @unrolled, @preconfirmed, @expires_at, @created_at, (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT, @depth, @markers::jsonb ) ON CONFLICT(txid, vout) DO UPDATE SET pubkey = EXCLUDED.pubkey, amount = EXCLUDED.amount, @@ -62,11 +62,12 @@ VALUES ( spent_by = EXCLUDED.spent_by, spent = EXCLUDED.spent, unrolled = EXCLUDED.unrolled, - swept = EXCLUDED.swept, preconfirmed = EXCLUDED.preconfirmed, expires_at = EXCLUDED.expires_at, created_at = EXCLUDED.created_at, - updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT; + updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT, + depth = EXCLUDED.depth, + markers = EXCLUDED.markers; -- name: InsertVtxoCommitmentTxid :exec INSERT INTO vtxo_commitment_txid (vtxo_txid, vtxo_vout, commitment_txid) @@ -116,9 +117,6 @@ UPDATE vtxo SET expires_at = @expires_at WHERE 
txid = @txid AND vout = @vout; -- name: UpdateVtxoUnrolled :exec UPDATE vtxo SET unrolled = true, updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT WHERE txid = @txid AND vout = @vout; --- name: UpdateVtxoSweptIfNotSwept :execrows -UPDATE vtxo SET swept = true, updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT WHERE txid = @txid AND vout = @vout AND swept = false; - -- name: UpdateVtxoSettled :exec UPDATE vtxo SET spent = true, spent_by = @spent_by, settled_by = @settled_by, updated_at = (EXTRACT(EPOCH FROM NOW()) * 1000)::BIGINT WHERE txid = @txid AND vout = @vout; @@ -261,23 +259,32 @@ WHERE vtxo_vw.pubkey = ANY($1::varchar[]) AND (@before::bigint = 0 OR vtxo_vw.updated_at <= @before::bigint); -- name: SelectExpiringLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0)::bigint AS amount -FROM vtxo -WHERE swept = false - AND spent = false - AND unrolled = false - AND expires_at > @after - AND (@before <= 0 OR expires_at < @before); +SELECT COALESCE(SUM(v.amount), 0)::bigint AS amount +FROM vtxo v +WHERE NOT EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) + AND v.spent = false + AND v.unrolled = false + AND v.expires_at > @after + AND (@before <= 0 OR v.expires_at < @before); -- name: SelectRecoverableLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0)::bigint AS amount -FROM vtxo -WHERE swept = true - AND spent = false; +SELECT COALESCE(SUM(v.amount), 0)::bigint AS amount +FROM vtxo v +WHERE EXISTS ( + SELECT 1 FROM swept_marker sm + WHERE v.markers @> jsonb_build_array(sm.marker_id) + ) + AND v.spent = false; -- name: SelectOffchainTx :many SELECT sqlc.embed(offchain_tx_vw) FROM offchain_tx_vw WHERE txid = @txid AND COALESCE(fail_reason, '') = ''; +-- name: SelectOffchainTxsByTxids :many +SELECT sqlc.embed(offchain_tx_vw) FROM offchain_tx_vw WHERE txid = ANY(@txids::varchar[]) AND COALESCE(fail_reason, '') = ''; + -- name: SelectLatestScheduledSession :one SELECT * FROM scheduled_session ORDER BY 
updated_at DESC LIMIT 1; @@ -296,13 +303,17 @@ WHERE v.swept = false OR (',' || COALESCE(v.commitments::text, '') || ',') LIKE '%,' || @commitment_txid || ',%'); -- name: SelectVtxosOutpointsByArkTxidRecursive :many +-- Returns the seed outpoint (txid, vout) and all VTXOs descending from it +-- via ark_txid links. Scoped to a single outpoint (not the whole txid) so that +-- sibling outputs of the seed tx, which belong to independent lineages, are +-- not included. WITH RECURSIVE descendants_chain AS ( - -- seed + -- seed: only the specific outpoint, not all vouts of the txid SELECT v.txid, v.vout, v.preconfirmed, v.ark_txid, v.spent_by, 0 AS depth, ARRAY[(v.txid||':'||v.vout)]::text[] AS visited FROM vtxo v - WHERE v.txid = @txid + WHERE v.txid = @txid AND v.vout = @vout UNION ALL @@ -430,6 +441,93 @@ VALUES ('', '', '', ''); SELECT id, txid, proof, message FROM intent WHERE txid = @txid; +-- Marker queries + +-- name: UpsertMarker :exec +INSERT INTO marker (id, depth, parent_markers) +VALUES (@id, @depth, @parent_markers) +ON CONFLICT(id) DO UPDATE SET + depth = EXCLUDED.depth, + parent_markers = EXCLUDED.parent_markers; + +-- name: SelectMarker :one +SELECT * FROM marker WHERE id = @id; + +-- name: SelectMarkersByDepth :many +SELECT * FROM marker WHERE depth = @depth; + +-- name: SelectMarkersByDepthRange :many +SELECT * FROM marker WHERE depth >= @min_depth AND depth <= @max_depth ORDER BY depth; + +-- name: SelectMarkersByIds :many +SELECT * FROM marker WHERE id = ANY(@ids::text[]); + +-- name: InsertSweptMarker :exec +INSERT INTO swept_marker (marker_id, swept_at) +VALUES (@marker_id, @swept_at) +ON CONFLICT(marker_id) DO NOTHING; + +-- name: BulkInsertSweptMarkers :exec +INSERT INTO swept_marker (marker_id, swept_at) +SELECT unnest(@marker_ids::text[]), @swept_at +ON CONFLICT(marker_id) DO NOTHING; + +-- name: SelectSweptMarker :one +SELECT * FROM swept_marker WHERE marker_id = @marker_id; + +-- name: SelectSweptMarkersByIds :many +SELECT * FROM swept_marker 
WHERE marker_id = ANY(@marker_ids::text[]); + +-- name: IsMarkerSwept :one +SELECT EXISTS(SELECT 1 FROM swept_marker WHERE marker_id = @marker_id) AS is_swept; + +-- name: GetDescendantMarkerIds :many +-- Recursively get a marker and all its descendants (markers whose parent_markers contain it). +-- Uses UNION (set semantics, not UNION ALL) so rows already produced are filtered, +-- which makes this cycle-safe. Do not convert to UNION ALL: cycles in parent_markers +-- would cause the recursion to run unbounded. +WITH RECURSIVE descendant_markers(id) AS ( + -- Base case: the marker being swept + SELECT marker.id FROM marker WHERE marker.id = @root_marker_id + UNION + -- Recursive case: find markers whose parent_markers jsonb array contains any descendant + SELECT m.id FROM marker m + INNER JOIN descendant_markers dm ON ( + m.parent_markers @> jsonb_build_array(dm.id) + ) +) +SELECT descendant_markers.id AS marker_id FROM descendant_markers +WHERE descendant_markers.id NOT IN (SELECT sm.marker_id FROM swept_marker sm); + +-- name: UpdateVtxoMarkers :exec +UPDATE vtxo SET markers = @markers::jsonb WHERE txid = @txid AND vout = @vout; + +-- name: SelectVtxosByMarkerId :many +-- Find VTXOs whose markers JSONB array contains the given marker_id +SELECT sqlc.embed(vtxo_vw) FROM vtxo_vw WHERE markers @> jsonb_build_array(@marker_id::TEXT); + +-- name: CountUnsweptVtxosByMarkerId :one +-- Count VTXOs whose markers JSONB array contains the given marker_id and are not swept +SELECT COUNT(DISTINCT (txid, vout)) FROM vtxo_vw WHERE markers @> jsonb_build_array(@marker_id::TEXT) AND swept = false; + +-- Chain traversal queries for GetVtxoChain optimization + +-- name: SelectVtxosByDepthRange :many +-- Get all VTXOs within a depth range, useful for filling gaps between markers +SELECT * FROM vtxo_vw +WHERE depth >= @min_depth AND depth <= @max_depth +ORDER BY depth DESC; + +-- name: SelectVtxosByArkTxid :many +-- Get all VTXOs created by a specific ark tx (offchain tx) +SELECT * 
FROM vtxo_vw WHERE ark_txid = @ark_txid; + +-- name: SelectVtxoChainByMarker :many +-- Get VTXOs whose markers JSONB array contains any of the given marker IDs +SELECT * FROM vtxo_vw +WHERE markers ?| @marker_ids::TEXT[] +ORDER BY depth DESC; + -- name: InsertAsset :exec INSERT INTO asset (id, is_immutable, metadata_hash, metadata, control_asset_id) VALUES (@id, @is_immutable, @metadata_hash, @metadata, @control_asset_id); @@ -451,4 +549,14 @@ WHERE ap.asset_id = $1 AND v.spent = false; SELECT control_asset_id FROM asset WHERE id = $1; -- name: SelectAssetExists :one -SELECT 1 FROM asset WHERE id = $1 LIMIT 1; \ No newline at end of file +SELECT 1 FROM asset WHERE id = $1 LIMIT 1; + +-- name: InsertSweptVtxo :exec +INSERT INTO swept_vtxo (txid, vout, swept_at) +VALUES (@txid, @vout, @swept_at) +ON CONFLICT(txid, vout) DO NOTHING; + +-- name: BulkInsertSweptVtxos :exec +INSERT INTO swept_vtxo (txid, vout, swept_at) +SELECT unnest(@txids::text[]), unnest(@vouts::integer[]), @swept_at +ON CONFLICT(txid, vout) DO NOTHING; diff --git a/internal/infrastructure/db/postgres/vtxo_repo.go b/internal/infrastructure/db/postgres/vtxo_repo.go index 158872e7d..529eb23d9 100644 --- a/internal/infrastructure/db/postgres/vtxo_repo.go +++ b/internal/infrastructure/db/postgres/vtxo_repo.go @@ -3,6 +3,7 @@ package pgdb import ( "context" "database/sql" + "encoding/json" "errors" "fmt" "sort" @@ -10,6 +11,7 @@ import ( "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/internal/infrastructure/db/postgres/sqlc/queries" + log "github.com/sirupsen/logrus" ) type vtxoRepository struct { @@ -41,6 +43,16 @@ func (v *vtxoRepository) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) erro for i := range vtxos { vtxo := vtxos[i] + markersToMarshal := vtxo.MarkerIDs + if markersToMarshal == nil { + markersToMarshal = []string{} + } + data, err := json.Marshal(markersToMarshal) + if err != nil { + return fmt.Errorf("failed to marshal markers: %w", err) + } + markersJSON := 
json.RawMessage(data) + if err := querierWithTx.UpsertVtxo( ctx, queries.UpsertVtxoParams{ Txid: vtxo.Txid, @@ -50,7 +62,6 @@ func (v *vtxoRepository) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) erro CommitmentTxid: vtxo.RootCommitmentTxid, Spent: vtxo.Spent, Unrolled: vtxo.Unrolled, - Swept: vtxo.Swept, Preconfirmed: vtxo.Preconfirmed, ExpiresAt: vtxo.ExpiresAt, CreatedAt: vtxo.CreatedAt, @@ -63,6 +74,8 @@ func (v *vtxoRepository) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) erro ArkTxid: sql.NullString{ String: vtxo.ArkTxid, Valid: len(vtxo.ArkTxid) > 0, }, + Depth: int32(vtxo.Depth), + Markers: markersJSON, }, ); err != nil { return err @@ -316,35 +329,6 @@ func (v *vtxoRepository) SpendVtxos( return execTx(ctx, v.db, txBody) } -func (v *vtxoRepository) SweepVtxos(ctx context.Context, vtxos []domain.Outpoint) (int, error) { - sweptCount := 0 - txBody := func(querierWithTx *queries.Queries) error { - for _, outpoint := range vtxos { - affectedRows, err := querierWithTx.UpdateVtxoSweptIfNotSwept( - ctx, - queries.UpdateVtxoSweptIfNotSweptParams{ - Txid: outpoint.Txid, - Vout: int32(outpoint.VOut), - }, - ) - if err != nil { - return err - } - if affectedRows > 0 { - sweptCount++ - } - } - - return nil - } - - if err := execTx(ctx, v.db, txBody); err != nil { - return -1, err - } - - return sweptCount, nil -} - func (v *vtxoRepository) UpdateVtxosExpiration( ctx context.Context, vtxos []domain.Outpoint, expiresAt int64, ) error { @@ -422,9 +406,15 @@ func (v *vtxoRepository) GetSweepableVtxosByCommitmentTxid( } func (v *vtxoRepository) GetAllChildrenVtxos( - ctx context.Context, txid string, + ctx context.Context, outpoint domain.Outpoint, ) ([]domain.Outpoint, error) { - res, err := v.querier.SelectVtxosOutpointsByArkTxidRecursive(ctx, txid) + res, err := v.querier.SelectVtxosOutpointsByArkTxidRecursive( + ctx, + queries.SelectVtxosOutpointsByArkTxidRecursiveParams{ + Txid: outpoint.Txid, + Vout: int32(outpoint.VOut), + }, + ) if err != nil { return 
nil, err } @@ -544,10 +534,12 @@ func rowToVtxo(row queries.VtxoVw) domain.Vtxo { SpentBy: row.SpentBy.String, Spent: row.Spent, Unrolled: row.Unrolled, - Swept: row.Swept, + Swept: row.Swept.Bool, Preconfirmed: row.Preconfirmed, ExpiresAt: row.ExpiresAt, CreatedAt: row.CreatedAt, + Depth: uint32(row.Depth), + MarkerIDs: parseMarkersJSONBFromVtxo(row.Markers), Assets: assets, } } @@ -561,6 +553,21 @@ func rowToAsset(row queries.VtxoVw) domain.AssetDenomination { } } +// parseMarkersJSONBFromVtxo parses a JSONB array into a slice of strings for vtxo repo. +// Logs and returns nil if the JSON is malformed so that corrupt markers are +// surfaced instead of silently treated as empty. +func parseMarkersJSONBFromVtxo(markers json.RawMessage) []string { + if len(markers) == 0 { + return nil + } + var markerIDs []string + if err := json.Unmarshal(markers, &markerIDs); err != nil { + log.WithError(err).Warnf("failed to parse markers JSONB: %q", string(markers)) + return nil + } + return markerIDs +} + func readRows(rows []queries.VtxoVw) ([]domain.Vtxo, error) { vtxosByOutpoint := make(map[string]domain.Vtxo) for _, row := range rows { diff --git a/internal/infrastructure/db/service.go b/internal/infrastructure/db/service.go index eb251ac71..a61367b80 100644 --- a/internal/infrastructure/db/service.go +++ b/internal/infrastructure/db/service.go @@ -78,10 +78,16 @@ var ( "sqlite": sqlitedb.NewIntentFeesRepository, "postgres": pgdb.NewIntentFeesRepository, } + markerStoreTypes = map[string]func(...interface{}) (domain.MarkerRepository, error){ + "badger": badgerdb.NewMarkerRepository, + "sqlite": sqlitedb.NewMarkerRepository, + "postgres": pgdb.NewMarkerRepository, + } ) const ( - sqliteDbFile = "sqlite.db" + sqliteDbFile = "sqlite.db" + maxProjectionRetry = 5 ) type ServiceConfig struct { @@ -96,6 +102,7 @@ type service struct { eventStore domain.EventRepository roundStore domain.RoundRepository vtxoStore domain.VtxoRepository + markerStore domain.MarkerRepository 
scheduledSessionStore domain.ScheduledSessionRepo offchainTxStore domain.OffchainTxRepository convictionStore domain.ConvictionRepository @@ -140,10 +147,14 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if !ok { return nil, fmt.Errorf("invalid data store type: %s", config.DataStoreType) } - + markerStoreFactory, ok := markerStoreTypes[config.DataStoreType] + if !ok { + return nil, fmt.Errorf("invalid data store type: %s", config.DataStoreType) + } var eventStore domain.EventRepository var roundStore domain.RoundRepository var vtxoStore domain.VtxoRepository + var markerStore domain.MarkerRepository var scheduledSessionStore domain.ScheduledSessionRepo var offchainTxStore domain.OffchainTxRepository var convictionStore domain.ConvictionRepository @@ -221,6 +232,23 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to create intent fees store: %w", err) } + // Pass the vtxo store to the marker repository so they share the same data + badgerVtxoRepo, ok := vtxoStore.(*badgerdb.VtxoRepository) + if !ok { + return nil, fmt.Errorf("failed to get badger vtxo repository") + } + markerConfig := make( + []interface{}, + len(config.DataStoreConfig), + len(config.DataStoreConfig)+1, + ) + copy(markerConfig, config.DataStoreConfig) + markerConfig = append(markerConfig, badgerVtxoRepo.GetStore()) + markerStore, err = markerStoreFactory(markerConfig...) 
+ if err != nil { + return nil, fmt.Errorf("failed to create marker store: %w", err) + } + case "postgres": if len(config.DataStoreConfig) != 3 { return nil, fmt.Errorf("invalid data store config for postgres") @@ -301,6 +329,11 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to create intent fees store: %w", err) } + markerStore, err = markerStoreFactory(db) + if err != nil { + return nil, fmt.Errorf("failed to create marker store: %w", err) + } + case "sqlite": if len(config.DataStoreConfig) != 1 { return nil, fmt.Errorf("invalid data store config") @@ -369,12 +402,17 @@ func NewService(config ServiceConfig, txDecoder ports.TxDecoder) (ports.RepoMana if err != nil { return nil, fmt.Errorf("failed to create intent fees store: %w", err) } + markerStore, err = markerStoreFactory(db) + if err != nil { + return nil, fmt.Errorf("failed to create marker store: %w", err) + } } svc := &service{ eventStore: eventStore, roundStore: roundStore, vtxoStore: vtxoStore, + markerStore: markerStore, scheduledSessionStore: scheduledSessionStore, offchainTxStore: offchainTxStore, txDecoder: txDecoder, @@ -412,6 +450,10 @@ func (s *service) Vtxos() domain.VtxoRepository { return s.vtxoStore } +func (s *service) Markers() domain.MarkerRepository { + return s.markerStore +} + func (s *service) ScheduledSession() domain.ScheduledSessionRepo { return s.scheduledSessionStore } @@ -440,6 +482,7 @@ func (s *service) Close() { s.eventStore.Close() s.roundStore.Close() s.vtxoStore.Close() + s.markerStore.Close() s.scheduledSessionStore.Close() s.offchainTxStore.Close() s.convictionStore.Close() @@ -448,194 +491,277 @@ func (s *service) Close() { func (s *service) updateProjectionsAfterRoundEvents(events []domain.Event) { ctx := context.Background() round := domain.NewRoundFromEvents(events) - updateFn := func() bool { - if err := s.roundStore.AddOrUpdateRound(ctx, *round); err != nil { - 
log.WithError(err).Errorf("failed to add or update round %s", round.Id) - return false - } - log.Debugf("added or updated round %s", round.Id) - if !round.IsEnded() { - return true - } + if err := s.roundStore.AddOrUpdateRound(ctx, *round); err != nil { + log.WithError(err).Errorf("failed to add or update round %s", round.Id) + return + } + log.Debugf("added or updated round %s", round.Id) - repo := s.vtxoStore + if !round.IsEnded() { + go s.batchUpdateHandler.dispatch(*round) + return + } - lastEvent := events[len(events)-1] - if lastEvent.GetType() == domain.EventTypeBatchSwept { - event := lastEvent.(domain.BatchSwept) - allSweptVtxos := append(event.LeafVtxos, event.PreconfirmedVtxos...) - sweptCount, err := repo.SweepVtxos(ctx, allSweptVtxos) - if err != nil { - log.WithError(err).Warn("failed to sweep vtxos, retrying...") - return false - } - log.Debugf("swept %d vtxos", sweptCount) + repo := s.vtxoStore - if event.FullySwept { - log.WithField("commitment_txid", round.CommitmentTxid).Debugf( - "round %s fully swept", round.Id, - ) - } - return true + lastEvent := events[len(events)-1] + if lastEvent.GetType() == domain.EventTypeBatchSwept { + event := lastEvent.(domain.BatchSwept) + allSweptVtxos := append(event.LeafVtxos, event.PreconfirmedVtxos...) + + // Per-outpoint sweeping avoids marker over-reach: markers can be shared + // across independent subtrees when offchain txs consolidate inputs from + // different lineages. Sweeping by marker would incorrectly mark unrelated + // VTXOs as swept (same reason the checkpoint path uses SweepVtxoOutpoints). 
+ sweptAt := time.Now().UnixMilli() + if err := s.markerStore.SweepVtxoOutpoints(ctx, allSweptVtxos, sweptAt); err != nil { + log.WithError(err).Warn("failed to sweep vtxo outpoints for batch") + } else if len(allSweptVtxos) > 0 { + log.Debugf("swept %d vtxo outpoints for batch", len(allSweptVtxos)) } - spentVtxos := getSpentVtxoKeysFromRound(*round, s.txDecoder) - newVtxos := getNewVtxosFromRound(*round, s.txDecoder) + if event.FullySwept { + log.WithField("commitment_txid", round.CommitmentTxid).Debugf( + "round %s fully swept", round.Id, + ) + } + go s.batchUpdateHandler.dispatch(*round) + return + } + + spentVtxos := getSpentVtxoKeysFromRound(*round, s.txDecoder) + newVtxos := getNewVtxosFromRound(*round, s.txDecoder) - if len(spentVtxos) > 0 { + if len(spentVtxos) > 0 { + for attempt := range maxProjectionRetry { if err := repo.SettleVtxos(ctx, spentVtxos, round.CommitmentTxid); err != nil { - log.WithError(err).Warn("failed to spend vtxos, retrying...") - return false + log.WithError(err).Warnf( + "failed to spend vtxos (attempt %d/%d)", attempt+1, maxProjectionRetry, + ) + time.Sleep(100 * time.Millisecond) + continue } log.Debugf("spent %d vtxos", len(spentVtxos)) + break } + } - if len(newVtxos) > 0 { + if len(newVtxos) > 0 { + for attempt := range maxProjectionRetry { // this will take care of updating asset projections as well if err := repo.AddVtxos(ctx, newVtxos); err != nil { - log.WithError(err).Warn("failed to add new vtxos, retrying soon") - return false + log.WithError(err).Warnf( + "failed to add new vtxos (attempt %d/%d)", attempt+1, maxProjectionRetry, + ) + time.Sleep(100 * time.Millisecond) + continue } log.Debugf("added %d new vtxos", len(newVtxos)) + break + } + // Create root markers for batch VTXOs (depth 0 is always at marker boundary) + for attempt := range maxProjectionRetry { + if err := s.markerStore.CreateRootMarkersForVtxos(ctx, newVtxos); err != nil { + log.WithError(err).Warnf( + "failed to create root markers for %d vtxos 
(attempt %d/%d)", + len(newVtxos), attempt+1, maxProjectionRetry, + ) + time.Sleep(100 * time.Millisecond) + continue + } + log.Debugf("created root markers for %d vtxos", len(newVtxos)) + break } - return true } - dispatch := updateFn() - if dispatch { - go s.batchUpdateHandler.dispatch(*round) - } + go s.batchUpdateHandler.dispatch(*round) } func (s *service) updateProjectionsAfterOffchainTxEvents(events []domain.Event) { ctx := context.Background() offchainTx := domain.NewOffchainTxFromEvents(events) - updateFn := func() bool { - if err := s.offchainTxStore.AddOrUpdateOffchainTx(ctx, offchainTx); err != nil { - log.WithError(err).Errorf("failed to add or update offchain tx %s", offchainTx.ArkTxid) - return false - } - log.Debugf("added or updated offchain tx %s", offchainTx.ArkTxid) - - switch { - case offchainTx.IsAccepted(): - spentVtxos := make(map[domain.Outpoint]string) - for _, tx := range offchainTx.CheckpointTxs { - txid, ins, _, err := s.txDecoder.DecodeTx(tx) - if err != nil { - log.WithError(err).Warn("failed to decode checkpoint tx") - continue - } - for _, in := range ins { - spentVtxos[in] = txid - } - } - // as soon as the checkpoint txs are signed by the signer, - // we must mark the vtxos as spent to prevent double spending. 
- if err := s.vtxoStore.SpendVtxos(ctx, spentVtxos, offchainTx.ArkTxid); err != nil { - log.WithError(err).Warn("failed to spend vtxos") - return false - } - if len(spentVtxos) > 0 { - log.Debugf("spent %d vtxos", len(spentVtxos)) - } - case offchainTx.IsFinalized(): - txid, _, outs, err := s.txDecoder.DecodeTx(offchainTx.ArkTx) + if err := s.offchainTxStore.AddOrUpdateOffchainTx(ctx, offchainTx); err != nil { + log.WithError(err).Errorf("failed to add or update offchain tx %s", offchainTx.ArkTxid) + return + } + log.Debugf("added or updated offchain tx %s", offchainTx.ArkTxid) + + switch { + case offchainTx.IsAccepted(): + spentVtxos := make(map[domain.Outpoint]string) + for _, tx := range offchainTx.CheckpointTxs { + txid, ins, _, err := s.txDecoder.DecodeTx(tx) if err != nil { - log.WithError(err).Warn("failed to decode ark tx") - return false + log.WithError(err).Warn("failed to decode checkpoint tx") + continue + } + for _, in := range ins { + spentVtxos[in] = txid } + } - issuances, assets, err := getAssetsFromTxOuts(txid, outs) - if err != nil { - log.WithError(err).Warn("failed to get assets from tx") - return false + // as soon as the checkpoint txs are signed by the signer, + // we must mark the vtxos as spent to prevent double spending. + if err := s.vtxoStore.SpendVtxos(ctx, spentVtxos, offchainTx.ArkTxid); err != nil { + log.WithError(err).Warn("failed to spend vtxos") + return + } + log.Debugf("spent %d vtxos", len(spentVtxos)) + case offchainTx.IsFinalized(): + txid, _, outs, err := s.txDecoder.DecodeTx(offchainTx.ArkTx) + if err != nil { + log.WithError(err).Warn("failed to decode ark tx") + return + } + + // Depth and parent marker IDs are carried by the OffchainTxAccepted event, + // computed in SubmitOffchainTx from the spent VTXOs. 
+ newDepth := offchainTx.Depth + parentMarkerIDs := offchainTx.ParentMarkerIDs + + // Create marker if at boundary depth, or inherit parent markers + var markerIDs []string + marker, ids := domain.NewMarker(txid, newDepth, parentMarkerIDs) + if marker != nil { + if err := s.markerStore.AddMarker(ctx, *marker); err != nil { + log.WithError(err). + Warn("failed to create marker for chained vtxo, falling back to parent markers") + // Fall back to parent markers so VTXOs are still sweepable. + // Without this, markerIDs stays nil and the VTXOs become + // permanently unsweepable — the swept column was removed and + // swept status is now derived from whether any of a VTXO's + // markers appear in the swept_marker table. + markerIDs = parentMarkerIDs + } else { + log.Debugf("created marker %s at depth %d", marker.ID, newDepth) + markerIDs = ids } + } else { + markerIDs = ids + } - sweepTxs, err := s.roundStore.GetSweepTxs(ctx, offchainTx.RootCommitmentTxId) - // We consider the tx swept if: - // - there is an error fetching the sweep txs (this is just fallback, should never happen) - // - the batch is swept - // - the tx expired (meaning one or all its inputs expired and are already swept or about - // to be swept) - txSwept := err != nil || len(sweepTxs) > 0 || - time.Now().After(time.Unix(offchainTx.ExpiryTimestamp, 0)) - // once the offchain tx is finalized, the user signed the checkpoint txs - // thus, we can create the new vtxos in the db. 
- newVtxos := make([]domain.Vtxo, 0, len(outs)) - for outIndex, out := range outs { - // ignore anchor and extension - if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) || - extension.IsExtension(out.PkScript) { - continue - } + issuances, assets, err := getAssetsFromTxOuts(txid, outs) + if err != nil { + log.WithError(err).Warn("failed to get assets from tx") + return + } + + txSwept := false + batch, err := s.roundStore.GetRoundWithCommitmentTxid(ctx, offchainTx.RootCommitmentTxId) + // We consider the tx swept if: + // - there is an error fetching the batch (this is just fallback, should never happen) + // - the batch is swept + // - the tx expired (meaning one or all its inputs expired and are already swept or about + // to be swept) + txSwept = err != nil || (batch != nil && len(batch.SweepTxs) > 0) || + time.Now().After(time.Unix(offchainTx.ExpiryTimestamp, 0)) + // once the offchain tx is finalized, the user signed the checkpoint txs + // thus, we can create the new vtxos in the db. 
+ newVtxos := make([]domain.Vtxo, 0, len(outs)) + createdDustMarkerIDs := make([]string, 0) + for outIndex, out := range outs { + // ignore anchor and extension + if bytes.Equal(out.PkScript, txutils.ANCHOR_PKSCRIPT) || + extension.IsExtension(out.PkScript) { + continue + } - // at that point, we should only have valid taproot script - if len(out.PkScript) != 34 { - continue - } + // at that point, we should only have valid taproot script + if len(out.PkScript) != 34 { + continue + } - outputSwept := txSwept - if !outputSwept { - outputSwept = script.IsSubDustScript(out.PkScript) - } + outputSwept := txSwept + if !outputSwept { + outputSwept = script.IsSubDustScript(out.PkScript) + } - newVtxos = append(newVtxos, domain.Vtxo{ - Outpoint: domain.Outpoint{ - Txid: txid, - VOut: uint32(outIndex), - }, - PubKey: hex.EncodeToString(out.PkScript[2:]), - Amount: uint64(out.Amount), - ExpiresAt: offchainTx.ExpiryTimestamp, - CommitmentTxids: offchainTx.CommitmentTxidsList(), - RootCommitmentTxid: offchainTx.RootCommitmentTxId, - Preconfirmed: true, - CreatedAt: offchainTx.StartingTimestamp, - // mark the vtxo as "swept" if it is below dust limit to prevent it from being spent again in a future offchain tx - // the only way to spend a swept vtxo is by collecting enough dust to cover the minSettlementVtxoAmount and then settle. - // because sub-dust vtxos are using OP_RETURN output script, they can't be unilaterally exited. 
- Swept: outputSwept, - Assets: assets[uint32(outIndex)], - }) + outpoint := domain.Outpoint{ + Txid: txid, + VOut: uint32(outIndex), } - if len(issuances) > 0 { - assetsByTx := map[string][]domain.Asset{ - offchainTx.ArkTxid: issuances, - } - count, err := s.assetStore.AddAssets(ctx, assetsByTx) - if err != nil { - log.WithError(err).Warnf( - "failed to add issued assets in offchain tx %s", offchainTx.ArkTxid, - ) - return false - } - if count > 0 { - log.Infof("added %d issued assets", count) + vtxoMarkerIDs := markerIDs + isDust := script.IsSubDustScript(out.PkScript) + if isDust { + // Dust VTXOs get their own outpoint-based marker so they can be + // swept individually without affecting sibling non-dust VTXOs + // that share the same inherited parent markers. + dustMarkerID := outpoint.String() + if err := s.markerStore.AddMarker(ctx, domain.Marker{ + ID: dustMarkerID, + Depth: newDepth, + ParentMarkerIDs: markerIDs, + }); err != nil { + log.WithError(err).Warnf("failed to create dust marker %s", dustMarkerID) + } else { + createdDustMarkerIDs = append(createdDustMarkerIDs, dustMarkerID) + vtxoMarkerIDs = append(append([]string{}, markerIDs...), dustMarkerID) } } - if err := s.vtxoStore.AddVtxos(ctx, newVtxos); err != nil { - log.WithError(err).Warn("failed to add vtxos") - return false + newVtxos = append(newVtxos, domain.Vtxo{ + Outpoint: outpoint, + PubKey: hex.EncodeToString(out.PkScript[2:]), + Amount: uint64(out.Amount), + ExpiresAt: offchainTx.ExpiryTimestamp, + CommitmentTxids: offchainTx.CommitmentTxidsList(), + RootCommitmentTxid: offchainTx.RootCommitmentTxId, + Preconfirmed: true, + CreatedAt: offchainTx.StartingTimestamp, + // mark the vtxo as "swept" if it is below dust limit to prevent it from being spent again in a future offchain tx + // the only way to spend a swept vtxo is by collecting enough dust to cover the minSettlementVtxoAmount and then settle. 
+ // because sub-dust vtxos are using OP_RETURN output script, they can't be unilaterally exited. + Swept: outputSwept, + Depth: newDepth, + MarkerIDs: vtxoMarkerIDs, + Assets: assets[uint32(outIndex)], + }) + } + + if len(issuances) > 0 { + assetsByTx := map[string][]domain.Asset{ + offchainTx.ArkTxid: issuances, + } + count, err := s.assetStore.AddAssets(ctx, assetsByTx) + if err != nil { + log.WithError(err).Warnf( + "failed to add issued assets in offchain tx %s", offchainTx.ArkTxid, + ) + return } - if len(newVtxos) > 0 { - log.Debugf("added %d vtxos", len(newVtxos)) + if count > 0 { + log.Infof("added %d issued assets", count) } } - return true + if err := s.vtxoStore.AddVtxos(ctx, newVtxos); err != nil { + log.WithError(err).Warn("failed to add vtxos") + return + } + log.Debugf("added %d vtxos at depth %d", len(newVtxos), newDepth) + + // Mark dust VTXOs as swept via their markers + // Dust vtxos are below dust limit and can't be spent again in future offchain tx + // Because sub-dust vtxos are using OP_RETURN output script, they can't be unilaterally exited + if len(createdDustMarkerIDs) > 0 { + sweptAt := time.Now().UnixMilli() + if err := s.markerStore.BulkSweepMarkers( + ctx, + createdDustMarkerIDs, + sweptAt, + ); err != nil { + log.WithError(err). 
+ Warnf("failed to sweep %d dust vtxo markers", len(createdDustMarkerIDs)) + } + } } - dispatch := updateFn() - if dispatch { - go s.offchainTxUpdateHandler.dispatch(*offchainTx) - } + go s.offchainTxUpdateHandler.dispatch(*offchainTx) } func getSpentVtxoKeysFromRound( @@ -702,14 +828,17 @@ func getNewVtxosFromRound(round domain.Round, txDecoder ports.TxDecoder) []domai } vtxoPubkey := hex.EncodeToString(schnorr.SerializePubKey(vtxoTapKey)) + outpoint := domain.Outpoint{Txid: txid, VOut: uint32(i)} vtxos = append(vtxos, domain.Vtxo{ - Outpoint: domain.Outpoint{Txid: txid, VOut: uint32(i)}, + Outpoint: outpoint, PubKey: vtxoPubkey, Amount: out.Amount, CommitmentTxids: []string{round.CommitmentTxid}, RootCommitmentTxid: round.CommitmentTxid, CreatedAt: round.EndingTimestamp, ExpiresAt: round.ExpiryTimestamp(), + Depth: 0, + MarkerIDs: []string{outpoint.String()}, Assets: assets[uint32(i)], }) } diff --git a/internal/infrastructure/db/service_test.go b/internal/infrastructure/db/service_test.go index ee006e436..5a77b09db 100644 --- a/internal/infrastructure/db/service_test.go +++ b/internal/infrastructure/db/service_test.go @@ -4,6 +4,7 @@ import ( "context" "crypto/rand" "encoding/hex" + "fmt" "math" "math/big" "os" @@ -186,6 +187,34 @@ func TestService(t *testing.T) { testOffchainTxRepository(t, svc) testAssetRepository(t, svc) testVtxoRepository(t, svc) + testMarkerBasicOperations(t, svc) + testMarkerSweep(t, svc) + testVtxoMarkerAssociation(t, svc) + testSweepVtxosByMarker(t, svc) + testMarkerDepthRangeQueries(t, svc) + testMarkerChainTraversal(t, svc) + testGetVtxoChainWithMarkerOptimization(t, svc) + testBulkSweepMarkersConcurrent(t, svc) + testCreateRootMarkersForVtxos(t, svc) + testMarkerCreationAtBoundaryDepth(t, svc) + testMarkerInheritanceAtNonBoundary(t, svc) + testDustVtxoMarkersSweptImmediately(t, svc) + testSweepVtxosWithMarkersEmptyInput(t, svc) + testSweepVtxosWithMarkersNoMarkersOnVtxos(t, svc) + testVtxoMarkerIDsRoundTrip(t, svc) + 
testGetVtxosByArkTxidMultipleOutputs(t, svc) + testCreateRootMarkersForEmptyVtxos(t, svc) + testSweepVtxosWithMarkersIntegration(t, svc) + testDeepChain20kMarkers(t, svc) + testPartialMarkerSweep(t, svc) + testListVtxosMarkerSweptFiltering(t, svc) + testAddMarkerFailureFallbackToParentMarkers(t, svc) + testSweepableUnrolledExcludesMarkerSwept(t, svc) + testSweepVtxoOutpointsNoOverreach(t, svc) + testSweepVtxoOutpointsEdgeCases(t, svc) + testGetAllChildrenVtxosSiblingIsolation(t, svc) + testConvergentMultiParentMarkerDAG(t, svc) + testSweepMarkerWithDescendantsDeepChain(t, svc) testScheduledSessionRepository(t, svc) testConvictionRepository(t, svc) testFeeRepository(t, svc) @@ -684,6 +713,7 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { RootCommitmentTxid: commitmentTxid, CommitmentTxids: []string{commitmentTxid, "cmt1", "cmt2"}, Preconfirmed: true, + Depth: 2, // chained vtxo at depth 2 }, { Outpoint: domain.Outpoint{ @@ -694,6 +724,7 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { Amount: 2000, RootCommitmentTxid: commitmentTxid, CommitmentTxids: []string{commitmentTxid}, + Depth: 0, // batch vtxo at depth 0 }, } assetVtxos := append(userVtxos, []domain.Vtxo{ @@ -742,6 +773,7 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { Amount: 2000, RootCommitmentTxid: commitmentTxid, CommitmentTxids: []string{commitmentTxid}, + Depth: 1, // chained vtxo at depth 1 }) arkTxid := randomString(32) @@ -913,7 +945,7 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { require.Empty(t, children) // Test recursive query starting from vtxo1 - children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, vtxo1.Txid) + children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, vtxo1.Outpoint) require.NoError(t, err) require.Len(t, children, 4) // Should return all 4 vtxos in the chain @@ -924,17 +956,19 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { require.Equal(t, expectedOutpoints, children) // Test starting 
from middle of chain (vtxo2) - children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, vtxo2.Txid) + children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, vtxo2.Outpoint) require.NoError(t, err) require.Len(t, children, 3) // Should return vtxo2, vtxo3, vtxo4 // Test starting from end of chain (vtxo4) - children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, vtxo4.Txid) + children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, vtxo4.Outpoint) require.NoError(t, err) require.Len(t, children, 1) // Should return only vtxo4 - // Test with non-existent txid - children, err = svc.Vtxos().GetAllChildrenVtxos(ctx, randomString(32)) + // Test with non-existent outpoint + children, err = svc.Vtxos().GetAllChildrenVtxos( + ctx, domain.Outpoint{Txid: randomString(32), VOut: 0}, + ) require.NoError(t, err) require.Empty(t, children) @@ -1214,6 +1248,17 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { before := liquidityNow + 45 liquidityCommitmentTxid := randomString(32) + expiringVtxoToSweep := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 1}, + PubKey: pubkey, + Amount: 200, + RootCommitmentTxid: liquidityCommitmentTxid, + CommitmentTxids: []string{liquidityCommitmentTxid}, + ExpiresAt: liquidityNow + 20, + Swept: false, // Will be marked as swept via markers + Spent: false, + Unrolled: false, + } expiringVtxos := []domain.Vtxo{ { Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 9}, @@ -1237,17 +1282,7 @@ func testVtxoRepository(t *testing.T, svc ports.RepoManager) { Spent: false, Unrolled: false, }, - { - Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 1}, - PubKey: pubkey, - Amount: 200, - RootCommitmentTxid: liquidityCommitmentTxid, - CommitmentTxids: []string{liquidityCommitmentTxid}, - ExpiresAt: liquidityNow + 20, - Swept: true, - Spent: false, - Unrolled: false, - }, + expiringVtxoToSweep, { Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 2}, PubKey: pubkey, @@ -1285,754 +1320,4073 @@ func testVtxoRepository(t 
*testing.T, svc ports.RepoManager) { err = svc.Vtxos().AddVtxos(ctx, expiringVtxos) require.NoError(t, err) + // Mark the swept vtxo via markers (if marker store is available) + if svc.Markers() != nil { + // Create a marker for the VTXO and sweep it + markerID := expiringVtxoToSweep.Outpoint.String() + err = svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerID, + Depth: 0, + }) + require.NoError(t, err) + err = svc.Markers(). + UpdateVtxoMarkers(ctx, expiringVtxoToSweep.Outpoint, []string{markerID}) + require.NoError(t, err) + sweptAt := time.Now().Unix() + err = svc.Markers().SweepMarker(ctx, markerID, sweptAt) + require.NoError(t, err) + } + amount, err := svc.Vtxos().GetExpiringLiquidity(ctx, after, before) require.NoError(t, err) + // Only vtxo at VOut=0 with Amount=100 is in range (after < expiresAt < before) require.Equal(t, uint64(100), amount) // before=0 means no upper bound. + // Without marker support: 100 + 200 + 500 = 800 (swept vtxo not excluded) + // With marker support: 100 + 500 = 600 (swept vtxo excluded) amount, err = svc.Vtxos().GetExpiringLiquidity(ctx, liquidityNow, 0) require.NoError(t, err) - require.Equal(t, uint64(600), amount) + if svc.Markers() != nil { + require.Equal(t, uint64(600), amount) + } else { + require.Equal(t, uint64(800), amount) + } recoverableBefore, err := svc.Vtxos().GetRecoverableLiquidity(ctx) require.NoError(t, err) recoverableCommitmentTxid := randomString(32) - recoverableVtxos := []domain.Vtxo{ - { - Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 10}, - PubKey: pubkey, - Amount: 111, - RootCommitmentTxid: recoverableCommitmentTxid, - CommitmentTxids: []string{recoverableCommitmentTxid}, - Swept: true, - Spent: false, - }, - { - Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 11}, - PubKey: pubkey, - Amount: 222, - RootCommitmentTxid: recoverableCommitmentTxid, - CommitmentTxids: []string{recoverableCommitmentTxid}, - Swept: true, - Spent: true, - }, - { - Outpoint: domain.Outpoint{Txid: 
randomString(32), VOut: 12}, - PubKey: pubkey, - Amount: 333, - RootCommitmentTxid: recoverableCommitmentTxid, - CommitmentTxids: []string{recoverableCommitmentTxid}, - Swept: false, - Spent: false, - }, + recoverableVtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 10}, + PubKey: pubkey, + Amount: 111, + RootCommitmentTxid: recoverableCommitmentTxid, + CommitmentTxids: []string{recoverableCommitmentTxid}, + Swept: false, // Will be marked as swept via markers + Spent: false, + } + recoverableVtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 11}, + PubKey: pubkey, + Amount: 222, + RootCommitmentTxid: recoverableCommitmentTxid, + CommitmentTxids: []string{recoverableCommitmentTxid}, + Swept: false, // Will be marked as swept via markers + Spent: true, + } + recoverableVtxo3 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 12}, + PubKey: pubkey, + Amount: 333, + RootCommitmentTxid: recoverableCommitmentTxid, + CommitmentTxids: []string{recoverableCommitmentTxid}, + Swept: false, + Spent: false, } + recoverableVtxos := []domain.Vtxo{recoverableVtxo1, recoverableVtxo2, recoverableVtxo3} err = svc.Vtxos().AddVtxos(ctx, recoverableVtxos) require.NoError(t, err) + // Mark first two vtxos as swept via markers (if marker store is available) + if svc.Markers() != nil { + // Create markers for VTXOs and sweep them + marker1ID := recoverableVtxo1.Outpoint.String() + marker2ID := recoverableVtxo2.Outpoint.String() + err = svc.Markers().AddMarker(ctx, domain.Marker{ID: marker1ID, Depth: 0}) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, domain.Marker{ID: marker2ID, Depth: 0}) + require.NoError(t, err) + err = svc.Markers(). + UpdateVtxoMarkers(ctx, recoverableVtxo1.Outpoint, []string{marker1ID}) + require.NoError(t, err) + err = svc.Markers(). 
+ UpdateVtxoMarkers(ctx, recoverableVtxo2.Outpoint, []string{marker2ID}) + require.NoError(t, err) + sweptAt := time.Now().Unix() + err = svc.Markers().SweepMarker(ctx, marker1ID, sweptAt) + require.NoError(t, err) + err = svc.Markers().SweepMarker(ctx, marker2ID, sweptAt) + require.NoError(t, err) + } + recoverableAfter, err := svc.Vtxos().GetRecoverableLiquidity(ctx) require.NoError(t, err) - require.Equal(t, recoverableBefore+uint64(111), recoverableAfter) + // Only recoverableVtxo1 is swept and not spent, so it contributes 111 + if svc.Markers() != nil { + require.Equal(t, recoverableBefore+uint64(111), recoverableAfter) + } }) - t.Run("test_get_vtxos_with_multiple_pubkeys", func(t *testing.T) { - ctx := t.Context() + // Verifies that the Depth field persists through AddVtxos→GetVtxos for VTXOs + // at various chain depths (0, 1, 2, 100). + t.Run("test_vtxo_depth", func(t *testing.T) { + ctx := context.Background() + commitmentTxid := randomString(32) - pk1 := randomString(32) - pk2 := randomString(32) - cmtTxid := randomString(32) + // Create vtxos with different depths to simulate a chain + // Batch vtxo at depth 0 + batchVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + } - vtxosToAdd := []domain.Vtxo{ - { - Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, - PubKey: pk1, - Amount: 1000, - RootCommitmentTxid: cmtTxid, - CommitmentTxids: []string{cmtTxid}, - }, - { - Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, - PubKey: pk2, - Amount: 2000, - RootCommitmentTxid: cmtTxid, - CommitmentTxids: []string{cmtTxid}, - }, + // First chain at depth 1 + chainedVtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 900, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid, randomString(32)}, + Depth: 
1, } - err := svc.Vtxos().AddVtxos(ctx, vtxosToAdd) - require.NoError(t, err) - // Single pubkey should return 1 vtxo. - got, err := svc.Vtxos().GetAllVtxosWithPubKeys(ctx, []string{pk1}, 0, 0) - require.NoError(t, err) - require.Len(t, got, 1) - require.Equal(t, pk1, got[0].PubKey) + // Second chain at depth 2 + chainedVtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 800, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid, randomString(32), randomString(32)}, + Depth: 2, + } - got, err = svc.Vtxos().GetAllVtxosWithPubKeys(ctx, []string{pk2}, 0, 0) + // Deep chain at depth 100 + deepVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 500, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 100, + } + + vtxosToAdd := []domain.Vtxo{batchVtxo, chainedVtxo1, chainedVtxo2, deepVtxo} + err := svc.Vtxos().AddVtxos(ctx, vtxosToAdd) require.NoError(t, err) - require.Len(t, got, 1) - require.Equal(t, pk2, got[0].PubKey) - // Multiple pubkeys should return vtxos for both. 
- got, err = svc.Vtxos().GetAllVtxosWithPubKeys(ctx, []string{pk1, pk2}, 0, 0) + // Retrieve and verify depths are preserved + outpoints := []domain.Outpoint{ + batchVtxo.Outpoint, + chainedVtxo1.Outpoint, + chainedVtxo2.Outpoint, + deepVtxo.Outpoint, + } + retrievedVtxos, err := svc.Vtxos().GetVtxos(ctx, outpoints) require.NoError(t, err) - require.Len(t, got, 2) + require.Len(t, retrievedVtxos, 4) - gotPubkeys := map[string]bool{got[0].PubKey: true, got[1].PubKey: true} - require.True(t, gotPubkeys[pk1], "expected vtxo with pubkey pk1") - require.True(t, gotPubkeys[pk2], "expected vtxo with pubkey pk2") + // Create a map for easier lookup + vtxoByOutpoint := make(map[string]domain.Vtxo) + for _, v := range retrievedVtxos { + vtxoByOutpoint[v.Outpoint.String()] = v + } + + // Verify each vtxo has correct depth + require.Equal(t, uint32(0), vtxoByOutpoint[batchVtxo.Outpoint.String()].Depth) + require.Equal(t, uint32(1), vtxoByOutpoint[chainedVtxo1.Outpoint.String()].Depth) + require.Equal(t, uint32(2), vtxoByOutpoint[chainedVtxo2.Outpoint.String()].Depth) + require.Equal(t, uint32(100), vtxoByOutpoint[deepVtxo.Outpoint.String()].Depth) }) } -func testScheduledSessionRepository(t *testing.T, svc ports.RepoManager) { - t.Run("test_scheduled_session_repository", func(t *testing.T) { +// testMarkerBasicOperations exercises AddMarker, GetMarker, GetMarkersByDepth, and +// GetMarkersByIds. Creates a 4-marker DAG (root, two at depth 100, one at depth 200 +// with two parents), verifies field round-trips including ParentMarkerIDs, and tests +// edge cases: non-existent ID, empty ID slice, and mixed valid/invalid ID queries. 
+func testMarkerBasicOperations(t *testing.T, svc ports.RepoManager) { + t.Run("test_marker_basic_operations", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } ctx := context.Background() - repo := svc.ScheduledSession() - - scheduledSession, err := repo.Get(ctx) - require.NoError(t, err) - require.Nil(t, scheduledSession) - now := time.Now().Truncate(time.Second) - expected := domain.ScheduledSession{ - StartTime: now, - Period: time.Duration(3) * time.Hour, - Duration: time.Duration(20) * time.Second, - UpdatedAt: now, + // Create markers with AddMarker + marker1 := domain.Marker{ + ID: randomString(32), + Depth: 0, + ParentMarkerIDs: nil, + } + marker2 := domain.Marker{ + ID: randomString(32), + Depth: 100, + ParentMarkerIDs: []string{marker1.ID}, + } + marker3 := domain.Marker{ + ID: randomString(32), + Depth: 100, + ParentMarkerIDs: []string{marker1.ID}, + } + marker4 := domain.Marker{ + ID: randomString(32), + Depth: 200, + ParentMarkerIDs: []string{marker2.ID, marker3.ID}, } - err = repo.Upsert(ctx, expected) + err := svc.Markers().AddMarker(ctx, marker1) require.NoError(t, err) - - got, err := repo.Get(ctx) + err = svc.Markers().AddMarker(ctx, marker2) require.NoError(t, err) - require.NotNil(t, got) - assertScheduledSessionEqual(t, expected, *got) - - expected.Period = time.Duration(4) * time.Hour - expected.Duration = time.Duration(40) * time.Second - expected.UpdatedAt = now.Add(100 * time.Second) - - err = repo.Upsert(ctx, expected) + err = svc.Markers().AddMarker(ctx, marker3) require.NoError(t, err) - - got, err = repo.Get(ctx) + err = svc.Markers().AddMarker(ctx, marker4) require.NoError(t, err) - require.NotNil(t, got) - assertScheduledSessionEqual(t, expected, *got) - err = repo.Clear(ctx) + // Test GetMarker - retrieve single marker and verify all fields + retrievedMarker1, err := svc.Markers().GetMarker(ctx, marker1.ID) require.NoError(t, err) + require.NotNil(t, 
retrievedMarker1) + require.Equal(t, marker1.ID, retrievedMarker1.ID) + require.Equal(t, marker1.Depth, retrievedMarker1.Depth) + require.Empty(t, retrievedMarker1.ParentMarkerIDs) - scheduledSession, err = repo.Get(ctx) + retrievedMarker2, err := svc.Markers().GetMarker(ctx, marker2.ID) require.NoError(t, err) - require.Nil(t, scheduledSession) + require.NotNil(t, retrievedMarker2) + require.Equal(t, marker2.ID, retrievedMarker2.ID) + require.Equal(t, marker2.Depth, retrievedMarker2.Depth) + require.ElementsMatch(t, marker2.ParentMarkerIDs, retrievedMarker2.ParentMarkerIDs) - // No error if trying to clear already cleared scheduled session - err = repo.Clear(ctx) + retrievedMarker4, err := svc.Markers().GetMarker(ctx, marker4.ID) require.NoError(t, err) - }) -} + require.NotNil(t, retrievedMarker4) + require.Equal(t, marker4.ID, retrievedMarker4.ID) + require.Equal(t, marker4.Depth, retrievedMarker4.Depth) + require.ElementsMatch(t, marker4.ParentMarkerIDs, retrievedMarker4.ParentMarkerIDs) -func testOffchainTxRepository(t *testing.T, svc ports.RepoManager) { - t.Run("test_offchain_tx_repository", func(t *testing.T) { - ctx := context.Background() - repo := svc.OffchainTxs() + // Test GetMarker with non-existent ID + nonExistent, err := svc.Markers().GetMarker(ctx, "nonexistent") + require.NoError(t, err) + require.Nil(t, nonExistent) - offchainTx, err := repo.GetOffchainTx(ctx, arkTxid) - require.Nil(t, offchainTx) - require.Error(t, err) + // Test GetMarkersByDepth - markers at same depth + markersAtDepth100, err := svc.Markers().GetMarkersByDepth(ctx, 100) + require.NoError(t, err) + require.Len(t, markersAtDepth100, 2) + markerIdsAtDepth100 := []string{markersAtDepth100[0].ID, markersAtDepth100[1].ID} + require.ElementsMatch(t, []string{marker2.ID, marker3.ID}, markerIdsAtDepth100) - checkpointTxid1 := "0000000000000000000000000000000000000000000000000000000000000001" - signedCheckpointPtx1 := "cHNldP8BAgQCAAAAAQQBAAEFAQABBgEDAfsEAgAAAAA=signed" - 
checkpointTxid2 := "0000000000000000000000000000000000000000000000000000000000000002" - signedCheckpointPtx2 := "cHNldP8BAgQCAAAAAQQBAAEFAQABBgEDAfsEAgAAAAB=signed" - rootCommitmentTxid := "0000000000000000000000000000000000000000000000000000000000000003" - commitmentTxid := "0000000000000000000000000000000000000000000000000000000000000004" - events := []domain.Event{ - domain.OffchainTxRequested{ - OffchainTxEvent: domain.OffchainTxEvent{ - Id: arkTxid, - Type: domain.EventTypeOffchainTxRequested, - }, - ArkTx: "", - UnsignedCheckpointTxs: nil, - StartingTimestamp: now.Unix(), - }, - domain.OffchainTxAccepted{ - OffchainTxEvent: domain.OffchainTxEvent{ - Id: arkTxid, - Type: domain.EventTypeOffchainTxAccepted, - }, - CommitmentTxids: map[string]string{ - checkpointTxid1: rootCommitmentTxid, - checkpointTxid2: commitmentTxid, - }, - FinalArkTx: "", - SignedCheckpointTxs: map[string]string{ - checkpointTxid1: signedCheckpointPtx1, - checkpointTxid2: signedCheckpointPtx2, - }, - RootCommitmentTxid: rootCommitmentTxid, - }, - } - offchainTx = domain.NewOffchainTxFromEvents(events) - err = repo.AddOrUpdateOffchainTx(ctx, offchainTx) + markersAtDepth0, err := svc.Markers().GetMarkersByDepth(ctx, 0) require.NoError(t, err) + require.GreaterOrEqual(t, len(markersAtDepth0), 1) + var foundMarker1 bool + for _, m := range markersAtDepth0 { + if m.ID == marker1.ID { + foundMarker1 = true + break + } + } + require.True(t, foundMarker1) - gotOffchainTx, err := repo.GetOffchainTx(ctx, arkTxid) + markersAtDepth200, err := svc.Markers().GetMarkersByDepth(ctx, 200) require.NoError(t, err) - require.NotNil(t, offchainTx) - require.True(t, gotOffchainTx.IsAccepted()) - require.Equal(t, rootCommitmentTxid, gotOffchainTx.RootCommitmentTxId) - require.Condition(t, offchainTxMatch(*offchainTx, *gotOffchainTx)) + require.GreaterOrEqual(t, len(markersAtDepth200), 1) + var foundMarker4 bool + for _, m := range markersAtDepth200 { + if m.ID == marker4.ID { + foundMarker4 = true + break + } + 
} + require.True(t, foundMarker4) - newEvents := []domain.Event{ - domain.OffchainTxFinalized{ - OffchainTxEvent: domain.OffchainTxEvent{ - Id: arkTxid, - Type: domain.EventTypeOffchainTxFinalized, - }, - FinalCheckpointTxs: nil, - Timestamp: endTimestamp, - }, + // Test GetMarkersByIds - batch retrieve + markersById, err := svc.Markers(). + GetMarkersByIds(ctx, []string{marker1.ID, marker3.ID, marker4.ID}) + require.NoError(t, err) + require.Len(t, markersById, 3) + retrievedIds := make([]string, len(markersById)) + for i, m := range markersById { + retrievedIds[i] = m.ID } - events = append(events, newEvents...) - offchainTx = domain.NewOffchainTxFromEvents(events) - err = repo.AddOrUpdateOffchainTx(ctx, offchainTx) + require.ElementsMatch(t, []string{marker1.ID, marker3.ID, marker4.ID}, retrievedIds) + + // Test GetMarkersByIds with empty slice + emptyMarkers, err := svc.Markers().GetMarkersByIds(ctx, []string{}) require.NoError(t, err) + require.Nil(t, emptyMarkers) - gotOffchainTx, err = repo.GetOffchainTx(ctx, arkTxid) + // Test GetMarkersByIds with non-existent IDs mixed with valid + mixedMarkers, err := svc.Markers().GetMarkersByIds(ctx, []string{marker1.ID, "nonexistent"}) require.NoError(t, err) - require.NotNil(t, offchainTx) - require.True(t, gotOffchainTx.IsFinalized()) - require.Condition(t, offchainTxMatch(*offchainTx, *gotOffchainTx)) + require.Len(t, mixedMarkers, 1) + require.Equal(t, marker1.ID, mixedMarkers[0].ID) }) } -func testConvictionRepository(t *testing.T, svc ports.RepoManager) { - t.Run("test_conviction_repository", func(t *testing.T) { +// testMarkerSweep exercises the full marker sweep lifecycle: SweepMarker, IsMarkerSwept, +// GetSweptMarkers, and SweepMarkerWithDescendants. Verifies idempotency (ON CONFLICT +// DO NOTHING preserves original timestamp), multi-marker retrieval, empty-slice edge +// cases, non-existent marker handling, and recursive descendant sweeping with hierarchy +// (root→child1→grandchild1, root→child2). 
+func testMarkerSweep(t *testing.T, svc ports.RepoManager) { + t.Run("test_marker_sweep", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } ctx := context.Background() - repo := svc.Convictions() - conviction, err := repo.Get(ctx, "non-existent-id") - require.Error(t, err) - require.Nil(t, conviction) + // Create a marker + marker := domain.Marker{ + ID: randomString(32), + Depth: 0, + ParentMarkerIDs: nil, + } + err := svc.Markers().AddMarker(ctx, marker) + require.NoError(t, err) - scriptConviction, err := repo.GetActiveScriptConvictions(ctx, "non-existent-script") + // Verify marker is not swept initially + isSwept, err := svc.Markers().IsMarkerSwept(ctx, marker.ID) require.NoError(t, err) - require.Empty(t, scriptConviction) + require.False(t, isSwept) - convictions, err := repo.GetAll(ctx, time.Now().Add(-time.Hour), time.Now()) + // Sweep the marker + sweptAt := time.Now().UnixMilli() + err = svc.Markers().SweepMarker(ctx, marker.ID, sweptAt) require.NoError(t, err) - require.Empty(t, convictions) - roundConvictions, err := repo.GetByRoundID(ctx, "non-existent-round") + // Verify IsMarkerSwept returns true + isSwept, err = svc.Markers().IsMarkerSwept(ctx, marker.ID) require.NoError(t, err) - require.Empty(t, roundConvictions) + require.True(t, isSwept) - roundID1 := uuid.New().String() - roundID2 := uuid.New().String() - script1 := randomString(32) - script2 := randomString(32) - banDuration := time.Duration(1) * time.Hour + // Verify GetSweptMarkers returns correct record + sweptMarkers, err := svc.Markers().GetSweptMarkers(ctx, []string{marker.ID}) + require.NoError(t, err) + require.Len(t, sweptMarkers, 1) + require.Equal(t, marker.ID, sweptMarkers[0].MarkerID) + require.Equal(t, sweptAt, sweptMarkers[0].SweptAt) - crime1 := domain.Crime{ - Type: domain.CrimeTypeMusig2NonceSubmission, - RoundID: roundID1, - Reason: "Test crime 1", - } - crime2 := domain.Crime{ - Type: 
domain.CrimeTypeMusig2SignatureSubmission, - RoundID: roundID2, - Reason: "Test crime 2", - } + // Test idempotency - sweeping again should not error (ON CONFLICT DO NOTHING) + err = svc.Markers().SweepMarker(ctx, marker.ID, sweptAt+1000) + require.NoError(t, err) - conviction1 := domain.NewScriptConviction(script1, crime1, &banDuration) - conviction2 := domain.NewScriptConviction(script2, crime2, nil) // Permanent ban + // Verify the original swept_at is preserved (not updated) + sweptMarkers, err = svc.Markers().GetSweptMarkers(ctx, []string{marker.ID}) + require.NoError(t, err) + require.Len(t, sweptMarkers, 1) + require.Equal(t, sweptAt, sweptMarkers[0].SweptAt) + + // Test GetSweptMarkers with multiple markers + marker2 := domain.Marker{ + ID: randomString(32), + Depth: 100, + ParentMarkerIDs: []string{marker.ID}, + } + err = svc.Markers().AddMarker(ctx, marker2) + require.NoError(t, err) - err = repo.Add(ctx, conviction1, conviction2) + sweptAt2 := time.Now().UnixMilli() + err = svc.Markers().SweepMarker(ctx, marker2.ID, sweptAt2) require.NoError(t, err) - retrievedConviction1, err := repo.Get(ctx, conviction1.GetID()) + sweptMarkers, err = svc.Markers().GetSweptMarkers(ctx, []string{marker.ID, marker2.ID}) require.NoError(t, err) - require.NotNil(t, retrievedConviction1) - assertConvictionEqual(t, conviction1, retrievedConviction1) + require.Len(t, sweptMarkers, 2) - retrievedConviction2, err := repo.Get(ctx, conviction2.GetID()) + // Test GetSweptMarkers with empty slice + emptySwept, err := svc.Markers().GetSweptMarkers(ctx, []string{}) require.NoError(t, err) - require.NotNil(t, retrievedConviction2) - assertConvictionEqual(t, conviction2, retrievedConviction2) + require.Nil(t, emptySwept) - activeConviction1, err := repo.GetActiveScriptConvictions(ctx, script1) + // Test IsMarkerSwept for non-existent marker + isSwept, err = svc.Markers().IsMarkerSwept(ctx, "nonexistent") require.NoError(t, err) - require.NotNil(t, activeConviction1) - require.Len(t, 
activeConviction1, 1) - require.Equal(t, script1, activeConviction1[0].Script) - require.False(t, activeConviction1[0].IsPardoned()) + require.False(t, isSwept) + }) - activeConviction2, err := repo.GetActiveScriptConvictions(ctx, script2) - require.NoError(t, err) - require.NotNil(t, activeConviction2) - require.Len(t, activeConviction2, 1) - require.Equal(t, script2, activeConviction2[0].Script) - require.False(t, activeConviction2[0].IsPardoned()) + t.Run("test_sweep_marker_with_descendants", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() - round1Convictions, err := repo.GetByRoundID(ctx, roundID1) - require.NoError(t, err) - require.Len(t, round1Convictions, 1) - assertConvictionEqual(t, conviction1, round1Convictions[0]) + // Create a marker hierarchy: + // root -> child1 -> grandchild1 + // -> child2 + root := domain.Marker{ + ID: "sweep_desc_root_" + randomString(16), + Depth: 0, + ParentMarkerIDs: nil, + } + child1 := domain.Marker{ + ID: "sweep_desc_child1_" + randomString(16), + Depth: 100, + ParentMarkerIDs: []string{root.ID}, + } + child2 := domain.Marker{ + ID: "sweep_desc_child2_" + randomString(16), + Depth: 100, + ParentMarkerIDs: []string{root.ID}, + } + grandchild1 := domain.Marker{ + ID: "sweep_desc_grandchild1_" + randomString(16), + Depth: 200, + ParentMarkerIDs: []string{child1.ID}, + } - round2Convictions, err := repo.GetByRoundID(ctx, roundID2) + err := svc.Markers().AddMarker(ctx, root) require.NoError(t, err) - require.Len(t, round2Convictions, 1) - assertConvictionEqual(t, conviction2, round2Convictions[0]) - - allConvictions, err := repo.GetAll( - ctx, - time.Now().Add(-time.Hour), - time.Now().Add(time.Hour), - ) + err = svc.Markers().AddMarker(ctx, child1) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, child2) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, grandchild1) require.NoError(t, err) - 
require.Len(t, allConvictions, 2) - err = repo.Pardon(ctx, conviction1.GetID()) + // Verify none are swept initially + isSwept, err := svc.Markers().IsMarkerSwept(ctx, root.ID) require.NoError(t, err) + require.False(t, isSwept) - pardonedConviction, err := repo.Get(ctx, conviction1.GetID()) + // Sweep root with descendants + sweptAt := time.Now().UnixMilli() + count, err := svc.Markers().SweepMarkerWithDescendants(ctx, root.ID, sweptAt) require.NoError(t, err) - require.NotNil(t, pardonedConviction) - require.True(t, pardonedConviction.IsPardoned()) + require.Equal(t, int64(4), count) // root + child1 + child2 + grandchild1 - activeConvictionAfterPardon, err := repo.GetActiveScriptConvictions(ctx, script1) + // Verify all markers are now swept + for _, m := range []domain.Marker{root, child1, child2, grandchild1} { + isSwept, err := svc.Markers().IsMarkerSwept(ctx, m.ID) + require.NoError(t, err) + require.True(t, isSwept, "Marker %s should be swept", m.ID) + } + + // Test idempotency - calling again should return 0 + count, err = svc.Markers().SweepMarkerWithDescendants(ctx, root.ID, sweptAt+1000) require.NoError(t, err) - require.Empty(t, activeConvictionAfterPardon) + require.Equal(t, int64(0), count) - shortDuration := time.Duration(1) * time.Millisecond - crime3 := domain.Crime{ - Type: domain.CrimeTypeMusig2InvalidSignature, - RoundID: roundID1, - Reason: "Test expired crime", + // Test sweeping a leaf node (no descendants) + leaf := domain.Marker{ + ID: "sweep_desc_leaf_" + randomString(16), + Depth: 300, + ParentMarkerIDs: []string{grandchild1.ID}, } - expiredConviction := domain.NewScriptConviction(script1, crime3, &shortDuration) - err = repo.Add(ctx, expiredConviction) + err = svc.Markers().AddMarker(ctx, leaf) require.NoError(t, err) - time.Sleep(10 * time.Millisecond) + count, err = svc.Markers().SweepMarkerWithDescendants(ctx, leaf.ID, sweptAt) + require.NoError(t, err) + require.Equal(t, int64(1), count) // Just the leaf itself - _, err = 
repo.GetActiveScriptConvictions(ctx, script1) + // Test with non-existent marker (should return 0) + count, err = svc.Markers().SweepMarkerWithDescendants(ctx, "nonexistent", sweptAt) require.NoError(t, err) + require.Equal(t, int64(0), count) }) } -// requireAssetsMatch compares two asset slices by Id, ControlAssetId, Metadata, and Supply (using big.Int.Cmp). -func requireAssetsMatch(t *testing.T, expected, actual []domain.Asset) { - t.Helper() - require.Len(t, actual, len(expected)) - byId := make(map[string]domain.Asset) - for _, a := range actual { - byId[a.Id] = a - } - for _, exp := range expected { - got, ok := byId[exp.Id] - require.True(t, ok) - require.Equal(t, exp.ControlAssetId, got.ControlAssetId) - require.Equal(t, exp.Metadata, got.Metadata) - require.Zero(t, (&exp.Supply).Cmp(&got.Supply)) - } -} +// testVtxoMarkerAssociation verifies UpdateVtxoMarkers correctly links VTXOs to markers +// and that the association is visible through both GetVtxosByMarker and GetVtxos. Tests +// that unassociated VTXOs remain marker-free and that non-existent markers return empty. 
+func testVtxoMarkerAssociation(t *testing.T, svc ports.RepoManager) { + t.Run("test_vtxo_marker_association", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) -func testAssetRepository(t *testing.T, svc ports.RepoManager) { - t.Run("test_asset_repository", func(t *testing.T) { - ctx := t.Context() - repo := svc.Assets() - vtxoRepo := svc.Vtxos() + // Create a marker + markerID := randomString(32) + marker := domain.Marker{ + ID: markerID, + Depth: 0, + ParentMarkerIDs: nil, + } + err := svc.Markers().AddMarker(ctx, marker) + require.NoError(t, err) - newAssets := []domain.Asset{ - { - Id: "asset1", - ControlAssetId: "asset2", - Metadata: []asset.Metadata{ - { - Key: []byte("key1"), - Value: []byte("value1"), - }, - { - Key: []byte("abc"), - Value: []byte("cde"), - }, - }, - }, - { - Id: "asset2", - Metadata: []asset.Metadata{ - { - Key: []byte("this is"), - Value: []byte("control asset"), - }, - }, - }, + // Add VTXOs without marker_id + vtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + } + vtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 50, + } + vtxo3 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey2, + Amount: 3000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 75, } - assetIds := []string{"asset1", "asset2", "non-existent-asset"} - // assets should not exist yet - assets, err := repo.GetAssets(ctx, assetIds) + err = svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{vtxo1, vtxo2, vtxo3}) require.NoError(t, err) - require.Len(t, 
assets, 0) - assetsByTx := map[string][]domain.Asset{arkTxid: newAssets} - count, err := repo.AddAssets(ctx, assetsByTx) + // Verify VTXOs initially have no markers + retrievedVtxos, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{vtxo1.Outpoint}) require.NoError(t, err) - require.Equal(t, 2, count) + require.Len(t, retrievedVtxos, 1) + require.Empty(t, retrievedVtxos[0].MarkerIDs) - count, err = repo.AddAssets(ctx, assetsByTx) + // Call UpdateVtxoMarkers to associate VTXOs with marker + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxo1.Outpoint, []string{markerID}) + require.NoError(t, err) + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxo2.Outpoint, []string{markerID}) require.NoError(t, err) - require.Zero(t, count) - assets, err = repo.GetAssets(ctx, assetIds) + // Verify GetVtxosByMarker returns the associated VTXOs + vtxosByMarker, err := svc.Markers().GetVtxosByMarker(ctx, markerID) require.NoError(t, err) - require.Len(t, assets, 2) - requireAssetsMatch(t, newAssets, assets) + require.Len(t, vtxosByMarker, 2) + outpoints := []string{ + vtxosByMarker[0].Outpoint.String(), + vtxosByMarker[1].Outpoint.String(), + } + require.ElementsMatch( + t, + []string{vtxo1.Outpoint.String(), vtxo2.Outpoint.String()}, + outpoints, + ) - assets, err = repo.GetAssets(ctx, assetIds[2:]) + // Verify VTXO.MarkerIDs field is populated when retrieved via GetVtxos + retrievedVtxos, err = svc.Vtxos(). 
+ GetVtxos(ctx, []domain.Outpoint{vtxo1.Outpoint, vtxo2.Outpoint}) require.NoError(t, err) - require.Empty(t, assets) + require.Len(t, retrievedVtxos, 2) + for _, v := range retrievedVtxos { + require.Contains(t, v.MarkerIDs, markerID) + } - // GetControlAsset: asset1 has control asset asset2, asset2 is control asset (no parent) - controlID, err := repo.GetControlAsset(ctx, "asset1") + // Verify vtxo3 still has no markers + retrievedVtxos, err = svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{vtxo3.Outpoint}) require.NoError(t, err) - require.Equal(t, "asset2", controlID) - controlID, err = repo.GetControlAsset(ctx, "asset2") + require.Len(t, retrievedVtxos, 1) + require.Empty(t, retrievedVtxos[0].MarkerIDs) + + // Test GetVtxosByMarker with non-existent marker + vtxosByNonExistent, err := svc.Markers().GetVtxosByMarker(ctx, "nonexistent") require.NoError(t, err) - require.Empty(t, controlID) - _, err = repo.GetControlAsset(ctx, "non-existent-asset") - require.Error(t, err) - require.Contains(t, err.Error(), "no control asset found") + require.Empty(t, vtxosByNonExistent) + }) +} - // AssetExists - exists, err := repo.AssetExists(ctx, "asset1") +// testSweepVtxosByMarker creates 5 VTXOs sharing a marker, pre-sweeps 2 via individual +// markers, then calls SweepVtxosByMarker on the shared marker. Verifies only the 3 +// previously-unswept VTXOs are newly swept, tests idempotency (second call returns 0), +// and checks the non-existent marker edge case. 
+func testSweepVtxosByMarker(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_vtxos_by_marker", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // Create a marker + markerID := randomString(32) + marker := domain.Marker{ + ID: markerID, + Depth: 0, + ParentMarkerIDs: nil, + } + err := svc.Markers().AddMarker(ctx, marker) require.NoError(t, err) - require.True(t, exists) - exists, err = repo.AssetExists(ctx, "asset2") + + // Add 5 VTXOs - all start as unswept + vtxos := make([]domain.Vtxo, 5) + for i := 0; i < 5; i++ { + vtxos[i] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: uint64(1000 * (i + 1)), + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: uint32(i * 10), + Swept: false, + } + } + + err = svc.Vtxos().AddVtxos(ctx, vtxos) require.NoError(t, err) - require.True(t, exists) - exists, err = repo.AssetExists(ctx, "non-existent-asset") + + // Associate all VTXOs with the marker + for _, v := range vtxos { + err = svc.Markers().UpdateVtxoMarkers(ctx, v.Outpoint, []string{markerID}) + require.NoError(t, err) + } + + // Mark vtxos[3] and vtxos[4] as swept via individual markers + // Create individual markers for these VTXOs and sweep them + marker3ID := vtxos[3].Outpoint.String() + marker4ID := vtxos[4].Outpoint.String() + err = svc.Markers().AddMarker(ctx, domain.Marker{ID: marker3ID, Depth: vtxos[3].Depth}) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, domain.Marker{ID: marker4ID, Depth: vtxos[4].Depth}) + require.NoError(t, err) + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxos[3].Outpoint, []string{markerID, marker3ID}) + require.NoError(t, err) + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxos[4].Outpoint, []string{markerID, marker4ID}) + require.NoError(t, err) + sweptAt := time.Now().Unix() + err 
= svc.Markers().SweepMarker(ctx, marker3ID, sweptAt) + require.NoError(t, err) + err = svc.Markers().SweepMarker(ctx, marker4ID, sweptAt) require.NoError(t, err) - require.False(t, exists) - // test asset supply overflow - vtxos := []domain.Vtxo{{ - Outpoint: domain.Outpoint{ - Txid: "supplyOverflowVtxo1", - VOut: 0, - }, - Amount: 330, - Assets: []domain.AssetDenomination{ - { - AssetId: "assetSupplyOverflow", - Amount: math.MaxUint64, - }, - }, - }, - { - Outpoint: domain.Outpoint{ - Txid: "supplyOverflowVtxo2", - VOut: 0, - }, - Amount: 330, - Assets: []domain.AssetDenomination{ - { - AssetId: "assetSupplyOverflow", - Amount: math.MaxUint64, - }, - }, - }} - count, err = repo.AddAssets(ctx, map[string][]domain.Asset{"assetSupplyOverflowTx": { - { - Id: "assetSupplyOverflow", - Metadata: []asset.Metadata{}, - }, - }}) + // Verify initial state - vtxos[3] and vtxos[4] should be swept + vtxosByMarker, err := svc.Markers().GetVtxosByMarker(ctx, markerID) require.NoError(t, err) - require.Equal(t, 1, count) + require.Len(t, vtxosByMarker, 5) - err = vtxoRepo.AddVtxos(ctx, vtxos) + sweptCount := 0 + for _, v := range vtxosByMarker { + if v.Swept { + sweptCount++ + } + } + require.Equal(t, 2, sweptCount) + + // Call SweepVtxosByMarker - this sweeps by marking the marker itself as swept + count, err := svc.Markers().SweepVtxosByMarker(ctx, markerID) require.NoError(t, err) + require.Equal(t, int64(3), count) // Only 3 were newly swept - assets, err = repo.GetAssets(ctx, []string{"assetSupplyOverflow"}) + // Verify all 5 VTXOs now have swept=true + vtxosByMarker, err = svc.Markers().GetVtxosByMarker(ctx, markerID) require.NoError(t, err) - require.Len(t, assets, 1) + require.Len(t, vtxosByMarker, 5) + for _, v := range vtxosByMarker { + require.True(t, v.Swept, "VTXO %s should be swept", v.Outpoint.String()) + } - expectedSupply := new(big.Int). 
- Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2)) + // Call SweepVtxosByMarker again - should return 0 (all already swept) + count, err = svc.Markers().SweepVtxosByMarker(ctx, markerID) + require.NoError(t, err) + require.Equal(t, int64(0), count) - require.Equal(t, expectedSupply.String(), assets[0].Supply.String()) + // Test with non-existent marker + count, err = svc.Markers().SweepVtxosByMarker(ctx, "nonexistent") + require.NoError(t, err) + require.Equal(t, int64(0), count) }) } -func testFeeRepository(t *testing.T, svc ports.RepoManager) { - t.Run("test_fee_repository", func(t *testing.T) { +// testMarkerDepthRangeQueries verifies GetMarkersByDepthRange and GetVtxosByDepthRange +// return correct results for inclusive depth ranges. Tests partial ranges, full ranges, +// and empty ranges for both markers (at depths 0/100/200/300) and VTXOs (at depths +// 0/50/100/150). +func testMarkerDepthRangeQueries(t *testing.T, svc ports.RepoManager) { + t.Run("test_marker_depth_range_queries", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } ctx := context.Background() - repo := svc.Fees() - - // fees should be initialized to empty strings - currentFees, err := repo.GetIntentFees(ctx) - require.NoError(t, err) - require.NotNil(t, currentFees) - require.Equal(t, "", currentFees.OnchainInputFee) - require.Equal(t, "", currentFees.OffchainInputFee) - require.Equal(t, "", currentFees.OnchainOutputFee) - require.Equal(t, "", currentFees.OffchainOutputFee) + commitmentTxid := randomString(32) - newFees := domain.IntentFees{ - OnchainInputFee: "0.25", - OffchainInputFee: "0.30", - OnchainOutputFee: "0.35", - OffchainOutputFee: "0.40", + // Add markers at depths 0, 100, 200, 300 with unique IDs + markerDepth0 := domain.Marker{ + ID: "range_test_" + randomString(16), + Depth: 0, + ParentMarkerIDs: nil, + } + markerDepth100 := domain.Marker{ + ID: "range_test_" + randomString(16), + Depth: 100, + 
ParentMarkerIDs: []string{markerDepth0.ID}, + } + markerDepth200 := domain.Marker{ + ID: "range_test_" + randomString(16), + Depth: 200, + ParentMarkerIDs: []string{markerDepth100.ID}, + } + markerDepth300 := domain.Marker{ + ID: "range_test_" + randomString(16), + Depth: 300, + ParentMarkerIDs: []string{markerDepth200.ID}, } - // sqlite and postgres use millisecond precision for created_at so we need to - // wait to ensure the updated_at is different. - // set the new fees - time.Sleep(10 * time.Millisecond) - err = repo.UpdateIntentFees(ctx, newFees) + err := svc.Markers().AddMarker(ctx, markerDepth0) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, markerDepth100) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, markerDepth200) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, markerDepth300) require.NoError(t, err) - updatedFees, err := repo.GetIntentFees(ctx) + // Test GetMarkersByDepthRange(50, 250) - should return markers at 100 and 200 + markersInRange, err := svc.Markers().GetMarkersByDepthRange(ctx, 50, 250) require.NoError(t, err) - require.NotNil(t, updatedFees) - require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) - require.Equal(t, newFees.OffchainInputFee, updatedFees.OffchainInputFee) - require.Equal(t, newFees.OnchainOutputFee, updatedFees.OnchainOutputFee) - require.Equal(t, newFees.OffchainOutputFee, updatedFees.OffchainOutputFee) - time.Sleep(10 * time.Millisecond) - // zero out the fees - err = repo.ClearIntentFees(ctx) - require.NoError(t, err) - - clearedFees, err := repo.GetIntentFees(ctx) - require.NoError(t, err) - require.NotNil(t, clearedFees) - require.Equal(t, "", clearedFees.OnchainInputFee) - require.Equal(t, "", clearedFees.OffchainInputFee) - require.Equal(t, "", clearedFees.OnchainOutputFee) - require.Equal(t, "", clearedFees.OffchainOutputFee) - // set the fees back to newFees - time.Sleep(10 * time.Millisecond) - err = repo.UpdateIntentFees(ctx, newFees) - 
require.NoError(t, err) + // Filter to only our test markers + var ourMarkers []domain.Marker + testMarkerIDs := map[string]bool{ + markerDepth0.ID: true, + markerDepth100.ID: true, + markerDepth200.ID: true, + markerDepth300.ID: true, + } + for _, m := range markersInRange { + if testMarkerIDs[m.ID] { + ourMarkers = append(ourMarkers, m) + } + } + require.Len(t, ourMarkers, 2) + foundDepths := []uint32{ourMarkers[0].Depth, ourMarkers[1].Depth} + require.ElementsMatch(t, []uint32{100, 200}, foundDepths) - updatedFees, err = repo.GetIntentFees(ctx) + // Test range that includes all + markersInRange, err = svc.Markers().GetMarkersByDepthRange(ctx, 0, 300) require.NoError(t, err) - require.NotNil(t, updatedFees) - require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) - require.Equal(t, newFees.OffchainInputFee, updatedFees.OffchainInputFee) - require.Equal(t, newFees.OnchainOutputFee, updatedFees.OnchainOutputFee) - require.Equal(t, newFees.OffchainOutputFee, updatedFees.OffchainOutputFee) - - // only change 2 of the fees, the others should remain the same (testing partial updates) - newFees = domain.IntentFees{ - OnchainInputFee: "0.25", - OffchainOutputFee: "0.40", + ourMarkers = nil + for _, m := range markersInRange { + if testMarkerIDs[m.ID] { + ourMarkers = append(ourMarkers, m) + } } - time.Sleep(10 * time.Millisecond) - err = repo.UpdateIntentFees(ctx, newFees) - require.NoError(t, err) + require.Len(t, ourMarkers, 4) - updatedFees, err = repo.GetIntentFees(ctx) + // Test range that includes none of our test markers + markersInRange, err = svc.Markers().GetMarkersByDepthRange(ctx, 350, 400) require.NoError(t, err) - require.NotNil(t, updatedFees) - require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) - require.Equal(t, "0.30", updatedFees.OffchainInputFee) - require.Equal(t, "0.35", updatedFees.OnchainOutputFee) - require.Equal(t, newFees.OffchainOutputFee, updatedFees.OffchainOutputFee) + ourMarkers = nil + for _, m := range 
markersInRange { + if testMarkerIDs[m.ID] { + ourMarkers = append(ourMarkers, m) + } + } + require.Empty(t, ourMarkers) - // test that updating with no fees yields an error and does not change existing fees - newFees = domain.IntentFees{} - time.Sleep(10 * time.Millisecond) - err = repo.UpdateIntentFees(ctx, newFees) - require.Error(t, err) + // Add VTXOs at depths 0, 50, 100, 150 with unique IDs + vtxoDepth0 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo_range_" + randomString(24), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + } + vtxoDepth50 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo_range_" + randomString(24), VOut: 0}, + PubKey: pubkey, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 50, + } + vtxoDepth100 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo_range_" + randomString(24), VOut: 0}, + PubKey: pubkey, + Amount: 3000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 100, + } + vtxoDepth150 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "vtxo_range_" + randomString(24), VOut: 0}, + PubKey: pubkey, + Amount: 4000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 150, + } - updatedFees, err = repo.GetIntentFees(ctx) + err = svc.Vtxos(). 
+ AddVtxos(ctx, []domain.Vtxo{vtxoDepth0, vtxoDepth50, vtxoDepth100, vtxoDepth150}) require.NoError(t, err) - require.NotNil(t, updatedFees) - require.Equal(t, "0.25", updatedFees.OnchainInputFee) - require.Equal(t, "0.30", updatedFees.OffchainInputFee) - require.Equal(t, "0.35", updatedFees.OnchainOutputFee) - require.Equal(t, "0.40", updatedFees.OffchainOutputFee) - // zero out the fees - err = repo.ClearIntentFees(ctx) + // Test GetVtxosByDepthRange(25, 125) - should return VTXOs at 50 and 100 + vtxosInRange, err := svc.Markers().GetVtxosByDepthRange(ctx, 25, 125) require.NoError(t, err) - // do partial update after clearing to ensure fees are set correctly from zero state - newFees = domain.IntentFees{ - OnchainInputFee: "0.15", - OffchainInputFee: "0.20", + // Filter to only our test vtxos + testVtxoTxids := map[string]bool{ + vtxoDepth0.Txid: true, + vtxoDepth50.Txid: true, + vtxoDepth100.Txid: true, + vtxoDepth150.Txid: true, } - time.Sleep(10 * time.Millisecond) - err = repo.UpdateIntentFees(ctx, newFees) + var ourVtxos []domain.Vtxo + for _, v := range vtxosInRange { + if testVtxoTxids[v.Txid] { + ourVtxos = append(ourVtxos, v) + } + } + require.Len(t, ourVtxos, 2) + foundVtxoDepths := []uint32{ourVtxos[0].Depth, ourVtxos[1].Depth} + require.ElementsMatch(t, []uint32{50, 100}, foundVtxoDepths) + + // Test range that includes all test vtxos + vtxosInRange, err = svc.Markers().GetVtxosByDepthRange(ctx, 0, 150) require.NoError(t, err) + ourVtxos = nil + for _, v := range vtxosInRange { + if testVtxoTxids[v.Txid] { + ourVtxos = append(ourVtxos, v) + } + } + require.Len(t, ourVtxos, 4) - updatedFees, err = repo.GetIntentFees(ctx) + // Test range that includes none + vtxosInRange, err = svc.Markers().GetVtxosByDepthRange(ctx, 200, 300) require.NoError(t, err) - require.NotNil(t, updatedFees) - require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) - require.Equal(t, newFees.OffchainInputFee, updatedFees.OffchainInputFee) - require.Equal(t, "", 
updatedFees.OnchainOutputFee) - require.Equal(t, "", updatedFees.OffchainOutputFee) + ourVtxos = nil + for _, v := range vtxosInRange { + if testVtxoTxids[v.Txid] { + ourVtxos = append(ourVtxos, v) + } + } + require.Empty(t, ourVtxos) }) } -func assertScheduledSessionEqual(t *testing.T, expected, actual domain.ScheduledSession) { - assert.True(t, expected.StartTime.Equal(actual.StartTime), "StartTime not equal") - assert.Equal(t, expected.Period, actual.Period, "Period not equal") - assert.Equal(t, expected.Duration, actual.Duration, "Duration not equal") - assert.True(t, expected.UpdatedAt.Equal(actual.UpdatedAt), "UpdatedAt not equal") - assert.True(t, expected.EndTime.Equal(actual.EndTime), "EndTime not equal") -} +// testMarkerChainTraversal creates a two-marker chain with VTXOs linked by ark txid, +// then verifies GetVtxoChainByMarkers returns the correct VTXOs for single and +// multi-marker queries. Also tests GetVtxosByArkTxid and edge cases (empty/non-existent). +func testMarkerChainTraversal(t *testing.T, svc ports.RepoManager) { + t.Run("test_marker_chain_traversal", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) -func assertConvictionEqual(t *testing.T, expected, actual domain.Conviction) { - require.Equal(t, expected.GetID(), actual.GetID()) - require.Equal(t, expected.GetType(), actual.GetType()) - require.Equal(t, expected.GetCrime(), actual.GetCrime()) - require.Equal(t, expected.IsPardoned(), actual.IsPardoned()) + // Create markers for the chain + marker1 := domain.Marker{ + ID: "chain_marker_" + randomString(16), + Depth: 0, + ParentMarkerIDs: nil, + } + marker2 := domain.Marker{ + ID: "chain_marker_" + randomString(16), + Depth: 100, + ParentMarkerIDs: []string{marker1.ID}, + } - require.WithinDuration(t, expected.GetCreatedAt(), actual.GetCreatedAt(), time.Second) + err := svc.Markers().AddMarker(ctx, 
marker1) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, marker2) + require.NoError(t, err) - if expected.GetExpiresAt() == nil { - require.Nil(t, actual.GetExpiresAt()) - } else { - require.NotNil(t, actual.GetExpiresAt()) - require.WithinDuration(t, *expected.GetExpiresAt(), *actual.GetExpiresAt(), time.Second) - } + // Create an ark_txid that links vtxos together + arkTxid := "ark_chain_" + randomString(24) - if expectedConv, ok := expected.(domain.ScriptConviction); ok { - if actualConv, ok := actual.(domain.ScriptConviction); ok { - require.Equal(t, expectedConv.Script, actualConv.Script) + // Add VTXOs with ark_txid (marker_ids will be set via UpdateVtxoMarker) + vtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "chain_vtxo_" + randomString(20), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + ArkTxid: arkTxid, + } + vtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: arkTxid, VOut: 0}, // Created by arkTxid + PubKey: pubkey, + Amount: 900, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 1, + } + vtxo3 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "chain_vtxo_" + randomString(20), VOut: 0}, + PubKey: pubkey, + Amount: 800, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 100, } - } -} -func roundsMatch(t *testing.T, expected, got domain.Round) { - require.Equal(t, expected.Id, got.Id) - require.Equal(t, expected.StartingTimestamp, got.StartingTimestamp) - require.Equal(t, expected.EndingTimestamp, got.EndingTimestamp) - require.Equal(t, expected.Stage, got.Stage) - require.Equal(t, expected.CommitmentTxid, got.CommitmentTxid) - require.Equal(t, expected.CommitmentTx, got.CommitmentTx) - require.Exactly(t, expected.VtxoTree, got.VtxoTree) + err = svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{vtxo1, vtxo2, vtxo3}) + require.NoError(t, err) - for k, v := range 
expected.Intents { - gotValue, ok := got.Intents[k] - require.True(t, ok) + // Associate VTXOs with their markers using UpdateVtxoMarkers + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxo1.Outpoint, []string{marker1.ID}) + require.NoError(t, err) + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxo2.Outpoint, []string{marker1.ID}) + require.NoError(t, err) + err = svc.Markers().UpdateVtxoMarkers(ctx, vtxo3.Outpoint, []string{marker2.ID}) + require.NoError(t, err) - require.ElementsMatch(t, v.Receivers, gotValue.Receivers) - require.ElementsMatch(t, v.Inputs, gotValue.Inputs) - require.Equal(t, v.Txid, gotValue.Txid) - require.Equal(t, v.Proof, gotValue.Proof) - require.Equal(t, v.Message, gotValue.Message) - } + // Test GetVtxoChainByMarkers - returns VTXOs for given marker list + vtxosByMarkers, err := svc.Markers().GetVtxoChainByMarkers(ctx, []string{marker1.ID}) + require.NoError(t, err) + require.Len(t, vtxosByMarkers, 2) // vtxo1 and vtxo2 have marker1.ID + foundTxids := make(map[string]bool) + for _, v := range vtxosByMarkers { + foundTxids[v.Txid] = true + } + require.True(t, foundTxids[vtxo1.Txid]) + require.True(t, foundTxids[vtxo2.Txid]) - if len(expected.ForfeitTxs) > 0 { - sort.SliceStable(expected.ForfeitTxs, func(i, j int) bool { - return expected.ForfeitTxs[i].Txid < expected.ForfeitTxs[j].Txid - }) - sort.SliceStable(got.ForfeitTxs, func(i, j int) bool { - return got.ForfeitTxs[i].Txid < got.ForfeitTxs[j].Txid - }) + // Test with both markers + vtxosByMarkers, err = svc.Markers(). 
+ GetVtxoChainByMarkers(ctx, []string{marker1.ID, marker2.ID}) + require.NoError(t, err) + require.Len(t, vtxosByMarkers, 3) - require.Exactly(t, expected.ForfeitTxs, got.ForfeitTxs) - } + // Test with empty marker list + vtxosByMarkers, err = svc.Markers().GetVtxoChainByMarkers(ctx, []string{}) + require.NoError(t, err) + require.Nil(t, vtxosByMarkers) - if len(expected.Connectors) > 0 { - require.Exactly(t, expected.Connectors, got.Connectors) - } + // Test with non-existent marker + vtxosByMarkers, err = svc.Markers().GetVtxoChainByMarkers(ctx, []string{"nonexistent"}) + require.NoError(t, err) + require.Empty(t, vtxosByMarkers) - if len(expected.VtxoTree) > 0 { - require.Exactly(t, expected.VtxoTree, got.VtxoTree) - } + // Test GetVtxosByArkTxid - returns VTXOs created by specific ark tx + vtxosByArkTxid, err := svc.Markers().GetVtxosByArkTxid(ctx, arkTxid) + require.NoError(t, err) + require.Len(t, vtxosByArkTxid, 1) // Only vtxo1 has ArkTxid == arkTxid + require.Equal(t, vtxo1.Txid, vtxosByArkTxid[0].Txid) - require.Equal(t, expected.Swept, got.Swept) - for k, v := range expected.SweepTxs { - gotValue, ok := got.SweepTxs[k] - require.True(t, ok) - require.Equal(t, v, gotValue) - } + // Test GetVtxosByArkTxid with non-existent ark txid + vtxosByArkTxid, err = svc.Markers().GetVtxosByArkTxid(ctx, "nonexistent") + require.NoError(t, err) + require.Empty(t, vtxosByArkTxid) + }) } -func offchainTxMatch(expected, got domain.OffchainTx) assert.Comparison { - return func() bool { - if expected.Stage != got.Stage { - return false - } - if expected.StartingTimestamp != got.StartingTimestamp { - return false +// testGetVtxoChainWithMarkerOptimization tests that GetVtxoChain correctly +// traverses a deep VTXO chain and uses marker-based prefetching. +// This verifies: +// 1. Markers are correctly created at depth boundaries (0, 100, 200) +// 2. VTXOs have correct marker assignments +// 3. 
GetVtxoChainByMarkers returns all VTXOs for the marker chain +func testGetVtxoChainWithMarkerOptimization(t *testing.T, svc ports.RepoManager) { + t.Run("test_get_vtxo_chain_with_marker_optimization", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") } - if expected.EndingTimestamp != got.EndingTimestamp { - return false + ctx := context.Background() + commitmentTxid := randomString(32) + + // Create markers at depths 0, 100, 200 (simulating a chain spanning 250 depths) + marker0 := domain.Marker{ + ID: "opt_marker_0_" + randomString(16), + Depth: 0, + ParentMarkerIDs: nil, } - if expected.ArkTxid != got.ArkTxid { - return false + marker100 := domain.Marker{ + ID: "opt_marker_100_" + randomString(16), + Depth: 100, + ParentMarkerIDs: []string{marker0.ID}, } - if expected.ArkTx != got.ArkTx { - return false + marker200 := domain.Marker{ + ID: "opt_marker_200_" + randomString(16), + Depth: 200, + ParentMarkerIDs: []string{marker100.ID}, } - for k, v := range expected.CheckpointTxs { - gotValue, ok := got.CheckpointTxs[k] - if !ok { - return false - } - if v != gotValue { - return false + + err := svc.Markers().AddMarker(ctx, marker0) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, marker100) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, marker200) + require.NoError(t, err) + + // Create VTXOs at various depths across the marker boundaries: + // - VTXOs at depth 0-99 should have marker0.ID + // - VTXOs at depth 100-199 should have marker100.ID + // - VTXOs at depth 200-250 should have marker200.ID + vtxos := make([]domain.Vtxo, 0) + vtxoMarkerMap := make(map[string]string) // outpoint -> markerID + + // Helper to determine which marker a VTXO should have based on depth + getMarkerForDepth := func(depth uint32) string { + if depth >= 200 { + return marker200.ID + } else if depth >= 100 { + return marker100.ID } + return marker0.ID } - if len(expected.CommitmentTxids) > 0 { - 
if !reflect.DeepEqual(expected.CommitmentTxids, got.CommitmentTxids) { - return false + + // Create VTXOs at sample depths: 0, 50, 99, 100, 150, 199, 200, 225, 250 + sampleDepths := []uint32{0, 50, 99, 100, 150, 199, 200, 225, 250} + for i, depth := range sampleDepths { + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: "opt_chain_vtxo_" + randomString(16), + VOut: uint32(i), + }, + PubKey: pubkey, + Amount: uint64(1000 * (i + 1)), + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: depth, } + vtxos = append(vtxos, vtxo) + vtxoMarkerMap[vtxo.Outpoint.String()] = getMarkerForDepth(depth) } - if expected.ExpiryTimestamp != got.ExpiryTimestamp { - return false + + // Add all VTXOs + err = svc.Vtxos().AddVtxos(ctx, vtxos) + require.NoError(t, err) + + // Associate VTXOs with their markers + for _, v := range vtxos { + markerID := vtxoMarkerMap[v.Outpoint.String()] + err = svc.Markers().UpdateVtxoMarkers(ctx, v.Outpoint, []string{markerID}) + require.NoError(t, err) } - if expected.FailReason != got.FailReason { - return false + + // Verify each VTXO has the correct marker assigned + for _, v := range vtxos { + retrievedVtxos, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{v.Outpoint}) + require.NoError(t, err) + require.Len(t, retrievedVtxos, 1) + expectedMarker := vtxoMarkerMap[v.Outpoint.String()] + require.Contains(t, retrievedVtxos[0].MarkerIDs, expectedMarker, + "VTXO at depth %d should have marker %s", v.Depth, expectedMarker) } - return true - } -} -func randomString(len int) string { - buf := make([]byte, len) - // nolint - rand.Read(buf) - return hex.EncodeToString(buf) -} + // Test 1: Query VTXOs using the full marker chain (marker200 -> marker100 -> marker0) + // This simulates what prefetchVtxosByMarkers does + fullMarkerChain := []string{marker200.ID, marker100.ID, marker0.ID} + allChainVtxos, err := svc.Markers().GetVtxoChainByMarkers(ctx, fullMarkerChain) + require.NoError(t, err) + require.Len(t, 
allChainVtxos, len(vtxos), "Should return all VTXOs in the chain") -func randomTx() string { - hash, _ := chainhash.NewHashFromStr(randomString(32)) + // Verify all our VTXOs are in the result + resultOutpoints := make(map[string]bool) + for _, v := range allChainVtxos { + resultOutpoints[v.Outpoint.String()] = true + } + for _, v := range vtxos { + require.True(t, resultOutpoints[v.Outpoint.String()], + "VTXO %s at depth %d should be in result", v.Outpoint.String(), v.Depth) + } - ptx, _ := psbt.New( + // Test 2: Query with just marker0 - should return only depth 0-99 VTXOs + marker0Vtxos, err := svc.Markers().GetVtxoChainByMarkers(ctx, []string{marker0.ID}) + require.NoError(t, err) + for _, v := range marker0Vtxos { + // Only check our test VTXOs (filter by prefix) + if len(v.Txid) > 0 && v.Txid[:13] == "opt_chain_vtx" { + require.True(t, v.Depth < 100, + "VTXOs with marker0 should have depth < 100, got depth %d", v.Depth) + } + } + + // Test 3: Query with marker200 only - should return only depth 200+ VTXOs + marker200Vtxos, err := svc.Markers().GetVtxoChainByMarkers(ctx, []string{marker200.ID}) + require.NoError(t, err) + for _, v := range marker200Vtxos { + if len(v.Txid) > 0 && v.Txid[:13] == "opt_chain_vtx" { + require.True(t, v.Depth >= 200, + "VTXOs with marker200 should have depth >= 200, got depth %d", v.Depth) + } + } + + // Test 4: Verify marker chain can be followed via ParentMarkerIDs + // Starting from marker200, should be able to traverse to marker0 + currentMarker, err := svc.Markers().GetMarker(ctx, marker200.ID) + require.NoError(t, err) + require.NotNil(t, currentMarker) + require.Equal(t, uint32(200), currentMarker.Depth) + require.Len(t, currentMarker.ParentMarkerIDs, 1) + require.Equal(t, marker100.ID, currentMarker.ParentMarkerIDs[0]) + + currentMarker, err = svc.Markers().GetMarker(ctx, currentMarker.ParentMarkerIDs[0]) + require.NoError(t, err) + require.NotNil(t, currentMarker) + require.Equal(t, uint32(100), currentMarker.Depth) + 
require.Len(t, currentMarker.ParentMarkerIDs, 1) + require.Equal(t, marker0.ID, currentMarker.ParentMarkerIDs[0]) + + currentMarker, err = svc.Markers().GetMarker(ctx, currentMarker.ParentMarkerIDs[0]) + require.NoError(t, err) + require.NotNil(t, currentMarker) + require.Equal(t, uint32(0), currentMarker.Depth) + require.Empty(t, currentMarker.ParentMarkerIDs) // Root marker has no parents + + // Test 5: Test GetMarkersByIds with the full chain + markers, err := svc.Markers().GetMarkersByIds(ctx, fullMarkerChain) + require.NoError(t, err) + require.Len(t, markers, 3) + markerDepths := make(map[uint32]bool) + for _, m := range markers { + markerDepths[m.Depth] = true + } + require.True(t, markerDepths[0]) + require.True(t, markerDepths[100]) + require.True(t, markerDepths[200]) + + // Test 6: Verify VTXOs can be retrieved by depth range + vtxosDepth50to150, err := svc.Markers().GetVtxosByDepthRange(ctx, 50, 150) + require.NoError(t, err) + // Filter to our test VTXOs + ourVtxosInRange := 0 + for _, v := range vtxosDepth50to150 { + if len(v.Txid) > 13 && v.Txid[:13] == "opt_chain_vtx" { + ourVtxosInRange++ + require.True(t, v.Depth >= 50 && v.Depth <= 150, + "VTXO depth %d should be in range [50, 150]", v.Depth) + } + } + // We expect VTXOs at depths 50, 99, 100, 150 to be in range + require.Equal(t, 4, ourVtxosInRange, "Expected 4 VTXOs in depth range 50-150") + }) +} + +// testBulkSweepMarkersConcurrent tests that BulkSweepMarkers is thread-safe +// when multiple goroutines attempt to sweep the same markers concurrently. +// This verifies: +// 1. No race conditions occur with concurrent sweeps +// 2. Idempotency is maintained (same markers can be swept multiple times safely) +// 3. 
All markers end up in the correct swept state +func testBulkSweepMarkersConcurrent(t *testing.T, svc ports.RepoManager) { + t.Run("test_bulk_sweep_markers_concurrent", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + + // Create 20 markers to sweep concurrently + numMarkers := 20 + markers := make([]domain.Marker, numMarkers) + markerIDs := make([]string, numMarkers) + for i := 0; i < numMarkers; i++ { + markers[i] = domain.Marker{ + ID: "concurrent_marker_" + randomString(16), + Depth: uint32(i * 100), + ParentMarkerIDs: nil, + } + if i > 0 { + markers[i].ParentMarkerIDs = []string{markers[i-1].ID} + } + markerIDs[i] = markers[i].ID + } + + // Add all markers + for _, m := range markers { + err := svc.Markers().AddMarker(ctx, m) + require.NoError(t, err) + } + + // Verify none are swept initially + for _, id := range markerIDs { + isSwept, err := svc.Markers().IsMarkerSwept(ctx, id) + require.NoError(t, err) + require.False(t, isSwept, "Marker %s should not be swept initially", id) + } + + // Launch concurrent goroutines to sweep the same markers + numGoroutines := 10 + sweptAt := time.Now().UnixMilli() + + var wg sync.WaitGroup + errChan := make(chan error, numGoroutines) + + for i := 0; i < numGoroutines; i++ { + wg.Add(1) + go func(goroutineID int) { + defer wg.Done() + // Each goroutine sweeps all markers with slightly different timestamp + err := svc.Markers().BulkSweepMarkers(ctx, markerIDs, sweptAt+int64(goroutineID)) + if err != nil { + errChan <- err + } + }(i) + } + + wg.Wait() + close(errChan) + + // Check for errors from goroutines + for err := range errChan { + require.NoError(t, err, "BulkSweepMarkers should not error on concurrent calls") + } + + // Verify all markers are now swept + for _, id := range markerIDs { + isSwept, err := svc.Markers().IsMarkerSwept(ctx, id) + require.NoError(t, err) + require.True(t, isSwept, "Marker %s should be swept 
after concurrent operations", id) + } + + // Verify swept markers can be retrieved + sweptMarkers, err := svc.Markers().GetSweptMarkers(ctx, markerIDs) + require.NoError(t, err) + require.Len(t, sweptMarkers, numMarkers) + }) + + t.Run("test_bulk_sweep_overlapping_marker_sets", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + + // Create 30 markers + numMarkers := 30 + markers := make([]domain.Marker, numMarkers) + markerIDs := make([]string, numMarkers) + for i := 0; i < numMarkers; i++ { + markers[i] = domain.Marker{ + ID: "overlap_marker_" + randomString(16), + Depth: uint32(i * 50), + ParentMarkerIDs: nil, + } + markerIDs[i] = markers[i].ID + } + + // Add all markers + for _, m := range markers { + err := svc.Markers().AddMarker(ctx, m) + require.NoError(t, err) + } + + // Create overlapping subsets + // Set A: markers 0-19 + // Set B: markers 10-29 + // Overlap: markers 10-19 + setA := markerIDs[0:20] + setB := markerIDs[10:30] + + sweptAt := time.Now().UnixMilli() + + var wg sync.WaitGroup + errChan := make(chan error, 2) + + // Sweep set A and set B concurrently + wg.Add(2) + go func() { + defer wg.Done() + if err := svc.Markers().BulkSweepMarkers(ctx, setA, sweptAt); err != nil { + errChan <- err + } + }() + go func() { + defer wg.Done() + if err := svc.Markers().BulkSweepMarkers(ctx, setB, sweptAt+1); err != nil { + errChan <- err + } + }() + + wg.Wait() + close(errChan) + + // Check for errors + for err := range errChan { + require.NoError(t, err, "BulkSweepMarkers should handle overlapping sets") + } + + // Verify all markers are swept + for _, id := range markerIDs { + isSwept, err := svc.Markers().IsMarkerSwept(ctx, id) + require.NoError(t, err) + require.True(t, isSwept, "Marker %s should be swept", id) + } + }) + + t.Run("test_bulk_sweep_empty_and_non_empty_concurrent", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not 
available for this data store") + } + ctx := context.Background() + + // Create 5 markers + markers := make([]domain.Marker, 5) + markerIDs := make([]string, 5) + for i := 0; i < 5; i++ { + markers[i] = domain.Marker{ + ID: "empty_nonempty_marker_" + randomString(16), + Depth: uint32(i * 100), + ParentMarkerIDs: nil, + } + markerIDs[i] = markers[i].ID + err := svc.Markers().AddMarker(ctx, markers[i]) + require.NoError(t, err) + } + + sweptAt := time.Now().UnixMilli() + + var wg sync.WaitGroup + errChan := make(chan error, 4) + + // Mix of empty and non-empty sweeps concurrently + wg.Add(4) + go func() { + defer wg.Done() + if err := svc.Markers().BulkSweepMarkers(ctx, markerIDs, sweptAt); err != nil { + errChan <- err + } + }() + go func() { + defer wg.Done() + // Empty slice should not error + if err := svc.Markers().BulkSweepMarkers(ctx, []string{}, sweptAt); err != nil { + errChan <- err + } + }() + go func() { + defer wg.Done() + if err := svc.Markers().BulkSweepMarkers(ctx, markerIDs[0:2], sweptAt); err != nil { + errChan <- err + } + }() + go func() { + defer wg.Done() + // Empty slice again + if err := svc.Markers().BulkSweepMarkers(ctx, []string{}, sweptAt); err != nil { + errChan <- err + } + }() + + wg.Wait() + close(errChan) + + for err := range errChan { + require.NoError(t, err) + } + + // All markers should be swept + for _, id := range markerIDs { + isSwept, err := svc.Markers().IsMarkerSwept(ctx, id) + require.NoError(t, err) + require.True(t, isSwept, "Marker %s should be swept", id) + } + }) + + t.Run("test_bulk_sweep_idempotency_rapid_fire", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + + // Create a single marker and sweep it many times concurrently + marker := domain.Marker{ + ID: "rapid_fire_marker_" + randomString(16), + Depth: 0, + ParentMarkerIDs: nil, + } + err := svc.Markers().AddMarker(ctx, marker) + require.NoError(t, err) + + sweptAt 
:= time.Now().UnixMilli() + + // Launch 50 concurrent sweeps on the same marker + numSweeps := 50 + var wg sync.WaitGroup + errChan := make(chan error, numSweeps) + + for i := 0; i < numSweeps; i++ { + wg.Add(1) + go func(idx int) { + defer wg.Done() + if err := svc.Markers(). + BulkSweepMarkers(ctx, []string{marker.ID}, sweptAt+int64(idx)); err != nil { + errChan <- err + } + }(i) + } + + wg.Wait() + close(errChan) + + for err := range errChan { + require.NoError(t, err, "Rapid-fire sweeps should all succeed") + } + + // Verify marker is swept and only one record exists + isSwept, err := svc.Markers().IsMarkerSwept(ctx, marker.ID) + require.NoError(t, err) + require.True(t, isSwept) + + // Get swept markers should return exactly 1 entry + sweptMarkers, err := svc.Markers().GetSweptMarkers(ctx, []string{marker.ID}) + require.NoError(t, err) + require.Len(t, sweptMarkers, 1) + }) +} + +// testCreateRootMarkersForVtxos verifies that CreateRootMarkersForVtxos creates a +// depth-0 root marker for each batch VTXO using the outpoint string as the marker ID. +// Also tests idempotency — calling again with the same VTXOs does not error. 
+func testCreateRootMarkersForVtxos(t *testing.T, svc ports.RepoManager) { + t.Run("test_create_root_markers_for_vtxos", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // Create batch VTXOs at depth 0 with MarkerIDs = outpoint.String() + vtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: nil, // will be set to outpoint.String() by convention + } + vtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: nil, + } + vtxo3 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 1}, + PubKey: pubkey, + Amount: 3000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: nil, + } + + // Set MarkerIDs to outpoint.String() as the service does for batch VTXOs + vtxo1.MarkerIDs = []string{vtxo1.Outpoint.String()} + vtxo2.MarkerIDs = []string{vtxo2.Outpoint.String()} + vtxo3.MarkerIDs = []string{vtxo3.Outpoint.String()} + + vtxos := []domain.Vtxo{vtxo1, vtxo2, vtxo3} + + // Add VTXOs first + err := svc.Vtxos().AddVtxos(ctx, vtxos) + require.NoError(t, err) + + // Create root markers + err = svc.Markers().CreateRootMarkersForVtxos(ctx, vtxos) + require.NoError(t, err) + + // Verify each VTXO got a root marker with ID = outpoint.String() + for _, vtxo := range vtxos { + expectedMarkerID := vtxo.Outpoint.String() + marker, err := svc.Markers().GetMarker(ctx, expectedMarkerID) + require.NoError(t, err) + require.NotNil(t, marker, "root marker should exist for vtxo %s", expectedMarkerID) + require.Equal(t, expectedMarkerID, marker.ID) 
+ require.Equal(t, uint32(0), marker.Depth) + require.Empty(t, marker.ParentMarkerIDs, "root markers should have no parents") + } + + // Idempotency: calling again should not error + err = svc.Markers().CreateRootMarkersForVtxos(ctx, vtxos) + require.NoError(t, err) + }) +} + +// testMarkerCreationAtBoundaryDepth simulates the service logic when a child VTXO +// lands at a marker boundary (depth 100). Verifies that a new marker is created with +// the parent's marker IDs as its ParentMarkerIDs, and that the child VTXO carries +// only the new marker ID. +func testMarkerCreationAtBoundaryDepth(t *testing.T, svc ports.RepoManager) { + t.Run("test_marker_creation_at_boundary_depth", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // Create parent VTXOs at depth 99 with root markers + parentMarkerID := "root_boundary_" + randomString(16) + parentMarker := domain.Marker{ + ID: parentMarkerID, + Depth: 0, + ParentMarkerIDs: nil, + } + err := svc.Markers().AddMarker(ctx, parentMarker) + require.NoError(t, err) + + parentVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 5000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 99, + MarkerIDs: []string{parentMarkerID}, + } + err = svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{parentVtxo}) + require.NoError(t, err) + + // Simulate offchain tx: child at depth 100 (marker boundary) + newDepth := uint32(100) + require.True(t, newDepth%domain.MarkerInterval == 0) + + // Collect parent markers (mimics service logic) + parentMarkerIDs := parentVtxo.MarkerIDs + + // Create new marker at boundary + newMarkerID := "boundary_marker_" + randomString(16) + newMarker := domain.Marker{ + ID: newMarkerID, + Depth: newDepth, + ParentMarkerIDs: parentMarkerIDs, + } + err = svc.Markers().AddMarker(ctx, 
newMarker) + require.NoError(t, err) + + // Create child VTXO with the new marker + childVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 4500, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid, randomString(32)}, + Depth: newDepth, + MarkerIDs: []string{newMarkerID}, + } + err = svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{childVtxo}) + require.NoError(t, err) + + // Verify the marker was created correctly + retrieved, err := svc.Markers().GetMarker(ctx, newMarkerID) + require.NoError(t, err) + require.NotNil(t, retrieved) + require.Equal(t, newDepth, retrieved.Depth) + require.ElementsMatch(t, parentMarkerIDs, retrieved.ParentMarkerIDs) + + // Verify the child VTXO has only the new marker (not parent markers) + childVtxos, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{childVtxo.Outpoint}) + require.NoError(t, err) + require.Len(t, childVtxos, 1) + require.Equal(t, []string{newMarkerID}, childVtxos[0].MarkerIDs) + require.Equal(t, newDepth, childVtxos[0].Depth) + }) +} + +// testMarkerInheritanceAtNonBoundary verifies that a child VTXO at a non-boundary +// depth (e.g. 51) inherits all parent marker IDs rather than creating a new marker. +// Confirms the inherited markers persist through a DB round trip and no spurious +// marker is created. 
+func testMarkerInheritanceAtNonBoundary(t *testing.T, svc ports.RepoManager) { + t.Run("test_marker_inheritance_at_non_boundary", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // Create two parent VTXOs at depth 50 with different markers + markerA := "inherit_marker_A_" + randomString(16) + markerB := "inherit_marker_B_" + randomString(16) + + err := svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerA, Depth: 0, ParentMarkerIDs: nil, + }) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerB, Depth: 0, ParentMarkerIDs: nil, + }) + require.NoError(t, err) + + parent1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 3000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 50, + MarkerIDs: []string{markerA}, + } + parent2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 50, + MarkerIDs: []string{markerB}, + } + + err = svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{parent1, parent2}) + require.NoError(t, err) + + // Child at depth 51 (NOT a boundary) should inherit both parent markers + newDepth := uint32(51) + require.False(t, newDepth%domain.MarkerInterval == 0) + + inheritedMarkers := []string{markerA, markerB} + childVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 4500, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid, randomString(32)}, + Depth: newDepth, + MarkerIDs: inheritedMarkers, + } + err = svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{childVtxo}) + require.NoError(t, err) + + // Verify the child VTXO inherited both parent markers + childVtxos, 
err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{childVtxo.Outpoint}) + require.NoError(t, err) + require.Len(t, childVtxos, 1) + require.ElementsMatch(t, inheritedMarkers, childVtxos[0].MarkerIDs) + require.Equal(t, newDepth, childVtxos[0].Depth) + + // No new marker should have been created for this depth + // (verify by checking there's no marker with this child's txid) + nonExistent, err := svc.Markers().GetMarker(ctx, childVtxo.Outpoint.String()) + require.NoError(t, err) + require.Nil(t, nonExistent) + }) +} + +// testDustVtxoMarkersSweptImmediately simulates the immediate sweep of dust VTXO +// markers that occurs in updateProjectionsAfterOffchainTxEvents. Verifies that +// BulkSweepMarkers marks dust markers as swept with the correct timestamp. +func testDustVtxoMarkersSweptImmediately(t *testing.T, svc ports.RepoManager) { + t.Run("test_dust_vtxo_markers_swept_immediately", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + + // Create markers that represent dust VTXOs (outpoint-based IDs) + dustOutpoint1 := domain.Outpoint{Txid: randomString(32), VOut: 0} + dustOutpoint2 := domain.Outpoint{Txid: randomString(32), VOut: 1} + + dustMarkerID1 := dustOutpoint1.String() + dustMarkerID2 := dustOutpoint2.String() + + // Add root markers for these dust VTXOs + err := svc.Markers().AddMarker(ctx, domain.Marker{ + ID: dustMarkerID1, Depth: 0, ParentMarkerIDs: nil, + }) + require.NoError(t, err) + err = svc.Markers().AddMarker(ctx, domain.Marker{ + ID: dustMarkerID2, Depth: 0, ParentMarkerIDs: nil, + }) + require.NoError(t, err) + + // Verify they are NOT swept initially + isSwept, err := svc.Markers().IsMarkerSwept(ctx, dustMarkerID1) + require.NoError(t, err) + require.False(t, isSwept) + + isSwept, err = svc.Markers().IsMarkerSwept(ctx, dustMarkerID2) + require.NoError(t, err) + require.False(t, isSwept) + + // Simulate the dust sweep that happens in 
updateProjectionsAfterOffchainTxEvents: + // BulkSweepMarkers is called immediately for dust VTXOs + sweptAt := time.Now().Unix() + err = svc.Markers().BulkSweepMarkers(ctx, []string{dustMarkerID1, dustMarkerID2}, sweptAt) + require.NoError(t, err) + + // Verify both dust markers are now swept + isSwept, err = svc.Markers().IsMarkerSwept(ctx, dustMarkerID1) + require.NoError(t, err) + require.True(t, isSwept, "dust marker 1 should be swept immediately") + + isSwept, err = svc.Markers().IsMarkerSwept(ctx, dustMarkerID2) + require.NoError(t, err) + require.True(t, isSwept, "dust marker 2 should be swept immediately") + + // Verify swept records have correct timestamp + sweptMarkers, err := svc.Markers(). + GetSweptMarkers(ctx, []string{dustMarkerID1, dustMarkerID2}) + require.NoError(t, err) + require.Len(t, sweptMarkers, 2) + for _, sm := range sweptMarkers { + require.Equal(t, sweptAt, sm.SweptAt) + } + }) +} + +// testSweepVtxosWithMarkersEmptyInput verifies that BulkSweepMarkers handles an +// empty marker ID slice without errors, covering the early-return path when there +// are no VTXOs to sweep. +func testSweepVtxosWithMarkersEmptyInput(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_vtxos_with_markers_empty_input", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + + // Simulate what sweepVtxosWithMarkers does with empty input: + // it should return early without touching the DB. 
+ vtxoOutpoints := []domain.Outpoint{} + + // Empty outpoints → nothing to fetch, nothing to sweep + require.Empty(t, vtxoOutpoints) + + // BulkSweepMarkers with empty slice should not error + err := svc.Markers().BulkSweepMarkers(ctx, []string{}, time.Now().Unix()) + require.NoError(t, err) + }) +} + +// testSweepVtxosWithMarkersNoMarkersOnVtxos verifies that VTXOs with empty or nil +// MarkerIDs produce an empty marker set when collected, ensuring the sweep logic +// gracefully skips marker operations for legacy or marker-less VTXOs. +func testSweepVtxosWithMarkersNoMarkersOnVtxos(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_vtxos_with_markers_no_markers_on_vtxos", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // Create VTXOs with empty MarkerIDs (legacy / edge case) + vtxo1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: []string{}, // empty + } + vtxo2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: nil, // nil + } + + err := svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{vtxo1, vtxo2}) + require.NoError(t, err) + + // Simulate sweepVtxosWithMarkers logic: + // fetch VTXOs, collect markers, if no markers → return 0 + vtxos, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{vtxo1.Outpoint, vtxo2.Outpoint}) + require.NoError(t, err) + require.Len(t, vtxos, 2) + + // Collect unique markers (should be empty) + uniqueMarkers := make(map[string]struct{}) + for _, vtxo := range vtxos { + for _, markerID := range vtxo.MarkerIDs { + uniqueMarkers[markerID] = struct{}{} + } + } + + // 
No markers to sweep → would return 0 in sweepVtxosWithMarkers + require.Empty(t, uniqueMarkers, "VTXOs with no markers should yield empty marker set") + }) +} + +// testVtxoMarkerIDsRoundTrip verifies that MarkerIDs and Depth survive a write→read +// round trip through the database for various configurations: single marker, multiple +// markers, empty markers, nil markers, and deep VTXOs with two markers. +func testVtxoMarkerIDsRoundTrip(t *testing.T, svc ports.RepoManager) { + t.Run("test_vtxo_marker_ids_round_trip", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // VTXOs with various MarkerIDs configurations + vtxoSingle := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: []string{"single-marker"}, + } + vtxoMulti := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 150, + MarkerIDs: []string{"marker-A", "marker-B", "marker-C"}, + } + vtxoEmpty := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 3000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: []string{}, + } + vtxoNil := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 4000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Depth: 0, + MarkerIDs: nil, + } + vtxoDeep := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pubkey, + Amount: 5000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, 
+ Depth: 500, + MarkerIDs: []string{"marker-500", "marker-400"}, + } + + allVtxos := []domain.Vtxo{vtxoSingle, vtxoMulti, vtxoEmpty, vtxoNil, vtxoDeep} + err := svc.Vtxos().AddVtxos(ctx, allVtxos) + require.NoError(t, err) + + // Retrieve all and verify + outpoints := make([]domain.Outpoint, len(allVtxos)) + for i, v := range allVtxos { + outpoints[i] = v.Outpoint + } + retrieved, err := svc.Vtxos().GetVtxos(ctx, outpoints) + require.NoError(t, err) + require.Len(t, retrieved, 5) + + byOutpoint := make(map[string]domain.Vtxo) + for _, v := range retrieved { + byOutpoint[v.Outpoint.String()] = v + } + + // Single marker + got := byOutpoint[vtxoSingle.Outpoint.String()] + require.Equal(t, uint32(0), got.Depth) + require.Equal(t, []string{"single-marker"}, got.MarkerIDs) + + // Multiple markers — order may vary, use ElementsMatch + got = byOutpoint[vtxoMulti.Outpoint.String()] + require.Equal(t, uint32(150), got.Depth) + require.ElementsMatch(t, []string{"marker-A", "marker-B", "marker-C"}, got.MarkerIDs) + + // Empty markers — should come back as empty or nil (both acceptable) + got = byOutpoint[vtxoEmpty.Outpoint.String()] + require.Equal(t, uint32(0), got.Depth) + require.Empty(t, got.MarkerIDs) + + // Nil markers — should come back as empty or nil + got = byOutpoint[vtxoNil.Outpoint.String()] + require.Empty(t, got.MarkerIDs) + + // Deep VTXO with two markers + got = byOutpoint[vtxoDeep.Outpoint.String()] + require.Equal(t, uint32(500), got.Depth) + require.ElementsMatch(t, []string{"marker-500", "marker-400"}, got.MarkerIDs) + }) +} + +// testGetVtxosByArkTxidMultipleOutputs verifies that GetVtxosByArkTxid returns all +// VTXOs (multiple vouts) produced by a single ark transaction, each with the correct +// depth, markers, and amounts. Also checks that a non-existent ark txid returns empty. 
+func testGetVtxosByArkTxidMultipleOutputs(t *testing.T, svc ports.RepoManager) { + t.Run("test_get_vtxos_by_ark_txid_multiple_outputs", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + commitmentTxid := randomString(32) + + // An ark txid producing multiple VTXOs (different vouts) at the same depth + arkTxid := randomString(32) + sharedMarkers := []string{"shared-marker-" + randomString(8)} + sharedDepth := uint32(100) + + vtxoOut0 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: arkTxid, VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Preconfirmed: true, + ArkTxid: arkTxid, + Depth: sharedDepth, + MarkerIDs: sharedMarkers, + } + vtxoOut1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: arkTxid, VOut: 1}, + PubKey: pubkey2, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Preconfirmed: true, + ArkTxid: arkTxid, + Depth: sharedDepth, + MarkerIDs: sharedMarkers, + } + vtxoOut2 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: arkTxid, VOut: 2}, + PubKey: pubkey, + Amount: 500, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + Preconfirmed: true, + ArkTxid: arkTxid, + Depth: sharedDepth, + MarkerIDs: sharedMarkers, + } + + err := svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{vtxoOut0, vtxoOut1, vtxoOut2}) + require.NoError(t, err) + + // Query by ark txid + results, err := svc.Markers().GetVtxosByArkTxid(ctx, arkTxid) + require.NoError(t, err) + require.Len(t, results, 3) + + // Verify all outputs are returned with correct depth and markers + for _, v := range results { + require.Equal(t, arkTxid, v.Txid) + require.Equal(t, sharedDepth, v.Depth) + require.ElementsMatch(t, sharedMarkers, v.MarkerIDs) + } + + // Verify all vouts are present + vouts := make([]uint32, len(results)) + for i, v := range results { 
+ vouts[i] = v.VOut + } + require.ElementsMatch(t, []uint32{0, 1, 2}, vouts) + + // Non-existent ark txid returns empty + empty, err := svc.Markers().GetVtxosByArkTxid(ctx, "nonexistent") + require.NoError(t, err) + require.Empty(t, empty) + }) +} + +// testCreateRootMarkersForEmptyVtxos verifies that CreateRootMarkersForVtxos handles +// empty and nil VTXO slices gracefully without errors or side effects. +func testCreateRootMarkersForEmptyVtxos(t *testing.T, svc ports.RepoManager) { + t.Run("test_create_root_markers_for_empty_vtxos", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + + // Empty slice should not error and have no side effects + err := svc.Markers().CreateRootMarkersForVtxos(ctx, []domain.Vtxo{}) + require.NoError(t, err) + + // Nil slice should also not error + err = svc.Markers().CreateRootMarkersForVtxos(ctx, nil) + require.NoError(t, err) + }) + + t.Run("test_get_vtxos_with_multiple_pubkeys", func(t *testing.T) { + ctx := t.Context() + + pk1 := randomString(32) + pk2 := randomString(32) + cmtTxid := randomString(32) + + vtxosToAdd := []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pk1, + Amount: 1000, + RootCommitmentTxid: cmtTxid, + CommitmentTxids: []string{cmtTxid}, + }, + { + Outpoint: domain.Outpoint{Txid: randomString(32), VOut: 0}, + PubKey: pk2, + Amount: 2000, + RootCommitmentTxid: cmtTxid, + CommitmentTxids: []string{cmtTxid}, + }, + } + err := svc.Vtxos().AddVtxos(ctx, vtxosToAdd) + require.NoError(t, err) + + // Single pubkey should return 1 vtxo. 
+ got, err := svc.Vtxos().GetAllVtxosWithPubKeys(ctx, []string{pk1}, 0, 0) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, pk1, got[0].PubKey) + + got, err = svc.Vtxos().GetAllVtxosWithPubKeys(ctx, []string{pk2}, 0, 0) + require.NoError(t, err) + require.Len(t, got, 1) + require.Equal(t, pk2, got[0].PubKey) + + // Multiple pubkeys should return vtxos for both. + got, err = svc.Vtxos().GetAllVtxosWithPubKeys(ctx, []string{pk1, pk2}, 0, 0) + require.NoError(t, err) + require.Len(t, got, 2) + + gotPubkeys := map[string]bool{got[0].PubKey: true, got[1].PubKey: true} + require.True(t, gotPubkeys[pk1], "expected vtxo with pubkey pk1") + require.True(t, gotPubkeys[pk2], "expected vtxo with pubkey pk2") + }) +} + +func testScheduledSessionRepository(t *testing.T, svc ports.RepoManager) { + t.Run("test_scheduled_session_repository", func(t *testing.T) { + ctx := context.Background() + repo := svc.ScheduledSession() + + scheduledSession, err := repo.Get(ctx) + require.NoError(t, err) + require.Nil(t, scheduledSession) + + now := time.Now().Truncate(time.Second) + expected := domain.ScheduledSession{ + StartTime: now, + Period: time.Duration(3) * time.Hour, + Duration: time.Duration(20) * time.Second, + UpdatedAt: now, + } + + err = repo.Upsert(ctx, expected) + require.NoError(t, err) + + got, err := repo.Get(ctx) + require.NoError(t, err) + require.NotNil(t, got) + assertScheduledSessionEqual(t, expected, *got) + + expected.Period = time.Duration(4) * time.Hour + expected.Duration = time.Duration(40) * time.Second + expected.UpdatedAt = now.Add(100 * time.Second) + + err = repo.Upsert(ctx, expected) + require.NoError(t, err) + + got, err = repo.Get(ctx) + require.NoError(t, err) + require.NotNil(t, got) + assertScheduledSessionEqual(t, expected, *got) + + err = repo.Clear(ctx) + require.NoError(t, err) + + scheduledSession, err = repo.Get(ctx) + require.NoError(t, err) + require.Nil(t, scheduledSession) + + // No error if trying to clear already cleared 
scheduled session + err = repo.Clear(ctx) + require.NoError(t, err) + }) +} + +func testOffchainTxRepository(t *testing.T, svc ports.RepoManager) { + t.Run("test_offchain_tx_repository", func(t *testing.T) { + ctx := context.Background() + repo := svc.OffchainTxs() + + offchainTx, err := repo.GetOffchainTx(ctx, arkTxid) + require.Nil(t, offchainTx) + require.Error(t, err) + + checkpointTxid1 := "0000000000000000000000000000000000000000000000000000000000000001" + signedCheckpointPtx1 := "cHNldP8BAgQCAAAAAQQBAAEFAQABBgEDAfsEAgAAAAA=signed" + checkpointTxid2 := "0000000000000000000000000000000000000000000000000000000000000002" + signedCheckpointPtx2 := "cHNldP8BAgQCAAAAAQQBAAEFAQABBgEDAfsEAgAAAAB=signed" + rootCommitmentTxid := "0000000000000000000000000000000000000000000000000000000000000003" + commitmentTxid := "0000000000000000000000000000000000000000000000000000000000000004" + events := []domain.Event{ + domain.OffchainTxRequested{ + OffchainTxEvent: domain.OffchainTxEvent{ + Id: arkTxid, + Type: domain.EventTypeOffchainTxRequested, + }, + ArkTx: "", + UnsignedCheckpointTxs: nil, + StartingTimestamp: now.Unix(), + }, + domain.OffchainTxAccepted{ + OffchainTxEvent: domain.OffchainTxEvent{ + Id: arkTxid, + Type: domain.EventTypeOffchainTxAccepted, + }, + CommitmentTxids: map[string]string{ + checkpointTxid1: rootCommitmentTxid, + checkpointTxid2: commitmentTxid, + }, + FinalArkTx: "", + SignedCheckpointTxs: map[string]string{ + checkpointTxid1: signedCheckpointPtx1, + checkpointTxid2: signedCheckpointPtx2, + }, + RootCommitmentTxid: rootCommitmentTxid, + }, + } + offchainTx = domain.NewOffchainTxFromEvents(events) + err = repo.AddOrUpdateOffchainTx(ctx, offchainTx) + require.NoError(t, err) + + gotOffchainTx, err := repo.GetOffchainTx(ctx, arkTxid) + require.NoError(t, err) + require.NotNil(t, offchainTx) + require.True(t, gotOffchainTx.IsAccepted()) + require.Equal(t, rootCommitmentTxid, gotOffchainTx.RootCommitmentTxId) + require.Condition(t, 
offchainTxMatch(*offchainTx, *gotOffchainTx)) + + newEvents := []domain.Event{ + domain.OffchainTxFinalized{ + OffchainTxEvent: domain.OffchainTxEvent{ + Id: arkTxid, + Type: domain.EventTypeOffchainTxFinalized, + }, + FinalCheckpointTxs: nil, + Timestamp: endTimestamp, + }, + } + events = append(events, newEvents...) + offchainTx = domain.NewOffchainTxFromEvents(events) + err = repo.AddOrUpdateOffchainTx(ctx, offchainTx) + require.NoError(t, err) + + gotOffchainTx, err = repo.GetOffchainTx(ctx, arkTxid) + require.NoError(t, err) + require.NotNil(t, offchainTx) + require.True(t, gotOffchainTx.IsFinalized()) + require.Condition(t, offchainTxMatch(*offchainTx, *gotOffchainTx)) + + bulkFetchedTxs, err := repo.GetOffchainTxsByTxids(ctx, []string{arkTxid}) + require.NoError(t, err) + require.Len(t, bulkFetchedTxs, 1) + require.Equal(t, arkTxid, bulkFetchedTxs[0].ArkTxid) + + bulkFetchedTxs, err = repo.GetOffchainTxsByTxids(ctx, []string{"missing-txid"}) + require.NoError(t, err) + require.Empty(t, bulkFetchedTxs) + + // Insert a second offchain tx so we can exercise multi-txid bulk fetch. 
+ secondArkTxid := txidb + secondCheckpointTxid := "0000000000000000000000000000000000000000000000000000000000000005" + secondCheckpointPtx := "cHNldP8BAgQCAAAAAQQBAAEFAQABBgEDAfsEAgAAAAA=signed-2" + secondEvents := []domain.Event{ + domain.OffchainTxRequested{ + OffchainTxEvent: domain.OffchainTxEvent{ + Id: secondArkTxid, + Type: domain.EventTypeOffchainTxRequested, + }, + StartingTimestamp: now.Unix(), + }, + domain.OffchainTxAccepted{ + OffchainTxEvent: domain.OffchainTxEvent{ + Id: secondArkTxid, + Type: domain.EventTypeOffchainTxAccepted, + }, + CommitmentTxids: map[string]string{ + secondCheckpointTxid: rootCommitmentTxid, + }, + SignedCheckpointTxs: map[string]string{ + secondCheckpointTxid: secondCheckpointPtx, + }, + RootCommitmentTxid: rootCommitmentTxid, + }, + } + secondOffchainTx := domain.NewOffchainTxFromEvents(secondEvents) + require.NoError(t, repo.AddOrUpdateOffchainTx(ctx, secondOffchainTx)) + + // Multi-txid fetch returns both, plus tolerates a missing entry. + bulkFetchedTxs, err = repo.GetOffchainTxsByTxids( + ctx, []string{arkTxid, secondArkTxid, "missing-txid"}, + ) + require.NoError(t, err) + require.Len(t, bulkFetchedTxs, 2) + + got := make(map[string]*domain.OffchainTx, len(bulkFetchedTxs)) + for _, tx := range bulkFetchedTxs { + got[tx.ArkTxid] = tx + } + require.Contains(t, got, arkTxid) + require.Contains(t, got, secondArkTxid) + + // Each result must carry its own checkpoint mapping — guards the + // row-grouping logic against cross-txid contamination. 
+ require.Contains(t, got[arkTxid].CheckpointTxs, checkpointTxid1) + require.Contains(t, got[arkTxid].CheckpointTxs, checkpointTxid2) + require.NotContains(t, got[arkTxid].CheckpointTxs, secondCheckpointTxid) + require.Contains(t, got[secondArkTxid].CheckpointTxs, secondCheckpointTxid) + require.NotContains(t, got[secondArkTxid].CheckpointTxs, checkpointTxid1) + require.NotContains(t, got[secondArkTxid].CheckpointTxs, checkpointTxid2) + }) +} + +func testConvictionRepository(t *testing.T, svc ports.RepoManager) { + t.Run("test_conviction_repository", func(t *testing.T) { + ctx := context.Background() + repo := svc.Convictions() + + conviction, err := repo.Get(ctx, "non-existent-id") + require.Error(t, err) + require.Nil(t, conviction) + + scriptConviction, err := repo.GetActiveScriptConvictions(ctx, "non-existent-script") + require.NoError(t, err) + require.Empty(t, scriptConviction) + + convictions, err := repo.GetAll(ctx, time.Now().Add(-time.Hour), time.Now()) + require.NoError(t, err) + require.Empty(t, convictions) + + roundConvictions, err := repo.GetByRoundID(ctx, "non-existent-round") + require.NoError(t, err) + require.Empty(t, roundConvictions) + + roundID1 := uuid.New().String() + roundID2 := uuid.New().String() + script1 := randomString(32) + script2 := randomString(32) + banDuration := time.Duration(1) * time.Hour + + crime1 := domain.Crime{ + Type: domain.CrimeTypeMusig2NonceSubmission, + RoundID: roundID1, + Reason: "Test crime 1", + } + crime2 := domain.Crime{ + Type: domain.CrimeTypeMusig2SignatureSubmission, + RoundID: roundID2, + Reason: "Test crime 2", + } + + conviction1 := domain.NewScriptConviction(script1, crime1, &banDuration) + conviction2 := domain.NewScriptConviction(script2, crime2, nil) // Permanent ban + + err = repo.Add(ctx, conviction1, conviction2) + require.NoError(t, err) + + retrievedConviction1, err := repo.Get(ctx, conviction1.GetID()) + require.NoError(t, err) + require.NotNil(t, retrievedConviction1) + 
assertConvictionEqual(t, conviction1, retrievedConviction1) + + retrievedConviction2, err := repo.Get(ctx, conviction2.GetID()) + require.NoError(t, err) + require.NotNil(t, retrievedConviction2) + assertConvictionEqual(t, conviction2, retrievedConviction2) + + activeConviction1, err := repo.GetActiveScriptConvictions(ctx, script1) + require.NoError(t, err) + require.NotNil(t, activeConviction1) + require.Len(t, activeConviction1, 1) + require.Equal(t, script1, activeConviction1[0].Script) + require.False(t, activeConviction1[0].IsPardoned()) + + activeConviction2, err := repo.GetActiveScriptConvictions(ctx, script2) + require.NoError(t, err) + require.NotNil(t, activeConviction2) + require.Len(t, activeConviction2, 1) + require.Equal(t, script2, activeConviction2[0].Script) + require.False(t, activeConviction2[0].IsPardoned()) + + round1Convictions, err := repo.GetByRoundID(ctx, roundID1) + require.NoError(t, err) + require.Len(t, round1Convictions, 1) + assertConvictionEqual(t, conviction1, round1Convictions[0]) + + round2Convictions, err := repo.GetByRoundID(ctx, roundID2) + require.NoError(t, err) + require.Len(t, round2Convictions, 1) + assertConvictionEqual(t, conviction2, round2Convictions[0]) + + allConvictions, err := repo.GetAll( + ctx, + time.Now().Add(-time.Hour), + time.Now().Add(time.Hour), + ) + require.NoError(t, err) + require.Len(t, allConvictions, 2) + + err = repo.Pardon(ctx, conviction1.GetID()) + require.NoError(t, err) + + pardonedConviction, err := repo.Get(ctx, conviction1.GetID()) + require.NoError(t, err) + require.NotNil(t, pardonedConviction) + require.True(t, pardonedConviction.IsPardoned()) + + activeConvictionAfterPardon, err := repo.GetActiveScriptConvictions(ctx, script1) + require.NoError(t, err) + require.Empty(t, activeConvictionAfterPardon) + + shortDuration := time.Duration(1) * time.Millisecond + crime3 := domain.Crime{ + Type: domain.CrimeTypeMusig2InvalidSignature, + RoundID: roundID1, + Reason: "Test expired crime", + } + 
expiredConviction := domain.NewScriptConviction(script1, crime3, &shortDuration) + err = repo.Add(ctx, expiredConviction) + require.NoError(t, err) + + time.Sleep(10 * time.Millisecond) + + _, err = repo.GetActiveScriptConvictions(ctx, script1) + require.NoError(t, err) + }) +} + +// requireAssetsMatch compares two asset slices by Id, ControlAssetId, Metadata, and Supply (using big.Int.Cmp). +func requireAssetsMatch(t *testing.T, expected, actual []domain.Asset) { + t.Helper() + require.Len(t, actual, len(expected)) + byId := make(map[string]domain.Asset) + for _, a := range actual { + byId[a.Id] = a + } + for _, exp := range expected { + got, ok := byId[exp.Id] + require.True(t, ok) + require.Equal(t, exp.ControlAssetId, got.ControlAssetId) + require.Equal(t, exp.Metadata, got.Metadata) + require.Zero(t, (&exp.Supply).Cmp(&got.Supply)) + } +} + +func testAssetRepository(t *testing.T, svc ports.RepoManager) { + t.Run("test_asset_repository", func(t *testing.T) { + ctx := t.Context() + repo := svc.Assets() + vtxoRepo := svc.Vtxos() + + newAssets := []domain.Asset{ + { + Id: "asset1", + ControlAssetId: "asset2", + Metadata: []asset.Metadata{ + { + Key: []byte("key1"), + Value: []byte("value1"), + }, + { + Key: []byte("abc"), + Value: []byte("cde"), + }, + }, + }, + { + Id: "asset2", + Metadata: []asset.Metadata{ + { + Key: []byte("this is"), + Value: []byte("control asset"), + }, + }, + }, + } + assetIds := []string{"asset1", "asset2", "non-existent-asset"} + + // assets should not exist yet + assets, err := repo.GetAssets(ctx, assetIds) + require.NoError(t, err) + require.Len(t, assets, 0) + + assetsByTx := map[string][]domain.Asset{arkTxid: newAssets} + count, err := repo.AddAssets(ctx, assetsByTx) + require.NoError(t, err) + require.Equal(t, 2, count) + + count, err = repo.AddAssets(ctx, assetsByTx) + require.NoError(t, err) + require.Zero(t, count) + + assets, err = repo.GetAssets(ctx, assetIds) + require.NoError(t, err) + require.Len(t, assets, 2) + 
requireAssetsMatch(t, newAssets, assets) + + assets, err = repo.GetAssets(ctx, assetIds[2:]) + require.NoError(t, err) + require.Empty(t, assets) + + // GetControlAsset: asset1 has control asset asset2, asset2 is control asset (no parent) + controlID, err := repo.GetControlAsset(ctx, "asset1") + require.NoError(t, err) + require.Equal(t, "asset2", controlID) + controlID, err = repo.GetControlAsset(ctx, "asset2") + require.NoError(t, err) + require.Empty(t, controlID) + _, err = repo.GetControlAsset(ctx, "non-existent-asset") + require.Error(t, err) + require.Contains(t, err.Error(), "no control asset found") + + // AssetExists + exists, err := repo.AssetExists(ctx, "asset1") + require.NoError(t, err) + require.True(t, exists) + exists, err = repo.AssetExists(ctx, "asset2") + require.NoError(t, err) + require.True(t, exists) + exists, err = repo.AssetExists(ctx, "non-existent-asset") + require.NoError(t, err) + require.False(t, exists) + + // test asset supply overflow + vtxos := []domain.Vtxo{{ + Outpoint: domain.Outpoint{ + Txid: "supplyOverflowVtxo1", + VOut: 0, + }, + Amount: 330, + Assets: []domain.AssetDenomination{ + { + AssetId: "assetSupplyOverflow", + Amount: math.MaxUint64, + }, + }, + }, + { + Outpoint: domain.Outpoint{ + Txid: "supplyOverflowVtxo2", + VOut: 0, + }, + Amount: 330, + Assets: []domain.AssetDenomination{ + { + AssetId: "assetSupplyOverflow", + Amount: math.MaxUint64, + }, + }, + }} + count, err = repo.AddAssets(ctx, map[string][]domain.Asset{"assetSupplyOverflowTx": { + { + Id: "assetSupplyOverflow", + Metadata: []asset.Metadata{}, + }, + }}) + require.NoError(t, err) + require.Equal(t, 1, count) + + err = vtxoRepo.AddVtxos(ctx, vtxos) + require.NoError(t, err) + + assets, err = repo.GetAssets(ctx, []string{"assetSupplyOverflow"}) + require.NoError(t, err) + require.Len(t, assets, 1) + + expectedSupply := new(big.Int). 
+ Mul(new(big.Int).SetUint64(math.MaxUint64), big.NewInt(2)) + + require.Equal(t, expectedSupply.String(), assets[0].Supply.String()) + }) +} + +func testFeeRepository(t *testing.T, svc ports.RepoManager) { + t.Run("test_fee_repository", func(t *testing.T) { + ctx := context.Background() + repo := svc.Fees() + + // fees should be initialized to empty strings + currentFees, err := repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, currentFees) + require.Equal(t, "", currentFees.OnchainInputFee) + require.Equal(t, "", currentFees.OffchainInputFee) + require.Equal(t, "", currentFees.OnchainOutputFee) + require.Equal(t, "", currentFees.OffchainOutputFee) + + newFees := domain.IntentFees{ + OnchainInputFee: "0.25", + OffchainInputFee: "0.30", + OnchainOutputFee: "0.35", + OffchainOutputFee: "0.40", + } + + // sqlite and postgres use millisecond precision for created_at so we need to + // wait to ensure the updated_at is different. + // set the new fees + time.Sleep(10 * time.Millisecond) + err = repo.UpdateIntentFees(ctx, newFees) + require.NoError(t, err) + + updatedFees, err := repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, updatedFees) + require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) + require.Equal(t, newFees.OffchainInputFee, updatedFees.OffchainInputFee) + require.Equal(t, newFees.OnchainOutputFee, updatedFees.OnchainOutputFee) + require.Equal(t, newFees.OffchainOutputFee, updatedFees.OffchainOutputFee) + time.Sleep(10 * time.Millisecond) + // zero out the fees + err = repo.ClearIntentFees(ctx) + require.NoError(t, err) + + clearedFees, err := repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, clearedFees) + require.Equal(t, "", clearedFees.OnchainInputFee) + require.Equal(t, "", clearedFees.OffchainInputFee) + require.Equal(t, "", clearedFees.OnchainOutputFee) + require.Equal(t, "", clearedFees.OffchainOutputFee) + + // set the fees back to newFees + time.Sleep(10 * 
time.Millisecond) + err = repo.UpdateIntentFees(ctx, newFees) + require.NoError(t, err) + + updatedFees, err = repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, updatedFees) + require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) + require.Equal(t, newFees.OffchainInputFee, updatedFees.OffchainInputFee) + require.Equal(t, newFees.OnchainOutputFee, updatedFees.OnchainOutputFee) + require.Equal(t, newFees.OffchainOutputFee, updatedFees.OffchainOutputFee) + + // only change 2 of the fees, the others should remain the same (testing partial updates) + newFees = domain.IntentFees{ + OnchainInputFee: "0.25", + OffchainOutputFee: "0.40", + } + time.Sleep(10 * time.Millisecond) + err = repo.UpdateIntentFees(ctx, newFees) + require.NoError(t, err) + + updatedFees, err = repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, updatedFees) + require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) + require.Equal(t, "0.30", updatedFees.OffchainInputFee) + require.Equal(t, "0.35", updatedFees.OnchainOutputFee) + require.Equal(t, newFees.OffchainOutputFee, updatedFees.OffchainOutputFee) + + // test that updating with no fees yields an error and does not change existing fees + newFees = domain.IntentFees{} + time.Sleep(10 * time.Millisecond) + err = repo.UpdateIntentFees(ctx, newFees) + require.Error(t, err) + + updatedFees, err = repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, updatedFees) + require.Equal(t, "0.25", updatedFees.OnchainInputFee) + require.Equal(t, "0.30", updatedFees.OffchainInputFee) + require.Equal(t, "0.35", updatedFees.OnchainOutputFee) + require.Equal(t, "0.40", updatedFees.OffchainOutputFee) + + // zero out the fees + err = repo.ClearIntentFees(ctx) + require.NoError(t, err) + + // do partial update after clearing to ensure fees are set correctly from zero state + newFees = domain.IntentFees{ + OnchainInputFee: "0.15", + OffchainInputFee: "0.20", + } + time.Sleep(10 * 
time.Millisecond) + err = repo.UpdateIntentFees(ctx, newFees) + require.NoError(t, err) + + updatedFees, err = repo.GetIntentFees(ctx) + require.NoError(t, err) + require.NotNil(t, updatedFees) + require.Equal(t, newFees.OnchainInputFee, updatedFees.OnchainInputFee) + require.Equal(t, newFees.OffchainInputFee, updatedFees.OffchainInputFee) + require.Equal(t, "", updatedFees.OnchainOutputFee) + require.Equal(t, "", updatedFees.OffchainOutputFee) + }) +} + +func assertScheduledSessionEqual(t *testing.T, expected, actual domain.ScheduledSession) { + assert.True(t, expected.StartTime.Equal(actual.StartTime), "StartTime not equal") + assert.Equal(t, expected.Period, actual.Period, "Period not equal") + assert.Equal(t, expected.Duration, actual.Duration, "Duration not equal") + assert.True(t, expected.UpdatedAt.Equal(actual.UpdatedAt), "UpdatedAt not equal") + assert.True(t, expected.EndTime.Equal(actual.EndTime), "EndTime not equal") +} + +func assertConvictionEqual(t *testing.T, expected, actual domain.Conviction) { + require.Equal(t, expected.GetID(), actual.GetID()) + require.Equal(t, expected.GetType(), actual.GetType()) + require.Equal(t, expected.GetCrime(), actual.GetCrime()) + require.Equal(t, expected.IsPardoned(), actual.IsPardoned()) + + require.WithinDuration(t, expected.GetCreatedAt(), actual.GetCreatedAt(), time.Second) + + if expected.GetExpiresAt() == nil { + require.Nil(t, actual.GetExpiresAt()) + } else { + require.NotNil(t, actual.GetExpiresAt()) + require.WithinDuration(t, *expected.GetExpiresAt(), *actual.GetExpiresAt(), time.Second) + } + + if expectedConv, ok := expected.(domain.ScriptConviction); ok { + if actualConv, ok := actual.(domain.ScriptConviction); ok { + require.Equal(t, expectedConv.Script, actualConv.Script) + } + } +} + +func roundsMatch(t *testing.T, expected, got domain.Round) { + require.Equal(t, expected.Id, got.Id) + require.Equal(t, expected.StartingTimestamp, got.StartingTimestamp) + require.Equal(t, expected.EndingTimestamp, 
got.EndingTimestamp) + require.Equal(t, expected.Stage, got.Stage) + require.Equal(t, expected.CommitmentTxid, got.CommitmentTxid) + require.Equal(t, expected.CommitmentTx, got.CommitmentTx) + require.Exactly(t, expected.VtxoTree, got.VtxoTree) + + for k, v := range expected.Intents { + gotValue, ok := got.Intents[k] + require.True(t, ok) + + require.ElementsMatch(t, v.Receivers, gotValue.Receivers) + require.ElementsMatch(t, v.Inputs, gotValue.Inputs) + require.Equal(t, v.Txid, gotValue.Txid) + require.Equal(t, v.Proof, gotValue.Proof) + require.Equal(t, v.Message, gotValue.Message) + } + + if len(expected.ForfeitTxs) > 0 { + sort.SliceStable(expected.ForfeitTxs, func(i, j int) bool { + return expected.ForfeitTxs[i].Txid < expected.ForfeitTxs[j].Txid + }) + sort.SliceStable(got.ForfeitTxs, func(i, j int) bool { + return got.ForfeitTxs[i].Txid < got.ForfeitTxs[j].Txid + }) + + require.Exactly(t, expected.ForfeitTxs, got.ForfeitTxs) + } + + if len(expected.Connectors) > 0 { + require.Exactly(t, expected.Connectors, got.Connectors) + } + + if len(expected.VtxoTree) > 0 { + require.Exactly(t, expected.VtxoTree, got.VtxoTree) + } + + require.Equal(t, expected.Swept, got.Swept) + for k, v := range expected.SweepTxs { + gotValue, ok := got.SweepTxs[k] + require.True(t, ok) + require.Equal(t, v, gotValue) + } +} + +func offchainTxMatch(expected, got domain.OffchainTx) assert.Comparison { + return func() bool { + if expected.Stage != got.Stage { + return false + } + if expected.StartingTimestamp != got.StartingTimestamp { + return false + } + if expected.EndingTimestamp != got.EndingTimestamp { + return false + } + if expected.ArkTxid != got.ArkTxid { + return false + } + if expected.ArkTx != got.ArkTx { + return false + } + for k, v := range expected.CheckpointTxs { + gotValue, ok := got.CheckpointTxs[k] + if !ok { + return false + } + if v != gotValue { + return false + } + } + if len(expected.CommitmentTxids) > 0 { + if !reflect.DeepEqual(expected.CommitmentTxids, 
got.CommitmentTxids) { + return false + } + } + if expected.ExpiryTimestamp != got.ExpiryTimestamp { + return false + } + if expected.FailReason != got.FailReason { + return false + } + return true + } +} + +func randomString(len int) string { + buf := make([]byte, len) + // nolint + rand.Read(buf) + return hex.EncodeToString(buf) +} + +func randomTx() string { + hash, _ := chainhash.NewHashFromStr(randomString(32)) + + ptx, _ := psbt.New( []*wire.OutPoint{ { - Hash: *hash, - Index: 0, + Hash: *hash, + Index: 0, + }, + }, + []*wire.TxOut{ + { + Value: 1000000, + }, + }, + 3, + 0, + []uint32{ + wire.MaxTxInSequenceNum, + }, + ) + + b64, _ := ptx.B64Encode() + return b64 +} + +// testDeepChain20kMarkers creates a 200-marker chain (depth 0 to 20000) in the +// database, associates VTXOs at various depths, verifies GetVtxoChainByMarkers +// retrieves all VTXOs across the full chain, and then bulk sweeps all markers. +// This validates the system can handle the target maximum depth of 20000. 
+func testDeepChain20kMarkers(t *testing.T, svc ports.RepoManager) { + t.Run("test_deep_chain_20k_markers", func(t *testing.T) { + ctx := context.Background() + + const maxDepth = 20000 + const markerInterval = 100 + const numMarkers = maxDepth/markerInterval + 1 // 201 markers (0, 100, ..., 20000) + + // Create a round for VTXO commitment references + roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // Build the 201-marker chain: marker-0 (root) -> marker-100 -> ... 
-> marker-20000 + allMarkerIDs := make([]string, 0, numMarkers) + for depth := uint32(0); depth <= maxDepth; depth += markerInterval { + markerID := fmt.Sprintf("deep20k-%s-marker-%d", roundId[:8], depth) + allMarkerIDs = append(allMarkerIDs, markerID) + + var parentMarkerIDs []string + if depth > 0 { + parentMarkerIDs = []string{ + fmt.Sprintf("deep20k-%s-marker-%d", roundId[:8], depth-markerInterval), + } + } + + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerID, + Depth: depth, + ParentMarkerIDs: parentMarkerIDs, + })) + } + require.Len(t, allMarkerIDs, numMarkers) + + // Create VTXOs at selected depths across the chain: every 1000th depth + // Each VTXO is associated with the marker at the nearest boundary below it + vtxosToAdd := make([]domain.Vtxo, 0) + vtxoOutpoints := make([]domain.Outpoint, 0) + for depth := uint32(0); depth <= maxDepth; depth += 1000 { + txid := fmt.Sprintf("deep20k-%s-vtxo-%d", roundId[:8], depth) + outpoint := domain.Outpoint{Txid: txid, VOut: 0} + // Nearest marker at or below this depth + nearestMarkerDepth := (depth / markerInterval) * markerInterval + markerID := fmt.Sprintf("deep20k-%s-marker-%d", roundId[:8], nearestMarkerDepth) + + vtxosToAdd = append(vtxosToAdd, domain.Vtxo{ + Outpoint: outpoint, + PubKey: pubkey, + Amount: 1000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: depth, + MarkerIDs: []string{markerID}, + }) + vtxoOutpoints = append(vtxoOutpoints, outpoint) + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, vtxosToAdd)) + + // Associate each VTXO with its marker + for _, vtxo := range vtxosToAdd { + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, vtxo.Outpoint, vtxo.MarkerIDs)) + } + + // Verify: GetVtxoChainByMarkers with ALL markers returns ALL VTXOs + chainVtxos, err := svc.Markers().GetVtxoChainByMarkers(ctx, allMarkerIDs) + require.NoError(t, err) + 
require.Len(t, chainVtxos, len(vtxosToAdd), + "GetVtxoChainByMarkers should return all %d VTXOs across 200 markers", len(vtxosToAdd)) + + // Verify: VTXOs are not swept initially + fetchedVtxos, err := svc.Vtxos().GetVtxos(ctx, vtxoOutpoints) + require.NoError(t, err) + for _, v := range fetchedVtxos { + require.False(t, v.Swept, "vtxo at depth %d should not be swept yet", v.Depth) + } + + // Bulk sweep ALL 201 markers at once + sweptAt := time.Now().Unix() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, allMarkerIDs, sweptAt)) + + // Verify: all VTXOs now appear as swept + fetchedAfter, err := svc.Vtxos().GetVtxos(ctx, vtxoOutpoints) + require.NoError(t, err) + for _, v := range fetchedAfter { + require.True(t, v.Swept, "vtxo at depth %d should be swept after bulk sweep", v.Depth) + } + + // Verify: all markers are recorded as swept + sweptMarkers, err := svc.Markers().GetSweptMarkers(ctx, allMarkerIDs) + require.NoError(t, err) + require.Len(t, sweptMarkers, numMarkers, + "all %d markers should be swept", numMarkers) + }) +} + +// testSweepVtxosWithMarkersIntegration tests the full marker-based sweep flow: +// create VTXOs with markers, then bulk sweep the markers and verify VTXOs +// appear as swept via the marker-based view. 
+func testSweepVtxosWithMarkersIntegration(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_vtxos_with_markers_integration", func(t *testing.T) { + ctx := context.Background() + + // Create a finalized round so VTXOs have a valid commitment txid + roundId := uuid.New().String() + commitmentTxid := randomString(32) + now := time.Now() + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: now.Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: now.Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // Create 3 VTXOs, two sharing a marker and one with its own + txidA := randomString(32) + txidB := randomString(32) + txidC := randomString(32) + sharedMarkerID := "shared-marker-sweep-" + randomString(8) + uniqueMarkerID := "unique-marker-sweep-" + randomString(8) + + vtxosToAdd := []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: txidA, VOut: 0}, + PubKey: pubkey, + Amount: 1000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 50, + MarkerIDs: []string{sharedMarkerID}, + }, + { + Outpoint: domain.Outpoint{Txid: txidB, VOut: 0}, + PubKey: pubkey, + Amount: 2000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 50, + MarkerIDs: []string{sharedMarkerID}, + }, + { + Outpoint: 
domain.Outpoint{Txid: txidC, VOut: 0}, + PubKey: pubkey, + Amount: 3000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 75, + MarkerIDs: []string{uniqueMarkerID}, + }, + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, vtxosToAdd)) + + // Create the markers + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: sharedMarkerID, + Depth: 50, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: uniqueMarkerID, + Depth: 75, + })) + + // Associate VTXOs with their markers + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, + domain.Outpoint{Txid: txidA, VOut: 0}, []string{sharedMarkerID})) + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, + domain.Outpoint{Txid: txidB, VOut: 0}, []string{sharedMarkerID})) + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, + domain.Outpoint{Txid: txidC, VOut: 0}, []string{uniqueMarkerID})) + + // Verify VTXOs are not swept before + fetchedBefore, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{ + {Txid: txidA, VOut: 0}, {Txid: txidB, VOut: 0}, {Txid: txidC, VOut: 0}, + }) + require.NoError(t, err) + require.Len(t, fetchedBefore, 3) + for _, v := range fetchedBefore { + require.False(t, v.Swept, "vtxo %s should not be swept yet", v.Txid) + } + + // Simulate sweepVtxosWithMarkers: collect unique markers, then bulk sweep + uniqueMarkers := make(map[string]struct{}) + for _, vtxo := range vtxosToAdd { + for _, markerID := range vtxo.MarkerIDs { + uniqueMarkers[markerID] = struct{}{} + } + } + markerIDs := make([]string, 0, len(uniqueMarkers)) + for markerID := range uniqueMarkers { + markerIDs = append(markerIDs, markerID) + } + require.Len(t, markerIDs, 2, "should deduplicate to 2 unique markers") + + sweptAt := time.Now().Unix() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, markerIDs, sweptAt)) + + // Verify all VTXOs now appear as swept + 
fetchedAfter, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{ + {Txid: txidA, VOut: 0}, {Txid: txidB, VOut: 0}, {Txid: txidC, VOut: 0}, + }) + require.NoError(t, err) + require.Len(t, fetchedAfter, 3) + for _, v := range fetchedAfter { + require.True(t, v.Swept, "vtxo %s should be swept", v.Txid) + } + + // Verify both markers are recorded as swept + sweptMarkers, err := svc.Markers().GetSweptMarkers(ctx, markerIDs) + require.NoError(t, err) + require.Len(t, sweptMarkers, 2) + for _, sm := range sweptMarkers { + require.Equal(t, sweptAt, sm.SweptAt) + } + }) +} + +// testPartialMarkerSweep creates a 3-marker chain (depth 0→100→200) with 2 VTXOs +// per marker, sweeps only the deeper two markers, and verifies that VTXOs under the +// unswept root marker remain unswept while VTXOs under swept markers are marked as swept. +func testPartialMarkerSweep(t *testing.T, svc ports.RepoManager) { + t.Run("test_partial_marker_sweep", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + + // Create a finalized round + roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // 3 markers: marker-0 (depth 0) -> 
marker-100 (depth 100) -> marker-200 (depth 200) + marker0ID := "partial-m0-" + suffix + marker100ID := "partial-m100-" + suffix + marker200ID := "partial-m200-" + suffix + + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: marker0ID, Depth: 0, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: marker100ID, Depth: 100, ParentMarkerIDs: []string{marker0ID}, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: marker200ID, Depth: 200, ParentMarkerIDs: []string{marker100ID}, + })) + + // 6 VTXOs: 2 per marker + type vtxoSpec struct { + txid string + depth uint32 + markerID string + } + specs := []vtxoSpec{ + {txid: "partial-v25-" + suffix, depth: 25, markerID: marker0ID}, + {txid: "partial-v75-" + suffix, depth: 75, markerID: marker0ID}, + {txid: "partial-v125-" + suffix, depth: 125, markerID: marker100ID}, + {txid: "partial-v175-" + suffix, depth: 175, markerID: marker100ID}, + {txid: "partial-v225-" + suffix, depth: 225, markerID: marker200ID}, + {txid: "partial-v250-" + suffix, depth: 250, markerID: marker200ID}, + } + + vtxosToAdd := make([]domain.Vtxo, len(specs)) + for i, s := range specs { + vtxosToAdd[i] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: s.txid, VOut: 0}, + PubKey: pubkey, + Amount: 1000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: s.depth, + MarkerIDs: []string{s.markerID}, + } + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, vtxosToAdd)) + + // Associate VTXOs with their markers + for _, s := range specs { + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, + domain.Outpoint{Txid: s.txid, VOut: 0}, []string{s.markerID})) + } + + // Sweep only marker-100 and marker-200 (NOT marker-0) + sweptAt := time.Now().Unix() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, + []string{marker100ID, marker200ID}, sweptAt)) + + // Fetch all 6 VTXOs 
and check swept status + outpoints := make([]domain.Outpoint, len(specs)) + for i, s := range specs { + outpoints[i] = domain.Outpoint{Txid: s.txid, VOut: 0} + } + fetched, err := svc.Vtxos().GetVtxos(ctx, outpoints) + require.NoError(t, err) + require.Len(t, fetched, 6) + + for _, v := range fetched { + switch v.Txid { + case specs[0].txid, specs[1].txid: + // depth 25, 75 → marker-0 → NOT swept + require.False( + t, + v.Swept, + "vtxo %s (depth %d, marker-0) should NOT be swept", + v.Txid, + v.Depth, + ) + case specs[2].txid, specs[3].txid, specs[4].txid, specs[5].txid: + // depth 125, 175, 225, 250 → marker-100 or marker-200 → swept + require.True(t, v.Swept, "vtxo %s (depth %d) should be swept", v.Txid, v.Depth) + default: + t.Fatalf("unexpected vtxo txid: %s", v.Txid) + } + } + + // Verify IsMarkerSwept + isSwept, err := svc.Markers().IsMarkerSwept(ctx, marker0ID) + require.NoError(t, err) + require.False(t, isSwept, "marker-0 should NOT be swept") + + isSwept, err = svc.Markers().IsMarkerSwept(ctx, marker100ID) + require.NoError(t, err) + require.True(t, isSwept, "marker-100 should be swept") + + isSwept, err = svc.Markers().IsMarkerSwept(ctx, marker200ID) + require.NoError(t, err) + require.True(t, isSwept, "marker-200 should be swept") + }) +} + +// testListVtxosMarkerSweptFiltering verifies that GetAllNonUnrolledVtxos correctly +// classifies VTXOs as spent/unspent based on marker sweep status. Creates 4 VTXOs +// across two markers, sweeps one marker, and confirms the swept VTXOs appear in the +// spent list while the unswept ones remain in the unspent list. 
+func testListVtxosMarkerSweptFiltering(t *testing.T, svc ports.RepoManager) { + t.Run("test_list_vtxos_marker_swept_filtering", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + testPubkey := "listfilter-pk-" + suffix + + // Create a finalized round + roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // 2 markers + markerAID := "listfilt-mA-" + suffix + markerBID := "listfilt-mB-" + suffix + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerAID, Depth: 0, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerBID, Depth: 0, + })) + + // 4 VTXOs: 2 with marker-A, 2 with marker-B (not unrolled, not spent) + txidA1 := "listfilt-a1-" + suffix + txidA2 := "listfilt-a2-" + suffix + txidB1 := "listfilt-b1-" + suffix + txidB2 := "listfilt-b2-" + suffix + + vtxosToAdd := []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: txidA1, VOut: 0}, + PubKey: testPubkey, + Amount: 1000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 10, + 
MarkerIDs: []string{markerAID}, + }, + { + Outpoint: domain.Outpoint{Txid: txidA2, VOut: 0}, + PubKey: testPubkey, + Amount: 2000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 20, + MarkerIDs: []string{markerAID}, + }, + { + Outpoint: domain.Outpoint{Txid: txidB1, VOut: 0}, + PubKey: testPubkey, + Amount: 3000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 30, + MarkerIDs: []string{markerBID}, + }, + { + Outpoint: domain.Outpoint{Txid: txidB2, VOut: 0}, + PubKey: testPubkey, + Amount: 4000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 40, + MarkerIDs: []string{markerBID}, + }, + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, vtxosToAdd)) + + for _, v := range vtxosToAdd { + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, v.Outpoint, v.MarkerIDs)) + } + + // Sweep only marker-A + sweptAt := time.Now().Unix() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, []string{markerAID}, sweptAt)) + + // Call GetAllNonUnrolledVtxos + unspent, spent, err := svc.Vtxos().GetAllNonUnrolledVtxos(ctx, testPubkey) + require.NoError(t, err) + + // Unspent should be exactly the 2 VTXOs with marker-B + unspentTxids := make(map[string]bool) + for _, v := range unspent { + unspentTxids[v.Txid] = true + } + require.Len(t, unspent, 2, "expected 2 unspent vtxos (marker-B)") + require.True(t, unspentTxids[txidB1], "vtxo B1 should be unspent") + require.True(t, unspentTxids[txidB2], "vtxo B2 should be unspent") + + // Spent should be exactly the 2 VTXOs with marker-A (swept via marker) + spentTxids := make(map[string]bool) + for _, v := range spent { + spentTxids[v.Txid] = true + } + require.True(t, 
spentTxids[txidA1], "vtxo A1 should be in spent list (swept)") + require.True(t, spentTxids[txidA2], "vtxo A2 should be in spent list (swept)") + }) +} + +// testAddMarkerFailureFallbackToParentMarkers verifies the fix for the AddMarker +// failure path in service.go:593. When AddMarker fails at a boundary depth, VTXOs +// should fall back to inheriting parentMarkerIDs instead of getting nil markers. +// This test simulates that fallback and proves the VTXOs remain sweepable via the +// parent marker. +func testAddMarkerFailureFallbackToParentMarkers(t *testing.T, svc ports.RepoManager) { + t.Run("test_add_marker_failure_fallback_to_parent_markers", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + testPubkey := "fallback-pk-" + suffix + + // Create a finalized round. + roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // Create a parent marker (depth 0) — this is the marker the parent VTXO carries. 
+ parentMarkerID := "fallback-parent-m-" + suffix + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: parentMarkerID, Depth: 0, + })) + + // Simulate the fix: at boundary depth 100, AddMarker failed, so we fall + // back to parentMarkerIDs. The child VTXO inherits the parent marker + // instead of getting nil. + parentMarkerIDs := []string{parentMarkerID} + + // Reproduce the fixed code path from service.go: + // marker, ids := domain.NewMarker(txid, 100, parentMarkerIDs) + // // AddMarker fails... + // markerIDs = parentMarkerIDs <-- the fix + marker, _ := domain.NewMarker("some-txid", 100, parentMarkerIDs) + require.NotNil(t, marker, "depth 100 is a boundary, should produce a marker") + // We intentionally skip AddMarker (simulating failure) and fall back: + markerIDs := parentMarkerIDs + + childVtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "fallback-child-" + suffix, VOut: 0}, + PubKey: testPubkey, + Amount: 4000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 100, + MarkerIDs: markerIDs, + } + + require.NoError(t, svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{childVtxo})) + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, childVtxo.Outpoint, childVtxo.MarkerIDs)) + + // Verify the child VTXO inherited the parent marker. + vtxos, err := svc.Vtxos().GetVtxos(ctx, []domain.Outpoint{childVtxo.Outpoint}) + require.NoError(t, err) + require.Len(t, vtxos, 1) + require.Equal(t, parentMarkerIDs, vtxos[0].MarkerIDs, + "child VTXO should carry parent markers after AddMarker failure fallback") + + // Sweep the parent marker. + sweptAt := time.Now().UnixMilli() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, []string{parentMarkerID}, sweptAt)) + + // Verify the child VTXO is now swept — the fix works. 
+ unspent, spent, err := svc.Vtxos().GetAllNonUnrolledVtxos(ctx, testPubkey) + require.NoError(t, err) + + spentTxids := make(map[string]bool) + for _, v := range spent { + spentTxids[v.Txid] = true + } + require.True(t, spentTxids[childVtxo.Outpoint.Txid], + "child VTXO with inherited parent markers should be swept") + + for _, v := range unspent { + require.NotEqual(t, childVtxo.Outpoint.Txid, v.Txid, + "child VTXO should not appear in unspent list after parent marker sweep") + } + }) +} + +// testSweepableUnrolledExcludesMarkerSwept verifies that GetAllSweepableUnrolledVtxos +// excludes VTXOs whose markers have been swept. Creates 3 spent+unrolled VTXOs across +// two markers, sweeps one marker, and confirms only the unswept VTXOs appear as sweepable. +func testSweepableUnrolledExcludesMarkerSwept(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweepable_unrolled_excludes_marker_swept", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + + // Create a finalized round + roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // 2 markers + markerXID := "sweepable-mX-" + suffix + markerYID := 
"sweepable-mY-" + suffix + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerXID, Depth: 0, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerYID, Depth: 0, + })) + + // 3 VTXOs: VTXO-1 with marker-X, VTXO-2 and VTXO-3 with marker-Y + txid1 := "sweepable-v1-" + suffix + txid2 := "sweepable-v2-" + suffix + txid3 := "sweepable-v3-" + suffix + + vtxosToAdd := []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: txid1, VOut: 0}, + PubKey: pubkey, + Amount: 1000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 10, + MarkerIDs: []string{markerXID}, }, - }, - []*wire.TxOut{ { - Value: 1000000, + Outpoint: domain.Outpoint{Txid: txid2, VOut: 0}, + PubKey: pubkey, + Amount: 2000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 20, + MarkerIDs: []string{markerYID}, }, - }, - 3, - 0, - []uint32{ - wire.MaxTxInSequenceNum, - }, - ) + { + Outpoint: domain.Outpoint{Txid: txid3, VOut: 0}, + PubKey: pubkey, + Amount: 3000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 30, + MarkerIDs: []string{markerYID}, + }, + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, vtxosToAdd)) - b64, _ := ptx.B64Encode() - return b64 + for _, v := range vtxosToAdd { + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, v.Outpoint, v.MarkerIDs)) + } + + // Mark all as spent + spentVtxos := map[domain.Outpoint]string{ + {Txid: txid1, VOut: 0}: "spentby-" + suffix, + {Txid: txid2, VOut: 0}: "spentby-" + suffix, + {Txid: txid3, VOut: 0}: "spentby-" + suffix, + } + require.NoError(t, svc.Vtxos().SpendVtxos(ctx, spentVtxos, "arktx-"+suffix)) + + // Mark all as unrolled + 
unrollOutpoints := []domain.Outpoint{ + {Txid: txid1, VOut: 0}, + {Txid: txid2, VOut: 0}, + {Txid: txid3, VOut: 0}, + } + require.NoError(t, svc.Vtxos().UnrollVtxos(ctx, unrollOutpoints)) + + // Sweep only marker-X + sweptAt := time.Now().Unix() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, []string{markerXID}, sweptAt)) + + // Call GetAllSweepableUnrolledVtxos + sweepable, err := svc.Vtxos().GetAllSweepableUnrolledVtxos(ctx) + require.NoError(t, err) + + // Result should contain VTXO-2 and VTXO-3 only (not VTXO-1 which is swept) + sweepableTxids := make(map[string]bool) + for _, v := range sweepable { + sweepableTxids[v.Txid] = true + } + require.True(t, sweepableTxids[txid2], "vtxo-2 (marker-Y, not swept) should be sweepable") + require.True(t, sweepableTxids[txid3], "vtxo-3 (marker-Y, not swept) should be sweepable") + require.False(t, sweepableTxids[txid1], "vtxo-1 (marker-X, swept) should NOT be sweepable") + }) +} + +// testSweepVtxoOutpointsNoOverreach proves that per-outpoint sweeping via +// SweepVtxoOutpoints does NOT over-reach across independent subtrees that share +// a marker. This is the scenario where marker-based sweeping (BulkSweepMarkers) +// would incorrectly sweep an unrelated sibling VTXO. +// +// Setup: two batch VTXOs (X, Y) from the same round share a marker M_root. +// Sweeping X's outpoint via SweepVtxoOutpoints should mark X as swept but +// leave Y unswept, even though both carry M_root. +func testSweepVtxoOutpointsNoOverreach(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_vtxo_outpoints_no_overreach", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + testPubkey := "overreach-pk-" + suffix + + // Create a finalized round. 
+ roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // Create a shared marker — simulates two sibling VTXOs from the same + // offchain tx inheriting the same parent marker. + sharedMarkerID := "overreach-shared-m-" + suffix + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: sharedMarkerID, Depth: 0, + })) + + // VTXO X — will be swept via SweepVtxoOutpoints + vtxoX := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "overreach-X-" + suffix, VOut: 0}, + PubKey: testPubkey, + Amount: 5000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 1, + MarkerIDs: []string{sharedMarkerID}, + } + + // VTXO Y — independent sibling sharing the same marker, should NOT be swept + vtxoY := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "overreach-Y-" + suffix, VOut: 0}, + PubKey: testPubkey, + Amount: 3000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 1, + MarkerIDs: []string{sharedMarkerID}, + } + + require.NoError(t, svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{vtxoX, vtxoY})) + for _, 
v := range []domain.Vtxo{vtxoX, vtxoY} { + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, v.Outpoint, v.MarkerIDs)) + } + + // Sweep ONLY vtxoX via per-outpoint sweeping. + sweptAt := time.Now().UnixMilli() + err := svc.Markers().SweepVtxoOutpoints(ctx, []domain.Outpoint{vtxoX.Outpoint}, sweptAt) + require.NoError(t, err) + + // Verify: X is swept, Y is NOT swept. + unspent, spent, err := svc.Vtxos().GetAllNonUnrolledVtxos(ctx, testPubkey) + require.NoError(t, err) + + spentTxids := make(map[string]bool) + for _, v := range spent { + spentTxids[v.Txid] = true + } + unspentTxids := make(map[string]bool) + for _, v := range unspent { + unspentTxids[v.Txid] = true + } + + require.True(t, spentTxids[vtxoX.Outpoint.Txid], + "vtxo X should be swept via SweepVtxoOutpoints") + require.True(t, unspentTxids[vtxoY.Outpoint.Txid], + "vtxo Y must NOT be swept — it shares a marker with X but was not in the sweep set") + + // Contrast: if we had used BulkSweepMarkers(sharedMarkerID) instead, + // Y would also be swept. That's the over-reach this fix prevents. + }) +} + +// testSweepVtxoOutpointsEdgeCases covers edge cases for the dual sweep tracking: +// - Double sweep: a VTXO swept via both marker AND outpoint stays swept +// - Non-existent outpoints: SweepVtxoOutpoints silently ignores them +// - Empty outpoints: no-op without error +func testSweepVtxoOutpointsEdgeCases(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_vtxo_outpoints_edge_cases", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + testPubkey := "edge-pk-" + suffix + + // Create round. 
+ roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + markerID := "edge-m-" + suffix + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerID, Depth: 0, + })) + + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "edge-vtxo-" + suffix, VOut: 0}, + PubKey: testPubkey, + Amount: 5000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: 0, + MarkerIDs: []string{markerID}, + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{vtxo})) + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, vtxo.Outpoint, vtxo.MarkerIDs)) + + sweptAt := time.Now().UnixMilli() + + // Edge case 1: empty outpoints — should be a no-op + err := svc.Markers().SweepVtxoOutpoints(ctx, []domain.Outpoint{}, sweptAt) + require.NoError(t, err) + + // Edge case 2: non-existent outpoints — should not error + err = svc.Markers().SweepVtxoOutpoints(ctx, []domain.Outpoint{ + {Txid: "does-not-exist", VOut: 99}, + }, sweptAt) + require.NoError(t, err) + + // Verify VTXO is still unswept after those no-ops + unspent, _, err := svc.Vtxos().GetAllNonUnrolledVtxos(ctx, testPubkey) + require.NoError(t, err) + found 
:= false + for _, v := range unspent { + if v.Txid == vtxo.Outpoint.Txid { + found = true + } + } + require.True(t, found, "vtxo should still be unswept after empty/nonexistent sweep calls") + + // Edge case 3: double sweep — sweep via marker THEN via outpoint + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, []string{markerID}, sweptAt)) + + // VTXO is now swept via marker + _, spent, err := svc.Vtxos().GetAllNonUnrolledVtxos(ctx, testPubkey) + require.NoError(t, err) + foundInSpent := false + for _, v := range spent { + if v.Txid == vtxo.Outpoint.Txid { + foundInSpent = true + } + } + require.True(t, foundInSpent, "vtxo should be swept after BulkSweepMarkers") + + // Now also sweep via outpoint — should not error (idempotent) + err = svc.Markers().SweepVtxoOutpoints(ctx, []domain.Outpoint{vtxo.Outpoint}, sweptAt) + require.NoError(t, err) + + // VTXO should still be swept (via both paths now) + _, spent2, err := svc.Vtxos().GetAllNonUnrolledVtxos(ctx, testPubkey) + require.NoError(t, err) + foundInSpent2 := false + for _, v := range spent2 { + if v.Txid == vtxo.Outpoint.Txid { + foundInSpent2 = true + } + } + require.True(t, foundInSpent2, "vtxo should remain swept after double sweep via both paths") + }) +} + +// testGetAllChildrenVtxosSiblingIsolation verifies that GetAllChildrenVtxos, +// when called with a specific (txid, vout) outpoint, returns only that +// outpoint's descendant lineage and does not include sibling outpoints of the +// same txid or their descendants. +// +// Scenario: a parent tx A produces two outputs (A, 0) and (A, 1). Each is +// spent by a different offchain tx (ark_txid X vs Y), each of which has its +// own descendant. Sweeping the checkpoint for (A, 0) must not sweep (A, 1)'s +// lineage, since those funds belong to an independent subtree. 
+func testGetAllChildrenVtxosSiblingIsolation(t *testing.T, svc ports.RepoManager) { + t.Run("test_get_all_children_vtxos_sibling_isolation", func(t *testing.T) { + ctx := context.Background() + suffix := randomString(16) + + commitmentTxid := randomString(32) + parentTxid := "sibling-parent-" + suffix + arkTxidForVout0 := "sibling-arktx-0-" + suffix + arkTxidForVout1 := "sibling-arktx-1-" + suffix + + // Parent outputs: (parent, 0) spent by arkTxidForVout0, + // (parent, 1) spent by arkTxidForVout1. Same txid, different lineages. + parentVout0 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: parentTxid, VOut: 0}, + PubKey: pubkey, + Amount: 1000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + ArkTxid: arkTxidForVout0, + } + parentVout1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: parentTxid, VOut: 1}, + PubKey: pubkey2, + Amount: 2000, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + ArkTxid: arkTxidForVout1, + } + + // Descendant of (parent, 0): belongs to the lineage we're sweeping. + descendantOfVout0 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: arkTxidForVout0, VOut: 0}, + PubKey: pubkey, + Amount: 900, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + ArkTxid: "", + } + + // Descendant of (parent, 1): belongs to an independent lineage — + // must NOT be returned when we query (parent, 0). + descendantOfVout1 := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: arkTxidForVout1, VOut: 0}, + PubKey: pubkey2, + Amount: 1900, + RootCommitmentTxid: commitmentTxid, + CommitmentTxids: []string{commitmentTxid}, + ArkTxid: "", + } + + require.NoError(t, svc.Vtxos().AddVtxos(ctx, []domain.Vtxo{ + parentVout0, parentVout1, descendantOfVout0, descendantOfVout1, + })) + + // Querying (parent, 0) should return (parent, 0) and its descendant + // only — NOT (parent, 1) or its descendant. 
+ got, err := svc.Vtxos().GetAllChildrenVtxos(ctx, parentVout0.Outpoint) + require.NoError(t, err) + gotSet := make(map[domain.Outpoint]bool, len(got)) + for _, op := range got { + gotSet[op] = true + } + require.True(t, gotSet[parentVout0.Outpoint], + "seed outpoint (parent, 0) should be in result") + require.True(t, gotSet[descendantOfVout0.Outpoint], + "descendant of (parent, 0) should be in result") + require.False(t, gotSet[parentVout1.Outpoint], + "sibling (parent, 1) MUST NOT be in result — independent lineage") + require.False(t, gotSet[descendantOfVout1.Outpoint], + "descendant of sibling (parent, 1) MUST NOT be in result") + + // Symmetric check: querying (parent, 1) only returns its own lineage. + got, err = svc.Vtxos().GetAllChildrenVtxos(ctx, parentVout1.Outpoint) + require.NoError(t, err) + gotSet = make(map[domain.Outpoint]bool, len(got)) + for _, op := range got { + gotSet[op] = true + } + require.True(t, gotSet[parentVout1.Outpoint]) + require.True(t, gotSet[descendantOfVout1.Outpoint]) + require.False(t, gotSet[parentVout0.Outpoint]) + require.False(t, gotSet[descendantOfVout0.Outpoint]) + }) +} + +// testConvergentMultiParentMarkerDAG builds a diamond-shaped marker DAG where two +// independent root→mid branches converge into a single merge marker, then extend +// to a leaf. Verifies GetVtxoChainByMarkers returns correct VTXOs per marker set, +// and that sweeping individual markers only affects VTXOs associated with those markers. 
+func testConvergentMultiParentMarkerDAG(t *testing.T, svc ports.RepoManager) { + t.Run("test_convergent_multi_parent_marker_dag", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + + // Create a finalized round + roundId := uuid.New().String() + commitmentTxid := randomString(32) + round := domain.NewRoundFromEvents([]domain.Event{ + domain.RoundStarted{ + RoundEvent: domain.RoundEvent{Id: roundId, Type: domain.EventTypeRoundStarted}, + Timestamp: time.Now().Unix(), + }, + domain.RoundFinalizationStarted{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalizationStarted, + }, + CommitmentTxid: commitmentTxid, + CommitmentTx: emptyTx, + VtxoTree: vtxoTree, + Connectors: connectorsTree, + VtxoTreeExpiration: 3600, + }, + domain.RoundFinalized{ + RoundEvent: domain.RoundEvent{ + Id: roundId, + Type: domain.EventTypeRoundFinalized, + }, + FinalCommitmentTx: emptyTx, + Timestamp: time.Now().Unix(), + }, + }) + require.NoError(t, svc.Rounds().AddOrUpdateRound(ctx, *round)) + + // Build convergent DAG: + // root-A (depth 0) root-B (depth 0) + // \ / + // mid-A (depth 100) mid-B (depth 100) + // \ / + // merge (depth 200, parents: [mid-A, mid-B]) + // | + // leaf (depth 300, parent: [merge]) + rootAID := "dag-rootA-" + suffix + rootBID := "dag-rootB-" + suffix + midAID := "dag-midA-" + suffix + midBID := "dag-midB-" + suffix + mergeID := "dag-merge-" + suffix + leafID := "dag-leaf-" + suffix + + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: rootAID, Depth: 0, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: rootBID, Depth: 0, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: midAID, Depth: 100, ParentMarkerIDs: []string{rootAID}, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: midBID, Depth: 100, 
ParentMarkerIDs: []string{rootBID}, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: mergeID, Depth: 200, ParentMarkerIDs: []string{midAID, midBID}, + })) + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: leafID, Depth: 300, ParentMarkerIDs: []string{mergeID}, + })) + + // 6 VTXOs, one per marker at intermediate depths + type vtxoSpec struct { + txid string + depth uint32 + markerID string + } + specs := []vtxoSpec{ + {txid: "dag-vrA-" + suffix, depth: 50, markerID: rootAID}, + {txid: "dag-vrB-" + suffix, depth: 50, markerID: rootBID}, + {txid: "dag-vmA-" + suffix, depth: 150, markerID: midAID}, + {txid: "dag-vmB-" + suffix, depth: 150, markerID: midBID}, + {txid: "dag-vmerge-" + suffix, depth: 250, markerID: mergeID}, + {txid: "dag-vleaf-" + suffix, depth: 350, markerID: leafID}, + } + + vtxosToAdd := make([]domain.Vtxo, len(specs)) + for i, s := range specs { + vtxosToAdd[i] = domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: s.txid, VOut: 0}, + PubKey: pubkey, + Amount: 1000, + CommitmentTxids: []string{commitmentTxid}, + RootCommitmentTxid: commitmentTxid, + CreatedAt: time.Now().Unix(), + ExpiresAt: time.Now().Add(time.Hour).Unix(), + Depth: s.depth, + MarkerIDs: []string{s.markerID}, + } + } + require.NoError(t, svc.Vtxos().AddVtxos(ctx, vtxosToAdd)) + + for _, s := range specs { + require.NoError(t, svc.Markers().UpdateVtxoMarkers(ctx, + domain.Outpoint{Txid: s.txid, VOut: 0}, []string{s.markerID})) + } + + allMarkerIDs := []string{rootAID, rootBID, midAID, midBID, mergeID, leafID} + + // GetVtxoChainByMarkers with all 6 markers → returns all 6 VTXOs + chainAll, err := svc.Markers().GetVtxoChainByMarkers(ctx, allMarkerIDs) + require.NoError(t, err) + require.Len(t, chainAll, 6, "all 6 markers should return all 6 VTXOs") + + // GetVtxoChainByMarkers with just [merge] → returns only VTXO-merge + chainMerge, err := svc.Markers().GetVtxoChainByMarkers(ctx, []string{mergeID}) + require.NoError(t, err) + require.Len(t, 
chainMerge, 1, "merge marker should return 1 VTXO") + require.Equal(t, specs[4].txid, chainMerge[0].Txid) + + // Sweep only root-A → only VTXO-rA is swept; others unswept + sweptAt := time.Now().Unix() + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, []string{rootAID}, sweptAt)) + + outpoints := make([]domain.Outpoint, len(specs)) + for i, s := range specs { + outpoints[i] = domain.Outpoint{Txid: s.txid, VOut: 0} + } + fetched, err := svc.Vtxos().GetVtxos(ctx, outpoints) + require.NoError(t, err) + require.Len(t, fetched, 6) + + for _, v := range fetched { + if v.Txid == specs[0].txid { + require.True(t, v.Swept, "vtxo root-A should be swept") + } else { + require.False( + t, + v.Swept, + "vtxo %s should NOT be swept after sweeping only root-A", + v.Txid, + ) + } + } + + // Sweep merge → VTXO-merge becomes swept; VTXO-leaf still unswept + require.NoError(t, svc.Markers().BulkSweepMarkers(ctx, []string{mergeID}, sweptAt)) + + fetched2, err := svc.Vtxos().GetVtxos(ctx, outpoints) + require.NoError(t, err) + require.Len(t, fetched2, 6) + + for _, v := range fetched2 { + switch v.Txid { + case specs[0].txid: // root-A + require.True(t, v.Swept, "vtxo root-A should still be swept") + case specs[4].txid: // merge + require.True(t, v.Swept, "vtxo merge should be swept") + case specs[5].txid: // leaf + require.False(t, v.Swept, "vtxo leaf should NOT be swept (different marker)") + default: + // root-B, mid-A, mid-B remain unswept + require.False(t, v.Swept, "vtxo %s should NOT be swept", v.Txid) + } + } + }) +} + +// testSweepMarkerWithDescendantsDeepChain builds a 201-marker linear chain (depth 0 +// to 20000) and calls SweepMarkerWithDescendants from the root. Verifies all 201 +// markers are swept in a single recursive operation and that a second call is +// idempotent (returns 0). 
+func testSweepMarkerWithDescendantsDeepChain(t *testing.T, svc ports.RepoManager) { + t.Run("test_sweep_marker_with_descendants_deep_chain", func(t *testing.T) { + if svc.Markers() == nil { + t.Skip("marker repository not available for this data store") + } + ctx := context.Background() + suffix := randomString(16) + + const maxDepth = 20000 + const markerInterval = 100 + const numMarkers = maxDepth/markerInterval + 1 // 201 + + // Build linear chain: marker-0 → marker-100 → ... → marker-20000 + allMarkerIDs := make([]string, 0, numMarkers) + + for depth := uint32(0); depth <= maxDepth; depth += markerInterval { + markerID := fmt.Sprintf("descdep-%s-m%d", suffix, depth) + allMarkerIDs = append(allMarkerIDs, markerID) + + var parentMarkerIDs []string + if depth > 0 { + parentMarkerIDs = []string{ + fmt.Sprintf("descdep-%s-m%d", suffix, depth-markerInterval), + } + } + + require.NoError(t, svc.Markers().AddMarker(ctx, domain.Marker{ + ID: markerID, + Depth: depth, + ParentMarkerIDs: parentMarkerIDs, + })) + } + require.Len(t, allMarkerIDs, numMarkers) + rootID := allMarkerIDs[0] + + // SweepMarkerWithDescendants from root + sweptAt := time.Now().Unix() + count, err := svc.Markers().SweepMarkerWithDescendants(ctx, rootID, sweptAt) + require.NoError(t, err) + require.Equal(t, int64(numMarkers), count, + "should sweep all %d markers", numMarkers) + + // Spot-check: root (0), middle (10000), leaf (20000) → all true + for _, depth := range []uint32{0, 10000, 20000} { + markerID := fmt.Sprintf("descdep-%s-m%d", suffix, depth) + isSwept, err := svc.Markers().IsMarkerSwept(ctx, markerID) + require.NoError(t, err) + require.True(t, isSwept, "marker at depth %d should be swept", depth) + } + + // Idempotency: second call returns 0 + count, err = svc.Markers().SweepMarkerWithDescendants(ctx, rootID, sweptAt) + require.NoError(t, err) + require.Equal(t, int64(0), count, "second call should be idempotent (0 new sweeps)") + }) } func checkVtxos(t *testing.T, expectedVtxos, 
gotVtxos []domain.Vtxo) { @@ -2059,6 +5413,7 @@ func checkVtxos(t *testing.T, expectedVtxos, gotVtxos []domain.Vtxo) { require.Exactly(t, expected.Spent, v.Spent) require.Exactly(t, expected.SpentBy, v.SpentBy) require.Exactly(t, expected.Swept, v.Swept) + require.Exactly(t, expected.Depth, v.Depth) require.ElementsMatch(t, expected.CommitmentTxids, v.CommitmentTxids) require.ElementsMatch(t, expected.Assets, v.Assets) } diff --git a/internal/infrastructure/db/sqlite/marker_repo.go b/internal/infrastructure/db/sqlite/marker_repo.go new file mode 100644 index 000000000..49f91a449 --- /dev/null +++ b/internal/infrastructure/db/sqlite/marker_repo.go @@ -0,0 +1,560 @@ +package sqlitedb + +import ( + "context" + "database/sql" + "encoding/json" + "fmt" + "strings" + "time" + + "github.com/arkade-os/arkd/internal/core/domain" + "github.com/arkade-os/arkd/internal/infrastructure/db/sqlite/sqlc/queries" + log "github.com/sirupsen/logrus" +) + +type markerRepository struct { + db *sql.DB + querier *queries.Queries +} + +func NewMarkerRepository(config ...interface{}) (domain.MarkerRepository, error) { + if len(config) != 1 { + return nil, fmt.Errorf("invalid config") + } + db, ok := config[0].(*sql.DB) + if !ok { + return nil, fmt.Errorf("cannot open marker repository: invalid config") + } + + return &markerRepository{ + db: db, + querier: queries.New(db), + }, nil +} + +func (m *markerRepository) Close() { + _ = m.db.Close() +} + +func (m *markerRepository) AddMarker(ctx context.Context, marker domain.Marker) error { + parentMarkersJSON, err := json.Marshal(marker.ParentMarkerIDs) + if err != nil { + return fmt.Errorf("failed to marshal parent markers: %w", err) + } + + return m.querier.UpsertMarker(ctx, queries.UpsertMarkerParams{ + ID: marker.ID, + Depth: int64(marker.Depth), + ParentMarkers: sql.NullString{String: string(parentMarkersJSON), Valid: true}, + }) +} + +func (m *markerRepository) GetMarker(ctx context.Context, id string) (*domain.Marker, error) { + row, err 
:= m.querier.SelectMarker(ctx, id) + if err != nil { + if err == sql.ErrNoRows { + return nil, nil + } + return nil, err + } + + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + return &marker, nil +} + +func (m *markerRepository) GetMarkersByDepth( + ctx context.Context, + depth uint32, +) ([]domain.Marker, error) { + rows, err := m.querier.SelectMarkersByDepth(ctx, int64(depth)) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(rows)) + for _, row := range rows { + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + markers = append(markers, marker) + } + return markers, nil +} + +func (m *markerRepository) GetMarkersByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Marker, error) { + rows, err := m.querier.SelectMarkersByDepthRange(ctx, queries.SelectMarkersByDepthRangeParams{ + MinDepth: int64(minDepth), + MaxDepth: int64(maxDepth), + }) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(rows)) + for _, row := range rows { + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + markers = append(markers, marker) + } + return markers, nil +} + +func (m *markerRepository) GetMarkersByIds( + ctx context.Context, + ids []string, +) ([]domain.Marker, error) { + if len(ids) == 0 { + return nil, nil + } + + rows, err := m.querier.SelectMarkersByIds(ctx, ids) + if err != nil { + return nil, err + } + + markers := make([]domain.Marker, 0, len(rows)) + for _, row := range rows { + marker, err := rowToMarker(row) + if err != nil { + return nil, err + } + markers = append(markers, marker) + } + return markers, nil +} + +func (m *markerRepository) SweepMarker(ctx context.Context, markerID string, sweptAt int64) error { + return m.querier.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: markerID, + SweptAt: sweptAt, + }) +} + +func (m *markerRepository) BulkSweepMarkers( + ctx context.Context, + 
markerIDs []string, + sweptAt int64, +) error { + if len(markerIDs) == 0 { + return nil + } + txBody := func(querierWithTx *queries.Queries) error { + for _, markerID := range markerIDs { + if err := querierWithTx.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: markerID, + SweptAt: sweptAt, + }); err != nil { + return err + } + } + return nil + } + return execTx(ctx, m.db, txBody) +} + +func (m *markerRepository) SweepVtxoOutpoints( + ctx context.Context, + outpoints []domain.Outpoint, + sweptAt int64, +) error { + if len(outpoints) == 0 { + return nil + } + txBody := func(qtx *queries.Queries) error { + for _, op := range outpoints { + if err := qtx.InsertSweptVtxo(ctx, queries.InsertSweptVtxoParams{ + Txid: op.Txid, + Vout: int64(op.VOut), + SweptAt: sweptAt, + }); err != nil { + return err + } + } + return nil + } + return execTx(ctx, m.db, txBody) +} + +func (m *markerRepository) SweepMarkerWithDescendants( + ctx context.Context, + markerID string, + sweptAt int64, +) (int64, error) { + var count int64 + txBody := func(qtx *queries.Queries) error { + // Get all descendant marker IDs (including the root marker) that are not already swept + descendantIDs, err := qtx.GetDescendantMarkerIds(ctx, markerID) + if err != nil { + return fmt.Errorf("failed to get descendant markers: %w", err) + } + + // Insert each descendant into swept_marker + for _, id := range descendantIDs { + if err := qtx.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: id, + SweptAt: sweptAt, + }); err != nil { + return fmt.Errorf("failed to sweep marker %s: %w", id, err) + } + count++ + } + return nil + } + if err := execTx(ctx, m.db, txBody); err != nil { + return 0, err + } + return count, nil +} + +func (m *markerRepository) IsMarkerSwept(ctx context.Context, markerID string) (bool, error) { + result, err := m.querier.IsMarkerSwept(ctx, markerID) + if err != nil { + return false, err + } + return result == 1, nil +} + +func (m *markerRepository) 
GetSweptMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.SweptMarker, error) { + if len(markerIDs) == 0 { + return nil, nil + } + + rows, err := m.querier.SelectSweptMarkersByIds(ctx, markerIDs) + if err != nil { + return nil, err + } + + sweptMarkers := make([]domain.SweptMarker, 0, len(rows)) + for _, row := range rows { + sweptMarkers = append(sweptMarkers, domain.SweptMarker{ + MarkerID: row.MarkerID, + SweptAt: row.SweptAt, + }) + } + return sweptMarkers, nil +} + +func (m *markerRepository) UpdateVtxoMarkers( + ctx context.Context, + outpoint domain.Outpoint, + markerIDs []string, +) error { + markersJSON, err := json.Marshal(markerIDs) + if err != nil { + return fmt.Errorf("failed to marshal markers: %w", err) + } + return m.querier.UpdateVtxoMarkers(ctx, queries.UpdateVtxoMarkersParams{ + Markers: string(markersJSON), + Txid: outpoint.Txid, + Vout: int64(outpoint.VOut), + }) +} + +func (m *markerRepository) GetVtxosByMarker( + ctx context.Context, + markerID string, +) ([]domain.Vtxo, error) { + rows, err := m.querier.SelectVtxosByMarkerId( + ctx, + sql.NullString{String: markerID, Valid: len(markerID) > 0}, + ) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromMarkerQuery(row)) + } + return vtxos, nil +} + +func (m *markerRepository) SweepVtxosByMarker(ctx context.Context, markerID string) (int64, error) { + var count int64 + txBody := func(qtx *queries.Queries) error { + // First check if the marker exists (foreign key constraint on swept_marker) + if _, err := qtx.SelectMarker(ctx, markerID); err != nil { + if err == sql.ErrNoRows { + return nil // Marker doesn't exist, nothing to sweep + } + return fmt.Errorf("failed to check marker existence: %w", err) + } + + // Count unswept VTXOs with this marker before inserting to swept_marker + c, err := qtx.CountUnsweptVtxosByMarkerId( + ctx, + sql.NullString{String: markerID, Valid: 
len(markerID) > 0}, + ) + if err != nil { + return fmt.Errorf("failed to count unswept vtxos: %w", err) + } + + // Insert the marker into swept_marker (sweep state is computed via view) + if err := qtx.InsertSweptMarker(ctx, queries.InsertSweptMarkerParams{ + MarkerID: markerID, + SweptAt: time.Now().UnixMilli(), + }); err != nil { + return fmt.Errorf("failed to insert swept marker: %w", err) + } + count = c + return nil + } + if err := execTx(ctx, m.db, txBody); err != nil { + return 0, err + } + return count, nil +} + +func (m *markerRepository) CreateRootMarkersForVtxos( + ctx context.Context, + vtxos []domain.Vtxo, +) error { + if len(vtxos) == 0 { + return nil + } + + txBody := func(querierWithTx *queries.Queries) error { + for _, vtxo := range vtxos { + markerID := vtxo.Outpoint.String() + + // Create the root marker (depth 0, no parents) + // Note: vtxo.MarkerIDs should already be set before AddVtxos is called + if err := querierWithTx.UpsertMarker(ctx, queries.UpsertMarkerParams{ + ID: markerID, + Depth: 0, + ParentMarkers: sql.NullString{String: "[]", Valid: true}, + }); err != nil { + return fmt.Errorf("failed to create marker for vtxo %s: %w", markerID, err) + } + } + return nil + } + + return execTx(ctx, m.db, txBody) +} + +func (m *markerRepository) GetVtxosByDepthRange( + ctx context.Context, + minDepth, maxDepth uint32, +) ([]domain.Vtxo, error) { + rows, err := m.querier.SelectVtxosByDepthRange(ctx, queries.SelectVtxosByDepthRangeParams{ + MinDepth: int64(minDepth), + MaxDepth: int64(maxDepth), + }) + if err != nil { + return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromDepthRangeQuery(row)) + } + return vtxos, nil +} + +func (m *markerRepository) GetVtxosByArkTxid( + ctx context.Context, + arkTxid string, +) ([]domain.Vtxo, error) { + rows, err := m.querier.SelectVtxosByArkTxid( + ctx, + sql.NullString{String: arkTxid, Valid: arkTxid != ""}, + ) + if err != nil { + 
return nil, err + } + + vtxos := make([]domain.Vtxo, 0, len(rows)) + for _, row := range rows { + vtxos = append(vtxos, rowToVtxoFromArkTxidQuery(row)) + } + return vtxos, nil +} + +func (m *markerRepository) GetVtxoChainByMarkers( + ctx context.Context, + markerIDs []string, +) ([]domain.Vtxo, error) { + if len(markerIDs) == 0 { + return nil, nil + } + + // Since SQLite query handles one marker at a time, we need to query for each marker + // and deduplicate results + seen := make(map[string]bool) + vtxos := make([]domain.Vtxo, 0) + + for _, markerID := range markerIDs { + rows, err := m.querier.SelectVtxoChainByMarker( + ctx, + sql.NullString{String: markerID, Valid: len(markerID) > 0}, + ) + if err != nil { + return nil, err + } + + for _, row := range rows { + key := row.VtxoVw.Txid + ":" + fmt.Sprintf("%d", row.VtxoVw.Vout) + if !seen[key] { + seen[key] = true + vtxos = append(vtxos, rowToVtxoFromChainQuery(row)) + } + } + } + return vtxos, nil +} + +func rowToMarker(row queries.Marker) (domain.Marker, error) { + var parentMarkerIDs []string + if row.ParentMarkers.Valid && row.ParentMarkers.String != "" { + if err := json.Unmarshal([]byte(row.ParentMarkers.String), &parentMarkerIDs); err != nil { + return domain.Marker{}, fmt.Errorf("failed to unmarshal parent markers: %w", err) + } + } + + return domain.Marker{ + ID: row.ID, + Depth: uint32(row.Depth), + ParentMarkerIDs: parentMarkerIDs, + }, nil +} + +func rowToVtxoFromMarkerQuery(row queries.SelectVtxosByMarkerIdRow) domain.Vtxo { + var commitmentTxids []string + if commitments, ok := row.VtxoVw.Commitments.(string); ok && commitments != "" { + commitmentTxids = strings.Split(commitments, ",") + } + return domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: row.VtxoVw.Txid, + VOut: uint32(row.VtxoVw.Vout), + }, + Amount: uint64(row.VtxoVw.Amount), + PubKey: row.VtxoVw.Pubkey, + RootCommitmentTxid: row.VtxoVw.CommitmentTxid, + CommitmentTxids: commitmentTxids, + SettledBy: row.VtxoVw.SettledBy.String, + 
ArkTxid: row.VtxoVw.ArkTxid.String, + SpentBy: row.VtxoVw.SpentBy.String, + Spent: row.VtxoVw.Spent, + Unrolled: row.VtxoVw.Unrolled, + Swept: toBool(row.VtxoVw.Swept), + Preconfirmed: row.VtxoVw.Preconfirmed, + ExpiresAt: row.VtxoVw.ExpiresAt, + CreatedAt: row.VtxoVw.CreatedAt, + Depth: uint32(row.VtxoVw.Depth), + MarkerIDs: parseMarkersJSON(row.VtxoVw.Markers), + } +} + +func rowToVtxoFromDepthRangeQuery(row queries.SelectVtxosByDepthRangeRow) domain.Vtxo { + var commitmentTxids []string + if commitments, ok := row.VtxoVw.Commitments.(string); ok && commitments != "" { + commitmentTxids = strings.Split(commitments, ",") + } + return domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: row.VtxoVw.Txid, + VOut: uint32(row.VtxoVw.Vout), + }, + Amount: uint64(row.VtxoVw.Amount), + PubKey: row.VtxoVw.Pubkey, + RootCommitmentTxid: row.VtxoVw.CommitmentTxid, + CommitmentTxids: commitmentTxids, + SettledBy: row.VtxoVw.SettledBy.String, + ArkTxid: row.VtxoVw.ArkTxid.String, + SpentBy: row.VtxoVw.SpentBy.String, + Spent: row.VtxoVw.Spent, + Unrolled: row.VtxoVw.Unrolled, + Swept: toBool(row.VtxoVw.Swept), + Preconfirmed: row.VtxoVw.Preconfirmed, + ExpiresAt: row.VtxoVw.ExpiresAt, + CreatedAt: row.VtxoVw.CreatedAt, + Depth: uint32(row.VtxoVw.Depth), + MarkerIDs: parseMarkersJSON(row.VtxoVw.Markers), + } +} + +func rowToVtxoFromArkTxidQuery(row queries.SelectVtxosByArkTxidRow) domain.Vtxo { + var commitmentTxids []string + if commitments, ok := row.VtxoVw.Commitments.(string); ok && commitments != "" { + commitmentTxids = strings.Split(commitments, ",") + } + return domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: row.VtxoVw.Txid, + VOut: uint32(row.VtxoVw.Vout), + }, + Amount: uint64(row.VtxoVw.Amount), + PubKey: row.VtxoVw.Pubkey, + RootCommitmentTxid: row.VtxoVw.CommitmentTxid, + CommitmentTxids: commitmentTxids, + SettledBy: row.VtxoVw.SettledBy.String, + ArkTxid: row.VtxoVw.ArkTxid.String, + SpentBy: row.VtxoVw.SpentBy.String, + Spent: row.VtxoVw.Spent, + Unrolled: 
row.VtxoVw.Unrolled, + Swept: toBool(row.VtxoVw.Swept), + Preconfirmed: row.VtxoVw.Preconfirmed, + ExpiresAt: row.VtxoVw.ExpiresAt, + CreatedAt: row.VtxoVw.CreatedAt, + Depth: uint32(row.VtxoVw.Depth), + MarkerIDs: parseMarkersJSON(row.VtxoVw.Markers), + } +} + +func rowToVtxoFromChainQuery(row queries.SelectVtxoChainByMarkerRow) domain.Vtxo { + var commitmentTxids []string + if commitments, ok := row.VtxoVw.Commitments.(string); ok && commitments != "" { + commitmentTxids = strings.Split(commitments, ",") + } + return domain.Vtxo{ + Outpoint: domain.Outpoint{ + Txid: row.VtxoVw.Txid, + VOut: uint32(row.VtxoVw.Vout), + }, + Amount: uint64(row.VtxoVw.Amount), + PubKey: row.VtxoVw.Pubkey, + RootCommitmentTxid: row.VtxoVw.CommitmentTxid, + CommitmentTxids: commitmentTxids, + SettledBy: row.VtxoVw.SettledBy.String, + ArkTxid: row.VtxoVw.ArkTxid.String, + SpentBy: row.VtxoVw.SpentBy.String, + Spent: row.VtxoVw.Spent, + Unrolled: row.VtxoVw.Unrolled, + Swept: toBool(row.VtxoVw.Swept), + Preconfirmed: row.VtxoVw.Preconfirmed, + ExpiresAt: row.VtxoVw.ExpiresAt, + CreatedAt: row.VtxoVw.CreatedAt, + Depth: uint32(row.VtxoVw.Depth), + MarkerIDs: parseMarkersJSON(row.VtxoVw.Markers), + } +} + +// parseMarkersJSON parses a JSON array string into a slice of strings. +// Logs and returns nil if the JSON is malformed so that corrupt markers are +// surfaced instead of silently treated as empty. 
+func parseMarkersJSON(markersJSON string) []string { + if markersJSON == "" { + return nil + } + var markerIDs []string + if err := json.Unmarshal([]byte(markersJSON), &markerIDs); err != nil { + log.WithError(err).Warnf("failed to parse markers JSON: %q", markersJSON) + return nil + } + return markerIDs +} diff --git a/internal/infrastructure/db/sqlite/marker_repo_test.go b/internal/infrastructure/db/sqlite/marker_repo_test.go new file mode 100644 index 000000000..34f6baf2d --- /dev/null +++ b/internal/infrastructure/db/sqlite/marker_repo_test.go @@ -0,0 +1,83 @@ +package sqlitedb + +import ( + "testing" + + log "github.com/sirupsen/logrus" + "github.com/sirupsen/logrus/hooks/test" + "github.com/stretchr/testify/require" +) + +// TestParseMarkersJSON_LogsOnMalformed verifies that parseMarkersJSON emits a +// warn log on malformed JSON (instead of silently swallowing the unmarshal +// error and returning nil). Surfacing corrupt markers is important so that +// operators can detect data corruption rather than having it masquerade as +// "no markers present". 
+func TestParseMarkersJSON_LogsOnMalformed(t *testing.T) { + t.Run("malformed_json_logs_warning", func(t *testing.T) { + hook := test.NewGlobal() + t.Cleanup(func() { + hook.Reset() + log.SetOutput(log.StandardLogger().Out) // restore default + }) + + got := parseMarkersJSON(`not-valid-json`) + require.Nil(t, got, "malformed JSON must still return nil for compatibility") + + entries := hook.AllEntries() + require.NotEmpty(t, entries, "expected a warn log for malformed markers JSON") + + var matched bool + for _, e := range entries { + if e.Level == log.WarnLevel && + e.Message != "" && + containsAll(e.Message, "failed to parse markers JSON") { + matched = true + break + } + } + require.True(t, matched, + "expected a warn entry mentioning 'failed to parse markers JSON', got: %v", + hook.AllEntries()) + }) + + t.Run("empty_input_no_log", func(t *testing.T) { + hook := test.NewGlobal() + t.Cleanup(func() { + hook.Reset() + }) + + got := parseMarkersJSON("") + require.Nil(t, got) + require.Empty(t, hook.AllEntries(), + "empty input is not an error and must not log") + }) + + t.Run("valid_json_no_log", func(t *testing.T) { + hook := test.NewGlobal() + t.Cleanup(func() { + hook.Reset() + }) + + got := parseMarkersJSON(`["m1","m2"]`) + require.Equal(t, []string{"m1", "m2"}, got) + require.Empty(t, hook.AllEntries(), + "valid input must not log") + }) +} + +func containsAll(s string, subs ...string) bool { + for _, sub := range subs { + found := false + for i := 0; i+len(sub) <= len(s); i++ { + if s[i:i+len(sub)] == sub { + found = true + break + } + } + if !found { + return false + } + } + return true +} diff --git a/internal/infrastructure/db/sqlite/migration/20260210000000_add_depth_and_markers.down.sql b/internal/infrastructure/db/sqlite/migration/20260210000000_add_depth_and_markers.down.sql new file mode 100644 index 000000000..7e0e5880f --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20260210000000_add_depth_and_markers.down.sql @@ -0,0 +1,67 @@ +-- 
SQLite doesn't support DROP COLUMN directly, so we need to recreate the table + +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +-- Create temp table without depth and markers columns +CREATE TABLE vtxo_temp ( + txid TEXT NOT NULL, + vout INTEGER NOT NULL, + pubkey TEXT NOT NULL, + amount INTEGER NOT NULL, + expires_at INTEGER NOT NULL, + created_at INTEGER NOT NULL, + commitment_txid TEXT NOT NULL, + spent_by TEXT, + spent BOOLEAN NOT NULL DEFAULT FALSE, + unrolled BOOLEAN NOT NULL DEFAULT FALSE, + swept BOOLEAN NOT NULL DEFAULT FALSE, + preconfirmed BOOLEAN NOT NULL DEFAULT FALSE, + settled_by TEXT, + ark_txid TEXT, + intent_id TEXT, + updated_at INTEGER, + PRIMARY KEY (txid, vout), + FOREIGN KEY (intent_id) REFERENCES intent(id) +); + +-- Copy data, computing swept from swept_marker since the column was removed in the up migration +INSERT INTO vtxo_temp SELECT + v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, + v.spent_by, v.spent, v.unrolled, + EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) AS swept, + v.preconfirmed, v.settled_by, v.ark_txid, + v.intent_id, v.updated_at +FROM vtxo v; + +-- Drop old table and rename +DROP TABLE vtxo; +ALTER TABLE vtxo_temp RENAME TO vtxo; + +-- Recreate indexes +CREATE INDEX IF NOT EXISTS fk_vtxo_intent_id ON vtxo(intent_id); + +-- Drop marker tables +DROP TABLE IF EXISTS swept_marker; +DROP TABLE IF EXISTS marker; + +-- Recreate views without depth and markers columns +CREATE VIEW vtxo_vw AS +SELECT v.*, COALESCE(group_concat(vc.commitment_txid), '') AS commitments +FROM vtxo v +LEFT JOIN vtxo_commitment_txid vc +ON v.txid = vc.vtxo_txid AND v.vout = vc.vtxo_vout +GROUP BY v.txid, v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, + intent.id, + intent.round_id, + intent.proof, + intent.message +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; diff --git 
a/internal/infrastructure/db/sqlite/migration/20260210000000_add_depth_and_markers.up.sql b/internal/infrastructure/db/sqlite/migration/20260210000000_add_depth_and_markers.up.sql new file mode 100644 index 000000000..0e61bf3fa --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20260210000000_add_depth_and_markers.up.sql @@ -0,0 +1,140 @@ +-- Add depth column +ALTER TABLE vtxo ADD COLUMN depth INTEGER NOT NULL DEFAULT 0; + +-- Create marker table +CREATE TABLE IF NOT EXISTS marker ( + id TEXT PRIMARY KEY, + depth INTEGER NOT NULL, + parent_markers TEXT -- JSON array of parent marker IDs +); +CREATE INDEX IF NOT EXISTS idx_marker_depth ON marker(depth); + +-- Create swept_marker table (append-only) +CREATE TABLE IF NOT EXISTS swept_marker ( + marker_id TEXT PRIMARY KEY REFERENCES marker(id), + swept_at INTEGER NOT NULL +); + +-- Add markers column (JSON array, not single marker_id) +ALTER TABLE vtxo ADD COLUMN markers TEXT NOT NULL DEFAULT '[]'; +CREATE INDEX IF NOT EXISTS idx_vtxo_markers ON vtxo(markers); + +-- Recreate views to include the new columns +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +CREATE VIEW vtxo_vw AS +SELECT + v.*, + COALESCE(( + SELECT group_concat(commitment_txid, ',') + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout + ), '') AS commitments, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN ( + SELECT DISTINCT txid, vout, asset_id, amount + FROM asset_projection +) AS ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, intent.id, intent.round_id, intent.proof, intent.message, intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; + +-- Backfill: Create a marker for every existing VTXO using its outpoint as marker ID +-- This ensures every VTXO has at least 1 marker +-- NOTE: this INSERT and the UPDATE below run in a 
single transaction over all VTXOs. +-- On large production DBs (millions of rows) expect a table lock for 10-60 seconds. +-- Plan a maintenance window or apply with a connection timeout if live traffic is present. +INSERT INTO marker (id, depth, parent_markers) +SELECT + v.txid || ':' || v.vout, + v.depth, + '[]' +FROM vtxo v; + +-- Assign the marker to every VTXO +UPDATE vtxo SET markers = '["' || txid || ':' || vout || '"]'; + +-- Migrate existing swept VTXOs to swept_marker table before dropping column +-- Insert the VTXO's marker into swept_marker +INSERT OR IGNORE INTO swept_marker (marker_id, swept_at) +SELECT + v.txid || ':' || v.vout, + strftime('%s', 'now') * 1000 +FROM vtxo v +WHERE v.swept = 1; + +-- SQLite doesn't support DROP COLUMN easily, so we recreate the table +-- Create new vtxo table without swept column +CREATE TABLE vtxo_new ( + txid TEXT NOT NULL, + vout INTEGER NOT NULL, + pubkey TEXT NOT NULL, + amount INTEGER NOT NULL, + expires_at INTEGER NOT NULL, + created_at INTEGER NOT NULL, + commitment_txid TEXT NOT NULL, + spent_by TEXT, + spent BOOLEAN NOT NULL DEFAULT FALSE, + unrolled BOOLEAN NOT NULL DEFAULT FALSE, + preconfirmed BOOLEAN NOT NULL DEFAULT FALSE, + settled_by TEXT, + ark_txid TEXT, + intent_id TEXT, + updated_at INTEGER, + depth INTEGER NOT NULL DEFAULT 0, + markers TEXT NOT NULL DEFAULT '[]', + PRIMARY KEY (txid, vout), + FOREIGN KEY (intent_id) REFERENCES intent(id) +); + +-- Copy data from old table (excluding swept column) +INSERT INTO vtxo_new (txid, vout, pubkey, amount, expires_at, created_at, commitment_txid, + spent_by, spent, unrolled, preconfirmed, settled_by, ark_txid, intent_id, updated_at, depth, markers) +SELECT txid, vout, pubkey, amount, expires_at, created_at, commitment_txid, + spent_by, spent, unrolled, preconfirmed, settled_by, ark_txid, intent_id, updated_at, depth, markers +FROM vtxo; + +-- Drop old views that depend on vtxo +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +-- 
Drop old table and rename new one +DROP TABLE vtxo; +ALTER TABLE vtxo_new RENAME TO vtxo; + +-- Recreate indexes +CREATE INDEX IF NOT EXISTS fk_vtxo_intent_id ON vtxo(intent_id); + +-- Recreate views to compute swept status dynamically +CREATE VIEW vtxo_vw AS +SELECT v.*, + COALESCE(( + SELECT group_concat(commitment_txid, ',') + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout + ), '') AS commitments, + EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN ( + SELECT DISTINCT txid, vout, asset_id, amount + FROM asset_projection +) AS ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, intent.id, intent.round_id, intent.proof, intent.message, intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; diff --git a/internal/infrastructure/db/sqlite/migration/20260219082956_fix_vtxo_vw_perf.up.sql b/internal/infrastructure/db/sqlite/migration/20260219082956_fix_vtxo_vw_perf.up.sql index 62a2139cf..878bb495a 100644 --- a/internal/infrastructure/db/sqlite/migration/20260219082956_fix_vtxo_vw_perf.up.sql +++ b/internal/infrastructure/db/sqlite/migration/20260219082956_fix_vtxo_vw_perf.up.sql @@ -12,17 +12,22 @@ SELECT v.spent_by, v.spent, v.unrolled, - v.swept, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, + v.depth, + v.markers, COALESCE(( SELECT group_concat(vc.commitment_txid) FROM vtxo_commitment_txid vc WHERE vc.vtxo_txid = v.txid AND vc.vtxo_vout = v.vout ), '') AS commitments, + EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) AS swept, COALESCE(ap.asset_id, '') AS asset_id, COALESCE(ap.amount, 0) AS asset_amount, intent.id, @@ -35,4 +40,3 @@ LEFT OUTER JOIN vtxo v ON intent.id = v.intent_id LEFT JOIN 
asset_projection ap ON v.txid = ap.txid AND v.vout = ap.vout; CREATE INDEX IF NOT EXISTS idx_asset_projection_vtxo ON asset_projection(txid, vout); - diff --git a/internal/infrastructure/db/sqlite/migration/20260409140000_checkpoint_tx_offchain_txid_index.down.sql b/internal/infrastructure/db/sqlite/migration/20260409140000_checkpoint_tx_offchain_txid_index.down.sql new file mode 100644 index 000000000..3bf97317f --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20260409140000_checkpoint_tx_offchain_txid_index.down.sql @@ -0,0 +1 @@ +DROP INDEX IF EXISTS idx_checkpoint_tx_offchain_txid; diff --git a/internal/infrastructure/db/sqlite/migration/20260409140000_checkpoint_tx_offchain_txid_index.up.sql b/internal/infrastructure/db/sqlite/migration/20260409140000_checkpoint_tx_offchain_txid_index.up.sql new file mode 100644 index 000000000..4fddcadc2 --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20260409140000_checkpoint_tx_offchain_txid_index.up.sql @@ -0,0 +1,2 @@ +CREATE INDEX IF NOT EXISTS idx_checkpoint_tx_offchain_txid + ON checkpoint_tx (offchain_txid); diff --git a/internal/infrastructure/db/sqlite/migration/20260416120000_add_swept_vtxo.down.sql b/internal/infrastructure/db/sqlite/migration/20260416120000_add_swept_vtxo.down.sql new file mode 100644 index 000000000..db97e34ac --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20260416120000_add_swept_vtxo.down.sql @@ -0,0 +1,52 @@ +-- Guard against silently resurrecting swept VTXOs. +-- +-- swept_vtxo holds per-outpoint sweep state for the checkpoint-sweep path. +-- Dropping the table would make vtxo_vw.swept flip back to false for every +-- outpoint tracked only here (marker-based sweeps still survive via +-- swept_marker). When the table has data, fail loudly rather than silently +-- discard it. When the table is empty, the rollback is safe — drop the +-- table and restore the pre-swept_vtxo view shape. 
+-- +-- SQLite has no RAISE outside of triggers, so we route through a trigger on +-- a throwaway temp table. The conditional INSERT fires the trigger only when +-- swept_vtxo has at least one row; otherwise it's a no-op and we fall through +-- to the drop + view recreation. +CREATE TEMP TABLE __abort_swept_vtxo_down (x INTEGER); +CREATE TEMP TRIGGER __abort_swept_vtxo_down_trigger BEFORE INSERT ON __abort_swept_vtxo_down +BEGIN + SELECT RAISE(ABORT, 'irreversible migration: swept_vtxo contains entries; rolling back would resurrect swept VTXOs. Truncate swept_vtxo manually if you accept the data loss, then re-run.'); +END; +INSERT INTO __abort_swept_vtxo_down SELECT 1 FROM swept_vtxo LIMIT 1; +DROP TRIGGER __abort_swept_vtxo_down_trigger; +DROP TABLE __abort_swept_vtxo_down; + +DROP TABLE IF EXISTS swept_vtxo; + +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +CREATE VIEW vtxo_vw AS +SELECT v.*, + COALESCE(( + SELECT group_concat(commitment_txid, ',') + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout + ), '') AS commitments, + EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN ( + SELECT DISTINCT txid, vout, asset_id, amount + FROM asset_projection +) AS ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT vtxo_vw.*, intent.id, intent.round_id, intent.proof, intent.message, intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo_vw +ON intent.id = vtxo_vw.intent_id; diff --git a/internal/infrastructure/db/sqlite/migration/20260416120000_add_swept_vtxo.up.sql b/internal/infrastructure/db/sqlite/migration/20260416120000_add_swept_vtxo.up.sql new file mode 100644 index 000000000..3d7056995 --- /dev/null +++ b/internal/infrastructure/db/sqlite/migration/20260416120000_add_swept_vtxo.up.sql @@ -0,0 +1,91 
@@ +-- Per-outpoint sweep tracking for checkpoint sweeps (see Postgres migration). +CREATE TABLE IF NOT EXISTS swept_vtxo ( + txid TEXT NOT NULL, + vout INTEGER NOT NULL, + swept_at INTEGER NOT NULL, + PRIMARY KEY (txid, vout) +); + +-- Rebuild vtxo_vw: swept if marker in swept_marker OR outpoint in swept_vtxo +DROP VIEW IF EXISTS intent_with_inputs_vw; +DROP VIEW IF EXISTS vtxo_vw; + +-- swept is OR'd across two sources on purpose: +-- * swept_marker — populated by batch/round sweeps. Coarse-grained: a single +-- marker can cover many VTXOs, so marker-based sweeping is efficient for +-- whole-round sweeps but would over-reach if applied to checkpoint sweeps +-- (markers are shared across independent subtrees). +-- * swept_vtxo — populated by checkpoint sweeps. Fine-grained: one row per +-- (txid, vout), so it safely scopes to a single outpoint's lineage. +-- New sweep code paths must pick the right table; maintainers adding a third +-- sweep path should extend this OR rather than re-overloading one of them. 
+CREATE VIEW vtxo_vw AS +SELECT v.*, + COALESCE(( + SELECT group_concat(commitment_txid, ',') + FROM vtxo_commitment_txid + WHERE vtxo_txid = v.txid AND vtxo_vout = v.vout + ), '') AS commitments, + ( + EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) + OR EXISTS ( + SELECT 1 FROM swept_vtxo sv + WHERE sv.txid = v.txid AND sv.vout = v.vout + ) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount +FROM vtxo v +LEFT JOIN ( + SELECT DISTINCT txid, vout, asset_id, amount + FROM asset_projection +) AS ap +ON ap.txid = v.txid AND ap.vout = v.vout; + +CREATE VIEW intent_with_inputs_vw AS +SELECT + v.txid, + v.vout, + v.pubkey, + v.amount, + v.expires_at, + v.created_at, + v.commitment_txid, + v.spent_by, + v.spent, + v.unrolled, + v.preconfirmed, + v.settled_by, + v.ark_txid, + v.intent_id, + v.updated_at, + v.depth, + v.markers, + COALESCE(( + SELECT group_concat(vc.commitment_txid) + FROM vtxo_commitment_txid vc + WHERE vc.vtxo_txid = v.txid AND vc.vtxo_vout = v.vout + ), '') AS commitments, + ( + EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) + OR EXISTS ( + SELECT 1 FROM swept_vtxo sv + WHERE sv.txid = v.txid AND sv.vout = v.vout + ) + ) AS swept, + COALESCE(ap.asset_id, '') AS asset_id, + COALESCE(ap.amount, 0) AS asset_amount, + intent.id, + intent.round_id, + intent.proof, + intent.message, + intent.txid AS intent_txid +FROM intent +LEFT OUTER JOIN vtxo v ON intent.id = v.intent_id +LEFT JOIN asset_projection ap ON v.txid = ap.txid AND v.vout = ap.vout; diff --git a/internal/infrastructure/db/sqlite/offchain_tx_repo.go b/internal/infrastructure/db/sqlite/offchain_tx_repo.go index c9df73897..3f30b7d85 100644 --- a/internal/infrastructure/db/sqlite/offchain_tx_repo.go +++ b/internal/infrastructure/db/sqlite/offchain_tx_repo.go @@ -9,6 +9,11 @@ import ( 
"github.com/arkade-os/arkd/internal/infrastructure/db/sqlite/sqlc/queries" ) +// sqliteMaxBulkTxids caps the per-query batch for GetOffchainTxsByTxids to stay +// well under SQLITE_MAX_VARIABLE_NUMBER (default 999 on SQLite < 3.32). The +// SLICE expansion in the generated query emits one bound parameter per txid. +const sqliteMaxBulkTxids = 500 + type offchainTxRepository struct { db *sql.DB querier *queries.Queries @@ -114,6 +119,67 @@ func (v *offchainTxRepository) GetOffchainTx( }, nil } +func (v *offchainTxRepository) GetOffchainTxsByTxids( + ctx context.Context, txids []string, +) ([]*domain.OffchainTx, error) { + if len(txids) == 0 { + return []*domain.OffchainTx{}, nil + } + + grouped := make(map[string][]queries.OffchainTxVw) + for start := 0; start < len(txids); start += sqliteMaxBulkTxids { + end := min(start+sqliteMaxBulkTxids, len(txids)) + rows, err := v.querier.SelectOffchainTxsByTxids(ctx, txids[start:end]) + if err != nil { + return nil, err + } + for _, row := range rows { + grouped[row.OffchainTxVw.Txid] = append( + grouped[row.OffchainTxVw.Txid], + row.OffchainTxVw, + ) + } + } + + txs := make([]*domain.OffchainTx, 0, len(grouped)) + for _, vws := range grouped { + vt := vws[0] + checkpointTxs := make(map[string]string) + commitmentTxids := make(map[string]string) + rootCommitmentTxId := "" + for _, vw := range vws { + if vw.CheckpointTxid != "" && vw.CheckpointTx != "" { + checkpointTxs[vw.CheckpointTxid] = vw.CheckpointTx + commitmentTxids[vw.CheckpointTxid] = vw.CommitmentTxid.String + if vw.IsRootCommitmentTxid.Bool { + rootCommitmentTxId = vw.CommitmentTxid.String + } + } + } + stage := domain.Stage{Code: int(vt.StageCode)} + if vt.FailReason.String != "" { + stage.Failed = true + } + if domain.OffchainTxStage(vt.StageCode) == domain.OffchainTxFinalizedStage { + stage.Ended = true + } + txs = append(txs, &domain.OffchainTx{ + ArkTxid: vt.Txid, + ArkTx: vt.Tx, + StartingTimestamp: vt.StartingTimestamp, + EndingTimestamp: vt.EndingTimestamp, 
+ ExpiryTimestamp: vt.ExpiryTimestamp, + FailReason: vt.FailReason.String, + Stage: stage, + CheckpointTxs: checkpointTxs, + CommitmentTxids: commitmentTxids, + RootCommitmentTxId: rootCommitmentTxId, + }) + } + + return txs, nil +} + func (v *offchainTxRepository) Close() { _ = v.db.Close() } diff --git a/internal/infrastructure/db/sqlite/round_repo.go b/internal/infrastructure/db/sqlite/round_repo.go index ba4d4efac..6ad5f9803 100644 --- a/internal/infrastructure/db/sqlite/round_repo.go +++ b/internal/infrastructure/db/sqlite/round_repo.go @@ -726,7 +726,7 @@ func combinedRowToVtxo(row queries.IntentWithInputsVw) domain.Vtxo { SpentBy: row.SpentBy.String, Spent: row.Spent.Bool, Unrolled: row.Unrolled.Bool, - Swept: row.Swept.Bool, + Swept: toBool(row.Swept), Preconfirmed: row.Preconfirmed.Bool, ExpiresAt: row.ExpiresAt.Int64, CreatedAt: row.CreatedAt.Int64, diff --git a/internal/infrastructure/db/sqlite/sqlc/queries/models.go b/internal/infrastructure/db/sqlite/sqlc/queries/models.go index f5ded82b3..d7cfb7279 100644 --- a/internal/infrastructure/db/sqlite/sqlc/queries/models.go +++ b/internal/infrastructure/db/sqlite/sqlc/queries/models.go @@ -71,13 +71,15 @@ type IntentWithInputsVw struct { SpentBy sql.NullString Spent sql.NullBool Unrolled sql.NullBool - Swept sql.NullBool Preconfirmed sql.NullBool SettledBy sql.NullString ArkTxid sql.NullString IntentID sql.NullString UpdatedAt sql.NullInt64 + Depth sql.NullInt64 + Markers sql.NullString Commitments interface{} + Swept interface{} AssetID string AssetAmount string ID sql.NullString @@ -99,6 +101,12 @@ type IntentWithReceiversVw struct { Txid sql.NullString } +type Marker struct { + ID string + Depth int64 + ParentMarkers sql.NullString +} + type OffchainTx struct { Txid string Tx string @@ -193,6 +201,17 @@ type ScheduledSession struct { UpdatedAt int64 } +type SweptMarker struct { + MarkerID string + SweptAt int64 +} + +type SweptVtxo struct { + Txid string + Vout int64 + SweptAt int64 +} + type Tx struct { 
Txid string Tx string @@ -213,12 +232,13 @@ type Vtxo struct { SpentBy sql.NullString Spent bool Unrolled bool - Swept bool Preconfirmed bool SettledBy sql.NullString ArkTxid sql.NullString IntentID sql.NullString UpdatedAt sql.NullInt64 + Depth int64 + Markers string } type VtxoCommitmentTxid struct { @@ -238,13 +258,15 @@ type VtxoVw struct { SpentBy sql.NullString Spent bool Unrolled bool - Swept bool Preconfirmed bool SettledBy sql.NullString ArkTxid sql.NullString IntentID sql.NullString UpdatedAt sql.NullInt64 + Depth int64 + Markers string Commitments interface{} + Swept interface{} AssetID string AssetAmount string } diff --git a/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go b/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go index 0bb137d25..3576b3dbf 100644 --- a/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go +++ b/internal/infrastructure/db/sqlite/sqlc/queries/query.sql.go @@ -84,6 +84,63 @@ func (q *Queries) ClearScheduledSession(ctx context.Context) error { return err } +const countUnsweptVtxosByMarkerId = `-- name: CountUnsweptVtxosByMarkerId :one +SELECT COUNT(DISTINCT txid || ':' || CAST(vout AS TEXT)) FROM vtxo_vw WHERE markers LIKE '%"' || ?1 || '"%' AND swept = false +` + +// Count VTXOs whose markers JSON array contains the given marker_id and are not swept. +// Uses LIKE because sqlc cannot parse json_each with view columns. +// Uses DISTINCT to avoid double-counting VTXOs with multiple asset projections. 
+func (q *Queries) CountUnsweptVtxosByMarkerId(ctx context.Context, markerID sql.NullString) (int64, error) { + row := q.db.QueryRowContext(ctx, countUnsweptVtxosByMarkerId, markerID) + var count int64 + err := row.Scan(&count) + return count, err +} + +const getDescendantMarkerIds = `-- name: GetDescendantMarkerIds :many +WITH RECURSIVE descendant_markers(id) AS ( + -- Base case: the marker being swept + SELECT marker.id FROM marker WHERE marker.id = ?1 + UNION + -- Recursive case: find markers whose parent_markers JSON array contains any descendant + SELECT m.id FROM marker m + INNER JOIN descendant_markers dm ON EXISTS ( + SELECT 1 FROM json_each(m.parent_markers) j WHERE j.value = dm.id + ) +) +SELECT descendant_markers.id AS marker_id FROM descendant_markers +WHERE descendant_markers.id NOT IN (SELECT sm.marker_id FROM swept_marker sm) +` + +// Recursively get a marker and all its descendants (markers whose parent_markers contain it) +// Uses json_each instead of LIKE to avoid false positives with special characters (%, _). +// Uses UNION (set semantics, not UNION ALL) so rows already produced are filtered, +// which makes this cycle-safe. Do not convert to UNION ALL: cycles in parent_markers +// would cause the recursion to run unbounded. 
+func (q *Queries) GetDescendantMarkerIds(ctx context.Context, rootMarkerID string) ([]string, error) { + rows, err := q.db.QueryContext(ctx, getDescendantMarkerIds, rootMarkerID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []string + for rows.Next() { + var marker_id string + if err := rows.Scan(&marker_id); err != nil { + return nil, err + } + items = append(items, marker_id) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const insertAsset = `-- name: InsertAsset :exec INSERT INTO asset (id, is_immutable, metadata_hash, metadata, control_asset_id) VALUES (?1, ?2, ?3, ?4, ?5) @@ -108,6 +165,38 @@ func (q *Queries) InsertAsset(ctx context.Context, arg InsertAssetParams) error return err } +const insertSweptMarker = `-- name: InsertSweptMarker :exec +INSERT INTO swept_marker (marker_id, swept_at) +VALUES (?1, ?2) +ON CONFLICT(marker_id) DO NOTHING +` + +type InsertSweptMarkerParams struct { + MarkerID string + SweptAt int64 +} + +func (q *Queries) InsertSweptMarker(ctx context.Context, arg InsertSweptMarkerParams) error { + _, err := q.db.ExecContext(ctx, insertSweptMarker, arg.MarkerID, arg.SweptAt) + return err +} + +const insertSweptVtxo = `-- name: InsertSweptVtxo :exec +INSERT OR IGNORE INTO swept_vtxo (txid, vout, swept_at) +VALUES (?, ?, ?) 
+` + +type InsertSweptVtxoParams struct { + Txid string + Vout int64 + SweptAt int64 +} + +func (q *Queries) InsertSweptVtxo(ctx context.Context, arg InsertSweptVtxoParams) error { + _, err := q.db.ExecContext(ctx, insertSweptVtxo, arg.Txid, arg.Vout, arg.SweptAt) + return err +} + const insertVtxoAssetProjection = `-- name: InsertVtxoAssetProjection :exec INSERT INTO asset_projection (asset_id, txid, vout, amount) VALUES (?1, ?2, ?3, ?4) @@ -146,6 +235,17 @@ func (q *Queries) InsertVtxoCommitmentTxid(ctx context.Context, arg InsertVtxoCo return err } +const isMarkerSwept = `-- name: IsMarkerSwept :one +SELECT EXISTS(SELECT 1 FROM swept_marker WHERE marker_id = ?1) AS is_swept +` + +func (q *Queries) IsMarkerSwept(ctx context.Context, markerID string) (int64, error) { + row := q.db.QueryRowContext(ctx, isMarkerSwept, markerID) + var is_swept int64 + err := row.Scan(&is_swept) + return is_swept, err +} + const selectActiveScriptConvictions = `-- name: SelectActiveScriptConvictions :many SELECT id, type, created_at, expires_at, crime_type, crime_round_id, crime_reason, pardoned, script FROM conviction WHERE script = ?1 @@ -220,7 +320,7 @@ func (q *Queries) SelectAllRoundIds(ctx context.Context) ([]string, error) { } const selectAllVtxos = `-- name: SelectAllVtxos :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, 
vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw ` type SelectAllVtxosRow struct { @@ -247,13 +347,15 @@ func (q *Queries) SelectAllVtxos(ctx context.Context) ([]SelectAllVtxosRow, erro &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -468,13 +570,16 @@ func (q *Queries) SelectConvictionsInTimeRange(ctx context.Context, arg SelectCo } const selectExpiringLiquidityAmount = `-- name: SelectExpiringLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0) AS amount -FROM vtxo -WHERE swept = false - AND spent = false - AND unrolled = false - AND expires_at > ?1 - AND (?2 <= 0 OR expires_at < ?2) +SELECT COALESCE(SUM(v.amount), 0) AS amount +FROM vtxo v +WHERE NOT EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) + AND v.spent = false + AND v.unrolled = false + AND v.expires_at > ?1 + AND (?2 <= 0 OR v.expires_at < ?2) ` type SelectExpiringLiquidityAmountParams struct { @@ -594,8 +699,115 @@ func (q *Queries) SelectLatestScheduledSession(ctx context.Context) (ScheduledSe return i, err } +const selectMarker = `-- name: SelectMarker :one +SELECT id, depth, parent_markers FROM marker WHERE id = ?1 +` + +func (q *Queries) SelectMarker(ctx context.Context, id string) (Marker, error) { + row := q.db.QueryRowContext(ctx, selectMarker, id) + var i Marker + err := row.Scan(&i.ID, &i.Depth, &i.ParentMarkers) + return i, err +} + +const selectMarkersByDepth = `-- name: SelectMarkersByDepth :many +SELECT id, depth, parent_markers FROM marker WHERE depth = ?1 +` + +func (q *Queries) SelectMarkersByDepth(ctx context.Context, depth int64) ([]Marker, error) { + rows, err := q.db.QueryContext(ctx, selectMarkersByDepth, depth) + if err != nil { + return 
nil, err + } + defer rows.Close() + var items []Marker + for rows.Next() { + var i Marker + if err := rows.Scan(&i.ID, &i.Depth, &i.ParentMarkers); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectMarkersByDepthRange = `-- name: SelectMarkersByDepthRange :many +SELECT id, depth, parent_markers FROM marker WHERE depth >= ?1 AND depth <= ?2 ORDER BY depth +` + +type SelectMarkersByDepthRangeParams struct { + MinDepth int64 + MaxDepth int64 +} + +func (q *Queries) SelectMarkersByDepthRange(ctx context.Context, arg SelectMarkersByDepthRangeParams) ([]Marker, error) { + rows, err := q.db.QueryContext(ctx, selectMarkersByDepthRange, arg.MinDepth, arg.MaxDepth) + if err != nil { + return nil, err + } + defer rows.Close() + var items []Marker + for rows.Next() { + var i Marker + if err := rows.Scan(&i.ID, &i.Depth, &i.ParentMarkers); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectMarkersByIds = `-- name: SelectMarkersByIds :many +SELECT id, depth, parent_markers FROM marker WHERE id IN (/*SLICE:ids*/?) +` + +func (q *Queries) SelectMarkersByIds(ctx context.Context, ids []string) ([]Marker, error) { + query := selectMarkersByIds + var queryParams []interface{} + if len(ids) > 0 { + for _, v := range ids { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:ids*/?", strings.Repeat(",?", len(ids))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []Marker + for rows.Next() { + var i Marker + if err := rows.Scan(&i.ID, &i.Depth, &i.ParentMarkers); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectNotUnrolledVtxos = `-- name: SelectNotUnrolledVtxos :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false ` type SelectNotUnrolledVtxosRow struct { @@ -622,13 +834,15 @@ func (q *Queries) SelectNotUnrolledVtxos(ctx context.Context) ([]SelectNotUnroll &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -646,7 +860,7 @@ func (q *Queries) SelectNotUnrolledVtxos(ctx context.Context) ([]SelectNotUnroll } const selectNotUnrolledVtxosWithPubkey = `-- name: SelectNotUnrolledVtxosWithPubkey :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, 
vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false AND pubkey = ?1 +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE unrolled = false AND pubkey = ?1 ` type SelectNotUnrolledVtxosWithPubkeyRow struct { @@ -673,13 +887,15 @@ func (q *Queries) SelectNotUnrolledVtxosWithPubkey(ctx context.Context, pubkey s &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -740,8 +956,62 @@ func (q *Queries) SelectOffchainTx(ctx context.Context, txid string) ([]SelectOf return items, nil } +const selectOffchainTxsByTxids = `-- name: SelectOffchainTxsByTxids :many +SELECT offchain_tx_vw.txid, offchain_tx_vw.tx, offchain_tx_vw.starting_timestamp, offchain_tx_vw.ending_timestamp, offchain_tx_vw.expiry_timestamp, offchain_tx_vw.fail_reason, offchain_tx_vw.stage_code, offchain_tx_vw.checkpoint_txid, offchain_tx_vw.checkpoint_tx, offchain_tx_vw.commitment_txid, offchain_tx_vw.is_root_commitment_txid, offchain_tx_vw.offchain_txid FROM offchain_tx_vw WHERE txid IN (/*SLICE:txids*/?) 
AND COALESCE(fail_reason, '') = '' +` + +type SelectOffchainTxsByTxidsRow struct { + OffchainTxVw OffchainTxVw +} + +func (q *Queries) SelectOffchainTxsByTxids(ctx context.Context, txids []string) ([]SelectOffchainTxsByTxidsRow, error) { + query := selectOffchainTxsByTxids + var queryParams []interface{} + if len(txids) > 0 { + for _, v := range txids { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:txids*/?", strings.Repeat(",?", len(txids))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:txids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectOffchainTxsByTxidsRow + for rows.Next() { + var i SelectOffchainTxsByTxidsRow + if err := rows.Scan( + &i.OffchainTxVw.Txid, + &i.OffchainTxVw.Tx, + &i.OffchainTxVw.StartingTimestamp, + &i.OffchainTxVw.EndingTimestamp, + &i.OffchainTxVw.ExpiryTimestamp, + &i.OffchainTxVw.FailReason, + &i.OffchainTxVw.StageCode, + &i.OffchainTxVw.CheckpointTxid, + &i.OffchainTxVw.CheckpointTx, + &i.OffchainTxVw.CommitmentTxid, + &i.OffchainTxVw.IsRootCommitmentTxid, + &i.OffchainTxVw.OffchainTxid, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectPendingSpentVtxo = `-- name: SelectPendingSpentVtxo :many -SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.swept, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.commitments, v.asset_id, v.asset_amount +SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.depth, v.markers, v.commitments, v.swept, v.asset_id, v.asset_amount FROM 
vtxo_vw v WHERE v.txid = ?1 AND v.vout = ?2 AND v.spent = TRUE AND v.unrolled = FALSE AND COALESCE(v.settled_by, '') = '' @@ -775,13 +1045,15 @@ func (q *Queries) SelectPendingSpentVtxo(ctx context.Context, arg SelectPendingS &i.SpentBy, &i.Spent, &i.Unrolled, - &i.Swept, &i.Preconfirmed, &i.SettledBy, &i.ArkTxid, &i.IntentID, &i.UpdatedAt, + &i.Depth, + &i.Markers, &i.Commitments, + &i.Swept, &i.AssetID, &i.AssetAmount, ); err != nil { @@ -799,7 +1071,7 @@ func (q *Queries) SelectPendingSpentVtxo(ctx context.Context, arg SelectPendingS } const selectPendingSpentVtxosWithPubkeys = `-- name: SelectPendingSpentVtxosWithPubkeys :many -SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.swept, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.commitments, v.asset_id, v.asset_amount +SELECT v.txid, v.vout, v.pubkey, v.amount, v.expires_at, v.created_at, v.commitment_txid, v.spent_by, v.spent, v.unrolled, v.preconfirmed, v.settled_by, v.ark_txid, v.intent_id, v.updated_at, v.depth, v.markers, v.commitments, v.swept, v.asset_id, v.asset_amount FROM vtxo_vw v WHERE v.spent = TRUE AND v.unrolled = FALSE AND COALESCE(v.settled_by, '') = '' AND v.ark_txid IS NOT NULL AND NOT EXISTS ( @@ -848,13 +1120,15 @@ func (q *Queries) SelectPendingSpentVtxosWithPubkeys(ctx context.Context, arg Se &i.SpentBy, &i.Spent, &i.Unrolled, - &i.Swept, &i.Preconfirmed, &i.SettledBy, &i.ArkTxid, &i.IntentID, &i.UpdatedAt, + &i.Depth, + &i.Markers, &i.Commitments, + &i.Swept, &i.AssetID, &i.AssetAmount, ); err != nil { @@ -872,10 +1146,13 @@ func (q *Queries) SelectPendingSpentVtxosWithPubkeys(ctx context.Context, arg Se } const selectRecoverableLiquidityAmount = `-- name: SelectRecoverableLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0) AS amount -FROM vtxo -WHERE swept = true - AND spent = false +SELECT COALESCE(SUM(v.amount), 0) AS amount +FROM vtxo v +WHERE EXISTS ( + SELECT 1 FROM swept_marker sm 
+ JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) + AND v.spent = false ` func (q *Queries) SelectRecoverableLiquidityAmount(ctx context.Context) (interface{}, error) { @@ -1073,7 +1350,7 @@ SELECT r.ending_timestamp, ( SELECT COALESCE(SUM(amount), 0) FROM ( - SELECT DISTINCT v2.txid, v2.vout, v2.pubkey, v2.amount, v2.expires_at, v2.created_at, v2.commitment_txid, v2.spent_by, v2.spent, v2.unrolled, v2.swept, v2.preconfirmed, v2.settled_by, v2.ark_txid, v2.intent_id, v2.updated_at FROM vtxo v2 JOIN intent i2 ON i2.id = v2.intent_id WHERE i2.round_id = r.id + SELECT DISTINCT v2.txid, v2.vout, v2.pubkey, v2.amount, v2.expires_at, v2.created_at, v2.commitment_txid, v2.spent_by, v2.spent, v2.unrolled, v2.preconfirmed, v2.settled_by, v2.ark_txid, v2.intent_id, v2.updated_at, v2.depth, v2.markers FROM vtxo v2 JOIN intent i2 ON i2.id = v2.intent_id WHERE i2.round_id = r.id ) as intent_with_inputs_amount ) AS total_forfeit_amount, ( @@ -1194,7 +1471,7 @@ func (q *Queries) SelectRoundVtxoTree(ctx context.Context, txid string) ([]Tx, e } const selectRoundVtxoTreeLeaves = `-- name: SelectRoundVtxoTreeLeaves :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE commitment_txid = ?1 AND preconfirmed = false +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE commitment_txid = ?1 AND preconfirmed 
= false ` type SelectRoundVtxoTreeLeavesRow struct { @@ -1221,13 +1498,15 @@ func (q *Queries) SelectRoundVtxoTreeLeaves(ctx context.Context, commitmentTxid &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1440,7 +1719,7 @@ func (q *Queries) SelectSweepableRounds(ctx context.Context) ([]string, error) { } const selectSweepableUnrolledVtxos = `-- name: SelectSweepableUnrolledVtxos :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE spent = true AND unrolled = true AND swept = false AND (COALESCE(settled_by, '') = '') +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE spent = true AND unrolled = true AND swept = false AND (COALESCE(settled_by, '') = '') ` type SelectSweepableUnrolledVtxosRow struct { @@ -1467,13 +1746,15 @@ func (q *Queries) SelectSweepableUnrolledVtxos(ctx context.Context) ([]SelectSwe &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, 
&i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1526,6 +1807,54 @@ func (q *Queries) SelectSweepableVtxoOutpointsByCommitmentTxid(ctx context.Conte return items, nil } +const selectSweptMarker = `-- name: SelectSweptMarker :one +SELECT marker_id, swept_at FROM swept_marker WHERE marker_id = ?1 +` + +func (q *Queries) SelectSweptMarker(ctx context.Context, markerID string) (SweptMarker, error) { + row := q.db.QueryRowContext(ctx, selectSweptMarker, markerID) + var i SweptMarker + err := row.Scan(&i.MarkerID, &i.SweptAt) + return i, err +} + +const selectSweptMarkersByIds = `-- name: SelectSweptMarkersByIds :many +SELECT marker_id, swept_at FROM swept_marker WHERE marker_id IN (/*SLICE:marker_ids*/?) +` + +func (q *Queries) SelectSweptMarkersByIds(ctx context.Context, markerIds []string) ([]SweptMarker, error) { + query := selectSweptMarkersByIds + var queryParams []interface{} + if len(markerIds) > 0 { + for _, v := range markerIds { + queryParams = append(queryParams, v) + } + query = strings.Replace(query, "/*SLICE:marker_ids*/?", strings.Repeat(",?", len(markerIds))[1:], 1) + } else { + query = strings.Replace(query, "/*SLICE:marker_ids*/?", "NULL", 1) + } + rows, err := q.db.QueryContext(ctx, query, queryParams...) 
+ if err != nil { + return nil, err + } + defer rows.Close() + var items []SweptMarker + for rows.Next() { + var i SweptMarker + if err := rows.Scan(&i.MarkerID, &i.SweptAt); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectSweptRoundsConnectorAddress = `-- name: SelectSweptRoundsConnectorAddress :many SELECT round.connector_address FROM round WHERE round.swept = true AND round.failed = false AND round.ended = true AND round.connector_address <> '' @@ -1623,7 +1952,7 @@ func (q *Queries) SelectTxs(ctx context.Context, arg SelectTxsParams) ([]SelectT } const selectVtxo = `-- name: SelectVtxo :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE txid = ?1 AND vout = ?2 +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE txid = ?1 AND vout = ?2 ` type SelectVtxoParams struct { @@ -1655,13 +1984,73 @@ func (q *Queries) SelectVtxo(ctx context.Context, arg SelectVtxoParams) ([]Selec &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, + &i.VtxoVw.Preconfirmed, + &i.VtxoVw.SettledBy, + &i.VtxoVw.ArkTxid, + &i.VtxoVw.IntentID, + &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, + &i.VtxoVw.Commitments, 
&i.VtxoVw.Swept, + &i.VtxoVw.AssetID, + &i.VtxoVw.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectVtxoChainByMarker = `-- name: SelectVtxoChainByMarker :many +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw +WHERE markers LIKE '%"' || ?1 || '"%' +ORDER BY vtxo_vw.depth DESC +` + +type SelectVtxoChainByMarkerRow struct { + VtxoVw VtxoVw +} + +// Get VTXOs whose markers array contains the given marker_id. +// For multiple markers, call this multiple times and deduplicate in Go. +// Uses LIKE because sqlc cannot parse json_each with view columns. 
+func (q *Queries) SelectVtxoChainByMarker(ctx context.Context, markerID sql.NullString) ([]SelectVtxoChainByMarkerRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxoChainByMarker, markerID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectVtxoChainByMarkerRow + for rows.Next() { + var i SelectVtxoChainByMarkerRow + if err := rows.Scan( + &i.VtxoVw.Txid, + &i.VtxoVw.Vout, + &i.VtxoVw.Pubkey, + &i.VtxoVw.Amount, + &i.VtxoVw.ExpiresAt, + &i.VtxoVw.CreatedAt, + &i.VtxoVw.CommitmentTxid, + &i.VtxoVw.SpentBy, + &i.VtxoVw.Spent, + &i.VtxoVw.Unrolled, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1679,7 +2068,7 @@ func (q *Queries) SelectVtxo(ctx context.Context, arg SelectVtxoParams) ([]Selec } const selectVtxoInputsByRoundId = `-- name: SelectVtxoInputsByRoundId :many -SELECT intent_with_inputs_vw.txid, intent_with_inputs_vw.vout, intent_with_inputs_vw.pubkey, intent_with_inputs_vw.amount, intent_with_inputs_vw.expires_at, intent_with_inputs_vw.created_at, intent_with_inputs_vw.commitment_txid, intent_with_inputs_vw.spent_by, intent_with_inputs_vw.spent, intent_with_inputs_vw.unrolled, intent_with_inputs_vw.swept, intent_with_inputs_vw.preconfirmed, intent_with_inputs_vw.settled_by, intent_with_inputs_vw.ark_txid, intent_with_inputs_vw.intent_id, intent_with_inputs_vw.updated_at, intent_with_inputs_vw.commitments, intent_with_inputs_vw.asset_id, intent_with_inputs_vw.asset_amount, intent_with_inputs_vw.id, intent_with_inputs_vw.round_id, intent_with_inputs_vw.proof, intent_with_inputs_vw.message, intent_with_inputs_vw.intent_txid +SELECT intent_with_inputs_vw.txid, intent_with_inputs_vw.vout, intent_with_inputs_vw.pubkey, intent_with_inputs_vw.amount, intent_with_inputs_vw.expires_at, intent_with_inputs_vw.created_at, 
intent_with_inputs_vw.commitment_txid, intent_with_inputs_vw.spent_by, intent_with_inputs_vw.spent, intent_with_inputs_vw.unrolled, intent_with_inputs_vw.preconfirmed, intent_with_inputs_vw.settled_by, intent_with_inputs_vw.ark_txid, intent_with_inputs_vw.intent_id, intent_with_inputs_vw.updated_at, intent_with_inputs_vw.depth, intent_with_inputs_vw.markers, intent_with_inputs_vw.commitments, intent_with_inputs_vw.swept, intent_with_inputs_vw.asset_id, intent_with_inputs_vw.asset_amount, intent_with_inputs_vw.id, intent_with_inputs_vw.round_id, intent_with_inputs_vw.proof, intent_with_inputs_vw.message, intent_with_inputs_vw.intent_txid FROM intent_with_inputs_vw WHERE intent_with_inputs_vw.round_id = ?1 ` @@ -1708,13 +2097,15 @@ func (q *Queries) SelectVtxoInputsByRoundId(ctx context.Context, roundID sql.Nul &i.IntentWithInputsVw.SpentBy, &i.IntentWithInputsVw.Spent, &i.IntentWithInputsVw.Unrolled, - &i.IntentWithInputsVw.Swept, &i.IntentWithInputsVw.Preconfirmed, &i.IntentWithInputsVw.SettledBy, &i.IntentWithInputsVw.ArkTxid, &i.IntentWithInputsVw.IntentID, &i.IntentWithInputsVw.UpdatedAt, + &i.IntentWithInputsVw.Depth, + &i.IntentWithInputsVw.Markers, &i.IntentWithInputsVw.Commitments, + &i.IntentWithInputsVw.Swept, &i.IntentWithInputsVw.AssetID, &i.IntentWithInputsVw.AssetAmount, &i.IntentWithInputsVw.ID, @@ -1772,14 +2163,187 @@ func (q *Queries) SelectVtxoPubKeysByCommitmentTxid(ctx context.Context, arg Sel return items, nil } +const selectVtxosByArkTxid = `-- name: SelectVtxosByArkTxid :many +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE ark_txid = ?1 +` + +type SelectVtxosByArkTxidRow struct { + VtxoVw 
VtxoVw +} + +// Get all VTXOs created by a specific ark tx (offchain tx) +func (q *Queries) SelectVtxosByArkTxid(ctx context.Context, arkTxid sql.NullString) ([]SelectVtxosByArkTxidRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosByArkTxid, arkTxid) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectVtxosByArkTxidRow + for rows.Next() { + var i SelectVtxosByArkTxidRow + if err := rows.Scan( + &i.VtxoVw.Txid, + &i.VtxoVw.Vout, + &i.VtxoVw.Pubkey, + &i.VtxoVw.Amount, + &i.VtxoVw.ExpiresAt, + &i.VtxoVw.CreatedAt, + &i.VtxoVw.CommitmentTxid, + &i.VtxoVw.SpentBy, + &i.VtxoVw.Spent, + &i.VtxoVw.Unrolled, + &i.VtxoVw.Preconfirmed, + &i.VtxoVw.SettledBy, + &i.VtxoVw.ArkTxid, + &i.VtxoVw.IntentID, + &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, + &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, + &i.VtxoVw.AssetID, + &i.VtxoVw.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectVtxosByDepthRange = `-- name: SelectVtxosByDepthRange :many + +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw +WHERE depth >= ?1 AND depth <= ?2 +ORDER BY depth DESC +` + +type SelectVtxosByDepthRangeParams struct { + MinDepth int64 + MaxDepth int64 +} + +type SelectVtxosByDepthRangeRow struct { + VtxoVw VtxoVw +} + +// Chain traversal queries for GetVtxoChain optimization +// Get all VTXOs within a depth range, useful for filling gaps between markers +func (q *Queries) SelectVtxosByDepthRange(ctx context.Context, 
arg SelectVtxosByDepthRangeParams) ([]SelectVtxosByDepthRangeRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosByDepthRange, arg.MinDepth, arg.MaxDepth) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectVtxosByDepthRangeRow + for rows.Next() { + var i SelectVtxosByDepthRangeRow + if err := rows.Scan( + &i.VtxoVw.Txid, + &i.VtxoVw.Vout, + &i.VtxoVw.Pubkey, + &i.VtxoVw.Amount, + &i.VtxoVw.ExpiresAt, + &i.VtxoVw.CreatedAt, + &i.VtxoVw.CommitmentTxid, + &i.VtxoVw.SpentBy, + &i.VtxoVw.Spent, + &i.VtxoVw.Unrolled, + &i.VtxoVw.Preconfirmed, + &i.VtxoVw.SettledBy, + &i.VtxoVw.ArkTxid, + &i.VtxoVw.IntentID, + &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, + &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, + &i.VtxoVw.AssetID, + &i.VtxoVw.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + +const selectVtxosByMarkerId = `-- name: SelectVtxosByMarkerId :many +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE markers LIKE '%"' || ?1 || '"%' +` + +type SelectVtxosByMarkerIdRow struct { + VtxoVw VtxoVw +} + +// Find VTXOs whose markers JSON array contains the given marker_id. +// Uses LIKE because sqlc cannot parse json_each with view columns. +// Safe for txid:vout format marker IDs (no special characters). 
+func (q *Queries) SelectVtxosByMarkerId(ctx context.Context, markerID sql.NullString) ([]SelectVtxosByMarkerIdRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosByMarkerId, markerID) + if err != nil { + return nil, err + } + defer rows.Close() + var items []SelectVtxosByMarkerIdRow + for rows.Next() { + var i SelectVtxosByMarkerIdRow + if err := rows.Scan( + &i.VtxoVw.Txid, + &i.VtxoVw.Vout, + &i.VtxoVw.Pubkey, + &i.VtxoVw.Amount, + &i.VtxoVw.ExpiresAt, + &i.VtxoVw.CreatedAt, + &i.VtxoVw.CommitmentTxid, + &i.VtxoVw.SpentBy, + &i.VtxoVw.Spent, + &i.VtxoVw.Unrolled, + &i.VtxoVw.Preconfirmed, + &i.VtxoVw.SettledBy, + &i.VtxoVw.ArkTxid, + &i.VtxoVw.IntentID, + &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, + &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, + &i.VtxoVw.AssetID, + &i.VtxoVw.AssetAmount, + ); err != nil { + return nil, err + } + items = append(items, i) + } + if err := rows.Close(); err != nil { + return nil, err + } + if err := rows.Err(); err != nil { + return nil, err + } + return items, nil +} + const selectVtxosOutpointsByArkTxidRecursive = `-- name: SelectVtxosOutpointsByArkTxidRecursive :many WITH RECURSIVE descendants_chain AS ( - -- seed + -- seed: only the specific outpoint, not all vouts of the txid SELECT v.txid, v.vout, v.preconfirmed, v.ark_txid, v.spent_by, 0 AS depth, v.txid||':'||v.vout AS visited FROM vtxo v - WHERE v.txid = ?1 + WHERE v.txid = ?1 AND v.vout = ?2 UNION ALL @@ -1803,14 +2367,23 @@ FROM nodes ORDER BY depth, txid, vout ` +type SelectVtxosOutpointsByArkTxidRecursiveParams struct { + Txid string + Vout int64 +} + type SelectVtxosOutpointsByArkTxidRecursiveRow struct { Txid string Vout int64 } +// Returns the seed outpoint (txid, vout) and all VTXOs descending from it +// via ark_txid links. Scoped to a single outpoint (not the whole txid) so that +// sibling outputs of the seed tx, which belong to independent lineages, are +// not included. 
// keep one row per node at its MIN depth (layers) -func (q *Queries) SelectVtxosOutpointsByArkTxidRecursive(ctx context.Context, txid string) ([]SelectVtxosOutpointsByArkTxidRecursiveRow, error) { - rows, err := q.db.QueryContext(ctx, selectVtxosOutpointsByArkTxidRecursive, txid) +func (q *Queries) SelectVtxosOutpointsByArkTxidRecursive(ctx context.Context, arg SelectVtxosOutpointsByArkTxidRecursiveParams) ([]SelectVtxosOutpointsByArkTxidRecursiveRow, error) { + rows, err := q.db.QueryContext(ctx, selectVtxosOutpointsByArkTxidRecursive, arg.Txid, arg.Vout) if err != nil { return nil, err } @@ -1833,7 +2406,7 @@ func (q *Queries) SelectVtxosOutpointsByArkTxidRecursive(ctx context.Context, tx } const selectVtxosWithPubkeys = `-- name: SelectVtxosWithPubkeys :many -SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.swept, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.commitments, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE updated_at >= ?1 +SELECT vtxo_vw.txid, vtxo_vw.vout, vtxo_vw.pubkey, vtxo_vw.amount, vtxo_vw.expires_at, vtxo_vw.created_at, vtxo_vw.commitment_txid, vtxo_vw.spent_by, vtxo_vw.spent, vtxo_vw.unrolled, vtxo_vw.preconfirmed, vtxo_vw.settled_by, vtxo_vw.ark_txid, vtxo_vw.intent_id, vtxo_vw.updated_at, vtxo_vw.depth, vtxo_vw.markers, vtxo_vw.commitments, vtxo_vw.swept, vtxo_vw.asset_id, vtxo_vw.asset_amount FROM vtxo_vw WHERE updated_at >= ?1 AND (CAST(?2 AS INTEGER) = 0 OR updated_at <= CAST(?2 AS INTEGER)) AND pubkey IN (/*SLICE:pubkeys*/?) 
` @@ -1880,13 +2453,15 @@ func (q *Queries) SelectVtxosWithPubkeys(ctx context.Context, arg SelectVtxosWit &i.VtxoVw.SpentBy, &i.VtxoVw.Spent, &i.VtxoVw.Unrolled, - &i.VtxoVw.Swept, &i.VtxoVw.Preconfirmed, &i.VtxoVw.SettledBy, &i.VtxoVw.ArkTxid, &i.VtxoVw.IntentID, &i.VtxoVw.UpdatedAt, + &i.VtxoVw.Depth, + &i.VtxoVw.Markers, &i.VtxoVw.Commitments, + &i.VtxoVw.Swept, &i.VtxoVw.AssetID, &i.VtxoVw.AssetAmount, ); err != nil { @@ -1942,6 +2517,21 @@ func (q *Queries) UpdateVtxoIntentId(ctx context.Context, arg UpdateVtxoIntentId return err } +const updateVtxoMarkers = `-- name: UpdateVtxoMarkers :exec +UPDATE vtxo SET markers = ?1 WHERE txid = ?2 AND vout = ?3 +` + +type UpdateVtxoMarkersParams struct { + Markers string + Txid string + Vout int64 +} + +func (q *Queries) UpdateVtxoMarkers(ctx context.Context, arg UpdateVtxoMarkersParams) error { + _, err := q.db.ExecContext(ctx, updateVtxoMarkers, arg.Markers, arg.Txid, arg.Vout) + return err +} + const updateVtxoSettled = `-- name: UpdateVtxoSettled :exec UPDATE vtxo SET spent = true, spent_by = ?1, settled_by = ?2, updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) WHERE txid = ?3 AND vout = ?4 @@ -1986,23 +2576,6 @@ func (q *Queries) UpdateVtxoSpent(ctx context.Context, arg UpdateVtxoSpentParams return err } -const updateVtxoSweptIfNotSwept = `-- name: UpdateVtxoSweptIfNotSwept :execrows -UPDATE vtxo SET swept = true, updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) WHERE txid = ?1 AND vout = ?2 AND swept = false -` - -type UpdateVtxoSweptIfNotSweptParams struct { - Txid string - Vout int64 -} - -func (q *Queries) UpdateVtxoSweptIfNotSwept(ctx context.Context, arg UpdateVtxoSweptIfNotSweptParams) (int64, error) { - result, err := q.db.ExecContext(ctx, updateVtxoSweptIfNotSwept, arg.Txid, arg.Vout) - if err != nil { - return 0, err - } - return result.RowsAffected() -} - const updateVtxoUnrolled = `-- name: UpdateVtxoUnrolled :exec 
UPDATE vtxo SET unrolled = true, updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) WHERE txid = ?1 AND vout = ?2 ` @@ -2111,6 +2684,27 @@ func (q *Queries) UpsertIntent(ctx context.Context, arg UpsertIntentParams) erro return err } +const upsertMarker = `-- name: UpsertMarker :exec + +INSERT INTO marker (id, depth, parent_markers) +VALUES (?1, ?2, ?3) +ON CONFLICT(id) DO UPDATE SET + depth = EXCLUDED.depth, + parent_markers = EXCLUDED.parent_markers +` + +type UpsertMarkerParams struct { + ID string + Depth int64 + ParentMarkers sql.NullString +} + +// Marker queries +func (q *Queries) UpsertMarker(ctx context.Context, arg UpsertMarkerParams) error { + _, err := q.db.ExecContext(ctx, upsertMarker, arg.ID, arg.Depth, arg.ParentMarkers) + return err +} + const upsertOffchainTx = `-- name: UpsertOffchainTx :exec INSERT INTO offchain_tx (txid, tx, starting_timestamp, ending_timestamp, expiry_timestamp, fail_reason, stage_code) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7) @@ -2298,11 +2892,11 @@ func (q *Queries) UpsertTx(ctx context.Context, arg UpsertTxParams) error { const upsertVtxo = `-- name: UpsertVtxo :exec INSERT INTO vtxo ( txid, vout, pubkey, amount, commitment_txid, settled_by, ark_txid, - spent_by, spent, unrolled, swept, preconfirmed, expires_at, created_at, updated_at + spent_by, spent, unrolled, preconfirmed, expires_at, created_at, updated_at, depth, markers ) VALUES ( ?1, ?2, ?3, ?4, ?5, ?6, ?7, - ?8, ?9, ?10, ?11, ?12, ?13, ?14, (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) + ?8, ?9, ?10, ?11, ?12, ?13, (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)), ?14, ?15 ) ON CONFLICT(txid, vout) DO UPDATE SET pubkey = EXCLUDED.pubkey, amount = EXCLUDED.amount, @@ -2312,11 +2906,12 @@ VALUES ( spent_by = EXCLUDED.spent_by, spent = EXCLUDED.spent, unrolled = EXCLUDED.unrolled, - swept = EXCLUDED.swept, preconfirmed = EXCLUDED.preconfirmed, expires_at = 
EXCLUDED.expires_at, created_at = EXCLUDED.created_at, - updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) + updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)), + depth = EXCLUDED.depth, + markers = EXCLUDED.markers ` type UpsertVtxoParams struct { @@ -2330,10 +2925,11 @@ type UpsertVtxoParams struct { SpentBy sql.NullString Spent bool Unrolled bool - Swept bool Preconfirmed bool ExpiresAt int64 CreatedAt int64 + Depth int64 + Markers string } func (q *Queries) UpsertVtxo(ctx context.Context, arg UpsertVtxoParams) error { @@ -2348,10 +2944,11 @@ func (q *Queries) UpsertVtxo(ctx context.Context, arg UpsertVtxoParams) error { arg.SpentBy, arg.Spent, arg.Unrolled, - arg.Swept, arg.Preconfirmed, arg.ExpiresAt, arg.CreatedAt, + arg.Depth, + arg.Markers, ) return err } diff --git a/internal/infrastructure/db/sqlite/sqlc/query.sql b/internal/infrastructure/db/sqlite/sqlc/query.sql index f4e2e9fe1..9ed1356b3 100644 --- a/internal/infrastructure/db/sqlite/sqlc/query.sql +++ b/internal/infrastructure/db/sqlite/sqlc/query.sql @@ -48,11 +48,11 @@ ON CONFLICT(intent_id, pubkey, onchain_address) DO UPDATE SET -- name: UpsertVtxo :exec INSERT INTO vtxo ( txid, vout, pubkey, amount, commitment_txid, settled_by, ark_txid, - spent_by, spent, unrolled, swept, preconfirmed, expires_at, created_at, updated_at + spent_by, spent, unrolled, preconfirmed, expires_at, created_at, updated_at, depth, markers ) VALUES ( @txid, @vout, @pubkey, @amount, @commitment_txid, @settled_by, @ark_txid, - @spent_by, @spent, @unrolled, @swept, @preconfirmed, @expires_at, @created_at, (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) + @spent_by, @spent, @unrolled, @preconfirmed, @expires_at, @created_at, (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)), @depth, @markers ) ON CONFLICT(txid, vout) DO UPDATE SET pubkey = EXCLUDED.pubkey, amount = EXCLUDED.amount, @@ -62,11 
+62,12 @@ VALUES ( spent_by = EXCLUDED.spent_by, spent = EXCLUDED.spent, unrolled = EXCLUDED.unrolled, - swept = EXCLUDED.swept, preconfirmed = EXCLUDED.preconfirmed, expires_at = EXCLUDED.expires_at, created_at = EXCLUDED.created_at, - updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)); + updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)), + depth = EXCLUDED.depth, + markers = EXCLUDED.markers; -- name: InsertVtxoCommitmentTxid :exec INSERT INTO vtxo_commitment_txid (vtxo_txid, vtxo_vout, commitment_txid) @@ -116,9 +117,6 @@ UPDATE vtxo SET expires_at = @expires_at WHERE txid = @txid AND vout = @vout; -- name: UpdateVtxoUnrolled :exec UPDATE vtxo SET unrolled = true, updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) WHERE txid = @txid AND vout = @vout; --- name: UpdateVtxoSweptIfNotSwept :execrows -UPDATE vtxo SET swept = true, updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) WHERE txid = @txid AND vout = @vout AND swept = false; - -- name: UpdateVtxoSettled :exec UPDATE vtxo SET spent = true, spent_by = @spent_by, settled_by = @settled_by, updated_at = (CAST((strftime('%s','now') || substr(strftime('%f','now'),4,3)) AS INTEGER)) WHERE txid = @txid AND vout = @vout; @@ -267,23 +265,32 @@ SELECT sqlc.embed(vtxo_vw) FROM vtxo_vw WHERE updated_at >= :after AND pubkey IN (sqlc.slice('pubkeys')); -- name: SelectExpiringLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0) AS amount -FROM vtxo -WHERE swept = false - AND spent = false - AND unrolled = false - AND expires_at > sqlc.arg('after') - AND (sqlc.arg('before') <= 0 OR expires_at < sqlc.arg('before')); +SELECT COALESCE(SUM(v.amount), 0) AS amount +FROM vtxo v +WHERE NOT EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) + AND v.spent = false + AND v.unrolled = false + AND v.expires_at > sqlc.arg('after') 
+ AND (sqlc.arg('before') <= 0 OR v.expires_at < sqlc.arg('before')); -- name: SelectRecoverableLiquidityAmount :one -SELECT COALESCE(SUM(amount), 0) AS amount -FROM vtxo -WHERE swept = true - AND spent = false; +SELECT COALESCE(SUM(v.amount), 0) AS amount +FROM vtxo v +WHERE EXISTS ( + SELECT 1 FROM swept_marker sm + JOIN json_each(v.markers) j ON j.value = sm.marker_id + ) + AND v.spent = false; -- name: SelectOffchainTx :many SELECT sqlc.embed(offchain_tx_vw) FROM offchain_tx_vw WHERE txid = @txid AND COALESCE(fail_reason, '') = ''; +-- name: SelectOffchainTxsByTxids :many +SELECT sqlc.embed(offchain_tx_vw) FROM offchain_tx_vw WHERE txid IN (sqlc.slice('txids')) AND COALESCE(fail_reason, '') = ''; + -- name: SelectLatestScheduledSession :one SELECT * FROM scheduled_session ORDER BY updated_at DESC LIMIT 1; @@ -302,13 +309,17 @@ WHERE v.swept = false OR (',' || COALESCE(v.commitments, '') || ',') LIKE '%,' || @commitment_txid || ',%'); -- name: SelectVtxosOutpointsByArkTxidRecursive :many +-- Returns the seed outpoint (txid, vout) and all VTXOs descending from it +-- via ark_txid links. Scoped to a single outpoint (not the whole txid) so that +-- sibling outputs of the seed tx, which belong to independent lineages, are +-- not included. 
WITH RECURSIVE descendants_chain AS ( - -- seed + -- seed: only the specific outpoint, not all vouts of the txid SELECT v.txid, v.vout, v.preconfirmed, v.ark_txid, v.spent_by, 0 AS depth, v.txid||':'||v.vout AS visited FROM vtxo v - WHERE v.txid = @txid + WHERE v.txid = @txid AND v.vout = @vout UNION ALL @@ -435,6 +446,96 @@ VALUES ('', '', '', ''); SELECT id, txid, proof, message FROM intent WHERE txid = @txid; +-- Marker queries + +-- name: UpsertMarker :exec +INSERT INTO marker (id, depth, parent_markers) +VALUES (@id, @depth, @parent_markers) +ON CONFLICT(id) DO UPDATE SET + depth = EXCLUDED.depth, + parent_markers = EXCLUDED.parent_markers; + +-- name: SelectMarker :one +SELECT * FROM marker WHERE id = @id; + +-- name: SelectMarkersByDepth :many +SELECT * FROM marker WHERE depth = @depth; + +-- name: SelectMarkersByDepthRange :many +SELECT * FROM marker WHERE depth >= @min_depth AND depth <= @max_depth ORDER BY depth; + +-- name: SelectMarkersByIds :many +SELECT * FROM marker WHERE id IN (sqlc.slice('ids')); + +-- name: InsertSweptMarker :exec +INSERT INTO swept_marker (marker_id, swept_at) +VALUES (@marker_id, @swept_at) +ON CONFLICT(marker_id) DO NOTHING; + + +-- name: SelectSweptMarker :one +SELECT * FROM swept_marker WHERE marker_id = @marker_id; + +-- name: SelectSweptMarkersByIds :many +SELECT * FROM swept_marker WHERE marker_id IN (sqlc.slice('marker_ids')); + +-- name: IsMarkerSwept :one +SELECT EXISTS(SELECT 1 FROM swept_marker WHERE marker_id = @marker_id) AS is_swept; + +-- name: GetDescendantMarkerIds :many +-- Recursively get a marker and all its descendants (markers whose parent_markers contain it) +-- Uses json_each instead of LIKE to avoid false positives with special characters (%, _). +-- Uses UNION (set semantics, not UNION ALL) so rows already produced are filtered, +-- which makes this cycle-safe. Do not convert to UNION ALL: cycles in parent_markers +-- would cause the recursion to run unbounded. 
+WITH RECURSIVE descendant_markers(id) AS ( + -- Base case: the marker being swept + SELECT marker.id FROM marker WHERE marker.id = @root_marker_id + UNION + -- Recursive case: find markers whose parent_markers JSON array contains any descendant + SELECT m.id FROM marker m + INNER JOIN descendant_markers dm ON EXISTS ( + SELECT 1 FROM json_each(m.parent_markers) j WHERE j.value = dm.id + ) +) +SELECT descendant_markers.id AS marker_id FROM descendant_markers +WHERE descendant_markers.id NOT IN (SELECT sm.marker_id FROM swept_marker sm); + +-- name: UpdateVtxoMarkers :exec +UPDATE vtxo SET markers = @markers WHERE txid = @txid AND vout = @vout; + +-- name: SelectVtxosByMarkerId :many +-- Find VTXOs whose markers JSON array contains the given marker_id. +-- Uses LIKE because sqlc cannot parse json_each with view columns. +-- Safe for txid:vout format marker IDs (no special characters). +SELECT sqlc.embed(vtxo_vw) FROM vtxo_vw WHERE markers LIKE '%"' || @marker_id || '"%'; + +-- name: CountUnsweptVtxosByMarkerId :one +-- Count VTXOs whose markers JSON array contains the given marker_id and are not swept. +-- Uses LIKE because sqlc cannot parse json_each with view columns. +-- Uses DISTINCT to avoid double-counting VTXOs with multiple asset projections. +SELECT COUNT(DISTINCT txid || ':' || CAST(vout AS TEXT)) FROM vtxo_vw WHERE markers LIKE '%"' || @marker_id || '"%' AND swept = false; + +-- Chain traversal queries for GetVtxoChain optimization + +-- name: SelectVtxosByDepthRange :many +-- Get all VTXOs within a depth range, useful for filling gaps between markers +SELECT sqlc.embed(vtxo_vw) FROM vtxo_vw +WHERE depth >= @min_depth AND depth <= @max_depth +ORDER BY depth DESC; + +-- name: SelectVtxosByArkTxid :many +-- Get all VTXOs created by a specific ark tx (offchain tx) +SELECT sqlc.embed(vtxo_vw) FROM vtxo_vw WHERE ark_txid = @ark_txid; + +-- name: SelectVtxoChainByMarker :many +-- Get VTXOs whose markers array contains the given marker_id. 
+-- For multiple markers, call this multiple times and deduplicate in Go. +-- Uses LIKE because sqlc cannot parse json_each with view columns. +SELECT sqlc.embed(vtxo_vw) FROM vtxo_vw +WHERE markers LIKE '%"' || @marker_id || '"%' +ORDER BY vtxo_vw.depth DESC; + -- name: InsertAsset :exec INSERT INTO asset (id, is_immutable, metadata_hash, metadata, control_asset_id) VALUES (@id, @is_immutable, @metadata_hash, @metadata, @control_asset_id); @@ -454,4 +555,8 @@ WHERE v.asset_id = ? AND v.spent = false AND v.asset_amount > 0; SELECT control_asset_id FROM asset WHERE id = ?; -- name: SelectAssetExists :one -SELECT 1 FROM asset WHERE id = ? LIMIT 1; \ No newline at end of file +SELECT 1 FROM asset WHERE id = ? LIMIT 1; + +-- name: InsertSweptVtxo :exec +INSERT OR IGNORE INTO swept_vtxo (txid, vout, swept_at) +VALUES (?, ?, ?); diff --git a/internal/infrastructure/db/sqlite/vtxo_repo.go b/internal/infrastructure/db/sqlite/vtxo_repo.go index fd86e1fc5..6035c3e7c 100644 --- a/internal/infrastructure/db/sqlite/vtxo_repo.go +++ b/internal/infrastructure/db/sqlite/vtxo_repo.go @@ -3,6 +3,7 @@ package sqlitedb import ( "context" "database/sql" + "encoding/json" "errors" "fmt" "sort" @@ -11,6 +12,7 @@ import ( "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/internal/infrastructure/db/sqlite/sqlc/queries" + log "github.com/sirupsen/logrus" ) type vtxoRepository struct { @@ -42,6 +44,16 @@ func (v *vtxoRepository) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) erro for i := range vtxos { vtxo := vtxos[i] + markersToMarshal := vtxo.MarkerIDs + if markersToMarshal == nil { + markersToMarshal = []string{} + } + markersData, err := json.Marshal(markersToMarshal) + if err != nil { + return fmt.Errorf("failed to marshal markers: %w", err) + } + markersJSON := string(markersData) + if err := querierWithTx.UpsertVtxo( ctx, queries.UpsertVtxoParams{ Txid: vtxo.Txid, @@ -55,7 +67,6 @@ func (v *vtxoRepository) AddVtxos(ctx context.Context, vtxos 
[]domain.Vtxo) erro }, Spent: vtxo.Spent, Unrolled: vtxo.Unrolled, - Swept: vtxo.Swept, Preconfirmed: vtxo.Preconfirmed, ExpiresAt: vtxo.ExpiresAt, CreatedAt: vtxo.CreatedAt, @@ -67,6 +78,8 @@ func (v *vtxoRepository) AddVtxos(ctx context.Context, vtxos []domain.Vtxo) erro String: vtxo.SettledBy, Valid: len(vtxo.SettledBy) > 0, }, + Depth: int64(vtxo.Depth), + Markers: markersJSON, }, ); err != nil { return err @@ -334,35 +347,6 @@ func (v *vtxoRepository) SpendVtxos( return execTx(ctx, v.db, txBody) } -func (v *vtxoRepository) SweepVtxos(ctx context.Context, vtxos []domain.Outpoint) (int, error) { - sweptCount := 0 - txBody := func(querierWithTx *queries.Queries) error { - for _, outpoint := range vtxos { - affectedRows, err := querierWithTx.UpdateVtxoSweptIfNotSwept( - ctx, - queries.UpdateVtxoSweptIfNotSweptParams{ - Txid: outpoint.Txid, - Vout: int64(outpoint.VOut), - }, - ) - if err != nil { - return err - } - if affectedRows > 0 { - sweptCount++ - } - } - - return nil - } - - if err := execTx(ctx, v.db, txBody); err != nil { - return -1, err - } - - return sweptCount, nil -} - func (v *vtxoRepository) UpdateVtxosExpiration( ctx context.Context, vtxos []domain.Outpoint, expiresAt int64, ) error { @@ -442,9 +426,15 @@ func (v *vtxoRepository) GetSweepableVtxosByCommitmentTxid( } func (v *vtxoRepository) GetAllChildrenVtxos( - ctx context.Context, txid string, + ctx context.Context, outpoint domain.Outpoint, ) ([]domain.Outpoint, error) { - res, err := v.querier.SelectVtxosOutpointsByArkTxidRecursive(ctx, txid) + res, err := v.querier.SelectVtxosOutpointsByArkTxidRecursive( + ctx, + queries.SelectVtxosOutpointsByArkTxidRecursiveParams{ + Txid: outpoint.Txid, + Vout: int64(outpoint.VOut), + }, + ) if err != nil { return nil, err } @@ -568,10 +558,12 @@ func rowToVtxo(row queries.VtxoVw) domain.Vtxo { SpentBy: row.SpentBy.String, Spent: row.Spent, Unrolled: row.Unrolled, - Swept: row.Swept, + Swept: toBool(row.Swept), Preconfirmed: row.Preconfirmed, ExpiresAt: 
row.ExpiresAt, CreatedAt: row.CreatedAt, + Depth: uint32(row.Depth), + MarkerIDs: parseMarkersJSONFromVtxo(row.Markers), Assets: assets, } } @@ -585,6 +577,36 @@ func rowToAsset(row queries.VtxoVw) domain.AssetDenomination { } } +// toBool converts an interface{} (from a SQLite view expression that sqlc types +// as interface{}) to a Go bool. Handles int64(0/1) from SQLite. +func toBool(v interface{}) bool { + switch val := v.(type) { + case bool: + return val + case int64: + return val != 0 + case int: + return val != 0 + default: + return false + } +} + +// parseMarkersJSONFromVtxo parses a JSON array string into a slice of strings for vtxo repo. +// Logs and returns nil if the JSON is malformed so that corrupt markers are +// surfaced instead of silently treated as empty. +func parseMarkersJSONFromVtxo(markersJSON string) []string { + if markersJSON == "" { + return nil + } + var markerIDs []string + if err := json.Unmarshal([]byte(markersJSON), &markerIDs); err != nil { + log.WithError(err).Warnf("failed to parse markers JSON: %q", markersJSON) + return nil + } + return markerIDs +} + func readRows(rows []queries.VtxoVw) ([]domain.Vtxo, error) { vtxosByOutpoint := make(map[string]domain.Vtxo) for _, row := range rows { diff --git a/internal/infrastructure/db/swept_vtxo_down_test.go b/internal/infrastructure/db/swept_vtxo_down_test.go new file mode 100644 index 000000000..7de65f70e --- /dev/null +++ b/internal/infrastructure/db/swept_vtxo_down_test.go @@ -0,0 +1,112 @@ +package db_test + +import ( + "database/sql" + "embed" + "strings" + "testing" + + sqlitedb "github.com/arkade-os/arkd/internal/infrastructure/db/sqlite" + "github.com/golang-migrate/migrate/v4" + sqlitemigrate "github.com/golang-migrate/migrate/v4/database/sqlite" + "github.com/golang-migrate/migrate/v4/source/iofs" + "github.com/stretchr/testify/require" +) + +//go:embed sqlite/migration/* +var sweptVtxoTestMigrations embed.FS + +const sweptVtxoMigrationVersion = 20260416120000 + +// 
TestSweptVtxoDownMigration_Guard verifies that the sqlite down migration +// for 20260416120000_add_swept_vtxo aborts when swept_vtxo has data (to +// prevent silently resurrecting swept VTXOs) but proceeds cleanly when the +// table is empty. +func TestSweptVtxoDownMigration_Guard(t *testing.T) { + t.Run("aborts_when_swept_vtxo_has_data", func(t *testing.T) { + m, db := newSweptVtxoMigrator(t) + t.Cleanup(func() { + // Force the version so cleanup doesn't complain about a dirty + // migration state left by the expected failure. + _ = m.Force(sweptVtxoMigrationVersion) + //nolint:errcheck + db.Close() + }) + + require.NoError(t, m.Migrate(sweptVtxoMigrationVersion)) + + _, err := db.Exec( + `INSERT INTO swept_vtxo (txid, vout, swept_at) VALUES (?, ?, ?)`, + "deadbeef", 0, 1234567890, + ) + require.NoError(t, err, "seed insert must succeed before the guard test") + + // Stepping back one migration should fail: the guard trigger fires + // because swept_vtxo has a row. + err = m.Steps(-1) + require.Error(t, err, "down migration must abort when swept_vtxo is non-empty") + require.True(t, + strings.Contains(err.Error(), "irreversible migration") || + strings.Contains(err.Error(), "swept_vtxo"), + "error should mention the guard: got %v", err, + ) + + // swept_vtxo must still exist and still contain the row — the + // transaction aborted before the DROP ran. + var count int + err = db.QueryRow(`SELECT count(*) FROM swept_vtxo`).Scan(&count) + require.NoError(t, err, + "swept_vtxo should still exist after the aborted down migration") + require.Equal(t, 1, count, + "swept_vtxo data must be preserved when the guard fires") + }) + + t.Run("proceeds_when_swept_vtxo_is_empty", func(t *testing.T) { + m, db := newSweptVtxoMigrator(t) + t.Cleanup(func() { + //nolint:errcheck + db.Close() + }) + + require.NoError(t, m.Migrate(sweptVtxoMigrationVersion)) + + // swept_vtxo exists but is empty — the guard should not fire. 
+ var count int + require.NoError(t, db.QueryRow(`SELECT count(*) FROM swept_vtxo`).Scan(&count)) + require.Equal(t, 0, count) + + require.NoError(t, m.Steps(-1), + "down migration must succeed when swept_vtxo is empty") + + // swept_vtxo should be gone; vtxo_vw should still exist (restored by + // the down migration body that runs past the guard). + err := db.QueryRow(`SELECT count(*) FROM swept_vtxo`).Scan(&count) + require.Error(t, err, "swept_vtxo should have been dropped") + require.Contains(t, err.Error(), "no such table") + + rows, err := db.Query(`SELECT name FROM sqlite_master WHERE type='view' AND name='vtxo_vw'`) + require.NoError(t, err) + defer rows.Close() + require.True(t, rows.Next(), + "vtxo_vw view should have been recreated by the down migration") + }) +} + +// newSweptVtxoMigrator returns a fresh in-memory sqlite DB paired with a +// migrate.Migrate bound to the embedded sqlite migration source. +func newSweptVtxoMigrator(t *testing.T) (*migrate.Migrate, *sql.DB) { + t.Helper() + db, err := sqlitedb.OpenDb(":memory:") + require.NoError(t, err) + + driver, err := sqlitemigrate.WithInstance(db, &sqlitemigrate.Config{}) + require.NoError(t, err) + + source, err := iofs.New(sweptVtxoTestMigrations, "sqlite/migration") + require.NoError(t, err) + + m, err := migrate.NewWithInstance("iofs", source, "arkdb", driver) + require.NoError(t, err) + + return m, db +} diff --git a/internal/interface/grpc/handlers/indexer.go b/internal/interface/grpc/handlers/indexer.go index 636e04b2d..a47ce1e91 100644 --- a/internal/interface/grpc/handlers/indexer.go +++ b/internal/interface/grpc/handlers/indexer.go @@ -336,7 +336,13 @@ func (e *indexerService) GetVtxoChain( if parseErr != nil { return nil, status.Error(codes.InvalidArgument, parseErr.Error()) } - resp, err = e.indexerSvc.GetVtxoChain(ctx, request.GetToken(), *outpoint, page) + resp, err = e.indexerSvc.GetVtxoChain( + ctx, + request.GetToken(), + *outpoint, + page, + request.GetPageToken(), + ) } if err != 
nil { return nil, status.Errorf(codes.Internal, "%s", err.Error()) @@ -365,9 +371,10 @@ func (e *indexerService) GetVtxoChain( } return &arkv1.GetVtxoChainResponse{ - Chain: chain, - Page: protoPage(resp.Page), - AuthToken: resp.AuthToken, + Chain: chain, + Page: protoPage(resp.Page), + AuthToken: resp.AuthToken, + NextPageToken: resp.NextPageToken, }, nil } @@ -775,6 +782,7 @@ func newIndexerVtxo(vtxo domain.Vtxo) *arkv1.IndexerVtxo { CommitmentTxids: vtxo.CommitmentTxids, SettledBy: vtxo.SettledBy, ArkTxid: vtxo.ArkTxid, + Depth: vtxo.Depth, Assets: assets, } } diff --git a/internal/interface/grpc/handlers/parser.go b/internal/interface/grpc/handlers/parser.go index 6b21fef5e..f839b3ec8 100644 --- a/internal/interface/grpc/handlers/parser.go +++ b/internal/interface/grpc/handlers/parser.go @@ -223,6 +223,7 @@ func (v vtxoList) toProto() []*arkv1.Vtxo { CreatedAt: vv.CreatedAt, SettledBy: vv.SettledBy, ArkTxid: vv.ArkTxid, + Depth: vv.Depth, Assets: toAssets(vv), }) } diff --git a/internal/interface/grpc/handlers/parser_test.go b/internal/interface/grpc/handlers/parser_test.go index 7e54361a3..f795d6db1 100644 --- a/internal/interface/grpc/handlers/parser_test.go +++ b/internal/interface/grpc/handlers/parser_test.go @@ -6,14 +6,227 @@ import ( "testing" arkv1 "github.com/arkade-os/arkd/api-spec/protobuf/gen/ark/v1" + "github.com/arkade-os/arkd/internal/core/application" + "github.com/arkade-os/arkd/internal/core/domain" "github.com/arkade-os/arkd/pkg/ark-lib/intent" "github.com/stretchr/testify/require" ) +func TestVtxoListToProto_DepthAndNewFields(t *testing.T) { + vtxos := vtxoList{ + { + Outpoint: domain.Outpoint{Txid: "aaa", VOut: 0}, + PubKey: "25a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + Amount: 50000, + CommitmentTxids: []string{"commit-1"}, + Spent: false, + ExpiresAt: 1700000000, + SpentBy: "spender-tx", + Swept: false, + Preconfirmed: true, + Unrolled: false, + CreatedAt: 1699000000, + SettledBy: "settler-tx", + ArkTxid: 
"ark-tx-1", + Depth: 42, + }, + { + Outpoint: domain.Outpoint{Txid: "bbb", VOut: 1}, + PubKey: "33ffb3dee353b1a9ebe4ced64b946238d0a4ac364f275d771da6ad2445d07ae0", + Amount: 100000, + CommitmentTxids: []string{"commit-2", "commit-3"}, + Spent: true, + ExpiresAt: 1700100000, + SpentBy: "", + Swept: true, + Preconfirmed: false, + Unrolled: true, + CreatedAt: 1699100000, + SettledBy: "", + ArkTxid: "", + Depth: 200, + }, + { + Outpoint: domain.Outpoint{Txid: "ccc", VOut: 2}, + PubKey: "25a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + Amount: 0, + Depth: 0, + }, + } + + protos := vtxos.toProto() + require.Len(t, protos, 3) + + // First VTXO: all fields populated + p0 := protos[0] + require.Equal(t, "aaa", p0.Outpoint.Txid) + require.Equal(t, uint32(0), p0.Outpoint.Vout) + require.Equal(t, uint64(50000), p0.Amount) + require.Equal(t, []string{"commit-1"}, p0.CommitmentTxids) + require.False(t, p0.IsSpent) + require.Equal(t, int64(1700000000), p0.ExpiresAt) + require.Equal(t, "spender-tx", p0.SpentBy) + require.False(t, p0.IsSwept) + require.True(t, p0.IsPreconfirmed) + require.False(t, p0.IsUnrolled) + require.Equal(t, int64(1699000000), p0.CreatedAt) + require.Equal(t, "settler-tx", p0.SettledBy) + require.Equal(t, "ark-tx-1", p0.ArkTxid) + require.Equal(t, uint32(42), p0.Depth) + require.Equal( + t, + "512025a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + p0.Script, + ) + + // Second VTXO: different depth, spent/swept/unrolled flags + p1 := protos[1] + require.Equal(t, "bbb", p1.Outpoint.Txid) + require.Equal(t, uint32(1), p1.Outpoint.Vout) + require.Equal(t, uint32(200), p1.Depth) + require.True(t, p1.IsSpent) + require.True(t, p1.IsSwept) + require.True(t, p1.IsUnrolled) + require.Equal(t, []string{"commit-2", "commit-3"}, p1.CommitmentTxids) + + // Third VTXO: zero depth (batch vtxo) + p2 := protos[2] + require.Equal(t, uint32(0), p2.Depth) + require.Equal(t, uint64(0), p2.Amount) +} + +func TestNewIndexerVtxo_DepthMapping(t 
*testing.T) { + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "idx-tx", VOut: 3}, + PubKey: "25a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + Amount: 75000, + CommitmentTxids: []string{"commit-a"}, + CreatedAt: 1699500000, + ExpiresAt: 1700500000, + Preconfirmed: true, + Swept: false, + Unrolled: false, + Spent: false, + SpentBy: "spender", + SettledBy: "settler", + ArkTxid: "ark-tx-idx", + Depth: 150, + } + + proto := newIndexerVtxo(vtxo) + + require.Equal(t, "idx-tx", proto.Outpoint.Txid) + require.Equal(t, uint32(3), proto.Outpoint.Vout) + require.Equal(t, uint64(75000), proto.Amount) + require.Equal(t, int64(1699500000), proto.CreatedAt) + require.Equal(t, int64(1700500000), proto.ExpiresAt) + require.True(t, proto.IsPreconfirmed) + require.False(t, proto.IsSwept) + require.False(t, proto.IsUnrolled) + require.False(t, proto.IsSpent) + require.Equal(t, "spender", proto.SpentBy) + require.Equal(t, "settler", proto.SettledBy) + require.Equal(t, "ark-tx-idx", proto.ArkTxid) + require.Equal(t, uint32(150), proto.Depth) + require.Equal(t, []string{"commit-a"}, proto.CommitmentTxids) + require.Equal( + t, + "512025a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + proto.Script, + ) +} + +func TestNewIndexerVtxo_ZeroDepth(t *testing.T) { + vtxo := domain.Vtxo{ + Outpoint: domain.Outpoint{Txid: "batch-tx", VOut: 0}, + PubKey: "25a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + Depth: 0, + } + + proto := newIndexerVtxo(vtxo) + require.Equal(t, uint32(0), proto.Depth) +} + +func TestTxEventToProto_DepthPreserved(t *testing.T) { + event := txEvent{ + TxData: application.TxData{ + Tx: "raw-tx-data", + Txid: "event-txid", + }, + SpentVtxos: []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: "spent-1", VOut: 0}, + PubKey: "25a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + Depth: 99, + Amount: 10000, + }, + }, + SpendableVtxos: []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: "new-1", 
VOut: 0}, + PubKey: "33ffb3dee353b1a9ebe4ced64b946238d0a4ac364f275d771da6ad2445d07ae0", + Depth: 100, + Amount: 9000, + }, + { + Outpoint: domain.Outpoint{Txid: "new-1", VOut: 1}, + PubKey: "33ffb3dee353b1a9ebe4ced64b946238d0a4ac364f275d771da6ad2445d07ae0", + Depth: 100, + Amount: 500, + }, + }, + CheckpointTxs: map[string]application.TxData{ + "cp-1": {Txid: "cp-txid-1", Tx: "cp-raw-1"}, + }, + } + + proto := event.toProto() + + require.Equal(t, "event-txid", proto.Txid) + require.Equal(t, "raw-tx-data", proto.Tx) + + // Spent VTXOs preserve depth + require.Len(t, proto.SpentVtxos, 1) + require.Equal(t, uint32(99), proto.SpentVtxos[0].Depth) + require.Equal(t, "spent-1", proto.SpentVtxos[0].Outpoint.Txid) + + // Spendable VTXOs preserve depth + require.Len(t, proto.SpendableVtxos, 2) + require.Equal(t, uint32(100), proto.SpendableVtxos[0].Depth) + require.Equal(t, uint32(100), proto.SpendableVtxos[1].Depth) + + // Checkpoint txs mapped correctly + require.Len(t, proto.CheckpointTxs, 1) + require.Equal(t, "cp-txid-1", proto.CheckpointTxs["cp-1"].Txid) + require.Equal(t, "cp-raw-1", proto.CheckpointTxs["cp-1"].Tx) +} + +func TestTxEventToProto_EmptyCheckpointTxs(t *testing.T) { + event := txEvent{ + TxData: application.TxData{ + Txid: "simple-event", + }, + SpentVtxos: []domain.Vtxo{ + { + Outpoint: domain.Outpoint{Txid: "s1", VOut: 0}, + PubKey: "25a43cecfa0e1b1a4f72d64ad15f4cfa7a84d0723e8511c969aa543638ea9967", + Depth: 0, + }, + }, + SpendableVtxos: []domain.Vtxo{}, + } + + proto := event.toProto() + require.Nil(t, proto.CheckpointTxs) + require.Len(t, proto.SpentVtxos, 1) + require.Equal(t, uint32(0), proto.SpentVtxos[0].Depth) + require.Empty(t, proto.SpendableVtxos) +} + type parserFixtures struct { - ValidProof string `json:"valid_proof"` - ParseGetIntent parserTestGroup `json:"parse_get_intent"` - ParseDeleteIntent parserTestGroup `json:"parse_delete_intent"` + ValidProof string `json:"valid_proof"` + ParseGetIntent parserTestGroup `json:"parse_get_intent"` 
+ ParseDeleteIntent parserTestGroup `json:"parse_delete_intent"` } type parserTestGroup struct { @@ -22,10 +235,10 @@ type parserTestGroup struct { } type validParserFixture struct { - Name string `json:"name"` - Message string `json:"message"` - ExpectedType string `json:"expected_type"` - ExpectedExpireAt int64 `json:"expected_expire_at"` + Name string `json:"name"` + Message string `json:"message"` + ExpectedType string `json:"expected_type"` + ExpectedExpireAt int64 `json:"expected_expire_at"` } type invalidParserFixture struct { diff --git a/internal/test/e2e/vtxo_chain_test.go b/internal/test/e2e/vtxo_chain_test.go new file mode 100644 index 000000000..146a1135a --- /dev/null +++ b/internal/test/e2e/vtxo_chain_test.go @@ -0,0 +1,175 @@ +package e2e_test + +import ( + "bytes" + "encoding/hex" + "encoding/json" + "flag" + "fmt" + "net/http" + "sync" + "testing" + "time" + + arksdk "github.com/arkade-os/arkd/pkg/client-lib" + grpcindexer "github.com/arkade-os/arkd/pkg/client-lib/indexer/grpc" + "github.com/arkade-os/arkd/pkg/client-lib/store" + "github.com/arkade-os/arkd/pkg/client-lib/types" + "github.com/btcsuite/btcd/btcec/v2" + "github.com/stretchr/testify/require" +) + +var ( + chainLength = flag.Int("chain-length", 10, "Number of self-send hops in the VTXO chain") + initialAmount = flag.Int("initial-amount", 1000, "Initial funding amount in satoshis") + arkServerUrl = flag.String("server-url", serverUrl, "Ark server gRPC address") + arkAdminUrl = flag.String("admin-url", adminUrl, "Ark admin HTTP address") + walletSeed = flag.String("seed", "", "Wallet private key hex (random if empty)") + skipChain = flag.Bool("skip-chain", false, "Skip chain creation, only run GetVtxoChain on existing wallet") +) + +// TestVtxoChain creates a long VTXO chain by repeatedly self-sending. 
+// Run with: +// +// go test -v -run TestVtxoChain -args -chain-length=50 -initial-amount=10000 +func TestVtxoChain(t *testing.T) { + if !flag.Parsed() { + flag.Parse() + } + + ctx := t.Context() + + appDataStore, err := store.NewStore(store.Config{ + ConfigStoreType: types.InMemoryStore, + }) + require.NoError(t, err) + + client, err := arksdk.NewArkClient(appDataStore) + require.NoError(t, err) + t.Cleanup(client.Stop) + + seed := *walletSeed + if seed == "" { + privkey, err := btcec.NewPrivateKey() + require.NoError(t, err) + seed = hex.EncodeToString(privkey.Serialize()) + } + t.Logf("wallet seed: %s", seed) + + err = client.Init(ctx, arksdk.InitArgs{ + ServerUrl: *arkServerUrl, + Password: password, + Seed: seed, + ExplorerURL: explorerUrl, + }) + require.NoError(t, err) + + err = client.Unlock(ctx, password) + require.NoError(t, err) + + _, offchainAddr, _, err := client.Receive(ctx) + require.NoError(t, err) + + if !*skipChain { + // Fund the client offchain via admin note. + note := chainGenerateNote(t, uint64(*initialAmount)) + + wg := &sync.WaitGroup{} + var notifyErr error + wg.Go(func() { + _, notifyErr = client.NotifyIncomingFunds(ctx, offchainAddr.Address) + }) + + redeemTxid, err := client.RedeemNotes(ctx, []string{note}) + require.NoError(t, err) + require.NotEmpty(t, redeemTxid) + + wg.Wait() + require.NoError(t, notifyErr) + + time.Sleep(time.Second) + + spendable, _, err := client.ListVtxos(ctx) + require.NoError(t, err) + require.NotEmpty(t, spendable, "no spendable VTXOs after faucet") + + start := time.Now() + hops := 0 + + for i := range *chainLength { + spendable, _, err = client.ListVtxos(ctx) + require.NoError(t, err) + for len(spendable) == 0 { + spendable, _, err = client.ListVtxos(ctx) + require.NoError(t, err) + } + require.Len(t, spendable, 1) + tip := spendable[0] + + wg := &sync.WaitGroup{} + var notifyErr error + wg.Go(func() { + _, notifyErr = client.NotifyIncomingFunds(ctx, offchainAddr.Address) + }) + + res, err := 
client.SendOffChain(ctx, []types.Receiver{{ + To: offchainAddr.Address, + Amount: tip.Amount, + }}) + require.NoError(t, err) + + wg.Wait() + require.NoError(t, notifyErr) + + hops++ + t.Logf("hop %d: txid=%s", i, res.Txid) + } + + chainElapsed := time.Since(start) + t.Logf("chain built: %d hops in %s", hops, chainElapsed) + + time.Sleep(2 * time.Second) + } + + spendable, _, err := client.ListVtxos(ctx) + require.NoError(t, err) + tip := spendable[0] + + // Benchmark GetVtxoChain on the last VTXO in the chain. + last := types.Outpoint{Txid: tip.Txid, VOut: tip.VOut} + idx, err := grpcindexer.NewClient(*arkServerUrl) + require.NoError(t, err) + + getChainStart := time.Now() + resp, err := idx.GetVtxoChain(ctx, last) + getChainElapsed := time.Since(getChainStart) + require.NoError(t, err) + + t.Logf("GetVtxoChain: %d entries in %s (tip=%s:%d)", len(resp.Chain), getChainElapsed, last.Txid, last.VOut) +} + +func chainGenerateNote(t *testing.T, amount uint64) string { + t.Helper() + + httpClient := &http.Client{Timeout: 15 * time.Second} + + reqBody := bytes.NewReader([]byte(fmt.Sprintf(`{"amount": "%d"}`, amount))) + req, err := http.NewRequest("POST", *arkAdminUrl+"/v1/admin/note", reqBody) + require.NoError(t, err) + + req.Header.Set("Authorization", "Basic YWRtaW46YWRtaW4=") + req.Header.Set("Content-Type", "application/json") + + resp, err := httpClient.Do(req) + require.NoError(t, err) + defer resp.Body.Close() + + var noteResp struct { + Notes []string `json:"notes"` + } + err = json.NewDecoder(resp.Body).Decode(¬eResp) + require.NoError(t, err) + require.NotEmpty(t, noteResp.Notes) + + return noteResp.Notes[0] +}