-
Notifications
You must be signed in to change notification settings - Fork 2.3k
AMP support for SendPaymentV2 #5159
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
7795353
a9f19b1
6474b25
41ae353
06f045f
e1399fb
c1e82e5
2d397b1
5531b81
6104d12
f07c9d0
56a2c65
0b9137c
8f57dcf
c4fc72d
a2a61a1
13c0012
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,165 @@ | ||
| package amp | ||
|
|
||
| import ( | ||
| "crypto/rand" | ||
| "encoding/binary" | ||
| "fmt" | ||
| "sync" | ||
|
|
||
| "github.com/lightningnetwork/lnd/lntypes" | ||
| "github.com/lightningnetwork/lnd/lnwire" | ||
| "github.com/lightningnetwork/lnd/record" | ||
| "github.com/lightningnetwork/lnd/routing/shards" | ||
| ) | ||
|
|
||
| // Shard is an implementation of the shards.PaymentShards interface specific | ||
| // to AMP payments. | ||
| type Shard struct { | ||
| child *Child | ||
| mpp *record.MPP | ||
| amp *record.AMP | ||
| } | ||
|
|
||
| // A compile time check to ensure Shard implements the shards.PaymentShard | ||
| // interface. | ||
| var _ shards.PaymentShard = (*Shard)(nil) | ||
|
|
||
| // Hash returns the hash used for the HTLC representing this AMP shard. | ||
| func (s *Shard) Hash() lntypes.Hash { | ||
| return s.child.Hash | ||
| } | ||
|
|
||
| // MPP returns any extra MPP records that should be set for the final hop on | ||
| // the route used by this shard. | ||
| func (s *Shard) MPP() *record.MPP { | ||
| return s.mpp | ||
| } | ||
|
|
||
| // AMP returns any extra AMP records that should be set for the final hop on | ||
| // the route used by this shard. | ||
| func (s *Shard) AMP() *record.AMP { | ||
| return s.amp | ||
| } | ||
|
|
||
| // ShardTracker is an implementation of the shards.ShardTracker interface | ||
| // that is able to generate payment shards according to the AMP splitting | ||
| // algorithm. It can be used to generate new hashes to use for HTLCs, and also | ||
| // cancel shares used for failed payment shards. | ||
| type ShardTracker struct { | ||
| setID [32]byte | ||
| paymentAddr [32]byte | ||
| totalAmt lnwire.MilliSatoshi | ||
|
|
||
| sharer Sharer | ||
|
|
||
| shards map[uint64]*Child | ||
| sync.Mutex | ||
| } | ||
|
|
||
| // A compile time check to ensure ShardTracker implements the | ||
| // shards.ShardTracker interface. | ||
| var _ shards.ShardTracker = (*ShardTracker)(nil) | ||
|
|
||
| // NewShardTracker creates a new shard tracker to use for AMP payments. The | ||
| // root shard, setID, payment address and total amount must be correctly set in | ||
| // order for the TLV options to include with each shard to be created | ||
| // correctly. | ||
| func NewShardTracker(root, setID, payAddr [32]byte, | ||
| totalAmt lnwire.MilliSatoshi) *ShardTracker { | ||
|
|
||
| // Create a new seed sharer from this root. | ||
| rootShare := Share(root) | ||
| rootSharer := SeedSharerFromRoot(&rootShare) | ||
|
|
||
| return &ShardTracker{ | ||
| setID: setID, | ||
| paymentAddr: payAddr, | ||
| totalAmt: totalAmt, | ||
| sharer: rootSharer, | ||
| shards: make(map[uint64]*Child), | ||
| } | ||
| } | ||
|
|
||
| // NewShard registers a new attempt with the ShardTracker and returns a | ||
| // new shard representing this attempt. This attempt's shard should be canceled | ||
| // if it ends up not being used by the overall payment, i.e. if the attempt | ||
| // fails. | ||
| func (s *ShardTracker) NewShard(pid uint64, last bool) (shards.PaymentShard, | ||
| error) { | ||
|
|
||
| s.Lock() | ||
| defer s.Unlock() | ||
|
|
||
| // Use a random child index. | ||
| var childIndex [4]byte | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. With a 32-bit random value, isn't the risk of a collision too high? Maybe the htlc id or the pid can be used here or a set-specific incrementing id.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. I guess it depends on how it is being used. Currently a child gets created, and if it fails we xor the share back into the parent. This means that it will be split again most likely (which adds 32 byte randomness) before a new shard is sent. In the scenario where it's the last shard that is being created, we can end up deriving multiple children from it. However, you must send a lot of attempts before a collision is likely. Maybe that potential privacy leak is not worth optimizing out? (since I think using the same index only would be a privacy concern)
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yes, just wanted to bring it up. I think it is safe to assume that overall the vast majority of the payments will be single shot and in a post-hyperbitcoinization world with money streaming from every netflix subscriber - maybe collisions happen.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. And what about adding a counter field to the last share struct? That doesn't leak info about other payments to the receiver.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. at 32-bits, we expect a payment hash collision after 2^16 retries of the same HTLC. even at 1 sec per attempt, that's continually resending the same AMP HTLC for 18 hours. AMP HTLCs with different shares shouldn't ever collide in practice because they have plenty of entropy. if we are really concerned about collisions, we could use a CTR mode (random offset + increment) and then we'd only get collisions on wrap around. in the prior example—after 136 years of sending the same HTLC. in the end, the worst that happens is payment hash reuse. if a new use case comes along that might require this sort of sustained retry behavior then we can easily switch to CTR without any protocol changes
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. But what is wrong with my suggestion to use a counter to not have this problem at all, without sacrificing privacy?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. More, real work, for only a theoretical gain? :p
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more.
In case it isn't clear, this index is just within the context of a given setID. So to ensure that all the world's streaming Netflix payments don't collide, we just need to ensure that the setIDs are distinct. Ultimately, with the route Johan has taken here with the implementation, since we just XOR out the failed shares rather than increment the childIndex (which would result in a distinct payment hash for a same share), we don't really need to worry about this colliding in practice. The alternative would've been to get another random child index, for the same share, then try again (uses a diff payment hash, but same share). This alternative route would result in more collisions in practice.
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. The context was clear. I just meant to say that if massive amounts of payments happen, the overall chance of a collision somewhere on the earth increases. It was totally clear that independent payments cannot collide. Let's say in some time frame, 2^32 independent payments happen that need a second attempt. There will then be on average one user that experiences a collision. The out-xor'ing does not apply to the final share and I'd say that almost all AMP payments will be single-shot and have only a final share that may be retried with a different child index.
So what I am proposing is to just keep a counter for each (split) share that is generated. So there will be a counter within the context of a final share. Then keep incrementing that with each attempt. Then there is no risk of collision, so in my opinion a better option.
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Is this like something you had in mind? #5255 However there is still the case that after 2^32 "last shards" the counter will wrap around and collisions will start happening. |
||
| if _, err := rand.Read(childIndex[:]); err != nil { | ||
| return nil, err | ||
| } | ||
| idx := binary.BigEndian.Uint32(childIndex[:]) | ||
|
|
||
| // Depending on whether we are requesting the last shard or not, either | ||
| // split the current share into two, or get a Child directly from the | ||
| // current sharer. | ||
| var child *Child | ||
| if last { | ||
|
Roasbeef marked this conversation as resolved.
Outdated
|
||
| child = s.sharer.Child(idx) | ||
|
|
||
| // If this was the last shard, set the current share to the | ||
| // zero share to indicate we cannot split it further. | ||
| s.sharer = s.sharer.Zero() | ||
| } else { | ||
|
Roasbeef marked this conversation as resolved.
Outdated
|
||
| left, sharer, err := s.sharer.Split() | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Minor comment: The naming left and right led me to think that the xor tree is more than it really is. Somehow I expected left and right to be different. That there is some kind of condition that determines what's left and what's right. Could be that I am the only one that has this. Perhaps something like
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. It can be hard to grok at first as there's actually less enforced structure than one might think. For example, there's no requirement that we split using our current right-skewed binary tree. Instead we could opt to make it more balanced. With the way
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Yeah, I found it easier to just visualize it as a set off pebbles that we can split and merge at will. A tree is perhaps not the best description, as we can also merge leafs from different sub-branches IIUC? @cfromknecht
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Indeed, something like a cord that you can cut and glue together again matches much better with how I assume it works. |
||
| if err != nil { | ||
| return nil, err | ||
| } | ||
|
|
||
| s.sharer = sharer | ||
| child = left.Child(idx) | ||
| } | ||
|
|
||
| // Track the new child and return the shard. | ||
| s.shards[pid] = child | ||
|
|
||
| mpp := record.NewMPP(s.totalAmt, s.paymentAddr) | ||
| amp := record.NewAMP( | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Question about
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. at the time i think I used that as short hand for |
||
| child.ChildDesc.Share, s.setID, child.ChildDesc.Index, | ||
| ) | ||
|
|
||
| return &Shard{ | ||
| child: child, | ||
| mpp: mpp, | ||
| amp: amp, | ||
| }, nil | ||
| } | ||
|
|
||
| // CancelShard cancels the shard corresponding to the given attempt ID. | ||
| func (s *ShardTracker) CancelShard(pid uint64) error { | ||
| s.Lock() | ||
| defer s.Unlock() | ||
|
|
||
| c, ok := s.shards[pid] | ||
| if !ok { | ||
| return fmt.Errorf("pid not found") | ||
| } | ||
| delete(s.shards, pid) | ||
|
|
||
| // Now that we are canceling this shard, we XOR the share back into our | ||
| // current share. | ||
| s.sharer = s.sharer.Merge(c) | ||
| return nil | ||
| } | ||
|
|
||
| // GetHash retrieves the hash used by the shard of the given attempt ID. This | ||
| // will return an error if the attempt ID is unknown. | ||
| func (s *ShardTracker) GetHash(pid uint64) (lntypes.Hash, error) { | ||
| s.Lock() | ||
| defer s.Unlock() | ||
|
|
||
| c, ok := s.shards[pid] | ||
| if !ok { | ||
| return lntypes.Hash{}, fmt.Errorf("AMP shard for attempt %v "+ | ||
| "not found", pid) | ||
| } | ||
|
|
||
| return c.Hash, nil | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,95 @@ | ||
| package amp_test | ||
|
|
||
| import ( | ||
| "crypto/rand" | ||
| "testing" | ||
|
|
||
| "github.com/lightningnetwork/lnd/amp" | ||
| "github.com/lightningnetwork/lnd/lnwire" | ||
| "github.com/lightningnetwork/lnd/routing/shards" | ||
| "github.com/stretchr/testify/require" | ||
| ) | ||
|
|
||
| // TestAMPShardTracker tests that we can derive and cancel shards at will using | ||
| // the AMP shard tracker. | ||
| func TestAMPShardTracker(t *testing.T) { | ||
|
Member
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. 👍 |
||
| var root, setID, payAddr [32]byte | ||
| _, err := rand.Read(root[:]) | ||
| require.NoError(t, err) | ||
|
|
||
| _, err = rand.Read(setID[:]) | ||
| require.NoError(t, err) | ||
|
|
||
| _, err = rand.Read(payAddr[:]) | ||
| require.NoError(t, err) | ||
|
|
||
| var totalAmt lnwire.MilliSatoshi = 1000 | ||
|
|
||
| // Create an AMP shard tracker using the random data we just generated. | ||
| tracker := amp.NewShardTracker(root, setID, payAddr, totalAmt) | ||
|
|
||
| // Trying to retrieve a hash for id 0 should result in an error. | ||
| _, err = tracker.GetHash(0) | ||
| require.Error(t, err) | ||
|
|
||
| // We start by creating 20 shards. | ||
| const numShards = 20 | ||
|
|
||
| var shards []shards.PaymentShard | ||
| for i := uint64(0); i < numShards; i++ { | ||
| s, err := tracker.NewShard(i, i == numShards-1) | ||
| require.NoError(t, err) | ||
|
|
||
| // Check that the shards have their payloads set as expected. | ||
| require.Equal(t, setID, s.AMP().SetID()) | ||
| require.Equal(t, totalAmt, s.MPP().TotalMsat()) | ||
| require.Equal(t, payAddr, s.MPP().PaymentAddr()) | ||
|
|
||
| shards = append(shards, s) | ||
| } | ||
|
|
||
| // Make sure we can retrieve the hash for all of them. | ||
| for i := uint64(0); i < numShards; i++ { | ||
| hash, err := tracker.GetHash(i) | ||
| require.NoError(t, err) | ||
| require.Equal(t, shards[i].Hash(), hash) | ||
| } | ||
|
|
||
| // Now cancel half of the shards. | ||
| j := 0 | ||
| for i := uint64(0); i < numShards; i++ { | ||
| if i%2 == 0 { | ||
| err := tracker.CancelShard(i) | ||
| require.NoError(t, err) | ||
| continue | ||
| } | ||
|
|
||
| // Keep shard. | ||
| shards[j] = shards[i] | ||
| j++ | ||
| } | ||
| shards = shards[:j] | ||
|
|
||
| // Get a new last shard. | ||
| s, err := tracker.NewShard(numShards, true) | ||
| require.NoError(t, err) | ||
| shards = append(shards, s) | ||
|
|
||
| // Finally make sure these shards together can be used to reconstruct | ||
| // the children. | ||
| childDescs := make([]amp.ChildDesc, len(shards)) | ||
| for i, s := range shards { | ||
| childDescs[i] = amp.ChildDesc{ | ||
| Share: s.AMP().RootShare(), | ||
| Index: s.AMP().ChildIndex(), | ||
| } | ||
| } | ||
|
|
||
| // Using the child descriptors, reconstruct the children. | ||
| children := amp.ReconstructChildren(childDescs...) | ||
|
|
||
| // Validate that the derived child preimages match the hash of each shard. | ||
| for i, child := range children { | ||
| require.Equal(t, shards[i].Hash(), child.Hash) | ||
| } | ||
| } | ||
Uh oh!
There was an error while loading. Please reload this page.