From 8cde4253011fe866f8fe42a70f30df2a24bc53ce Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Fri, 21 Apr 2023 16:31:01 -0400 Subject: [PATCH 1/8] Add share pkg WIP --- appconsts/appconsts.go | 104 +++++++ appconsts/consensus_consts.go | 8 + shares/compact_shares_test.go | 197 ++++++++++++ shares/doc.go | 73 +++++ shares/info_byte.go | 43 +++ shares/info_byte_test.go | 103 +++++++ shares/non_interactive_defaults.go | 95 ++++++ shares/non_interactive_defaults_test.go | 368 ++++++++++++++++++++++ shares/padding.go | 69 +++++ shares/padding_test.go | 82 +++++ shares/parse.go | 81 +++++ shares/parse_compact_shares.go | 83 +++++ shares/parse_sparse_shares.go | 88 ++++++ shares/parse_sparse_shares_test.go | 154 ++++++++++ shares/parse_test.go | 196 ++++++++++++ shares/powers_of_two.go | 44 +++ shares/powers_of_two_test.go | 101 +++++++ shares/reserved_bytes.go | 33 ++ shares/reserved_bytes_test.go | 84 +++++ shares/share_builder.go | 227 ++++++++++++++ shares/share_builder_test.go | 320 ++++++++++++++++++++ shares/share_sequence.go | 123 ++++++++ shares/share_sequence_test.go | 137 +++++++++ shares/share_splitting.go | 168 ++++++++++ shares/share_splitting_test.go | 387 ++++++++++++++++++++++++ shares/shares.go | 213 +++++++++++++ shares/shares_test.go | 325 ++++++++++++++++++++ shares/sparse_shares_test.go | 86 ++++++ shares/split_compact_shares.go | 232 ++++++++++++++ shares/split_compact_shares_test.go | 382 +++++++++++++++++++++++ shares/split_sparse_shares.go | 128 ++++++++ shares/split_sparse_shares_test.go | 1 + shares/testdata/sample-block.json | 62 ++++ shares/utils.go | 101 +++++++ shares/utils_test.go | 70 +++++ 35 files changed, 4968 insertions(+) create mode 100644 appconsts/appconsts.go create mode 100644 appconsts/consensus_consts.go create mode 100644 shares/compact_shares_test.go create mode 100644 shares/doc.go create mode 100644 shares/info_byte.go create mode 100644 shares/info_byte_test.go create mode 100644 shares/non_interactive_defaults.go 
package appconsts

import (
	"github.com/celestiaorg/rsmt2d"
	"github.com/tendermint/tendermint/pkg/consts"
)

// These constants were originally sourced from:
// https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants
const (
	// NamespaceVersionSize is the size of a namespace version in bytes.
	NamespaceVersionSize = 1

	// NamespaceIDSize is the size of a namespace ID in bytes.
	NamespaceIDSize = 32

	// NamespaceSize is the size of a namespace (version + ID) in bytes.
	NamespaceSize = NamespaceVersionSize + NamespaceIDSize

	// ShareSize is the size of a share in bytes.
	ShareSize = 512

	// ShareInfoBytes is the number of bytes reserved for information. The info
	// byte contains the share version and a sequence start indicator.
	ShareInfoBytes = 1

	// SequenceLenBytes is the number of bytes reserved for the sequence length
	// that is present in the first share of a sequence.
	SequenceLenBytes = 4

	// ShareVersionZero is the first share version format.
	ShareVersionZero = uint8(0)

	// DefaultShareVersion is the defacto share version. Use this if you are
	// unsure of which version to use.
	DefaultShareVersion = ShareVersionZero

	// CompactShareReservedBytes is the number of bytes reserved for the location of
	// the first unit (transaction, ISR) in a compact share.
	CompactShareReservedBytes = 4

	// FirstCompactShareContentSize is the number of bytes usable for data in
	// the first compact share of a sequence.
	FirstCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes - CompactShareReservedBytes

	// ContinuationCompactShareContentSize is the number of bytes usable for
	// data in a continuation compact share of a sequence.
	ContinuationCompactShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - CompactShareReservedBytes

	// FirstSparseShareContentSize is the number of bytes usable for data in the
	// first sparse share of a sequence.
	FirstSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes - SequenceLenBytes

	// ContinuationSparseShareContentSize is the number of bytes usable for data
	// in a continuation sparse share of a sequence.
	ContinuationSparseShareContentSize = ShareSize - NamespaceSize - ShareInfoBytes

	// DefaultMaxSquareSize is the maximum original square width.
	//
	// Note: 128 shares in a row * 128 shares in a column * 512 bytes in a share
	// = 8 MiB
	DefaultMaxSquareSize = 128

	// MaxShareCount is the maximum number of shares allowed in the original
	// data square.
	MaxShareCount = DefaultMaxSquareSize * DefaultMaxSquareSize

	// DefaultMinSquareSize is the smallest original square width.
	DefaultMinSquareSize = 1

	// MinShareCount is the minimum number of shares allowed in the original
	// data square.
	MinShareCount = DefaultMinSquareSize * DefaultMinSquareSize

	// MaxShareVersion is the maximum value a share version can be.
	MaxShareVersion = 127

	// DefaultGasPerBlobByte is the default gas cost deducted per byte of blob
	// included in a PayForBlobs txn
	DefaultGasPerBlobByte = 8

	// TransactionsPerBlockLimit is the maximum number of transactions a block
	// producer will include in a block.
	//
	// NOTE: Currently this value is set at roughly the number of PFBs that
	// would fill one quarter of the max square size.
	TransactionsPerBlockLimit = 5090
)

var (
	// NewBaseHashFunc is the base hash function used by NMT. Change accordingly
	// if another hash.Hash should be used as a base hasher in the NMT.
	NewBaseHashFunc = consts.NewBaseHashFunc

	// DefaultCodec is the default codec creator used for data erasure.
	DefaultCodec = rsmt2d.NewLeoRSCodec

	// DataCommitmentBlocksLimit is the limit to the number of blocks we can
	// generate a data commitment for.
	DataCommitmentBlocksLimit = consts.DataCommitmentBlocksLimit

	// SupportedShareVersions is a list of supported share versions.
	SupportedShareVersions = []uint8{ShareVersionZero}
)
+ SupportedShareVersions = []uint8{ShareVersionZero} +) diff --git a/appconsts/consensus_consts.go b/appconsts/consensus_consts.go new file mode 100644 index 0000000000..f2f12736a1 --- /dev/null +++ b/appconsts/consensus_consts.go @@ -0,0 +1,8 @@ +package appconsts + +import "time" + +const ( + TimeoutPropose = time.Second * 10 + TimeoutCommit = time.Second * 10 +) diff --git a/shares/compact_shares_test.go b/shares/compact_shares_test.go new file mode 100644 index 0000000000..2c2f9c55ab --- /dev/null +++ b/shares/compact_shares_test.go @@ -0,0 +1,197 @@ +package shares + +import ( + "context" + "fmt" + "testing" + "time" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/celestiaorg/celestia-app/testutil/testfactory" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" +) + +func TestCompactShareSplitter(t *testing.T) { + // note that this test is mainly for debugging purposes, the main round trip + // tests occur in TestMerge and Test_processCompactShares + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + txs := testfactory.GenerateRandomTxs(33, 200) + for _, tx := range txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + shares, _, err := css.Export(0) + require.NoError(t, err) + + rawResTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) + resTxs := coretypes.ToTxs(rawResTxs) + require.NoError(t, err) + + assert.Equal(t, txs, resTxs) +} + +func TestFuzz_processCompactShares(t *testing.T) { + t.Skip() + // run random shares through processCompactShares for a minute + ctx, cancel := context.WithTimeout(context.Background(), time.Minute) + defer cancel() + for { + select { + case <-ctx.Done(): + return + default: + Test_processCompactShares(t) + } + } +} + +func Test_processCompactShares(t *testing.T) { + // exactTxShareSize is the length of tx 
that will fit exactly into a single + // share, accounting for the tx length delimiter prepended to + // each tx. Note that the length delimiter can be 1 to 10 bytes (varint) but + // this test assumes it is 1 byte. + const exactTxShareSize = appconsts.FirstCompactShareContentSize - 1 + + type test struct { + name string + txSize int + txCount int + } + + // each test is ran twice, once using txSize as an exact size, and again + // using it as a cap for randomly sized txs + tests := []test{ + {"single small tx", appconsts.ContinuationCompactShareContentSize / 8, 1}, + {"many small txs", appconsts.ContinuationCompactShareContentSize / 8, 10}, + {"single big tx", appconsts.ContinuationCompactShareContentSize * 4, 1}, + {"many big txs", appconsts.ContinuationCompactShareContentSize * 4, 10}, + {"single exact size tx", exactTxShareSize, 1}, + {"many exact size txs", exactTxShareSize, 100}, + } + + for _, tc := range tests { + tc := tc + + // run the tests with identically sized txs + t.Run(fmt.Sprintf("%s idendically sized", tc.name), func(t *testing.T) { + txs := testfactory.GenerateRandomTxs(tc.txCount, tc.txSize) + + shares, _, _, err := SplitTxs(txs) + require.NoError(t, err) + + parsedTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) + if err != nil { + t.Error(err) + } + + // check that the data parsed is identical + for i := 0; i < len(txs); i++ { + assert.Equal(t, []byte(txs[i]), parsedTxs[i]) + } + }) + + // run the same tests using randomly sized txs with caps of tc.txSize + t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) { + txs := testfactory.GenerateRandomlySizedTxs(tc.txCount, tc.txSize) + + txShares, _, _, err := SplitTxs(txs) + require.NoError(t, err) + parsedTxs, err := parseCompactShares(txShares, appconsts.SupportedShareVersions) + if err != nil { + t.Error(err) + } + + // check that the data parsed is identical to the original + for i := 0; i < len(txs); i++ { + assert.Equal(t, []byte(txs[i]), parsedTxs[i]) 
+ } + }) + } +} + +func TestCompactShareContainsInfoByte(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + txs := testfactory.GenerateRandomTxs(1, appconsts.ContinuationCompactShareContentSize/4) + + for _, tx := range txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + + shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Condition(t, func() bool { return len(shares) == 1 }) + + infoByte := shares[0].data[appconsts.NamespaceSize : appconsts.NamespaceSize+appconsts.ShareInfoBytes][0] + + isSequenceStart := true + want, err := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart) + + require.NoError(t, err) + assert.Equal(t, byte(want), infoByte) +} + +func TestContiguousCompactShareContainsInfoByte(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + txs := testfactory.GenerateRandomTxs(1, appconsts.ContinuationCompactShareContentSize*4) + + for _, tx := range txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + + shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Condition(t, func() bool { return len(shares) > 1 }) + + infoByte := shares[1].data[appconsts.NamespaceSize : appconsts.NamespaceSize+appconsts.ShareInfoBytes][0] + + isSequenceStart := false + want, err := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart) + + require.NoError(t, err) + assert.Equal(t, byte(want), infoByte) +} + +func Test_parseCompactSharesErrors(t *testing.T) { + type testCase struct { + name string + shares []Share + } + + txs := testfactory.GenerateRandomTxs(2, appconsts.ContinuationCompactShareContentSize*4) + txShares, _, _, err := SplitTxs(txs) + require.NoError(t, err) + rawShares := ToBytes(txShares) + + unsupportedShareVersion := 5 + infoByte, _ := NewInfoByte(uint8(unsupportedShareVersion), true) + shareWithUnsupportedShareVersionBytes := rawShares[0] + shareWithUnsupportedShareVersionBytes[appconsts.NamespaceSize] = 
byte(infoByte) + + shareWithUnsupportedShareVersion, err := NewShare(shareWithUnsupportedShareVersionBytes) + if err != nil { + t.Fatal(err) + } + + testCases := []testCase{ + { + "share with start indicator false", + txShares[1:], // set the first share to the second share which has the start indicator set to false + }, + { + "share with unsupported share version", + []Share{*shareWithUnsupportedShareVersion}, + }, + } + + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + _, err := parseCompactShares(tt.shares, appconsts.SupportedShareVersions) + assert.Error(t, err) + }) + } +} diff --git a/shares/doc.go b/shares/doc.go new file mode 100644 index 0000000000..2b5353fa57 --- /dev/null +++ b/shares/doc.go @@ -0,0 +1,73 @@ +// Package shares provides primitives for splitting block data into shares and +// parsing shares back into block data. +// +// # Compact vs. Sparse +// +// There are two types of shares: +// 1. Compact +// 2. Sparse +// +// Compact shares can contain data from one or more unit (transactions or +// intermediate state roots). Sparse shares can contain data from zero or one +// blob. Compact shares and sparse shares are encoded differently. The +// motivation behind the distinction is that transactions and intermediate state +// roots are expected to have small lengths so they are encoded in compact +// shares to minimize the number of shares needed to store them. On the other +// hand, blobs are expected to be larger and have the desideratum that clients +// should be able to create proofs of blob inclusion. This desiradum is +// infeasible if client A's blob is encoded into a share with another client B's +// blob that is unknown to A. It follows that client A's blob is encoded into a +// share such that the contents can be determined by client A without any +// additional information. See [message layout rational] or +// [adr-006-non-interactive-defaults] for more details. 
+// +// # Universal Prefix +// +// Both types of shares have a universal prefix. The first 1 byte of a share +// contains the namespace version. The next 32 bytes contain the namespace ID. +// The next one byte contains an [InfoByte] that contains the +// share version and a sequence start indicator. If the sequence start indicator +// is `1` (i.e. this is the first share of a sequence) then the next 4 bytes +// contain a big endian uint32 of the sequence length. +// +// For the first share of a sequence: +// +// | namespace_version | namespace_id | info_byte | sequence_length | sequence_data | +// | 1 byte | 32 bytes | 1 byte | 4 bytes | remaining bytes of share | +// +// For continuation share of a sequence: +// +// | namespace_version | namespace_id | info_byte | sequence_data | +// | 1 byte | 32 bytes | 1 byte | remaining bytes of share | +// +// The remaining bytes depend on the share type. +// +// # Compact Share Schema +// +// The four bytes after the universal prefix are reserved for +// the location in the share of the first unit of data that starts in this +// share. +// +// For the first compact share: +// +// | namespace_version | namespace_id | info_byte | sequence_length | location_of_first_unit | transactions or intermediate state roots | +// | 1 byte | 32 bytes | 1 byte | 4 bytes | 4 bytes | remaining bytes of share | +// +// For continuation compact share: +// +// | namespace_version | namespace_id | info_byte | location_of_first_unit | transactions or intermediate state roots | +// | 1 byte | 32 bytes | 1 byte | 4 bytes | remaining bytes of share | +// +// Notes +// - All shares in a reserved namespace belong to one sequence. +// - Each unit (transaction or intermediate state root) in data is prefixed with a varint of the length of the unit. +// +// # Sparse Share Schema +// +// The remaining bytes contain blob data. 
+// +// [message layout rational]: https://celestiaorg.github.io/celestia-specs/latest/rationale/message_block_layout.html#message-layout-rationale +// [adr-006-non-interactive-defaults]: https://github.com/celestiaorg/celestia-app/pull/673 +// +// [namespace.ID]: https://github.com/celestiaorg/nmt/blob/master/namespace/id.go +package shares diff --git a/shares/info_byte.go b/shares/info_byte.go new file mode 100644 index 0000000000..3d2e5e877e --- /dev/null +++ b/shares/info_byte.go @@ -0,0 +1,43 @@ +package shares + +import ( + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" +) + +// InfoByte is a byte with the following structure: the first 7 bits are +// reserved for version information in big endian form (initially `0000000`). +// The last bit is a "sequence start indicator", that is `1` if this is the +// first share of a sequence and `0` if this is a continuation share. +type InfoByte byte + +func NewInfoByte(version uint8, isSequenceStart bool) (InfoByte, error) { + if version > appconsts.MaxShareVersion { + return 0, fmt.Errorf("version %d must be less than or equal to %d", version, appconsts.MaxShareVersion) + } + + prefix := version << 1 + if isSequenceStart { + return InfoByte(prefix + 1), nil + } + return InfoByte(prefix), nil +} + +// Version returns the version encoded in this InfoByte. Version is +// expected to be between 0 and appconsts.MaxShareVersion (inclusive). +func (i InfoByte) Version() uint8 { + version := uint8(i) >> 1 + return version +} + +// IsSequenceStart returns whether this share is the start of a sequence. 
+func (i InfoByte) IsSequenceStart() bool { + return uint(i)%2 == 1 +} + +func ParseInfoByte(i byte) (InfoByte, error) { + isSequenceStart := i%2 == 1 + version := uint8(i) >> 1 + return NewInfoByte(version, isSequenceStart) +} diff --git a/shares/info_byte_test.go b/shares/info_byte_test.go new file mode 100644 index 0000000000..ff02a548ad --- /dev/null +++ b/shares/info_byte_test.go @@ -0,0 +1,103 @@ +package shares + +import "testing" + +func TestInfoByte(t *testing.T) { + blobStart := true + notBlobStart := false + + type testCase struct { + version uint8 + isSequenceStart bool + } + tests := []testCase{ + {0, blobStart}, + {1, blobStart}, + {2, blobStart}, + {127, blobStart}, + + {0, notBlobStart}, + {1, notBlobStart}, + {2, notBlobStart}, + {127, notBlobStart}, + } + + for _, test := range tests { + irb, err := NewInfoByte(test.version, test.isSequenceStart) + if err != nil { + t.Errorf("got %v want no error", err) + } + if got := irb.Version(); got != test.version { + t.Errorf("got version %v want %v", got, test.version) + } + if got := irb.IsSequenceStart(); got != test.isSequenceStart { + t.Errorf("got IsSequenceStart %v want %v", got, test.isSequenceStart) + } + } +} + +func TestInfoByteErrors(t *testing.T) { + blobStart := true + notBlobStart := false + + type testCase struct { + version uint8 + isSequenceStart bool + } + + tests := []testCase{ + {128, notBlobStart}, + {255, notBlobStart}, + {128, blobStart}, + {255, blobStart}, + } + + for _, test := range tests { + _, err := NewInfoByte(test.version, false) + if err == nil { + t.Errorf("got nil but want error when version > 127") + } + } +} + +func FuzzNewInfoByte(f *testing.F) { + f.Fuzz(func(t *testing.T, version uint8, isSequenceStart bool) { + if version > 127 { + t.Skip() + } + _, err := NewInfoByte(version, isSequenceStart) + if err != nil { + t.Errorf("got nil but want error when version > 127") + } + }) +} + +func TestParseInfoByte(t *testing.T) { + type testCase struct { + b byte + wantVersion 
uint8 + wantisSequenceStart bool + } + + tests := []testCase{ + {0b00000000, 0, false}, + {0b00000001, 0, true}, + {0b00000010, 1, false}, + {0b00000011, 1, true}, + {0b00000101, 2, true}, + {0b11111111, 127, true}, + } + + for _, test := range tests { + got, err := ParseInfoByte(test.b) + if err != nil { + t.Errorf("got %v want no error", err) + } + if got.Version() != test.wantVersion { + t.Errorf("got version %v want %v", got.Version(), test.wantVersion) + } + if got.IsSequenceStart() != test.wantisSequenceStart { + t.Errorf("got IsSequenceStart %v want %v", got.IsSequenceStart(), test.wantisSequenceStart) + } + } +} diff --git a/shares/non_interactive_defaults.go b/shares/non_interactive_defaults.go new file mode 100644 index 0000000000..33dec00ed6 --- /dev/null +++ b/shares/non_interactive_defaults.go @@ -0,0 +1,95 @@ +package shares + +import ( + "math" +) + +// FitsInSquare uses the non interactive default rules to see if blobs of +// some lengths will fit in a square of squareSize starting at share index +// cursor. Returns whether the blobs fit in the square and the number of +// shares used by blobs. See non-interactive default rules +// https://github.com/celestiaorg/celestia-specs/blob/master/src/rationale/message_block_layout.md#non-interactive-default-rules +// https://github.com/celestiaorg/celestia-app/blob/1b80b94a62c8c292f569e2fc576e26299985681a/docs/architecture/adr-009-non-interactive-default-rules-for-reduced-padding.md +func FitsInSquare(cursor, squareSize int, blobShareLens ...int) (bool, int) { + if len(blobShareLens) == 0 { + if cursor <= squareSize*squareSize { + return true, 0 + } + return false, 0 + } + firstBlobLen := 1 + if len(blobShareLens) > 0 { + firstBlobLen = blobShareLens[0] + } + // here we account for padding between the compact and sparse shares + cursor, _ = NextMultipleOfBlobMinSquareSize(cursor, firstBlobLen, squareSize) + sharesUsed, _ := BlobSharesUsedNonInteractiveDefaults(cursor, squareSize, blobShareLens...) 
+ return cursor+sharesUsed <= squareSize*squareSize, sharesUsed +} + +// BlobSharesUsedNonInteractiveDefaults returns the number of shares used by a given set +// of blobs share lengths. It follows the non-interactive default rules and +// returns the share indexes for each blob. +func BlobSharesUsedNonInteractiveDefaults(cursor, squareSize int, blobShareLens ...int) (sharesUsed int, indexes []uint32) { + start := cursor + indexes = make([]uint32, len(blobShareLens)) + for i, blobLen := range blobShareLens { + cursor, _ = NextMultipleOfBlobMinSquareSize(cursor, blobLen, squareSize) + indexes[i] = uint32(cursor) + cursor += blobLen + } + return cursor - start, indexes +} + +// NextMultipleOfBlobMinSquareSize determines the next index in a square that is +// a multiple of the blob's minimum square size. This function returns false if +// the entire the blob cannot fit on the given row. Assumes that all args are +// non negative, and that squareSize is a power of two. +// https://github.com/celestiaorg/celestia-specs/blob/master/src/rationale/message_block_layout.md#non-interactive-default-rules +// https://github.com/celestiaorg/celestia-app/blob/1b80b94a62c8c292f569e2fc576e26299985681a/docs/architecture/adr-009-non-interactive-default-rules-for-reduced-padding.md +func NextMultipleOfBlobMinSquareSize(cursor, blobLen, squareSize int) (index int, fitsInRow bool) { + // if we're starting at the beginning of the row, then return as there are + // no cases where we don't start at 0. 
+ if isStartOfRow(cursor, squareSize) { + return cursor, true + } + + blobMinSquareSize := MinSquareSize(blobLen) + startOfNextRow := ((cursor / squareSize) + 1) * squareSize + cursor = roundUpBy(cursor, blobMinSquareSize) + switch { + // the entire blob fits in this row + case cursor+blobLen <= startOfNextRow: + return cursor, true + // only a portion of the blob fits in this row + case cursor+blobMinSquareSize <= startOfNextRow: + return cursor, false + // none of the blob fits on this row, so return the start of the next row + default: + return startOfNextRow, false + } +} + +// roundUpBy rounds cursor up to the next multiple of v. If cursor is divisible +// by v, then it returns cursor +func roundUpBy(cursor, v int) int { + switch { + case cursor == 0: + return cursor + case cursor%v == 0: + return cursor + default: + return ((cursor / v) + 1) * v + } +} + +// MinSquareSize returns the minimum square size that can contain shareCount +// number of shares. +func MinSquareSize(shareCount int) int { + return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(shareCount))))) +} + +// isStartOfRow returns true if cursor is at the start of a row +func isStartOfRow(cursor, squareSize int) bool { + return cursor == 0 || cursor%squareSize == 0 +} diff --git a/shares/non_interactive_defaults_test.go b/shares/non_interactive_defaults_test.go new file mode 100644 index 0000000000..1e0b77db67 --- /dev/null +++ b/shares/non_interactive_defaults_test.go @@ -0,0 +1,368 @@ +package shares + +import ( + "fmt" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/stretchr/testify/assert" +) + +func TestBlobSharesUsedNonInteractiveDefaults(t *testing.T) { + type test struct { + cursor, squareSize, expected int + blobLens []int + indexes []uint32 + } + tests := []test{ + {2, 4, 1, []int{1}, []uint32{2}}, + {2, 2, 1, []int{1}, []uint32{2}}, + {3, 4, 8, []int{3, 3}, []uint32{4, 8}}, + {0, 8, 8, []int{8}, []uint32{0}}, + {0, 8, 7, []int{7}, []uint32{0}}, + 
{0, 8, 7, []int{3, 3}, []uint32{0, 4}}, + {1, 8, 8, []int{3, 3}, []uint32{2, 6}}, + {1, 8, 32, []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}}, + {3, 8, 16, []int{5, 7}, []uint32{4, 12}}, + {0, 8, 29, []int{5, 5, 5, 5}, []uint32{0, 8, 16, 24}}, + {0, 8, 10, []int{10}, []uint32{0}}, + {0, 8, 22, []int{10, 10}, []uint32{0, 12}}, + {1, 8, 25, []int{10, 10}, []uint32{4, 16}}, + {2, 8, 24, []int{10, 10}, []uint32{4, 16}}, + {0, 8, 55, []int{21, 31}, []uint32{0, 24}}, + {0, 8, 128, []int{64, 64}, []uint32{0, 64}}, + {0, appconsts.DefaultMaxSquareSize, 1000, []int{1000}, []uint32{0}}, + {0, appconsts.DefaultMaxSquareSize, appconsts.DefaultMaxSquareSize + 1, []int{appconsts.DefaultMaxSquareSize + 1}, []uint32{0}}, + {1, 128, 399, []int{128, 128, 128}, []uint32{16, 144, 272}}, + {1024, appconsts.DefaultMaxSquareSize, 32, []int{32}, []uint32{1024}}, + } + for i, tt := range tests { + res, indexes := BlobSharesUsedNonInteractiveDefaults(tt.cursor, tt.squareSize, tt.blobLens...) 
+ test := fmt.Sprintf("test %d: cursor %d, squareSize %d", i, tt.cursor, tt.squareSize) + assert.Equal(t, tt.expected, res, test) + assert.Equal(t, tt.indexes, indexes, test) + } +} + +func TestFitsInSquare(t *testing.T) { + type test struct { + name string + blobs []int + start int + size int + fits bool + } + tests := []test{ + { + name: "1 blobs size 2 shares (2 blob shares, 2 compact, size 4)", + blobs: []int{2}, + start: 2, + size: 4, + fits: true, + }, + { + name: "10 blobs size 10 shares (100 blob shares, 0 compact, size 4)", + blobs: []int{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, + start: 0, + size: 4, + fits: false, + }, + { + name: "15 blobs size 1 share (15 blob shares, 0 compact, size 4)", + blobs: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, + start: 0, + size: 4, + fits: true, + }, + { + name: "15 blobs size 1 share starting at share 2 (15 blob shares, 2 compact, size 4)", + blobs: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, + start: 2, + size: 4, + fits: false, + }, + { + name: "8 blobs of various sizes (48 blob shares, 1 compact share, size 8)", + blobs: []int{3, 9, 3, 7, 8, 3, 7, 8}, + start: 1, + size: 8, + fits: true, + }, + { + // C = compact share + // P = padding share + // + // |C|C|C|C|C|C|P|P| + // |3|3|3|P|9|9|9|9| + // |9|9|9|9|9|P|P|P| + // |3|3|3|P|7|7|7|7| + // |7|7|7|P|8|8|8|8| + // |8|8|8|8|3|3|3|P| + // |7|7|7|7|7|7|7|P| + // |8|8|8|8|8|8|8|8| + name: "8 blobs of various sizes (48 blob shares, 6 compact, size 8)", + blobs: []int{3, 9, 3, 7, 8, 3, 7, 8}, + start: 6, + size: 8, + fits: true, + }, + { + name: "0 blobs (0 blob shares, 5 compact, size 2)", + blobs: []int{}, + start: 5, + size: 2, + fits: false, + }, + { + name: "0 blobs (0 blob shares, 4 compact, size 2)", + blobs: []int{}, + start: 4, + size: 2, + fits: true, + }, + { + name: "0 blobs. Cursor at the the max share index", + blobs: []int{}, + start: 16, + size: 4, + fits: true, + }, + { + name: "0 blobs. 
Cursor higher than max share index", + blobs: []int{}, + start: 17, + size: 4, + fits: false, + }, + { + name: "0 blobs. Cursor higher than max share index (again)", + blobs: []int{}, + start: 18, + size: 4, + fits: false, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, _ := FitsInSquare(tt.start, tt.size, tt.blobs...) + assert.Equal(t, tt.fits, res) + }) + } +} + +func TestNextMultipleOfBlobMinSquareSize(t *testing.T) { + type test struct { + name string + cursor, blobLen, squareSize int + expectedIndex int + fits bool + } + tests := []test{ + { + name: "whole row blobLen 4", + cursor: 0, + blobLen: 4, + squareSize: 4, + fits: true, + expectedIndex: 0, + }, + { + name: "half row blobLen 2 cursor 1", + cursor: 1, + blobLen: 2, + squareSize: 4, + fits: true, + expectedIndex: 2, + }, + { + name: "half row blobLen 2 cursor 2", + cursor: 2, + blobLen: 2, + squareSize: 4, + fits: true, + expectedIndex: 2, + }, + { + name: "half row blobLen 4 cursor 3", + cursor: 3, + blobLen: 4, + squareSize: 8, + fits: true, + expectedIndex: 4, + }, + { + name: "blobLen 5 cursor 3 size 8", + cursor: 3, + blobLen: 5, + squareSize: 8, + fits: false, + expectedIndex: 4, + }, + { + name: "blobLen 2 cursor 3 square size 8", + cursor: 3, + blobLen: 2, + squareSize: 8, + fits: true, + expectedIndex: 4, + }, + { + name: "cursor 3 blobLen 5 size 8", + cursor: 3, + blobLen: 5, + squareSize: 8, + fits: false, + expectedIndex: 4, + }, + { + name: "bloblen 12 cursor 1 size 16", + cursor: 1, + blobLen: 12, + squareSize: 16, + fits: true, + expectedIndex: 4, + }, + { + name: "edge case where there are many blobs with a single size", + cursor: 10291, + blobLen: 1, + squareSize: 128, + fits: true, + expectedIndex: 10291, + }, + { + name: "second row blobLen 2 cursor 11 square size 8", + cursor: 11, + blobLen: 2, + squareSize: 8, + fits: true, + expectedIndex: 12, + }, + { + // inspired by the diagram at 
https://github.com/celestiaorg/celestia-app/blob/1b80b94a62c8c292f569e2fc576e26299985681a/docs/architecture/adr-009-non-interactive-default-rules-for-reduced-padding.md?plain=1#L30 + name: "non-interactive default rules for reduced padding diagram", + cursor: 11, + blobLen: 11, + squareSize: 8, + fits: false, + expectedIndex: 12, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + res, fits := NextMultipleOfBlobMinSquareSize(tt.cursor, tt.blobLen, tt.squareSize) + assert.Equal(t, tt.fits, fits) + assert.Equal(t, tt.expectedIndex, res) + }) + } +} + +func Test_roundUpBy(t *testing.T) { + type test struct { + cursor, v int + expectedIndex int + } + tests := []test{ + { + cursor: 1, + v: 2, + expectedIndex: 2, + }, + { + cursor: 2, + v: 2, + expectedIndex: 2, + }, + { + cursor: 0, + v: 2, + expectedIndex: 0, + }, + { + cursor: 5, + v: 2, + expectedIndex: 6, + }, + { + cursor: 8, + v: 16, + expectedIndex: 16, + }, + { + cursor: 33, + v: 1, + expectedIndex: 33, + }, + { + cursor: 32, + v: 16, + expectedIndex: 32, + }, + { + cursor: 33, + v: 16, + expectedIndex: 48, + }, + } + for i, tt := range tests { + t.Run( + fmt.Sprintf( + "test %d: %d cursor %d v %d expectedIndex", + i, + tt.cursor, + tt.v, + tt.expectedIndex, + ), + func(t *testing.T) { + res := roundUpBy(tt.cursor, tt.v) + assert.Equal(t, tt.expectedIndex, res) + }) + } +} + +func TestMinSquareSize(t *testing.T) { + type testCase struct { + shareCount int + want int + } + testCases := []testCase{ + { + shareCount: 0, + want: 1, + }, + { + shareCount: 1, + want: 1, + }, + { + shareCount: 2, + want: 2, + }, + { + shareCount: 3, + want: 2, + }, + { + shareCount: 4, + want: 2, + }, + { + shareCount: 5, + want: 4, + }, + { + shareCount: 16, + want: 4, + }, + { + shareCount: 17, + want: 8, + }, + } + for _, tc := range testCases { + t.Run(fmt.Sprintf("shareCount %d", tc.shareCount), func(t *testing.T) { + got := MinSquareSize(tc.shareCount) + assert.Equal(t, tc.want, got) + }) + } +} diff 
--git a/shares/padding.go b/shares/padding.go new file mode 100644 index 0000000000..a755c40ccc --- /dev/null +++ b/shares/padding.go @@ -0,0 +1,69 @@ +package shares + +import ( + "bytes" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" +) + +// NamespacePaddingShare returns a share that acts as padding. Namespace padding +// shares follow a blob so that the next blob may start at an index that +// conforms to non-interactive default rules. The ns parameter provided should +// be the namespace of the blob that precedes this padding in the data square. +func NamespacePaddingShare(ns appns.Namespace) (Share, error) { + b, err := NewBuilder(ns, appconsts.ShareVersionZero, true).Init() + if err != nil { + return Share{}, err + } + if err := b.WriteSequenceLen(0); err != nil { + return Share{}, err + } + padding := bytes.Repeat([]byte{0}, appconsts.FirstSparseShareContentSize) + b.AddData(padding) + + share, err := b.Build() + if err != nil { + return Share{}, err + } + + return *share, nil +} + +// NamespacePaddingShares returns n namespace padding shares. +func NamespacePaddingShares(ns appns.Namespace, n int) ([]Share, error) { + var err error + shares := make([]Share, n) + for i := 0; i < n; i++ { + shares[i], err = NamespacePaddingShare(ns) + if err != nil { + return shares, err + } + } + return shares, nil +} + +// ReservedPaddingShare returns a share that acts as padding. Reserved padding +// shares follow all significant shares in the reserved namespace so that the +// first blob can start at an index that conforms to non-interactive default +// rules. +func ReservedPaddingShare() (Share, error) { + return NamespacePaddingShare(appns.ReservedPaddingNamespace) +} + +// ReservedPaddingShare returns n reserved padding shares. 
+func ReservedPaddingShares(n int) ([]Share, error) { + return NamespacePaddingShares(appns.ReservedPaddingNamespace, n) +} + +// TailPaddingShare is a share that is used to pad a data square to the desired +// square size. Tail padding shares follow the last blob share in the data +// square. +func TailPaddingShare() (Share, error) { + return NamespacePaddingShare(appns.TailPaddingNamespace) +} + +// TailPaddingShares returns n tail padding shares. +func TailPaddingShares(n int) ([]Share, error) { + return NamespacePaddingShares(appns.TailPaddingNamespace, n) +} diff --git a/shares/padding_test.go b/shares/padding_test.go new file mode 100644 index 0000000000..865543e706 --- /dev/null +++ b/shares/padding_test.go @@ -0,0 +1,82 @@ +package shares + +import ( + "bytes" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +var ns1 = appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + +var nsOnePadding, _ = zeroPadIfNecessary( + append( + ns1.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 0, // sequence len + }..., + ), appconsts.ShareSize) + +var reservedPadding, _ = zeroPadIfNecessary( + append( + appns.ReservedPaddingNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 0, // sequence len + }..., + ), appconsts.ShareSize) + +var tailPadding, _ = zeroPadIfNecessary( + append( + appns.TailPaddingNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 0, // sequence len + }..., + ), appconsts.ShareSize) + +func TestNamespacePaddingShare(t *testing.T) { + got, err := NamespacePaddingShare(ns1) + assert.NoError(t, err) + assert.Equal(t, nsOnePadding, got.ToBytes()) +} + +func TestNamespacePaddingShares(t *testing.T) { + shares, err := NamespacePaddingShares(ns1, 2) + assert.NoError(t, err) + for _, share := range shares { + assert.Equal(t, nsOnePadding, share.ToBytes()) + } +} + 
+func TestReservedPaddingShare(t *testing.T) { + got, err := ReservedPaddingShare() + require.NoError(t, err) + assert.Equal(t, reservedPadding, got.ToBytes()) +} + +func TestReservedPaddingShares(t *testing.T) { + shares, err := ReservedPaddingShares(2) + require.NoError(t, err) + for _, share := range shares { + assert.Equal(t, reservedPadding, share.ToBytes()) + } +} + +func TestTailPaddingShare(t *testing.T) { + got, err := TailPaddingShare() + require.NoError(t, err) + assert.Equal(t, tailPadding, got.ToBytes()) +} + +func TestTailPaddingShares(t *testing.T) { + shares, err := TailPaddingShares(2) + require.NoError(t, err) + for _, share := range shares { + assert.Equal(t, tailPadding, share.ToBytes()) + } +} diff --git a/shares/parse.go b/shares/parse.go new file mode 100644 index 0000000000..13b0f38d1e --- /dev/null +++ b/shares/parse.go @@ -0,0 +1,81 @@ +package shares + +import ( + "bytes" + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + coretypes "github.com/tendermint/tendermint/types" +) + +// ParseTxs collects all of the transactions from the shares provided +func ParseTxs(shares []Share) (coretypes.Txs, error) { + // parse the shares + rawTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) + if err != nil { + return nil, err + } + + // convert to the Tx type + txs := make(coretypes.Txs, len(rawTxs)) + for i := 0; i < len(txs); i++ { + txs[i] = coretypes.Tx(rawTxs[i]) + } + + return txs, nil +} + +// ParseBlobs collects all blobs from the shares provided +func ParseBlobs(shares []Share) ([]coretypes.Blob, error) { + blobList, err := parseSparseShares(shares, appconsts.SupportedShareVersions) + if err != nil { + return []coretypes.Blob{}, err + } + + return blobList, nil +} + +func ParseShares(shares []Share) ([]ShareSequence, error) { + sequences := []ShareSequence{} + currentSequence := ShareSequence{} + + for _, share := range shares { + if err := share.Validate(); err != nil { + return sequences, err + } + 
isStart, err := share.IsSequenceStart() + if err != nil { + return sequences, err + } + ns, err := share.Namespace() + if err != nil { + return sequences, err + } + if isStart { + if len(currentSequence.Shares) > 0 { + sequences = append(sequences, currentSequence) + } + currentSequence = ShareSequence{ + Shares: []Share{share}, + Namespace: ns, + } + } else { + if !bytes.Equal(currentSequence.Namespace.Bytes(), ns.Bytes()) { + return sequences, fmt.Errorf("share sequence %v has inconsistent namespace IDs with share %v", currentSequence, share) + } + currentSequence.Shares = append(currentSequence.Shares, share) + } + } + + if len(currentSequence.Shares) > 0 { + sequences = append(sequences, currentSequence) + } + + for _, sequence := range sequences { + if err := sequence.validSequenceLen(); err != nil { + return sequences, err + } + } + + return sequences, nil +} diff --git a/shares/parse_compact_shares.go b/shares/parse_compact_shares.go new file mode 100644 index 0000000000..32f959012f --- /dev/null +++ b/shares/parse_compact_shares.go @@ -0,0 +1,83 @@ +package shares + +import "errors" + +// parseCompactShares returns data (transactions or intermediate state roots +// based on the contents of rawShares and supportedShareVersions. If rawShares +// contains a share with a version that isn't present in supportedShareVersions, +// an error is returned. The returned data [][]byte does not have namespaces, +// info bytes, data length delimiter, or unit length delimiters and are ready to +// be unmarshalled. 
+func parseCompactShares(shares []Share, supportedShareVersions []uint8) (data [][]byte, err error) { + if len(shares) == 0 { + return nil, nil + } + + seqStart, err := shares[0].IsSequenceStart() + if err != nil { + return nil, err + } + if !seqStart { + return nil, errors.New("first share is not the start of a sequence") + } + + err = validateShareVersions(shares, supportedShareVersions) + if err != nil { + return nil, err + } + + rawData, err := extractRawData(shares) + if err != nil { + return nil, err + } + + data, err = parseRawData(rawData) + if err != nil { + return nil, err + } + + return data, nil +} + +// validateShareVersions returns an error if the shares contain a share with an +// unsupported share version. Returns nil if all shares contain supported share +// versions. +func validateShareVersions(shares []Share, supportedShareVersions []uint8) error { + for i := 0; i < len(shares); i++ { + if err := shares[i].DoesSupportVersions(supportedShareVersions); err != nil { + return err + } + } + return nil +} + +// parseRawData returns the units (transactions, PFB transactions, intermediate +// state roots) contained in raw data by parsing the unit length delimiter +// prefixed to each unit. +func parseRawData(rawData []byte) (units [][]byte, err error) { + units = make([][]byte, 0) + for { + actualData, unitLen, err := ParseDelimiter(rawData) + if err != nil { + return nil, err + } + if unitLen == 0 { + return units, nil + } + rawData = actualData[unitLen:] + units = append(units, actualData[:unitLen]) + } +} + +// extractRawData returns the raw data contained in the shares. The raw data does +// not contain the namespace ID, info byte, sequence length, or reserved bytes. +func extractRawData(shares []Share) (rawData []byte, err error) { + for i := 0; i < len(shares); i++ { + raw, err := shares[i].RawData() + if err != nil { + return nil, err + } + rawData = append(rawData, raw...) 
+ } + return rawData, nil +} diff --git a/shares/parse_sparse_shares.go b/shares/parse_sparse_shares.go new file mode 100644 index 0000000000..22e899a3e5 --- /dev/null +++ b/shares/parse_sparse_shares.go @@ -0,0 +1,88 @@ +package shares + +import ( + "bytes" + "fmt" + + coretypes "github.com/tendermint/tendermint/types" +) + +type sequence struct { + blob coretypes.Blob + sequenceLen uint32 +} + +// parseSparseShares iterates through rawShares and parses out individual +// blobs. It returns an error if a rawShare contains a share version that +// isn't present in supportedShareVersions. +func parseSparseShares(shares []Share, supportedShareVersions []uint8) (blobs []coretypes.Blob, err error) { + if len(shares) == 0 { + return nil, nil + } + sequences := make([]sequence, 0) + + for _, share := range shares { + version, err := share.Version() + if err != nil { + return nil, err + } + if !bytes.Contains(supportedShareVersions, []byte{version}) { + return nil, fmt.Errorf("unsupported share version %v is not present in supported share versions %v", version, supportedShareVersions) + } + + isPadding, err := share.IsPadding() + if err != nil { + return nil, err + } + if isPadding { + continue + } + + isStart, err := share.IsSequenceStart() + if err != nil { + return nil, err + } + + if isStart { + sequenceLen, err := share.SequenceLen() + if err != nil { + return nil, err + } + data, err := share.RawData() + if err != nil { + return nil, err + } + ns, err := share.Namespace() + if err != nil { + return nil, err + } + blob := coretypes.Blob{ + NamespaceID: ns.ID, + Data: data, + ShareVersion: version, + NamespaceVersion: ns.Version, + } + sequences = append(sequences, sequence{ + blob: blob, + sequenceLen: sequenceLen, + }) + } else { // continuation share + if len(sequences) == 0 { + return nil, fmt.Errorf("continuation share %v without a sequence start share", share) + } + prev := &sequences[len(sequences)-1] + data, err := share.RawData() + if err != nil { + return 
nil, err + } + prev.blob.Data = append(prev.blob.Data, data...) + } + } + for _, sequence := range sequences { + // trim any padding from the end of the sequence + sequence.blob.Data = sequence.blob.Data[:sequence.sequenceLen] + blobs = append(blobs, sequence.blob) + } + + return blobs, nil +} diff --git a/shares/parse_sparse_shares_test.go b/shares/parse_sparse_shares_test.go new file mode 100644 index 0000000000..db06bbe879 --- /dev/null +++ b/shares/parse_sparse_shares_test.go @@ -0,0 +1,154 @@ +package shares + +import ( + "bytes" + "fmt" + "sort" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/testutil/testfactory" + "github.com/celestiaorg/nmt/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" +) + +func Test_parseSparseShares(t *testing.T) { + type test struct { + name string + blobSize int + blobCount int + } + + // each test is ran twice, once using blobSize as an exact size, and again + // using it as a cap for randomly sized leaves + tests := []test{ + { + name: "single small blob", + blobSize: 10, + blobCount: 1, + }, + { + name: "ten small blobs", + blobSize: 10, + blobCount: 10, + }, + { + name: "single big blob", + blobSize: appconsts.ContinuationSparseShareContentSize * 4, + blobCount: 1, + }, + { + name: "many big blobs", + blobSize: appconsts.ContinuationSparseShareContentSize * 4, + blobCount: 10, + }, + { + name: "single exact size blob", + blobSize: appconsts.FirstSparseShareContentSize, + blobCount: 1, + }, + } + + for _, tc := range tests { + // run the tests with identically sized blobs + t.Run(fmt.Sprintf("%s identically sized ", tc.name), func(t *testing.T) { + blobs := make([]coretypes.Blob, tc.blobCount) + for i := 0; i < tc.blobCount; i++ { + blobs[i] = testfactory.GenerateRandomBlob(tc.blobSize) + } + + sort.Sort(coretypes.BlobsByNamespace(blobs)) + + shares, _ := SplitBlobs(0, nil, 
blobs, false) + parsedBlobs, err := parseSparseShares(shares, appconsts.SupportedShareVersions) + if err != nil { + t.Error(err) + } + + // check that the namespaces and data are the same + for i := 0; i < len(blobs); i++ { + assert.Equal(t, blobs[i].NamespaceID, parsedBlobs[i].NamespaceID, "parsed blob namespace does not match") + assert.Equal(t, blobs[i].Data, parsedBlobs[i].Data, "parsed blob data does not match") + } + }) + + // run the same tests using randomly sized blobs with caps of tc.blobSize + t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) { + blobs := testfactory.GenerateRandomlySizedBlobs(tc.blobCount, tc.blobSize) + shares, _ := SplitBlobs(0, nil, blobs, false) + parsedBlobs, err := parseSparseShares(shares, appconsts.SupportedShareVersions) + if err != nil { + t.Error(err) + } + + // check that the namespaces and data are the same + for i := 0; i < len(blobs); i++ { + assert.Equal(t, blobs[i].NamespaceID, parsedBlobs[i].NamespaceID) + assert.Equal(t, blobs[i].Data, parsedBlobs[i].Data) + } + }) + } +} + +func Test_parseSparseSharesErrors(t *testing.T) { + type testCase struct { + name string + shares []Share + } + + unsupportedShareVersion := 5 + infoByte, _ := NewInfoByte(uint8(unsupportedShareVersion), true) + + rawShare := []byte{} + rawShare = append(rawShare, namespace.ID{1, 1, 1, 1, 1, 1, 1, 1}...) + rawShare = append(rawShare, byte(infoByte)) + rawShare = append(rawShare, bytes.Repeat([]byte{0}, appconsts.ShareSize-len(rawShare))...) 
+ share, err := NewShare(rawShare) + if err != nil { + t.Fatal(err) + } + + tests := []testCase{ + { + "share with unsupported share version", + []Share{*share}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(*testing.T) { + _, err := parseSparseShares(tt.shares, appconsts.SupportedShareVersions) + assert.Error(t, err) + }) + } +} + +func Test_parseSparseSharesWithNamespacedPadding(t *testing.T) { + sss := NewSparseShareSplitter() + randomSmallBlob := testfactory.GenerateRandomBlob(appconsts.ContinuationSparseShareContentSize / 2) + randomLargeBlob := testfactory.GenerateRandomBlob(appconsts.ContinuationSparseShareContentSize * 4) + blobs := []coretypes.Blob{ + randomSmallBlob, + randomLargeBlob, + } + sort.Sort(coretypes.BlobsByNamespace(blobs)) + + err := sss.Write(blobs[0]) + require.NoError(t, err) + + err = sss.WriteNamespacedPaddedShares(4) + require.NoError(t, err) + + err = sss.Write(blobs[1]) + require.NoError(t, err) + + err = sss.WriteNamespacedPaddedShares(10) + require.NoError(t, err) + + shares := sss.Export() + pblobs, err := parseSparseShares(shares, appconsts.SupportedShareVersions) + require.NoError(t, err) + require.Equal(t, blobs, pblobs) +} diff --git a/shares/parse_test.go b/shares/parse_test.go new file mode 100644 index 0000000000..6a0639e9ec --- /dev/null +++ b/shares/parse_test.go @@ -0,0 +1,196 @@ +package shares + +import ( + "bytes" + "encoding/binary" + "math/rand" + "reflect" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" + "github.com/tendermint/tendermint/types" +) + +func TestParseShares(t *testing.T) { + type testCase struct { + name string + shares []Share + want []ShareSequence + expectErr bool + } + + start := true + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + 
namespaceTwo := appns.MustNewV0(bytes.Repeat([]byte{2}, appns.NamespaceVersionZeroIDSize))
blobTwoStart, blobTwoContinuation}, + []ShareSequence{ + {Namespace: ns1, Shares: []Share{blobOneStart, blobOneContinuation}}, + {Namespace: namespaceTwo, Shares: []Share{blobTwoStart, blobTwoContinuation}}, + }, + false, + }, + { + "one transaction, one blob", + []Share{txShareStart, blobOneStart}, + []ShareSequence{ + {Namespace: appns.TxNamespace, Shares: []Share{txShareStart}}, + {Namespace: ns1, Shares: []Share{blobOneStart}}, + }, + false, + }, + { + "one transaction, two blobs", + []Share{txShareStart, blobOneStart, blobTwoStart}, + []ShareSequence{ + {Namespace: appns.TxNamespace, Shares: []Share{txShareStart}}, + {Namespace: ns1, Shares: []Share{blobOneStart}}, + {Namespace: namespaceTwo, Shares: []Share{blobTwoStart}}, + }, + false, + }, + { + "one share with invalid size", + []Share{invalidShare}, + []ShareSequence{}, + true, + }, + { + "blob one start followed by blob two continuation", + []Share{blobOneStart, blobTwoContinuation}, + []ShareSequence{}, + true, + }, + { + "one share with too large sequence length", + []Share{{data: oneShareWithTooLargeSequenceLen}}, + []ShareSequence{}, + true, + }, + { + "one share with too short sequence length", + []Share{{data: oneShareWithTooShortSequenceLen}}, + []ShareSequence{}, + true, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := ParseShares(tt.shares) + if tt.expectErr { + assert.Error(t, err) + return + } + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("ParseShares() got %v, want %v", got, tt.want) + } + }) + } +} + +func generateRawShare(namespace appns.Namespace, isSequenceStart bool, sequenceLen uint32) (rawShare []byte) { + infoByte, _ := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart) + + sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) + binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) + + rawShare = append(rawShare, namespace.Bytes()...) + rawShare = append(rawShare, byte(infoByte)) + rawShare = append(rawShare, sequenceLenBuf...) 
+ + return padWithRandomBytes(rawShare) +} + +func padWithRandomBytes(partialShare []byte) (paddedShare []byte) { + paddedShare = make([]byte, appconsts.ShareSize) + copy(paddedShare, partialShare) + rand.Read(paddedShare[len(partialShare):]) + return paddedShare +} + +func generateRandomTxs(count, size int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + tx := make([]byte, size) + _, err := rand.Read(tx) + if err != nil { + panic(err) + } + txs[i] = tx + } + return txs +} + +func generateRandomBlobWithNamespace(namespace appns.Namespace, size int) types.Blob { + blob := types.Blob{ + NamespaceVersion: namespace.Version, + NamespaceID: namespace.ID, + Data: tmrand.Bytes(size), + ShareVersion: appconsts.ShareVersionZero, + } + return blob +} diff --git a/shares/powers_of_two.go b/shares/powers_of_two.go new file mode 100644 index 0000000000..d9c7bc1ff2 --- /dev/null +++ b/shares/powers_of_two.go @@ -0,0 +1,44 @@ +package shares + +import ( + "fmt" + + "golang.org/x/exp/constraints" +) + +// RoundUpPowerOfTwo returns the next power of two greater than or equal to input. +func RoundUpPowerOfTwo[I constraints.Integer](input I) I { + var result I = 1 + for result < input { + result = result << 1 + } + return result +} + +// RoundDownPowerOfTwo returns the next power of two less than or equal to input. +func RoundDownPowerOfTwo[I constraints.Integer](input I) (I, error) { + if input <= 0 { + return 0, fmt.Errorf("input %v must be positive", input) + } + roundedUp := RoundUpPowerOfTwo(input) + if roundedUp == input { + return roundedUp, nil + } + return roundedUp / 2, nil +} + +// RoundUpPowerOfTwo returns the next power of two that is strictly greater than input. 
+func RoundUpPowerOfTwoStrict[I constraints.Integer](input I) I { + result := RoundUpPowerOfTwo(input) + + // round the result up to the next power of two if is equal to the input + if result == input { + return result * 2 + } + return result +} + +// IsPowerOfTwo returns true if input is a power of two. +func IsPowerOfTwo[I constraints.Integer](input I) bool { + return input&(input-1) == 0 && input != 0 +} diff --git a/shares/powers_of_two_test.go b/shares/powers_of_two_test.go new file mode 100644 index 0000000000..a8a4f4fd52 --- /dev/null +++ b/shares/powers_of_two_test.go @@ -0,0 +1,101 @@ +package shares + +import ( + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestRoundUpPowerOfTwo(t *testing.T) { + type testCase struct { + input int + want int + } + testCases := []testCase{ + {input: -1, want: 1}, + {input: 0, want: 1}, + {input: 1, want: 1}, + {input: 2, want: 2}, + {input: 4, want: 4}, + {input: 5, want: 8}, + {input: 8, want: 8}, + {input: 11, want: 16}, + {input: 511, want: 512}, + } + for _, tc := range testCases { + got := RoundUpPowerOfTwo(tc.input) + assert.Equal(t, tc.want, got) + } +} + +func TestRoundDownPowerOfTwo(t *testing.T) { + type testCase struct { + input int + want int + } + testCases := []testCase{ + {input: 1, want: 1}, + {input: 2, want: 2}, + {input: 4, want: 4}, + {input: 5, want: 4}, + {input: 8, want: 8}, + {input: 11, want: 8}, + {input: 511, want: 256}, + } + for _, tc := range testCases { + got, err := RoundDownPowerOfTwo(tc.input) + require.NoError(t, err) + assert.Equal(t, tc.want, got) + } +} + +func TestRoundUpPowerOfTwoStrict(t *testing.T) { + type testCase struct { + input int + want int + } + testCases := []testCase{ + {input: -1, want: 1}, + {input: 0, want: 1}, + {input: 1, want: 2}, + {input: 2, want: 4}, + {input: 4, want: 8}, + {input: 5, want: 8}, + {input: 8, want: 16}, + {input: 11, want: 16}, + {input: 511, want: 512}, + } + for _, tc := range testCases { 
+ got := RoundUpPowerOfTwoStrict(tc.input) + assert.Equal(t, tc.want, got) + } +} + +func TestIsPowerOfTwoU(t *testing.T) { + type test struct { + input uint64 + want bool + } + tests := []test{ + // powers of two + {input: 1, want: true}, + {input: 2, want: true}, + {input: 4, want: true}, + {input: 8, want: true}, + {input: 16, want: true}, + {input: 32, want: true}, + {input: 64, want: true}, + {input: 128, want: true}, + {input: 256, want: true}, + // not powers of two + {input: 0, want: false}, + {input: 3, want: false}, + {input: 12, want: false}, + {input: 79, want: false}, + } + for _, tt := range tests { + got := IsPowerOfTwo(tt.input) + assert.Equal(t, tt.want, got) + } +} diff --git a/shares/reserved_bytes.go b/shares/reserved_bytes.go new file mode 100644 index 0000000000..7240e2551d --- /dev/null +++ b/shares/reserved_bytes.go @@ -0,0 +1,33 @@ +package shares + +import ( + "encoding/binary" + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" +) + +// NewReservedBytes returns a byte slice of length +// appconsts.CompactShareReservedBytes that contains the byteIndex of the first +// unit that starts in a compact share. +func NewReservedBytes(byteIndex uint32) ([]byte, error) { + if byteIndex >= appconsts.ShareSize { + return []byte{}, fmt.Errorf("byte index %d must be less than share size %d", byteIndex, appconsts.ShareSize) + } + reservedBytes := make([]byte, appconsts.CompactShareReservedBytes) + binary.BigEndian.PutUint32(reservedBytes, byteIndex) + return reservedBytes, nil +} + +// ParseReservedBytes parses a byte slice of length +// appconsts.CompactShareReservedBytes into a byteIndex. 
+func ParseReservedBytes(reservedBytes []byte) (uint32, error) { + if len(reservedBytes) != appconsts.CompactShareReservedBytes { + return 0, fmt.Errorf("reserved bytes must be of length %d", appconsts.CompactShareReservedBytes) + } + byteIndex := binary.BigEndian.Uint32(reservedBytes) + if appconsts.ShareSize <= byteIndex { + return 0, fmt.Errorf("byteIndex must be less than share size %d", appconsts.ShareSize) + } + return byteIndex, nil +} diff --git a/shares/reserved_bytes_test.go b/shares/reserved_bytes_test.go new file mode 100644 index 0000000000..1723b358ae --- /dev/null +++ b/shares/reserved_bytes_test.go @@ -0,0 +1,84 @@ +package shares + +import ( + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestParseReservedBytes(t *testing.T) { + type testCase struct { + name string + input []byte + want uint32 + expectErr bool + } + testCases := []testCase{ + {"byte index of 0", []byte{0, 0, 0, 0}, 0, false}, + {"byte index of 2", []byte{0, 0, 0, 2}, 2, false}, + {"byte index of 4", []byte{0, 0, 0, 4}, 4, false}, + {"byte index of 8", []byte{0, 0, 0, 8}, 8, false}, + {"byte index of 16", []byte{0, 0, 0, 16}, 16, false}, + {"byte index of 32", []byte{0, 0, 0, 32}, 32, false}, + {"byte index of 64", []byte{0, 0, 0, 64}, 64, false}, + {"byte index of 128", []byte{0, 0, 0, 128}, 128, false}, + {"byte index of 256", []byte{0, 0, 1, 0}, 256, false}, + {"byte index of 511", []byte{0, 0, 1, 255}, 511, false}, + + // error cases + {"empty", []byte{}, 0, true}, + {"too few reserved bytes", []byte{1}, 0, true}, + {"another case of too few reserved bytes", []byte{3, 3, 3}, 0, true}, + {"too many bytes", []byte{0, 0, 0, 0, 0}, 0, true}, + {"too high of a byte index", []byte{0, 0, 3, 232}, 0, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := ParseReservedBytes(tc.input) + if tc.expectErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func 
TestNewReservedBytes(t *testing.T) { + type testCase struct { + name string + input uint32 + want []byte + expectErr bool + } + testCases := []testCase{ + {"byte index of 0", 0, []byte{0, 0, 0, 0}, false}, + {"byte index of 2", 2, []byte{0, 0, 0, 2}, false}, + {"byte index of 4", 4, []byte{0, 0, 0, 4}, false}, + {"byte index of 8", 8, []byte{0, 0, 0, 8}, false}, + {"byte index of 16", 16, []byte{0, 0, 0, 16}, false}, + {"byte index of 32", 32, []byte{0, 0, 0, 32}, false}, + {"byte index of 64", 64, []byte{0, 0, 0, 64}, false}, + {"byte index of 128", 128, []byte{0, 0, 0, 128}, false}, + {"byte index of 256", 256, []byte{0, 0, 1, 0}, false}, + {"byte index of 511", 511, []byte{0, 0, 1, 255}, false}, + + // error cases + {"byte index of 512 is equal to share size", 512, []byte{}, true}, + {"byte index of 1000 is greater than share size", 1000, []byte{}, true}, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := NewReservedBytes(tc.input) + if tc.expectErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/shares/share_builder.go b/shares/share_builder.go new file mode 100644 index 0000000000..b19f5cb7c8 --- /dev/null +++ b/shares/share_builder.go @@ -0,0 +1,227 @@ +package shares + +import ( + "encoding/binary" + "errors" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" +) + +type Builder struct { + namespace appns.Namespace + shareVersion uint8 + isFirstShare bool + isCompactShare bool + rawShareData []byte +} + +func NewEmptyBuilder() *Builder { + return &Builder{ + rawShareData: make([]byte, 0, appconsts.ShareSize), + } +} + +// Init() needs to be called right after this method +func NewBuilder(ns appns.Namespace, shareVersion uint8, isFirstShare bool) *Builder { + return &Builder{ + namespace: ns, + shareVersion: shareVersion, + isFirstShare: isFirstShare, + isCompactShare: 
isCompactShare(ns), + } +} + +func (b *Builder) Init() (*Builder, error) { + if b.isCompactShare { + if err := b.prepareCompactShare(); err != nil { + return nil, err + } + } else { + if err := b.prepareSparseShare(); err != nil { + return nil, err + } + } + + return b, nil +} + +func (b *Builder) AvailableBytes() int { + return appconsts.ShareSize - len(b.rawShareData) +} + +func (b *Builder) ImportRawShare(rawBytes []byte) *Builder { + b.rawShareData = rawBytes + return b +} + +func (b *Builder) AddData(rawData []byte) (rawDataLeftOver []byte) { + // find the len left in the pending share + pendingLeft := appconsts.ShareSize - len(b.rawShareData) + + // if we can simply add the tx to the share without creating a new + // pending share, do so and return + if len(rawData) <= pendingLeft { + b.rawShareData = append(b.rawShareData, rawData...) + return nil + } + + // if we can only add a portion of the rawData to the pending share, + // then we add it and add the pending share to the finalized shares. + chunk := rawData[:pendingLeft] + b.rawShareData = append(b.rawShareData, chunk...) + + // We need to finish this share and start a new one + // so we return the leftover to be written into a new share + return rawData[pendingLeft:] +} + +func (b *Builder) Build() (*Share, error) { + return NewShare(b.rawShareData) +} + +// IsEmptyShare returns true if no data has been written to the share +func (b *Builder) IsEmptyShare() bool { + expectedLen := appconsts.NamespaceSize + appconsts.ShareInfoBytes + if b.isCompactShare { + expectedLen += appconsts.CompactShareReservedBytes + } + if b.isFirstShare { + expectedLen += appconsts.SequenceLenBytes + } + return len(b.rawShareData) == expectedLen +} + +func (b *Builder) ZeroPadIfNecessary() (bytesOfPadding int) { + b.rawShareData, bytesOfPadding = zeroPadIfNecessary(b.rawShareData, appconsts.ShareSize) + return bytesOfPadding +} + +// isEmptyReservedBytes returns true if the reserved bytes are empty. 
+func (b *Builder) isEmptyReservedBytes() (bool, error) { + indexOfReservedBytes := b.indexOfReservedBytes() + reservedBytes, err := ParseReservedBytes(b.rawShareData[indexOfReservedBytes : indexOfReservedBytes+appconsts.CompactShareReservedBytes]) + if err != nil { + return false, err + } + return reservedBytes == 0, nil +} + +// indexOfReservedBytes returns the index of the reserved bytes in the share. +func (b *Builder) indexOfReservedBytes() int { + if b.isFirstShare { + // if the share is the first share, the reserved bytes follow the namespace, info byte, and sequence length + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + } + // if the share is not the first share, the reserved bytes follow the namespace and info byte + return appconsts.NamespaceSize + appconsts.ShareInfoBytes +} + +// indexOfInfoBytes returns the index of the InfoBytes. +func (b *Builder) indexOfInfoBytes() int { + // the info byte is immediately after the namespace + return appconsts.NamespaceSize +} + +// MaybeWriteReservedBytes will be a no-op if the reserved bytes +// have already been populated. If the reserved bytes are empty, it will write +// the location of the next unit of data to the reserved bytes. +func (b *Builder) MaybeWriteReservedBytes() error { + if !b.isCompactShare { + return errors.New("this is not a compact share") + } + + empty, err := b.isEmptyReservedBytes() + if err != nil { + return err + } + if !empty { + return nil + } + + byteIndexOfNextUnit := len(b.rawShareData) + reservedBytes, err := NewReservedBytes(uint32(byteIndexOfNextUnit)) + if err != nil { + return err + } + + indexOfReservedBytes := b.indexOfReservedBytes() + // overwrite the reserved bytes of the pending share + for i := 0; i < appconsts.CompactShareReservedBytes; i++ { + b.rawShareData[indexOfReservedBytes+i] = reservedBytes[i] + } + return nil +} + +// writeSequenceLen writes the sequence length to the first share. 
+func (b *Builder) WriteSequenceLen(sequenceLen uint32) error { + if b == nil { + return errors.New("the builder object is not initialized (is nil)") + } + if !b.isFirstShare { + return errors.New("not the first share") + } + sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) + binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) + + for i := 0; i < appconsts.SequenceLenBytes; i++ { + b.rawShareData[appconsts.NamespaceSize+appconsts.ShareInfoBytes+i] = sequenceLenBuf[i] + } + + return nil +} + +// FlipSequenceStart flips the sequence start indicator of the share provided +func (b *Builder) FlipSequenceStart() { + infoByteIndex := b.indexOfInfoBytes() + + // the sequence start indicator is the last bit of the info byte so flip the + // last bit + b.rawShareData[infoByteIndex] = b.rawShareData[infoByteIndex] ^ 0x01 +} + +func (b *Builder) prepareCompactShare() error { + shareData := make([]byte, 0, appconsts.ShareSize) + infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare) + if err != nil { + return err + } + placeholderSequenceLen := make([]byte, appconsts.SequenceLenBytes) + placeholderReservedBytes := make([]byte, appconsts.CompactShareReservedBytes) + + shareData = append(shareData, b.namespace.Bytes()...) + shareData = append(shareData, byte(infoByte)) + + if b.isFirstShare { + shareData = append(shareData, placeholderSequenceLen...) + } + + shareData = append(shareData, placeholderReservedBytes...) + + b.rawShareData = shareData + + return nil +} + +func (b *Builder) prepareSparseShare() error { + shareData := make([]byte, 0, appconsts.ShareSize) + infoByte, err := NewInfoByte(b.shareVersion, b.isFirstShare) + if err != nil { + return err + } + placeholderSequenceLen := make([]byte, appconsts.SequenceLenBytes) + + shareData = append(shareData, b.namespace.Bytes()...) + shareData = append(shareData, byte(infoByte)) + + if b.isFirstShare { + shareData = append(shareData, placeholderSequenceLen...) 
+ } + + b.rawShareData = shareData + return nil +} + +func isCompactShare(ns appns.Namespace) bool { + return ns.IsTx() || ns.IsPayForBlob() +} diff --git a/shares/share_builder_test.go b/shares/share_builder_test.go new file mode 100644 index 0000000000..8696e4d22c --- /dev/null +++ b/shares/share_builder_test.go @@ -0,0 +1,320 @@ +package shares + +import ( + "bytes" + "fmt" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestShareBuilderIsEmptyShare(t *testing.T) { + type testCase struct { + name string + builder *Builder + data []byte // input data + want bool + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + testCases := []testCase{ + { + name: "first compact share empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: nil, + want: true, + }, + { + name: "first compact share not empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "first sparse share empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: nil, + want: true, + }, + { + name: "first sparse share not empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "continues compact share empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: nil, + want: true, + }, + { + name: "continues compact share not empty", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: false, + }, + { + name: "continues sparse share not empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 
9, 10}, + want: false, + }, + { + name: "continues sparse share empty", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: nil, + want: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.builder.Init() + require.NoError(t, err) + tc.builder.AddData(tc.data) + assert.Equal(t, tc.want, tc.builder.IsEmptyShare()) + }) + } +} + +func TestShareBuilderWriteSequenceLen(t *testing.T) { + type testCase struct { + name string + builder *Builder + wantLen uint32 + wantErr bool + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + testCases := []testCase{ + { + name: "first share", + builder: NewBuilder(ns1, 1, true), + wantLen: 10, + wantErr: false, + }, + { + name: "first share with long sequence", + builder: NewBuilder(ns1, 1, true), + wantLen: 323, + wantErr: false, + }, + { + name: "continuation sparse share", + builder: NewBuilder(ns1, 1, false), + wantLen: 10, + wantErr: true, + }, + { + name: "compact share", + builder: NewBuilder(appns.TxNamespace, 1, true), + wantLen: 10, + wantErr: false, + }, + { + name: "continuation compact share", + builder: NewBuilder(ns1, 1, false), + wantLen: 10, + wantErr: true, + }, + { + name: "nil builder", + builder: &Builder{}, + wantLen: 10, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.builder.Init() + require.NoError(t, err) + if err := tc.builder.WriteSequenceLen(tc.wantLen); tc.wantErr { + assert.Error(t, err) + return + } + + tc.builder.ZeroPadIfNecessary() + share, err := tc.builder.Build() + require.NoError(t, err) + + len, err := share.SequenceLen() + require.NoError(t, err) + + assert.Equal(t, tc.wantLen, len) + }) + } +} + +func TestShareBuilderAddData(t *testing.T) { + type testCase struct { + name string + builder *Builder + data []byte // input data + want []byte + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, 
appns.NamespaceVersionZeroIDSize)) + + testCases := []testCase{ + { + name: "small share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + want: nil, + }, + { + name: "exact fit first compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.ShareInfoBytes-appconsts.CompactShareReservedBytes-appconsts.SequenceLenBytes), + want: nil, + }, + { + name: "exact fit first sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.SequenceLenBytes-1 /*1 = info byte*/), + want: nil, + }, + { + name: "exact fit continues compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-appconsts.CompactShareReservedBytes-1 /*1 = info byte*/), + want: nil, + }, + { + name: "exact fit continues sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, appconsts.ShareSize-appconsts.NamespaceSize-1 /*1 = info byte*/), + want: nil, + }, + { + name: "oversize first compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-appconsts.CompactShareReservedBytes-appconsts.SequenceLenBytes-1 /*1 = info byte*/), + want: []byte{1}, + }, + { + name: "oversize first sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, true), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-appconsts.SequenceLenBytes-1 /*1 = info byte*/), + want: []byte{1}, + }, + { + name: "oversize continues compact share", + builder: NewBuilder(appns.TxNamespace, appconsts.ShareVersionZero, 
false), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-appconsts.CompactShareReservedBytes-1 /*1 = info byte*/), + want: []byte{1}, + }, + { + name: "oversize continues sparse share", + builder: NewBuilder(ns1, appconsts.ShareVersionZero, false), + data: bytes.Repeat([]byte{1}, 1 /*1 extra byte*/ +appconsts.ShareSize-appconsts.NamespaceSize-1 /*1 = info byte*/), + want: []byte{1}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + _, err := tc.builder.Init() + require.NoError(t, err) + + got := tc.builder.AddData(tc.data) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestShareBuilderImportRawData(t *testing.T) { + type testCase struct { + name string + shareBytes []byte + want []byte + wantErr bool + } + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + + firstSparseShare := append(ns1.Bytes(), []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + continuationSparseShare := append(ns1.Bytes(), []byte{ + 0, // info byte + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + firstCompactShare := append(appns.TxNamespace.Bytes(), []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 0, 0, 0, 15, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + continuationCompactShare := append(appns.TxNamespace.Bytes(), []byte{ + 0, // info byte + 0, 0, 0, 0, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + + oversizedImport := append( + append( + ns1.Bytes(), + []byte{ + 0, // info byte + 0, 0, 0, 0, // reserved bytes + }...), bytes.Repeat([]byte{1}, 513)...) 
// data + + testCases := []testCase{ + { + name: "first sparse share", + shareBytes: firstSparseShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation sparse share", + shareBytes: continuationSparseShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "first compact share", + shareBytes: firstCompactShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation compact share", + shareBytes: continuationCompactShare, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "oversized import", + shareBytes: oversizedImport, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + b := NewEmptyBuilder().ImportRawShare(tc.shareBytes) + b.ZeroPadIfNecessary() + builtShare, err := b.Build() + if tc.wantErr { + assert.Error(t, err) + return + } + + rawData, err := builtShare.RawData() + if tc.wantErr { + assert.Error(t, err) + return + } + // Since rawData has padding, we need to use contains + if !bytes.Contains(rawData, tc.want) { + t.Errorf(fmt.Sprintf("%#v does not contain %#v", rawData, tc.want)) + } + }) + } +} diff --git a/shares/share_sequence.go b/shares/share_sequence.go new file mode 100644 index 0000000000..bcf5c51250 --- /dev/null +++ b/shares/share_sequence.go @@ -0,0 +1,123 @@ +package shares + +import ( + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" +) + +// ShareSequence represents a contiguous sequence of shares that are part of the +// same namespace and blob. For compact shares, one share sequence exists per +// reserved namespace. For sparse shares, one share sequence exists per blob. +type ShareSequence struct { + Namespace appns.Namespace + Shares []Share +} + +// RawData returns the raw share data of this share sequence. The raw data does +// not contain the namespace ID, info byte, sequence length, or reserved bytes. 
+func (s ShareSequence) RawData() (data []byte, err error) { + for _, share := range s.Shares { + raw, err := share.RawData() + if err != nil { + return []byte{}, err + } + data = append(data, raw...) + } + + sequenceLen, err := s.SequenceLen() + if err != nil { + return []byte{}, err + } + // trim any padding that may have been added to the last share + return data[:sequenceLen], nil +} + +func (s ShareSequence) SequenceLen() (uint32, error) { + if len(s.Shares) == 0 { + return 0, fmt.Errorf("invalid sequence length because share sequence %v has no shares", s) + } + firstShare := s.Shares[0] + return firstShare.SequenceLen() +} + +// validSequenceLen extracts the sequenceLen written to the first share +// and returns an error if the number of shares needed to store a sequence of +// length sequenceLen doesn't match the number of shares in this share +// sequence. Returns nil if there is no error. +func (s ShareSequence) validSequenceLen() error { + if len(s.Shares) == 0 { + return fmt.Errorf("invalid sequence length because share sequence %v has no shares", s) + } + firstShare := s.Shares[0] + sharesNeeded, err := numberOfSharesNeeded(firstShare) + if err != nil { + return err + } + + if len(s.Shares) != sharesNeeded { + return fmt.Errorf("share sequence has %d shares but needed %d shares", len(s.Shares), sharesNeeded) + } + return nil +} + +// numberOfSharesNeeded extracts the sequenceLen written to the share +// firstShare and returns the number of shares needed to store a sequence of +// that length. 
+func numberOfSharesNeeded(firstShare Share) (sharesUsed int, err error) { + sequenceLen, err := firstShare.SequenceLen() + if err != nil { + return 0, err + } + + isCompact, err := firstShare.IsCompactShare() + if err != nil { + return 0, err + } + if isCompact { + return CompactSharesNeeded(int(sequenceLen)), nil + } + return SparseSharesNeeded(sequenceLen), nil +} + +// CompactSharesNeeded returns the number of compact shares needed to store a +// sequence of length sequenceLen. The parameter sequenceLen is the number +// of bytes of transactions or intermediate state roots in a sequence. +func CompactSharesNeeded(sequenceLen int) (sharesNeeded int) { + if sequenceLen == 0 { + return 0 + } + + if sequenceLen < appconsts.FirstCompactShareContentSize { + return 1 + } + + bytesAvailable := appconsts.FirstCompactShareContentSize + sharesNeeded++ + for bytesAvailable < sequenceLen { + bytesAvailable += appconsts.ContinuationCompactShareContentSize + sharesNeeded++ + } + return sharesNeeded +} + +// SparseSharesNeeded returns the number of shares needed to store a sequence of +// length sequenceLen. 
+func SparseSharesNeeded(sequenceLen uint32) (sharesNeeded int) { + if sequenceLen == 0 { + return 0 + } + + if sequenceLen < appconsts.FirstSparseShareContentSize { + return 1 + } + + bytesAvailable := appconsts.FirstSparseShareContentSize + sharesNeeded++ + for uint32(bytesAvailable) < sequenceLen { + bytesAvailable += appconsts.ContinuationSparseShareContentSize + sharesNeeded++ + } + return sharesNeeded +} diff --git a/shares/share_sequence_test.go b/shares/share_sequence_test.go new file mode 100644 index 0000000000..72e171e825 --- /dev/null +++ b/shares/share_sequence_test.go @@ -0,0 +1,137 @@ +package shares + +import ( + "bytes" + "encoding/binary" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" +) + +func TestShareSequenceRawData(t *testing.T) { + type testCase struct { + name string + shareSequence ShareSequence + want []byte + wantErr bool + } + blobNamespace := appns.RandomBlobNamespace() + + testCases := []testCase{ + { + name: "empty share sequence", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{}, + }, + want: []byte{}, + wantErr: false, + }, + { + name: "one empty share", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{ + shareWithData(blobNamespace, true, 0, []byte{}), + }, + }, + want: []byte{}, + wantErr: false, + }, + { + name: "one share with one byte", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{ + shareWithData(blobNamespace, true, 1, []byte{0x0f}), + }, + }, + want: []byte{0xf}, + wantErr: false, + }, + { + name: "removes padding from last share", + shareSequence: ShareSequence{ + Namespace: appns.TxNamespace, + Shares: []Share{ + shareWithData(blobNamespace, true, appconsts.FirstSparseShareContentSize+1, bytes.Repeat([]byte{0xf}, appconsts.FirstSparseShareContentSize)), + shareWithData(blobNamespace, false, 0, 
[]byte{0x0f}), + }, + }, + want: bytes.Repeat([]byte{0xf}, appconsts.FirstSparseShareContentSize+1), + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := tc.shareSequence.RawData() + if tc.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tc.want, got) + }) + } +} + +func Test_compactSharesNeeded(t *testing.T) { + type testCase struct { + sequenceLen int + want int + } + testCases := []testCase{ + {0, 0}, + {1, 1}, + {2, 1}, + {appconsts.FirstCompactShareContentSize, 1}, + {appconsts.FirstCompactShareContentSize + 1, 2}, + {appconsts.FirstCompactShareContentSize + appconsts.ContinuationCompactShareContentSize, 2}, + {appconsts.FirstCompactShareContentSize + appconsts.ContinuationCompactShareContentSize*100, 101}, + } + for _, tc := range testCases { + got := CompactSharesNeeded(tc.sequenceLen) + assert.Equal(t, tc.want, got) + } +} + +func Test_sparseSharesNeeded(t *testing.T) { + type testCase struct { + sequenceLen uint32 + want int + } + testCases := []testCase{ + {0, 0}, + {1, 1}, + {2, 1}, + {appconsts.FirstSparseShareContentSize, 1}, + {appconsts.FirstSparseShareContentSize + 1, 2}, + {appconsts.FirstSparseShareContentSize + appconsts.ContinuationSparseShareContentSize, 2}, + {appconsts.FirstSparseShareContentSize + appconsts.ContinuationCompactShareContentSize*2, 3}, + {appconsts.FirstSparseShareContentSize + appconsts.ContinuationCompactShareContentSize*99, 100}, + {1000, 3}, + {10000, 21}, + {100000, 210}, + } + for _, tc := range testCases { + got := SparseSharesNeeded(tc.sequenceLen) + assert.Equal(t, tc.want, got) + } +} + +func shareWithData(namespace appns.Namespace, isSequenceStart bool, sequenceLen uint32, data []byte) (rawShare Share) { + infoByte, _ := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart) + rawShareBytes := make([]byte, 0, appconsts.ShareSize) + rawShareBytes = append(rawShareBytes, namespace.Bytes()...) 
+ rawShareBytes = append(rawShareBytes, byte(infoByte)) + if isSequenceStart { + sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) + binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) + rawShareBytes = append(rawShareBytes, sequenceLenBuf...) + } + rawShareBytes = append(rawShareBytes, data...) + + return padShare(Share{data: rawShareBytes}) +} diff --git a/shares/share_splitting.go b/shares/share_splitting.go new file mode 100644 index 0000000000..d416489a21 --- /dev/null +++ b/shares/share_splitting.go @@ -0,0 +1,168 @@ +package shares + +import ( + "errors" + "fmt" + "sort" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + coretypes "github.com/tendermint/tendermint/types" + "golang.org/x/exp/maps" +) + +var ( + ErrIncorrectNumberOfIndexes = errors.New( + "number of indexes is not identical to the number of blobs", + ) + ErrUnexpectedFirstBlobShareIndex = errors.New( + "the first blob started at an unexpected index", + ) +) + +// Split converts block data into encoded shares, optionally using share indexes +// that are encoded as wrapped transactions. Most use cases out of this package +// should use these share indexes and therefore set useShareIndexes to true. +func Split(data coretypes.Data, useShareIndexes bool) ([]Share, error) { + if data.SquareSize == 0 || !isPowerOf2(data.SquareSize) { + return nil, fmt.Errorf("square size is not a power of two: %d", data.SquareSize) + } + wantShareCount := int(data.SquareSize * data.SquareSize) + currentShareCount := 0 + + txShares, pfbTxShares, _, err := SplitTxs(data.Txs) + if err != nil { + return nil, err + } + currentShareCount += len(txShares) + len(pfbTxShares) + // blobIndexes will be nil if we are working with a list of txs that do not + // have a blob index. 
This preserves backwards compatibility with old blocks + // that do not follow the non-interactive defaults + blobIndexes := ExtractShareIndexes(data.Txs) + sort.Slice(blobIndexes, func(i, j int) bool { return blobIndexes[i] < blobIndexes[j] }) + + var padding []Share + if len(data.Blobs) > 0 { + blobShareStart, _ := NextMultipleOfBlobMinSquareSize( + currentShareCount, + SparseSharesNeeded(uint32(len(data.Blobs[0].Data))), + int(data.SquareSize), + ) + // force blobSharesStart to be the first share index + if len(blobIndexes) != 0 && useShareIndexes { + blobShareStart = int(blobIndexes[0]) + } + if blobShareStart < currentShareCount { + panic(fmt.Sprintf("blobShareStart %v < currentShareCount %v", blobShareStart, currentShareCount)) + } + + padding, err = NamespacePaddingShares(appns.ReservedPaddingNamespace, blobShareStart-currentShareCount) + if err != nil { + return nil, err + } + } + currentShareCount += len(padding) + + if blobIndexes != nil && int(blobIndexes[0]) < currentShareCount { + return nil, ErrUnexpectedFirstBlobShareIndex + } + + blobShares, err := SplitBlobs(currentShareCount, blobIndexes, data.Blobs, useShareIndexes) + if err != nil { + return nil, err + } + currentShareCount += len(blobShares) + tailShares, err := TailPaddingShares(wantShareCount - currentShareCount) + if err != nil { + return nil, err + } + shares := make([]Share, 0, data.SquareSize*data.SquareSize) + shares = append(append(append(append(append( + shares, + txShares...), + pfbTxShares...), + padding...), + blobShares...), + tailShares...) + return shares, nil +} + +// ExtractShareIndexes iterates over the transactions and extracts the share +// indexes from wrapped transactions. It returns nil if the transactions are +// from an old block that did not have share indexes in the wrapped txs. 
+func ExtractShareIndexes(txs coretypes.Txs) []uint32 { + var shareIndexes []uint32 + for _, rawTx := range txs { + if indexWrappedTxs, isIndexWrapped := coretypes.UnmarshalIndexWrapper(rawTx); isIndexWrapped { + // Since share index == 0 is invalid, it indicates that we are + // attempting to extract share indexes from txs that do not have any + // due to them being old. here we return nil to indicate that we are + // attempting to extract indexes from a block that doesn't support + // it. It checks for 0 because if there is a message in the block, + // then there must also be a tx, which will take up at least one + // share. + if len(indexWrappedTxs.ShareIndexes) == 0 { + return nil + } + shareIndexes = append(shareIndexes, indexWrappedTxs.ShareIndexes...) + } + } + + return shareIndexes +} + +func SplitTxs(txs coretypes.Txs) (txShares []Share, pfbShares []Share, shareRanges map[coretypes.TxKey]ShareRange, err error) { + txWriter := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + pfbTxWriter := NewCompactShareSplitter(appns.PayForBlobNamespace, appconsts.ShareVersionZero) + + for _, tx := range txs { + if _, isIndexWrapper := coretypes.UnmarshalIndexWrapper(tx); isIndexWrapper { + err = pfbTxWriter.WriteTx(tx) + } else { + err = txWriter.WriteTx(tx) + } + if err != nil { + return nil, nil, nil, err + } + } + + txShares, txMap, err := txWriter.Export(0) + if err != nil { + return nil, nil, nil, err + } + + pfbShares, pfbMap, err := pfbTxWriter.Export(len(txShares)) + if err != nil { + return nil, nil, nil, err + } + + return txShares, pfbShares, mergeMaps(txMap, pfbMap), nil +} + +func SplitBlobs(cursor int, indexes []uint32, blobs []coretypes.Blob, useShareIndexes bool) ([]Share, error) { + if useShareIndexes && len(indexes) != len(blobs) { + return nil, ErrIncorrectNumberOfIndexes + } + writer := NewSparseShareSplitter() + for i, blob := range blobs { + if err := writer.Write(blob); err != nil { + return nil, err + } + if useShareIndexes 
&& len(indexes) > i+1 { + paddedShareCount := int(indexes[i+1]) - (writer.Count() + cursor) + if err := writer.WriteNamespacedPaddedShares(paddedShareCount); err != nil { + return nil, err + } + } + } + return writer.Export(), nil +} + +// mergeMaps merges two maps into a new map. If there are any duplicate keys, +// the value in the second map takes precedence. +func mergeMaps(mapOne, mapTwo map[coretypes.TxKey]ShareRange) map[coretypes.TxKey]ShareRange { + merged := make(map[coretypes.TxKey]ShareRange, len(mapOne)+len(mapTwo)) + maps.Copy(merged, mapOne) + maps.Copy(merged, mapTwo) + return merged +} diff --git a/shares/share_splitting_test.go b/shares/share_splitting_test.go new file mode 100644 index 0000000000..a4dbfc68e9 --- /dev/null +++ b/shares/share_splitting_test.go @@ -0,0 +1,387 @@ +package shares + +import ( + "bytes" + "reflect" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" +) + +func TestSplitTxs_forTxShares(t *testing.T) { + smallTransactionA := coretypes.Tx{0xa} + smallTransactionB := coretypes.Tx{0xb} + largeTransaction := bytes.Repeat([]byte{0xc}, 512) + + type testCase struct { + name string + txs coretypes.Txs + want []Share + } + testCases := []testCase{ + { + name: "empty txs", + txs: coretypes.Txs{}, + want: []Share{}, + }, + { + name: "one small tx", + txs: coretypes.Txs{smallTransactionA}, + want: []Share{ + padShare(Share{ + data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x0, 0x2, // 1 byte (unit) + 1 byte (unit length) = 2 bytes sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 0x1, // unit length of first transaction + 0xa, // data of first transaction + }..., + ), + }, + ), + }, + }, + { + name: "two small txs", + txs: coretypes.Txs{smallTransactionA, smallTransactionB}, + want: 
[]Share{ + padShare(Share{ + data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x0, 0x4, // 2 bytes (first transaction) + 2 bytes (second transaction) = 4 bytes sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 0x1, // unit length of first transaction + 0xa, // data of first transaction + 0x1, // unit length of second transaction + 0xb, // data of second transaction + }..., + ), + }, + ), + }, + }, + { + name: "one large tx that spans two shares", + txs: coretypes.Txs{largeTransaction}, + want: []Share{ + fillShare(Share{ + data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x2, 0x2, // 512 (unit) + 2 (unit length) = 514 sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 128, 4, // unit length of transaction is 512 + }..., + ), + }, + 0xc, // data of transaction + ), + padShare(Share{ + data: append( + append( + appns.TxNamespace.Bytes(), + []byte{ + 0x0, // info byte + 0x0, 0x0, 0x0, 0x0, // reserved bytes + }..., + ), + bytes.Repeat([]byte{0xc}, 44)..., // continuation data of transaction + ), + }, + ), + }, + }, + { + name: "one small tx then one large tx that spans two shares", + txs: coretypes.Txs{smallTransactionA, largeTransaction}, + want: []Share{ + fillShare(Share{ + data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x2, 0x4, // 2 bytes (first transaction) + 514 bytes (second transaction) = 516 bytes sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 1, // unit length of first transaction + 0xa, // data of first transaction + 128, 4, // unit length of second transaction is 512 + }..., + ), + }, + 0xc, // data of second transaction + ), + padShare(Share{ + data: append( + append( + appns.TxNamespace.Bytes(), + []byte{ + 0x0, // info byte + 0x0, 0x0, 0x0, 0x0, // reserved bytes + }..., + ), + bytes.Repeat([]byte{0xc}, 46)..., // continuation data of second transaction + ), + }, + ), + }, + }, + { + name: "one large tx that spans 
two shares then one small tx", + txs: coretypes.Txs{largeTransaction, smallTransactionA}, + want: []Share{ + fillShare(Share{ + data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x2, 0x4, // 514 bytes (first transaction) + 2 bytes (second transaction) = 516 bytes sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 128, 4, // unit length of first transaction is 512 + }..., + ), + }, + 0xc, // data of first transaction + ), + padShare(Share{ + data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x0, // info byte + 0x0, 0x0, 0x0, 0x52, // reserved bytes + 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, // continuation data of first transaction + 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, // continuation data of first transaction + 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, // continuation data of first transaction + 1, // unit length of second transaction + 0xa, // data of second transaction + }..., + ), + }, + ), + }, + }, + } + for _, tt := range testCases { + t.Run(tt.name, func(t *testing.T) { + got, _, _, err := SplitTxs(tt.txs) + require.NoError(t, err) + if !reflect.DeepEqual(got, tt.want) { + t.Errorf("SplitTxs()\n got %#v\n want %#v", got, tt.want) + } + }) + } +} + +func TestSplitTxs(t *testing.T) { + type testCase struct { + name string + txs coretypes.Txs + wantTxShares []Share + wantPfbShares []Share + wantMap map[coretypes.TxKey]ShareRange + } + + smallTx := coretypes.Tx{0xa} // spans one share + smallTxShares := []Share{ + padShare(Share{ + data: append(appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x0, 0x2, // 1 byte (unit) + 1 byte (unit length) = 2 bytes sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 0x1, // unit length of first transaction + 0xa, // data of first transaction + }..., + ), + }, + ), + } + + pfbTx, err := coretypes.MarshalIndexWrapper(coretypes.Tx{0xb}, 10) // spans one 
share + require.NoError(t, err) + pfbTxShares := []Share{ + padShare(Share{ + data: append( + appns.PayForBlobNamespace.Bytes(), + []uint8{ + 0x1, // info byte + 0x0, 0x0, 0x0, 13, // 1 byte (unit) + 1 byte (unit length) = 2 bytes sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 12, // unit length of first transaction + 0xa, 0x1, 0xb, 0x12, 0x1, 0xa, 0x1a, 0x4, 0x49, 0x4e, 0x44, 0x58, // data of first transaction + }..., + ), + }, + ), + } + + largeTx := coretypes.Tx(bytes.Repeat([]byte{0xc}, appconsts.ShareSize)) // spans two shares + largeTxShares := []Share{ + fillShare(Share{ + data: append(appns.TxNamespace.Bytes(), + []uint8{ + 0x1, // info byte + 0x0, 0x0, 0x2, 0x2, // 512 (unit) + 2 (unit length) = 514 sequence length + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 128, 4, // unit length of transaction is 512 + }..., + ), + }, + 0xc), // data of transaction + padShare(Share{ + data: append( + append( + appns.TxNamespace.Bytes(), + []uint8{ + 0x0, // info byte + 0x0, 0x0, 0x0, 0x0, // reserved bytes + }..., + ), + bytes.Repeat([]byte{0xc}, 44)..., // continuation data of transaction + ), + }, + ), + } + + testCases := []testCase{ + { + name: "empty", + txs: coretypes.Txs{}, + wantTxShares: []Share{}, + wantPfbShares: []Share{}, + wantMap: map[coretypes.TxKey]ShareRange{}, + }, + { + name: "smallTx", + txs: coretypes.Txs{smallTx}, + wantTxShares: smallTxShares, + wantPfbShares: []Share{}, + wantMap: map[coretypes.TxKey]ShareRange{ + smallTx.Key(): {0, 0}, + }, + }, + { + name: "largeTx", + txs: coretypes.Txs{largeTx}, + wantTxShares: largeTxShares, + wantPfbShares: []Share{}, + wantMap: map[coretypes.TxKey]ShareRange{ + largeTx.Key(): {0, 1}, + }, + }, + { + name: "pfbTx", + txs: coretypes.Txs{pfbTx}, + wantTxShares: []Share{}, + wantPfbShares: pfbTxShares, + wantMap: map[coretypes.TxKey]ShareRange{ + pfbTx.Key(): {0, 0}, + }, + }, + { + name: "largeTx then pfbTx", + txs: coretypes.Txs{largeTx, pfbTx}, + wantTxShares: largeTxShares, + wantPfbShares: 
pfbTxShares, + wantMap: map[coretypes.TxKey]ShareRange{ + largeTx.Key(): {0, 1}, + pfbTx.Key(): {2, 2}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + txShares, pfbTxShares, gotMap, err := SplitTxs(tc.txs) + require.NoError(t, err) + assert.Equal(t, tc.wantTxShares, txShares) + assert.Equal(t, tc.wantPfbShares, pfbTxShares) + assert.Equal(t, tc.wantMap, gotMap) + }) + } +} + +// padShare returns a share padded with trailing zeros. +func padShare(share Share) (paddedShare Share) { + return fillShare(share, 0) +} + +// fillShare returns a share filled with filler so that the share length +// is equal to appconsts.ShareSize. +func fillShare(share Share, filler byte) (paddedShare Share) { + return Share{data: append(share.data, bytes.Repeat([]byte{filler}, appconsts.ShareSize-len(share.data))...)} +} + +func Test_mergeMaps(t *testing.T) { + type testCase struct { + name string + mapOne map[coretypes.TxKey]ShareRange + mapTwo map[coretypes.TxKey]ShareRange + want map[coretypes.TxKey]ShareRange + } + testCases := []testCase{ + { + name: "empty maps", + mapOne: map[coretypes.TxKey]ShareRange{}, + mapTwo: map[coretypes.TxKey]ShareRange{}, + want: map[coretypes.TxKey]ShareRange{}, + }, + { + name: "merges maps with one key each", + mapOne: map[coretypes.TxKey]ShareRange{ + {0x1}: {0, 1}, + }, + mapTwo: map[coretypes.TxKey]ShareRange{ + {0x2}: {2, 3}, + }, + want: map[coretypes.TxKey]ShareRange{ + {0x1}: {0, 1}, + {0x2}: {2, 3}, + }, + }, + { + name: "merges maps with multiple keys each", + mapOne: map[coretypes.TxKey]ShareRange{ + {0x1}: {0, 1}, + {0x2}: {2, 3}, + }, + mapTwo: map[coretypes.TxKey]ShareRange{ + {0x3}: {3, 3}, + {0x4}: {4, 4}, + }, + want: map[coretypes.TxKey]ShareRange{ + {0x1}: {0, 1}, + {0x2}: {2, 3}, + {0x3}: {3, 3}, + {0x4}: {4, 4}, + }, + }, + { + name: "merges maps with a duplicate key and the second map's value takes precedence", + mapOne: map[coretypes.TxKey]ShareRange{ + {0x1}: {0, 0}, + }, + mapTwo: 
map[coretypes.TxKey]ShareRange{ + {0x1}: {1, 1}, + }, + want: map[coretypes.TxKey]ShareRange{ + {0x1}: {1, 1}, + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got := mergeMaps(tc.mapOne, tc.mapTwo) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/shares/shares.go b/shares/shares.go new file mode 100644 index 0000000000..9653be5f43 --- /dev/null +++ b/shares/shares.go @@ -0,0 +1,213 @@ +package shares + +import ( + "bytes" + "encoding/binary" + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" +) + +// Share contains the raw share data (including namespace ID). +type Share struct { + data []byte +} + +func (s *Share) Namespace() (appns.Namespace, error) { + if len(s.data) < appns.NamespaceSize { + panic(fmt.Sprintf("share %s is too short to contain a namespace", s)) + } + return appns.From(s.data[:appns.NamespaceSize]) +} + +func (s *Share) InfoByte() (InfoByte, error) { + if len(s.data) < appns.NamespaceSize+appconsts.ShareInfoBytes { + return 0, fmt.Errorf("share %s is too short to contain an info byte", s) + } + // the info byte is the first byte after the namespace + unparsed := s.data[appns.NamespaceSize] + return ParseInfoByte(unparsed) +} + +func NewShare(data []byte) (*Share, error) { + if err := validateSize(data); err != nil { + return nil, err + } + return &Share{data}, nil +} + +func (s *Share) Validate() error { + return validateSize(s.data) +} + +func validateSize(data []byte) error { + if len(data) != appconsts.ShareSize { + return fmt.Errorf("share data must be %d bytes, got %d", appconsts.ShareSize, len(data)) + } + return nil +} + +func (s *Share) Len() int { + return len(s.data) +} + +func (s *Share) Version() (uint8, error) { + infoByte, err := s.InfoByte() + if err != nil { + return 0, err + } + return infoByte.Version(), nil +} + +func (s *Share) DoesSupportVersions(supportedShareVersions []uint8) error { + ver, err := 
s.Version() + if err != nil { + return err + } + if !bytes.Contains(supportedShareVersions, []byte{ver}) { + return fmt.Errorf("unsupported share version %v is not present in the list of supported share versions %v", ver, supportedShareVersions) + } + return nil +} + +// IsSequenceStart returns true if this is the first share in a sequence. +func (s *Share) IsSequenceStart() (bool, error) { + infoByte, err := s.InfoByte() + if err != nil { + return false, err + } + return infoByte.IsSequenceStart(), nil +} + +// IsCompactShare returns true if this is a compact share. +func (s Share) IsCompactShare() (bool, error) { + ns, err := s.Namespace() + if err != nil { + return false, err + } + isCompact := ns.IsTx() || ns.IsPayForBlob() + return isCompact, nil +} + +// SequenceLen returns the sequence length of this *share and optionally an +// error. It returns 0, nil if this is a continuation share (i.e. doesn't +// contain a sequence length). +func (s *Share) SequenceLen() (sequenceLen uint32, err error) { + isSequenceStart, err := s.IsSequenceStart() + if err != nil { + return 0, err + } + if !isSequenceStart { + return 0, nil + } + + start := appconsts.NamespaceSize + appconsts.ShareInfoBytes + end := start + appconsts.SequenceLenBytes + if len(s.data) < end { + return 0, fmt.Errorf("share %s with length %d is too short to contain a sequence length", + s, len(s.data)) + } + return binary.BigEndian.Uint32(s.data[start:end]), nil +} + +// IsPadding returns whether this *share is padding or not. 
+func (s *Share) IsPadding() (bool, error) {
+	namespacePadding, err := s.isNamespacePadding()
+	if err != nil {
+		return false, err
+	}
+	tailPadding, err := s.isTailPadding()
+	if err != nil {
+		return false, err
+	}
+	reservedPadding, err := s.isReservedPadding()
+	if err != nil {
+		return false, err
+	}
+	return namespacePadding || tailPadding || reservedPadding, nil
+}
+
+// isNamespacePadding reports whether this share starts a sequence whose
+// declared sequence length is zero.
+func (s *Share) isNamespacePadding() (bool, error) {
+	start, err := s.IsSequenceStart()
+	if err != nil {
+		return false, err
+	}
+	seqLen, err := s.SequenceLen()
+	if err != nil {
+		return false, err
+	}
+
+	return start && seqLen == 0, nil
+}
+
+// isTailPadding reports whether this share's namespace is the tail padding
+// namespace.
+func (s *Share) isTailPadding() (bool, error) {
+	namespace, err := s.Namespace()
+	if err != nil {
+		return false, err
+	}
+	return namespace.IsTailPadding(), nil
+}
+
+// isReservedPadding reports whether this share's namespace is the reserved
+// padding namespace.
+func (s *Share) isReservedPadding() (bool, error) {
+	namespace, err := s.Namespace()
+	if err != nil {
+		return false, err
+	}
+	return namespace.IsReservedPadding(), nil
+}
+
+// ToBytes returns the share's underlying bytes, namespace included.
+func (s *Share) ToBytes() []byte {
+	return s.data
+}
+
+// RawData returns the raw share data. The raw share data does not contain the
+// namespace ID, info byte, sequence length, or reserved bytes.
+func (s *Share) RawData() (rawData []byte, err error) { + if len(s.data) < s.rawDataStartIndex() { + return rawData, fmt.Errorf("share %s is too short to contain raw data", s) + } + + return s.data[s.rawDataStartIndex():], nil +} + +func (s *Share) rawDataStartIndex() int { + isStart, err := s.IsSequenceStart() + if err != nil { + panic(err) + } + isCompact, err := s.IsCompactShare() + if err != nil { + panic(err) + } + if isStart && isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + appconsts.CompactShareReservedBytes + } else if isStart && !isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.SequenceLenBytes + } else if !isStart && isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + appconsts.CompactShareReservedBytes + } else if !isStart && !isCompact { + return appconsts.NamespaceSize + appconsts.ShareInfoBytes + } else { + panic(fmt.Sprintf("unable to determine the rawDataStartIndex for share %s", s.data)) + } +} + +func ToBytes(shares []Share) (bytes [][]byte) { + bytes = make([][]byte, len(shares)) + for i, share := range shares { + bytes[i] = []byte(share.data) + } + return bytes +} + +func FromBytes(bytes [][]byte) (shares []Share, err error) { + for _, b := range bytes { + share, err := NewShare(b) + if err != nil { + return nil, err + } + shares = append(shares, *share) + } + return shares, nil +} diff --git a/shares/shares_test.go b/shares/shares_test.go new file mode 100644 index 0000000000..76758b484a --- /dev/null +++ b/shares/shares_test.go @@ -0,0 +1,325 @@ +package shares + +import ( + "bytes" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + tmrand "github.com/tendermint/tendermint/libs/rand" + coretypes "github.com/tendermint/tendermint/types" +) + +// TestPadFirstIndexedBlob ensures 
that we are adding padding to the first share +// instead of calculating the value. +func TestPadFirstIndexedBlob(t *testing.T) { + tx := tmrand.Bytes(300) + blob := tmrand.Bytes(300) + index := 100 + indexedTx, err := coretypes.MarshalIndexWrapper(tx, 100) + require.NoError(t, err) + + bd := coretypes.Data{ + Txs: []coretypes.Tx{indexedTx}, + Blobs: []coretypes.Blob{ + { + NamespaceVersion: appns.RandomBlobNamespace().Version, + NamespaceID: appns.RandomBlobNamespace().ID, + Data: blob, + ShareVersion: appconsts.ShareVersionZero, + }, + }, + SquareSize: 64, + } + + shares, err := Split(bd, true) + require.NoError(t, err) + + resShare, err := shares[index].RawData() + require.NoError(t, err) + + require.True(t, bytes.Contains(resShare, blob)) +} + +func TestSequenceLen(t *testing.T) { + type testCase struct { + name string + share Share + wantLen uint32 + wantErr bool + } + sparseNamespaceID := bytes.Repeat([]byte{1}, appconsts.NamespaceSize) + firstShare := append(sparseNamespaceID, + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + firstShareWithLongSequence := append(sparseNamespaceID, + []byte{ + 1, // info byte + 0, 0, 1, 67, // sequence len + }...) + continuationShare := append(sparseNamespaceID, + []byte{ + 0, // info byte + }...) + compactShare := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + }...) + noInfoByte := appns.TxNamespace.Bytes() + noSequenceLen := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + }...) 
+ testCases := []testCase{ + { + name: "first share", + share: Share{data: firstShare}, + wantLen: 10, + wantErr: false, + }, + { + name: "first share with long sequence", + share: Share{data: firstShareWithLongSequence}, + wantLen: 323, + wantErr: false, + }, + { + name: "continuation share", + share: Share{data: continuationShare}, + wantLen: 0, + wantErr: false, + }, + { + name: "compact share", + share: Share{data: compactShare}, + wantLen: 10, + wantErr: false, + }, + { + name: "no info byte returns error", + share: Share{data: noInfoByte}, + wantLen: 0, + wantErr: true, + }, + { + name: "no sequence len returns error", + share: Share{data: noSequenceLen}, + wantLen: 0, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + len, err := tc.share.SequenceLen() + + if tc.wantErr { + assert.Error(t, err) + return + } + if tc.wantLen != len { + t.Errorf("want %d, got %d", tc.wantLen, len) + } + }) + } +} + +func TestRawData(t *testing.T) { + type testCase struct { + name string + share Share + want []byte + wantErr bool + } + sparseNamespaceID := appns.MustNewV0(bytes.Repeat([]byte{0x1}, appns.NamespaceVersionZeroIDSize)) + firstSparseShare := append( + sparseNamespaceID.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + continuationSparseShare := append( + sparseNamespaceID.Bytes(), + []byte{ + 0, // info byte + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + firstCompactShare := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 10, // sequence len + 0, 0, 0, 15, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + continuationCompactShare := append(appns.TxNamespace.Bytes(), + []byte{ + 0, // info byte + 0, 0, 0, 0, // reserved bytes + 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, // data + }...) + noSequenceLen := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + }...) 
+ notEnoughSequenceLenBytes := append(appns.TxNamespace.Bytes(), + []byte{ + 1, // info byte + 0, 0, 10, // sequence len + }...) + testCases := []testCase{ + { + name: "first sparse share", + share: Share{data: firstSparseShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation sparse share", + share: Share{data: continuationSparseShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "first compact share", + share: Share{data: firstCompactShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "continuation compact share", + share: Share{data: continuationCompactShare}, + want: []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + }, + { + name: "no sequence len returns error", + share: Share{data: noSequenceLen}, + wantErr: true, + }, + { + name: "not enough sequence len bytes returns error", + share: Share{data: notEnoughSequenceLenBytes}, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + rawData, err := tc.share.RawData() + if tc.wantErr { + assert.Error(t, err) + return + } + assert.Equal(t, tc.want, rawData) + }) + } +} + +func TestIsCompactShare(t *testing.T) { + type testCase struct { + name string + share Share + want bool + } + + ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) + txShare, _ := zeroPadIfNecessary(appns.TxNamespace.Bytes(), appconsts.ShareSize) + pfbTxShare, _ := zeroPadIfNecessary(appns.PayForBlobNamespace.Bytes(), appconsts.ShareSize) + blobShare, _ := zeroPadIfNecessary(ns1.Bytes(), appconsts.ShareSize) + + testCases := []testCase{ + { + name: "tx share", + share: Share{data: txShare}, + want: true, + }, + { + name: "pfb tx share", + share: Share{data: pfbTxShare}, + want: true, + }, + { + name: "blob share", + share: Share{data: blobShare}, + want: false, + }, + } + + for _, tc := range testCases { + got, err := tc.share.IsCompactShare() + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + } +} + 
+func TestIsPadding(t *testing.T) { + type testCase struct { + name string + share Share + want bool + wantErr bool + } + emptyShare := Share{} + blobShare, _ := zeroPadIfNecessary( + append( + ns1.Bytes(), + []byte{ + 1, // info byte + 0, 0, 0, 1, // sequence len + 0xff, // data + }..., + ), + appconsts.ShareSize) + + nsPadding, err := NamespacePaddingShare(ns1) + require.NoError(t, err) + + tailPadding, err := TailPaddingShare() + require.NoError(t, err) + + reservedPaddingShare, err := ReservedPaddingShare() + require.NoError(t, err) + + testCases := []testCase{ + { + name: "empty share", + share: emptyShare, + wantErr: true, + }, + { + name: "blob share", + share: Share{data: blobShare}, + want: false, + }, + { + name: "namespace padding", + share: nsPadding, + want: true, + }, + { + name: "tail padding", + share: tailPadding, + want: true, + }, + { + name: "reserved padding", + share: reservedPaddingShare, + want: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := tc.share.IsPadding() + if tc.wantErr { + assert.Error(t, err) + return + } + require.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} diff --git a/shares/sparse_shares_test.go b/shares/sparse_shares_test.go new file mode 100644 index 0000000000..278cf432a7 --- /dev/null +++ b/shares/sparse_shares_test.go @@ -0,0 +1,86 @@ +package shares + +import ( + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/celestiaorg/celestia-app/testutil/testfactory" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" +) + +func TestSparseShareContainsInfoByte(t *testing.T) { + blob := testfactory.GenerateRandomBlobOfShareCount(4) + + sequenceStartInfoByte, err := NewInfoByte(appconsts.ShareVersionZero, true) + require.NoError(t, err) + + sequenceContinuationInfoByte, err := NewInfoByte(appconsts.ShareVersionZero, false) + require.NoError(t, err) + + 
type testCase struct { + name string + shareIndex int + expected InfoByte + } + testCases := []testCase{ + { + name: "first share of blob", + shareIndex: 0, + expected: sequenceStartInfoByte, + }, + { + name: "second share of blob", + shareIndex: 1, + expected: sequenceContinuationInfoByte, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sss := NewSparseShareSplitter() + err := sss.Write(blob) + assert.NoError(t, err) + shares := sss.Export() + got, err := shares[tc.shareIndex].InfoByte() + require.NoError(t, err) + assert.Equal(t, tc.expected, got) + }) + } +} + +func TestSparseShareSplitterCount(t *testing.T) { + type testCase struct { + name string + blob coretypes.Blob + expected int + } + testCases := []testCase{ + { + name: "one share", + blob: testfactory.GenerateRandomBlobOfShareCount(1), + expected: 1, + }, + { + name: "two shares", + blob: testfactory.GenerateRandomBlobOfShareCount(2), + expected: 2, + }, + { + name: "ten shares", + blob: testfactory.GenerateRandomBlobOfShareCount(10), + expected: 10, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + sss := NewSparseShareSplitter() + err := sss.Write(tc.blob) + assert.NoError(t, err) + got := sss.Count() + assert.Equal(t, tc.expected, got) + }) + } +} diff --git a/shares/split_compact_shares.go b/shares/split_compact_shares.go new file mode 100644 index 0000000000..b32561b671 --- /dev/null +++ b/shares/split_compact_shares.go @@ -0,0 +1,232 @@ +package shares + +import ( + "encoding/binary" + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + coretypes "github.com/tendermint/tendermint/types" +) + +type ShareRange struct { + // Start is the index of the first share occupied by this range. + Start int + // End is the index of the last share occupied by this range. 
+ End int +} + +// CompactShareSplitter will write raw data compactly across a progressively +// increasing set of shares. It is used to lazily split block data such as +// transactions or intermediate state roots into shares. +type CompactShareSplitter struct { + shares []Share + // pendingShare Share + shareBuilder *Builder + namespace appns.Namespace + done bool + shareVersion uint8 + // shareRanges is a map from a transaction key to the range of shares it + // occupies. The range assumes this compact share splitter is the only + // thing in the data square (e.g. the range for the first tx starts at index + // 0). + shareRanges map[coretypes.TxKey]ShareRange +} + +// NewCompactShareSplitter returns a CompactShareSplitter using the provided +// namespace and shareVersion. +func NewCompactShareSplitter(ns appns.Namespace, shareVersion uint8) *CompactShareSplitter { + sb, err := NewBuilder(ns, shareVersion, true).Init() + if err != nil { + panic(err) + } + + return &CompactShareSplitter{ + shares: []Share{}, + namespace: ns, + shareVersion: shareVersion, + shareRanges: map[coretypes.TxKey]ShareRange{}, + shareBuilder: sb, + } +} + +// WriteTx adds the delimited data for the provided tx to the underlying compact +// share splitter. +func (css *CompactShareSplitter) WriteTx(tx coretypes.Tx) error { + rawData, err := MarshalDelimitedTx(tx) + if err != nil { + return fmt.Errorf("included Tx in mem-pool that can not be encoded %v", tx) + } + + startShare := len(css.shares) + + if err := css.write(rawData); err != nil { + return err + } + endShare := css.Count() - 1 + + css.shareRanges[tx.Key()] = ShareRange{ + Start: startShare, + End: endShare, + } + return nil +} + +// write adds the delimited data to the underlying compact shares. 
+func (css *CompactShareSplitter) write(rawData []byte) error {
+	if css.done {
+		// A prior Export finalized this splitter; clear the done flag so
+		// writing can resume.
+		// NOTE(review): the pop below only fires when the builder still holds
+		// data; after Export the builder appears to be freshly re-created by
+		// stackPending — confirm the intended interplay with Export.
+		// remove the last element
+		if !css.shareBuilder.IsEmptyShare() {
+			css.shares = css.shares[:len(css.shares)-1]
+		}
+		css.done = false
+	}
+
+	// Let the builder write reserved bytes if this share still needs them;
+	// the builder decides whether they apply.
+	if err := css.shareBuilder.MaybeWriteReservedBytes(); err != nil {
+		return err
+	}
+
+	for {
+		rawDataLeftOver := css.shareBuilder.AddData(rawData)
+		if rawDataLeftOver == nil {
+			// All remaining rawData fit into the current pending share.
+			break
+		}
+		// The pending share is full: stack it and continue writing the
+		// leftover bytes into a fresh builder.
+		if err := css.stackPending(); err != nil {
+			return err
+		}
+
+		rawData = rawDataLeftOver
+	}
+
+	// If the data ended exactly at a share boundary, stack the now-full
+	// pending share so the next write starts a fresh one.
+	if css.shareBuilder.AvailableBytes() == 0 {
+		if err := css.stackPending(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// stackPending will build & add the pending share to accumulated shares
+func (css *CompactShareSplitter) stackPending() error {
+	pendingShare, err := css.shareBuilder.Build()
+	if err != nil {
+		return err
+	}
+	css.shares = append(css.shares, *pendingShare)
+
+	// Now we need to create a new builder
+	// (isFirstShare=false: continuation shares do not carry a sequence
+	// length).
+	css.shareBuilder, err = NewBuilder(css.namespace, css.shareVersion, false).Init()
+	return err
+}
+
+// Export finalizes and returns the underlying compact shares and a map of
+// shareRanges. All share ranges in the map of shareRanges will be offset (i.e.
+// incremented) by the shareRangeOffset provided. shareRangeOffset should be 0
+// for the first compact share sequence in the data square (transactions) but
+// should be some non-zero number for subsequent compact share sequences (e.g.
+// pfb txs).
+func (css *CompactShareSplitter) Export(shareRangeOffset int) ([]Share, map[coretypes.TxKey]ShareRange, error) { + // apply the shareRangeOffset to all share ranges + shareRanges := make(map[coretypes.TxKey]ShareRange, len(css.shareRanges)) + + if css.isEmpty() { + return []Share{}, shareRanges, nil + } + + for k, v := range css.shareRanges { + shareRanges[k] = ShareRange{ + Start: v.Start + shareRangeOffset, + End: v.End + shareRangeOffset, + } + } + + // in case Export is called multiple times + if css.done { + return css.shares, shareRanges, nil + } + + var bytesOfPadding int + // add the pending share to the current shares before returning + if !css.shareBuilder.IsEmptyShare() { + bytesOfPadding = css.shareBuilder.ZeroPadIfNecessary() + if err := css.stackPending(); err != nil { + return []Share{}, shareRanges, err + } + } + + sequenceLen := css.sequenceLen(bytesOfPadding) + if err := css.writeSequenceLen(sequenceLen); err != nil { + return []Share{}, shareRanges, err + } + css.done = true + return css.shares, shareRanges, nil +} + +// writeSequenceLen writes the sequence length to the first share. +func (css *CompactShareSplitter) writeSequenceLen(sequenceLen uint32) error { + if css.isEmpty() { + return nil + } + + // We may find a more efficient way to write seqLen + b, err := NewBuilder(css.namespace, css.shareVersion, true).Init() + if err != nil { + return err + } + b.ImportRawShare(css.shares[0].ToBytes()) + if err := b.WriteSequenceLen(sequenceLen); err != nil { + return err + } + + firstShare, err := b.Build() + if err != nil { + return err + } + + // replace existing first share with new first share + css.shares[0] = *firstShare + + return nil +} + +// sequenceLen returns the total length in bytes of all units (transactions or +// intermediate state roots) written to this splitter. sequenceLen does not +// include the number of bytes occupied by the namespace ID, the share info +// byte, or the reserved bytes. 
sequenceLen does include the unit length +// delimiter prefixed to each unit. +func (css *CompactShareSplitter) sequenceLen(bytesOfPadding int) uint32 { + if len(css.shares) == 0 { + return 0 + } + if len(css.shares) == 1 { + return uint32(appconsts.FirstCompactShareContentSize) - uint32(bytesOfPadding) + } + + continuationSharesCount := len(css.shares) - 1 + continuationSharesSequenceLen := continuationSharesCount * appconsts.ContinuationCompactShareContentSize + return uint32(appconsts.FirstCompactShareContentSize + continuationSharesSequenceLen - bytesOfPadding) +} + +// isEmpty returns whether this compact share splitter is empty. +func (css *CompactShareSplitter) isEmpty() bool { + return len(css.shares) == 0 && css.shareBuilder.IsEmptyShare() +} + +// Count returns the number of shares that would be made if `Export` was invoked +// on this compact share splitter. +func (css *CompactShareSplitter) Count() (shareCount int) { + if !css.shareBuilder.IsEmptyShare() && !css.done { + // pending share is non-empty, so it will be zero padded and added to shares during export + return len(css.shares) + 1 + } + return len(css.shares) +} + +// MarshalDelimitedTx prefixes a transaction with the length of the transaction +// encoded as a varint. 
+func MarshalDelimitedTx(tx coretypes.Tx) ([]byte, error) { + lenBuf := make([]byte, binary.MaxVarintLen64) + length := uint64(len(tx)) + n := binary.PutUvarint(lenBuf, length) + return append(lenBuf[:n], tx...), nil +} diff --git a/shares/split_compact_shares_test.go b/shares/split_compact_shares_test.go new file mode 100644 index 0000000000..596720789f --- /dev/null +++ b/shares/split_compact_shares_test.go @@ -0,0 +1,382 @@ +package shares + +import ( + "bytes" + "testing" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + coretypes "github.com/tendermint/tendermint/types" +) + +func TestCount(t *testing.T) { + type testCase struct { + transactions []coretypes.Tx + wantShareCount int + } + testCases := []testCase{ + {transactions: []coretypes.Tx{}, wantShareCount: 0}, + {transactions: []coretypes.Tx{[]byte{0}}, wantShareCount: 1}, + {transactions: []coretypes.Tx{bytes.Repeat([]byte{1}, 100)}, wantShareCount: 1}, + // Test with 1 byte over 1 share + {transactions: []coretypes.Tx{bytes.Repeat([]byte{1}, rawTxSize(appconsts.FirstCompactShareContentSize+1))}, wantShareCount: 2}, + {transactions: []coretypes.Tx{generateTx(1)}, wantShareCount: 1}, + {transactions: []coretypes.Tx{generateTx(2)}, wantShareCount: 2}, + {transactions: []coretypes.Tx{generateTx(20)}, wantShareCount: 20}, + } + for _, tc := range testCases { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + for _, transaction := range tc.transactions { + err := css.WriteTx(transaction) + require.NoError(t, err) + } + got := css.Count() + if got != tc.wantShareCount { + t.Errorf("count got %d want %d", got, tc.wantShareCount) + } + } +} + +// generateTx generates a transaction that occupies exactly numShares number of +// shares. 
+func generateTx(numShares int) coretypes.Tx { + if numShares == 0 { + return coretypes.Tx{} + } + if numShares == 1 { + return bytes.Repeat([]byte{1}, rawTxSize(appconsts.FirstCompactShareContentSize)) + } + return bytes.Repeat([]byte{2}, rawTxSize(appconsts.FirstCompactShareContentSize+(numShares-1)*appconsts.ContinuationCompactShareContentSize)) +} + +func TestExport_write(t *testing.T) { + type testCase struct { + name string + want []Share + writeBytes [][]byte + } + + oneShare, _ := zeroPadIfNecessary( + append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x0, 0x1, // sequence len + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + 0xf, // data + }..., + ), + appconsts.ShareSize) + + firstShare := fillShare(Share{data: append( + appns.TxNamespace.Bytes(), + []byte{ + 0x1, // info byte + 0x0, 0x0, 0x2, 0x0, // sequence len + 0x0, 0x0, 0x0, 0x2a, // reserved bytes + }..., + )}, 0xf) + + continuationShare, _ := zeroPadIfNecessary( + append( + appns.TxNamespace.Bytes(), + append( + []byte{ + 0x0, // info byte + 0x0, 0x0, 0x0, 0x0, // reserved bytes + }, bytes.Repeat([]byte{0xf}, appconsts.NamespaceSize+appconsts.ShareInfoBytes+appconsts.SequenceLenBytes+appconsts.CompactShareReservedBytes)..., // data + )..., + ), + appconsts.ShareSize) + + testCases := []testCase{ + { + name: "empty", + want: []Share{}, + }, + { + name: "one share with small sequence len", + want: []Share{ + {data: oneShare}, + }, + writeBytes: [][]byte{{0xf}}, + }, + { + name: "two shares with big sequence len", + want: []Share{ + firstShare, + {data: continuationShare}, + }, + writeBytes: [][]byte{bytes.Repeat([]byte{0xf}, 512)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + for _, bytes := range tc.writeBytes { + err := css.write(bytes) + require.NoError(t, err) + } + got, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, tc.want, got) + + 
shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, got, shares) + assert.Len(t, got, css.Count()) + }) + } +} + +func TestWriteAndExportIdempotence(t *testing.T) { + type testCase struct { + name string + txs []coretypes.Tx + wantLen int + } + testCases := []testCase{ + { + name: "one tx that occupies exactly one share", + txs: []coretypes.Tx{generateTx(1)}, + wantLen: 1, + }, + { + name: "one tx that occupies exactly two shares", + txs: []coretypes.Tx{generateTx(2)}, + wantLen: 2, + }, + { + name: "one tx that occupies exactly three shares", + txs: []coretypes.Tx{generateTx(3)}, + wantLen: 3, + }, + { + name: "two txs that occupy exactly two shares", + txs: []coretypes.Tx{ + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + }, + wantLen: 2, + }, + { + name: "three txs that occupy exactly three shares", + txs: []coretypes.Tx{ + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + }, + wantLen: 3, + }, + { + name: "four txs that occupy three full shares and one partial share", + txs: []coretypes.Tx{ + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)), + []byte{0xf}, + }, + wantLen: 4, + }, + } + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + + for _, tx := range tc.txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + + assert.Equal(t, tc.wantLen, css.Count()) + shares, _, err := css.Export(0) + require.NoError(t, err) + 
assert.Equal(t, tc.wantLen, len(shares)) + }) + } +} + +func TestExport(t *testing.T) { + type testCase struct { + name string + txs []coretypes.Tx + want map[coretypes.TxKey]ShareRange + shareRangeOffset int + } + + txOne := coretypes.Tx{0x1} + txTwo := coretypes.Tx(bytes.Repeat([]byte{2}, 600)) + txThree := coretypes.Tx(bytes.Repeat([]byte{3}, 1000)) + exactlyOneShare := coretypes.Tx(bytes.Repeat([]byte{4}, rawTxSize(appconsts.FirstCompactShareContentSize))) + exactlyTwoShares := coretypes.Tx(bytes.Repeat([]byte{5}, rawTxSize(appconsts.FirstCompactShareContentSize+appconsts.ContinuationCompactShareContentSize))) + + testCases := []testCase{ + { + name: "empty", + txs: []coretypes.Tx{}, + want: map[coretypes.TxKey]ShareRange{}, + }, + { + name: "txOne occupies shares 0 to 0", + txs: []coretypes.Tx{ + txOne, + }, + want: map[coretypes.TxKey]ShareRange{ + txOne.Key(): {0, 0}, + }, + }, + { + name: "txTwo occupies shares 0 to 1", + txs: []coretypes.Tx{ + txTwo, + }, + want: map[coretypes.TxKey]ShareRange{ + txTwo.Key(): {0, 1}, + }, + }, + { + name: "txThree occupies shares 0 to 2", + txs: []coretypes.Tx{ + txThree, + }, + want: map[coretypes.TxKey]ShareRange{ + txThree.Key(): {0, 2}, + }, + }, + { + name: "txOne occupies shares 0 to 0, txTwo occupies shares 0 to 1, txThree occupies shares 1 to 3", + txs: []coretypes.Tx{ + txOne, + txTwo, + txThree, + }, + want: map[coretypes.TxKey]ShareRange{ + txOne.Key(): {0, 0}, + txTwo.Key(): {0, 1}, + txThree.Key(): {1, 3}, + }, + }, + + { + name: "exactly one share occupies shares 0 to 0", + txs: []coretypes.Tx{ + exactlyOneShare, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyOneShare.Key(): {0, 0}, + }, + }, + { + name: "exactly two shares occupies shares 0 to 1", + txs: []coretypes.Tx{ + exactlyTwoShares, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyTwoShares.Key(): {0, 1}, + }, + }, + { + name: "two shares followed by one share", + txs: []coretypes.Tx{ + exactlyTwoShares, + exactlyOneShare, + }, + want: 
map[coretypes.TxKey]ShareRange{ + exactlyTwoShares.Key(): {0, 1}, + exactlyOneShare.Key(): {2, 2}, + }, + }, + { + name: "one share followed by two shares", + txs: []coretypes.Tx{ + exactlyOneShare, + exactlyTwoShares, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyOneShare.Key(): {0, 0}, + exactlyTwoShares.Key(): {1, 2}, + }, + }, + { + name: "one share followed by two shares offset by 10", + txs: []coretypes.Tx{ + exactlyOneShare, + exactlyTwoShares, + }, + want: map[coretypes.TxKey]ShareRange{ + exactlyOneShare.Key(): {10, 10}, + exactlyTwoShares.Key(): {11, 12}, + }, + shareRangeOffset: 10, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + + for _, tx := range tc.txs { + err := css.WriteTx(tx) + require.NoError(t, err) + } + + _, got, err := css.Export(tc.shareRangeOffset) + require.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestWriteAfterExport(t *testing.T) { + a := bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.FirstCompactShareContentSize)) + b := bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize*2)) + c := bytes.Repeat([]byte{0xf}, rawTxSize(appconsts.ContinuationCompactShareContentSize)) + d := []byte{0xf} + + css := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + shares, _, err := css.Export(0) + require.NoError(t, err) + assert.Equal(t, 0, len(shares)) + + err = css.WriteTx(a) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 1, len(shares)) + + err = css.WriteTx(b) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 3, len(shares)) + + err = css.WriteTx(c) + require.NoError(t, err) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 4, len(shares)) + + err = css.WriteTx(d) + require.NoError(t, err) + + shares, _, err = 
css.Export(0) + require.NoError(t, err) + assert.Equal(t, 5, len(shares)) + + shares, _, err = css.Export(0) + require.NoError(t, err) + assert.Equal(t, 5, len(shares)) +} + +// rawTxSize returns the raw tx size that can be used to construct a +// tx of desiredSize bytes. This function is useful in tests to account for +// the length delimiter that is prefixed to a tx. +func rawTxSize(desiredSize int) int { + return desiredSize - DelimLen(uint64(desiredSize)) +} diff --git a/shares/split_sparse_shares.go b/shares/split_sparse_shares.go new file mode 100644 index 0000000000..15788b5163 --- /dev/null +++ b/shares/split_sparse_shares.go @@ -0,0 +1,128 @@ +package shares + +import ( + "errors" + "fmt" + + "github.com/celestiaorg/celestia-app/pkg/appconsts" + appns "github.com/celestiaorg/celestia-app/pkg/namespace" + coretypes "github.com/tendermint/tendermint/types" + "golang.org/x/exp/slices" +) + +// SparseShareSplitter lazily splits blobs into shares that will eventually be +// included in a data square. It also has methods to help progressively count +// how many shares the blobs written take up. +type SparseShareSplitter struct { + shares []Share +} + +func NewSparseShareSplitter() *SparseShareSplitter { + return &SparseShareSplitter{} +} + +// Write writes the provided blob to this sparse share splitter. It returns an +// error or nil if no error is encountered. 
+func (sss *SparseShareSplitter) Write(blob coretypes.Blob) error { + if !slices.Contains(appconsts.SupportedShareVersions, blob.ShareVersion) { + return fmt.Errorf("unsupported share version: %d", blob.ShareVersion) + } + + rawData := blob.Data + blobNamespace, err := appns.New(blob.NamespaceVersion, blob.NamespaceID) + if err != nil { + return err + } + + // First share + b, err := NewBuilder(blobNamespace, blob.ShareVersion, true).Init() + if err != nil { + return err + } + if err := b.WriteSequenceLen(uint32(len(rawData))); err != nil { + return err + } + + for rawData != nil { + + rawDataLeftOver := b.AddData(rawData) + if rawDataLeftOver == nil { + // Just call it on the latest share + b.ZeroPadIfNecessary() + } + + share, err := b.Build() + if err != nil { + return err + } + sss.shares = append(sss.shares, *share) + + b, err = NewBuilder(blobNamespace, blob.ShareVersion, false).Init() + if err != nil { + return err + } + rawData = rawDataLeftOver + } + + return nil +} + +// RemoveBlob will remove a blob from the underlying blob state. If +// there is namespaced padding after the blob, then that is also removed. +func (sss *SparseShareSplitter) RemoveBlob(i int) (int, error) { + j := 1 + initialCount := len(sss.shares) + if len(sss.shares) > i+1 { + sequenceLen, err := sss.shares[i+1].SequenceLen() + if err != nil { + return 0, err + } + // 0 means that there is padding after the share that we are about to + // remove. to remove this padding, we increase j by 1 + // with the blob + if sequenceLen == 0 { + j++ + } + } + copy(sss.shares[i:], sss.shares[i+j:]) + sss.shares = sss.shares[:len(sss.shares)-j] + newCount := len(sss.shares) + return initialCount - newCount, nil +} + +// WriteNamespacedPaddedShares adds empty shares using the namespace of the last written share. +// This is useful to follow the message layout rules. It assumes that at least +// one share has already been written, if not it panics. 
+func (sss *SparseShareSplitter) WriteNamespacedPaddedShares(count int) error { + if len(sss.shares) == 0 { + return errors.New("cannot write empty namespaced shares on an empty SparseShareSplitter") + } + if count < 0 { + return errors.New("cannot write negative namespaced shares") + } + if count == 0 { + return nil + } + lastBlob := sss.shares[len(sss.shares)-1] + lastBlobNs, err := lastBlob.Namespace() + if err != nil { + return err + } + nsPaddingShares, err := NamespacePaddingShares(lastBlobNs, count) + if err != nil { + return err + } + sss.shares = append(sss.shares, nsPaddingShares...) + + return nil +} + +// Export finalizes and returns the underlying shares. +func (sss *SparseShareSplitter) Export() []Share { + return sss.shares +} + +// Count returns the current number of shares that will be made if exporting. +func (sss *SparseShareSplitter) Count() int { + return len(sss.shares) +} diff --git a/shares/split_sparse_shares_test.go b/shares/split_sparse_shares_test.go new file mode 100644 index 0000000000..550f0c0465 --- /dev/null +++ b/shares/split_sparse_shares_test.go @@ -0,0 +1 @@ +package shares diff --git a/shares/testdata/sample-block.json b/shares/testdata/sample-block.json new file mode 100755 index 0000000000..9041fcf72d --- /dev/null +++ b/shares/testdata/sample-block.json @@ -0,0 +1,62 @@ +{ + "header": { + "version": { + "block": 11 + }, + "chain_id": "private", + "height": 4, + "time": "2023-04-11T20:52:54.923092Z", + "last_block_id": { + "hash": "TLPtmfe84IKmiOU8K45MwPawkYq9XAx8KIYdQltNNXs=", + "part_set_header": { + "total": 1, + "hash": "7HS0mrbP9UNbYk2YMkA8oigEvHEErzs3rC0i1RkloUY=" + } + }, + "last_commit_hash": "DTjLVpzuBBOdujbAcw8nnQ4ACGxXwt7JJ0u4FiBGcGk=", + "data_hash": "ecqvIuCjhlW76pE31tEJcRhohEODd3lo9YBNaza8HGg=", + "validators_hash": "T46pWC4iIg0DjiF1DpCCv4UVlqcP2piZ7+/Is/cLRPo=", + "next_validators_hash": "T46pWC4iIg0DjiF1DpCCv4UVlqcP2piZ7+/Is/cLRPo=", + "consensus_hash": "BICRvH3cKD93v7+R1zxE2ljD34qcvIZ0Bdi389qtoi8=", + 
"app_hash": "hYTcK/JXCrBCS9C6gfDzYdBnx2dV3cTty21r1a+AbPA=", + "last_results_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=", + "evidence_hash": "47DEQpj8HBSa+/TImW+5JCeuQeRkm5NMpJWZG3hSuFU=", + "proposer_address": "8TeEky8GnBxKq/c2jmFemTVQCy0=" + }, + "data": { + "txs": [ + "CsUCCqQBCqEBCiAvY2VsZXN0aWEuYmxvYi52MS5Nc2dQYXlGb3JCbG9icxJ9Ci9jZWxlc3RpYTFuY3Y3bGh4NDRndnR5NmdmOGMzZjVoeTJneWx2NGRhOW4yZ3ZmMxIhAAAAAAAAAAAAAAAAAAAAAAAAAAAAAADh/+2uFQ/iYjVRGgKSGiIgmcwTeaaECDCWTmNHehI5AtEkDFkxYs8YT/63si4foiVCAQASWgpOCkYKHy9jb3Ntb3MuY3J5cHRvLnNlY3AyNTZrMS5QdWJLZXkSIwohA1ja+jKt+kagxAbHlZx4wzHALPC0YPqCn1DlHH6FHSpAEgQKAggBEggQgIDpg7HeFhpAc2aRvS1W7s5XPCEuekN8Tbt5i33u4pZYSIfb7rQuD4dp+H7FRePT2MXdNYmFx5IZtizLchyJ/UG+0EdySwNnvhIBBBoESU5EWA==" + ], + "blobs": [ + { + "namespace_id": "AAAAAAAAAAAAAAAAAAAAAAAAAAAAAOH/7a4VD+JiNVE=", + "data": "xLsIsDwF/TuEkH+SxQGtwa0pHDY13lPBOXQHiGkssqGycX2jdMpAudsH9yfGiFbk3vwkISoNjR3PbOxS+t8+6LHmGdg88O6r1OILoxxL0mbt9N3+4ixaJl9JtbrJCBDzt2xF+dzegAs8NYdhjGp/VPwuBAVjNtuNsH+6qu+r7aK1WtoIFcQ0/DPLMI3r7WXU9ZRIflOnRAEcwuCdpRoIF7/aEiwJC7z22XN3j8wDnLJTv6xJ+hMAdTiwwFoh+PmlvwFl0pcB5EXjgSNNZT0KFEhhZl4AO3Y8xALTq2aRVOIY+2M3e791BAflL8TelrAwFT2XcGKpuQugAY3PF5N57rjTJyKx8d5leHU5kZ+ZQRCo1XPzNZMVZqgK6ay0DVPFuF4E3AStgtSQQBtm/irWxtHBJyEe7hc+PG5rP8FkDoTcPyIJtL659jR8VFSBhpsVKfAoOa2V56h6NxGWFb3hQzSedPeOKq3pOqBwWTAAmpO65mK4m2hjwexl1fhUXdqNlyEpSvWp6M0K7pwgSYlmz37ugmh5ZKcpjiwsTYX197Cqvx8bMK8ZVqIIRIBQe77i8kaBSOte5G3GmKLGfQM/eh+HyNkLQP5iBlq4gBgrC6m5eLnYXUmMYkJIQkHfxK6IKrRYmTaWwZ1lB7dsQLkigbJAjbQLsR8mswmfeQCegAbPagoFewNizrhqVDNZL5Bgm2vmEO65pwUHQ06QGELHL+/VosBRVgyoxWQ1ZCmORwN4X+5bHeglUjKPs1AH7Fb4bWJ6vu6CNztU+DqO/lv7S7iBhaIklb2mpEmLNra211/QMaBkPqrQazj5ydaXwn6ObFDUqdXh6+ZdQSjZheKhTAQ3idaC2yE+yOOMm4CqQbiJu0JTV4LGtzn2QJb/h3EXtZfGvlhnD3sLmc9ImFdb5KSHvQFHitV4n1fCDYhgbnVONz9dJmtpIKJN+S4w/Fc7qCOMKw/mza7hwASnKwXNwAOTgM6ckUW/LXLVLS5EmKMccjKrQHqX3V9FpFyGSNDhW8Rslr/Hp/iZKphrfJxQ08V7swQYe3iafnjEdStS1E9hAD19EdL+ioYqkLPNIlYPkFUzovvHktCqmA+b9X1wX/7blmkakuwsN2FTkea3HGs1BNLcZ6eN1zzhx1Ehd0mzHt+jNpMC+jUb3
doqz/fGfzRDJNX3UxB4dSGlfePqhzbscCzYVle1eyZ4pyYDW6hyDStYp8SZGV2sMyJN/44uHferxPYREHj6M41KwRyanJoH3dBzUz8PibIcL+QFeZqPghh+Syv0cw8YD7DyDSLzKFuw79rFiSxVhXdFTYsKie5c0W2B1Pez15x94E0wkdRLroX7+TJDutPrWb9aIiX0QmI7jWJaWdA55aPF9NdEKsIn3AxX9KM0bZA78BVuuxZvWo1UvkQW3X70NvjcoDb7jcca2JrGnTRA0shrb3d7qoDD6O5pOLxk99dhBE9UF/4in4uPoRo5uJ811tpTL2a969U/CU3fyXVFQZAUIICx1+QeBO+R7zoHAXhpbfJslia1VNYDdSsD+9XcqoXOCyrt/5Xay0nAe6nnCtnIrhJZUxXUh+lGKfvUbVXmwSCo+tnmnnxN6IvOthxDvsMAd/P5bMunQQ4dI6pHgnvyxqaDBwbfVsFTLBba0FBA6DXUrm73X+SGcbXmx/Q4ERLF98zCEqmeJMQDmq7szdwT39ShqInidENTYEG+xWy6oVexfQmYyR881Owv0W+Qp7BNVEW5ePlmOTLCXcPWuolpIBHF9r6S5rhjpBHEvj743SPhbfb+IEfodCsLXGGyp/WZsw8exvL1ky7jz5nDPHu9ipck+FnmCyFOaMExUcnNQVbvfzmCZohDIchXY/O9BQU/clQAOORbgQBj7cSNKwgcOhTCpbYKh3j+PL3TpW3rRXpLe4nr2itD5dusp+BIXWquZS7EiBmmY1DtttIQThw34pfxrffFQbLg6oUD0fkd6JL3PXItr+98OGEtmx9oJ/jVsC2583YLLheq4ZEASPdm65B8IHsit4tKCs5DcydaXhFTJem5vW2UNdR7lOF5F50veXA7r2ay55aSb8pw4ZueptBLDIuwR2pnyF8+6X5Amb1vR20UKOhA/rvuskhN/9b0KpqzUo//Z4xaV+hgOx5FDbS1uKSDG0Aat+XeR7pxARe+p1GAAX9X7YezCH5mAW+UzsHmHzXyYhXxnC7BjLnirxEF7pMPukaHN6XVVym14aV1beHHUrz1R52XLZZVlneTtb5CaJ748LC6oLSqBnE6mZFJnSh2acxdHpNL3LYisJkUIhmZxyLzQhli+xOgYjy3k27Vy/u00qEa8YZ7OmYPVCgd8sYDsmSnO1wCd8nt9QP/nSxOzUFU7pJTyJvZEKV4ROOR9AIL/bRF3juYWeh6BgssC4c1ClUfA4qX1588FxQictwGVqfABLk8tkzr02FEKddR6+FxmpZ9oi+OKWZ1bWvdPsz4rln15ILi6llfxM3A7Kxc8LPDaiYIQ7U8Tv0Zrlw+mhjGbqBbdapKAVvIspTHPxqfPVNDHFgimCUWTsPZhznfhW4kJHK0cuhK9dnzdAfDgHnHHeqY+2MTTwbYwhJrawEOOjAgUvn43z1KLyqi04Q0XTEm039TY/Alw/vRvYmigzOXtzQeT+QUqfd4nxhNpH8hCkO7mXxVuBiloa4Lk6a5Jny0NY7efrGDVqAjFs3T/E17sKBkDgFvYg5nQH6IiAZ5ECYvmjkzEPhDvgqmCGuqmmVb6moX7om0kMoR97gj0Mjn6j+kBluQ9IfODlGUn4v10t8hMVRbHnPjqBFx4srH9g+IRHcOoneFj1EF6YtYYIOD+GPDJ/UJRLdwF/KppWnJhOiX5brpvMzZfXaJ6QukTP9cAA8l5YvlnwGY/aBS72EJpBuiWkGg2L7TvQpGm2HHxWzfEiT8v+cg6QONJ1lSf+BTd0GAW54xlBOMd8oP7hxOVOcFMECyUD5h7dHzU4WztB6thofRyfGUV8mSo6jXdZWsqNBMEHDNi6aJpAbgszlnkXpgbtN+bflKFhvsWvh1G8NRzLt000f66VXhWDVx9D0rakAYuAfgnKf4iRuw/m9FbUTC5RXpMqjc0iy/oTLMjad0TGUqjuvwR11/zIMehJqIBDAqgZX2A9v2K0Naoj1ZeRp9M
8skC2jmp66dMJcISv0alz+pe4cqB5Xoen9aa4A3PUH+v07xvr17Q/0w2nMeo+shfJGI+OPtn+hl72FLDAMVPMVx0F5G4ypvToszYPufIb3R6GoGTSs+mkgSkw8gulBh9A5VHqvtqe85g0kFQkp5f4Q1l+wurcHotWQntIrur1yvekENGPD1tBAN8sIG9zoFqB/23z9XMFDLgi1FmbkeXEI0WmWmfhqQZ4ZZelZRKXXDgSKERbtcgYbtaDKlOxVKxJqxHXYkiyhljJKiGhCqvaMI3crbbWSBK57jlSZ5i5Om+b7gqkf0H9A+wjwPSEDwoR1CM4yeUvAlhamaOM8W4q7bBOuWJvHD/j+nHe353Sh+oi0ycqhkLzajqacueZdTyk+hvkU0rZ1TJBm/xmqhVC+Hc967z7QzZs34wV1dR/F8DIUPxvE+M4fw5DMqLTRpl+PkswZXVAw5FYFC6D8WL19IbflXUhTJ8qiVFgeDfXdW/VIMjsLfUpSiTM3t2S8zaEABcGmkNqrzWbAmEIr91EE6X+AOsO2VBv9u+2YhmZRGSPe0svxhdussSfKxeMtCWAEbek6pC0HpuQU5zc2SqdBhsraX0TImkHQJtoaMQEWKdurvfmP1fnvqP0bAx5Yxjmz5cSGk/wcpAhN4Qc5J16MWxkmGM+v6/4N3Kc2JqK7DH4o7faK5Pkod+sRdiZvRVoEi/tTrB0n4bzPkSFc59eCFgsnZajF/3zQS7hRyO3xos4FJmhdgbD6F8KZmOrYGfr1yYyUYToC2hFGSSWGNcXIRAojqvByBjSktPaxEqmc0u+5Mma0F8TzQHcs/AtZV2NrPlwDsOo5ITBMVTUTEezak7hDJCQXcgm4VanDY+qvE8DjkXQiMm5lUpLwpJcbCRRGYpcM4HzsgOV9pmofeanywTkTf8JapiH47aawB2b9QrR/GZ/eYRMQyvkeDRkwW3mdN4GNYueeKtyiTpDXo77/LgwLfcxcFi22sRT36zD5QhEs4yFH0yolWR6wxXyqLC/17bMegw1KAWP3Nevb+ay/lo8/22SM/K7JPn/9y019rB56YIl8TbNWKEU308hTc/b1PXTY961wdwsk4Z58F4XUmkyuQjjYwqgDB/+6mKlcjwPD+AXcIcwVfHolhX0M7THb8mK7bfxrANEYaNolsKAlh7WD0/dw7lOV3S0TWVaCkeZgUy62JjbJhC9l/7pjzAqmCqd99PGupei9yx7fJdrdJ//7VmzsTi32Hzuldjl2lKcSe/BdEWFChe0eosLY9sumppZbzuYhRHZRC7A==" + } + ], + "square_size": 4, + "hash": "ecqvIuCjhlW76pE31tEJcRhohEODd3lo9YBNaza8HGg=" + }, + "evidence": { + "evidence": [ + + ] + }, + "last_commit": { + "height": 3, + "block_id": { + "hash": "TLPtmfe84IKmiOU8K45MwPawkYq9XAx8KIYdQltNNXs=", + "part_set_header": { + "total": 1, + "hash": "7HS0mrbP9UNbYk2YMkA8oigEvHEErzs3rC0i1RkloUY=" + } + }, + "signatures": [ + { + "block_id_flag": 2, + "validator_address": "8TeEky8GnBxKq/c2jmFemTVQCy0=", + "timestamp": "2023-04-11T20:52:54.923092Z", + "signature": "1Z3NmDMQU1B5dhSkxdv7xQNF0/n/q3dn+qlHlVBGcXEI3AZ1z4Kua9f+vKOwP3RacsEuGggD90rEbhpN6IqmBA==" + } + ] + } +} diff --git a/shares/utils.go b/shares/utils.go new file mode 100644 index 
0000000000..636f16bcc1 --- /dev/null +++ b/shares/utils.go @@ -0,0 +1,101 @@ +package shares + +import ( + "bytes" + "encoding/binary" + "fmt" + "math" + + core "github.com/tendermint/tendermint/proto/tendermint/types" + coretypes "github.com/tendermint/tendermint/types" +) + +// DelimLen calculates the length of the delimiter for a given unit size +func DelimLen(size uint64) int { + lenBuf := make([]byte, binary.MaxVarintLen64) + return binary.PutUvarint(lenBuf, size) +} + +func isPowerOf2(v uint64) bool { + return v&(v-1) == 0 && v != 0 +} + +func BlobsFromProto(blobs []core.Blob) ([]coretypes.Blob, error) { + result := make([]coretypes.Blob, len(blobs)) + for i, blob := range blobs { + if blob.ShareVersion > math.MaxUint8 { + return nil, fmt.Errorf("share version %d is too large to be a uint8", blob.ShareVersion) + } + result[i] = coretypes.Blob{ + NamespaceID: blob.NamespaceId, + Data: blob.Data, + ShareVersion: uint8(blob.ShareVersion), + } + } + return result, nil +} + +func TxsToBytes(txs coretypes.Txs) [][]byte { + e := make([][]byte, len(txs)) + for i, tx := range txs { + e[i] = []byte(tx) + } + return e +} + +func TxsFromBytes(txs [][]byte) coretypes.Txs { + e := make(coretypes.Txs, len(txs)) + for i, tx := range txs { + e[i] = coretypes.Tx(tx) + } + return e +} + +// zeroPadIfNecessary pads the share with trailing zero bytes if the provided +// share has fewer bytes than width. Returns the share unmodified if the +// len(share) is greater than or equal to width. +func zeroPadIfNecessary(share []byte, width int) (padded []byte, bytesOfPadding int) { + oldLen := len(share) + if oldLen >= width { + return share, 0 + } + + missingBytes := width - oldLen + padByte := []byte{0} + padding := bytes.Repeat(padByte, missingBytes) + share = append(share, padding...) + return share, missingBytes +} + +// ParseDelimiter attempts to parse a varint length delimiter from the input +// provided. 
It returns the input without the len delimiter bytes, the length +// parsed from the varint optionally an error. Unit length delimiters are used +// in compact shares where units (i.e. a transaction) are prefixed with a length +// delimiter that is encoded as a varint. Input should not contain the namespace +// ID or info byte of a share. +func ParseDelimiter(input []byte) (inputWithoutLenDelimiter []byte, unitLen uint64, err error) { + if len(input) == 0 { + return input, 0, nil + } + + l := binary.MaxVarintLen64 + if len(input) < binary.MaxVarintLen64 { + l = len(input) + } + + delimiter, _ := zeroPadIfNecessary(input[:l], binary.MaxVarintLen64) + + // read the length of the data + r := bytes.NewBuffer(delimiter) + dataLen, err := binary.ReadUvarint(r) + if err != nil { + return nil, 0, err + } + + // calculate the number of bytes used by the delimiter + lenBuf := make([]byte, binary.MaxVarintLen64) + n := binary.PutUvarint(lenBuf, dataLen) + + // return the input without the length delimiter + return input[n:], dataLen, nil +} diff --git a/shares/utils_test.go b/shares/utils_test.go new file mode 100644 index 0000000000..49f30b9153 --- /dev/null +++ b/shares/utils_test.go @@ -0,0 +1,70 @@ +package shares + +import ( + "reflect" + "testing" + + "github.com/celestiaorg/celestia-app/testutil/testfactory" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "github.com/tendermint/tendermint/types" +) + +func FuzzBlobSharesUsed(f *testing.F) { + f.Add(uint32(1)) + f.Fuzz(func(t *testing.T, a uint32) { + if a < 1 { + t.Skip() + } + ml := SparseSharesNeeded(a) + blob := testfactory.GenerateRandomBlob(int(a)) + rawShares, err := SplitBlobs(0, nil, []types.Blob{blob}, false) + require.NoError(t, err) + require.Equal(t, len(rawShares), ml) + }) +} + +func Test_zeroPadIfNecessary(t *testing.T) { + type args struct { + share []byte + width int + } + tests := []struct { + name string + args args + wantPadded []byte + wantBytesOfPadding int + }{ + 
{"pad", args{[]byte{1, 2, 3}, 6}, []byte{1, 2, 3, 0, 0, 0}, 3}, + {"not necessary (equal to shareSize)", args{[]byte{1, 2, 3}, 3}, []byte{1, 2, 3}, 0}, + {"not necessary (greater shareSize)", args{[]byte{1, 2, 3}, 2}, []byte{1, 2, 3}, 0}, + } + for _, tt := range tests { + tt := tt // stupid scopelint :-/ + t.Run(tt.name, func(t *testing.T) { + gotPadded, gotBytesOfPadding := zeroPadIfNecessary(tt.args.share, tt.args.width) + if !reflect.DeepEqual(gotPadded, tt.wantPadded) { + t.Errorf("zeroPadIfNecessary gotPadded %v, wantPadded %v", gotPadded, tt.wantPadded) + } + if gotBytesOfPadding != tt.wantBytesOfPadding { + t.Errorf("zeroPadIfNecessary gotBytesOfPadding %v, wantBytesOfPadding %v", gotBytesOfPadding, tt.wantBytesOfPadding) + } + }) + } +} + +func TestParseDelimiter(t *testing.T) { + for i := uint64(0); i < 100; i++ { + tx := testfactory.GenerateRandomTxs(1, int(i))[0] + input, err := MarshalDelimitedTx(tx) + if err != nil { + panic(err) + } + res, txLen, err := ParseDelimiter(input) + if err != nil { + panic(err) + } + assert.Equal(t, i, txLen) + assert.Equal(t, []byte(tx), res) + } +} From b76fa79aff883b6a8e371aff89d0390c7f152b23 Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Fri, 21 Apr 2023 17:15:45 -0400 Subject: [PATCH 2/8] Finish copying shares pkg in --- appconsts/appconsts.go | 23 +- namespace/consts.go | 70 +++++ namespace/namespace.go | 130 ++++++++ namespace/namespace_test.go | 151 +++++++++ namespace/random_blob.go | 28 ++ namespace/random_namespace.go | 18 ++ shares/compact_shares_test.go | 41 ++- shares/info_byte.go | 2 +- shares/non_interactive_defaults.go | 95 ------ shares/non_interactive_defaults_test.go | 368 ---------------------- shares/padding.go | 69 ----- shares/padding_test.go | 82 ----- shares/parse.go | 81 ----- shares/parse_sparse_shares.go | 88 ------ shares/parse_sparse_shares_test.go | 154 ---------- shares/parse_test.go | 196 ------------ shares/powers_of_two.go | 44 --- shares/powers_of_two_test.go | 101 ------- 
shares/reserved_bytes.go | 2 +- shares/share_builder.go | 4 +- shares/share_builder_test.go | 4 +- shares/share_sequence.go | 4 +- shares/share_sequence_test.go | 4 +- shares/share_splitting.go | 168 ---------- shares/share_splitting_test.go | 387 ------------------------ shares/shares.go | 4 +- shares/shares_test.go | 185 ++++++----- shares/sparse_shares_test.go | 86 ------ shares/split_compact_shares.go | 4 +- shares/split_compact_shares_test.go | 15 +- shares/split_sparse_shares.go | 128 -------- shares/split_sparse_shares_test.go | 1 - shares/utils.go | 31 +- shares/utils_test.go | 30 +- testfactory/txs.go | 32 ++ 35 files changed, 619 insertions(+), 2211 deletions(-) create mode 100644 namespace/consts.go create mode 100644 namespace/namespace.go create mode 100644 namespace/namespace_test.go create mode 100644 namespace/random_blob.go create mode 100644 namespace/random_namespace.go delete mode 100644 shares/non_interactive_defaults.go delete mode 100644 shares/non_interactive_defaults_test.go delete mode 100644 shares/padding.go delete mode 100644 shares/padding_test.go delete mode 100644 shares/parse.go delete mode 100644 shares/parse_sparse_shares.go delete mode 100644 shares/parse_sparse_shares_test.go delete mode 100644 shares/parse_test.go delete mode 100644 shares/powers_of_two.go delete mode 100644 shares/powers_of_two_test.go delete mode 100644 shares/share_splitting.go delete mode 100644 shares/share_splitting_test.go delete mode 100644 shares/sparse_shares_test.go delete mode 100644 shares/split_sparse_shares.go delete mode 100644 shares/split_sparse_shares_test.go create mode 100644 testfactory/txs.go diff --git a/appconsts/appconsts.go b/appconsts/appconsts.go index 6ab84338fb..8a5281e94c 100644 --- a/appconsts/appconsts.go +++ b/appconsts/appconsts.go @@ -1,10 +1,5 @@ package appconsts -import ( - "github.com/celestiaorg/rsmt2d" - "github.com/tendermint/tendermint/pkg/consts" -) - // These constants were originally sourced from: // 
https://github.com/celestiaorg/celestia-specs/blob/master/src/specs/consensus.md#constants const ( @@ -88,16 +83,18 @@ const ( ) var ( - // NewBaseHashFunc is the base hash function used by NMT. Change accordingly - // if another hash.Hash should be used as a base hasher in the NMT. - NewBaseHashFunc = consts.NewBaseHashFunc - // DefaultCodec is the default codec creator used for data erasure. - DefaultCodec = rsmt2d.NewLeoRSCodec + // TODO: Consider commenting back in. Removed to reduce unneeded dependency + + // // NewBaseHashFunc is the base hash function used by NMT. Change accordingly + // // if another hash.Hash should be used as a base hasher in the NMT. + // NewBaseHashFunc = consts.NewBaseHashFunc + // // DefaultCodec is the default codec creator used for data erasure. + // DefaultCodec = rsmt2d.NewLeoRSCodec - // DataCommitmentBlocksLimit is the limit to the number of blocks we can - // generate a data commitment for. - DataCommitmentBlocksLimit = consts.DataCommitmentBlocksLimit + // // DataCommitmentBlocksLimit is the limit to the number of blocks we can + // // generate a data commitment for. + // DataCommitmentBlocksLimit = consts.DataCommitmentBlocksLimit // SupportedShareVersions is a list of supported share versions. SupportedShareVersions = []uint8{ShareVersionZero} diff --git a/namespace/consts.go b/namespace/consts.go new file mode 100644 index 0000000000..5f3d613223 --- /dev/null +++ b/namespace/consts.go @@ -0,0 +1,70 @@ +package namespace + +import ( + "bytes" + "math" + + "github.com/rollkit/rollkit/appconsts" +) + +const ( + // NamespaveVersionSize is the size of a namespace version in bytes. + NamespaceVersionSize = appconsts.NamespaceVersionSize + + // NamespaceIDSize is the size of a namespace ID in bytes. + NamespaceIDSize = appconsts.NamespaceIDSize + + // NamespaceSize is the size of a namespace (version + ID) in bytes. + NamespaceSize = appconsts.NamespaceSize + + // NamespaceVersionZero is the first namespace version. 
+	NamespaceVersionZero = uint8(0)
+
+	// NamespaceVersionMax is the max namespace version.
+	NamespaceVersionMax = math.MaxUint8
+
+	// NamespaceVersionZeroPrefixSize is the number of `0` bytes that are prefixed to
+	// namespace IDs for version 0.
+	NamespaceVersionZeroPrefixSize = 22
+
+	// NamespaceVersionZeroIDSize is the number of bytes available for
+	// user-specified namespace ID in a namespace ID for version 0.
+	NamespaceVersionZeroIDSize = NamespaceIDSize - NamespaceVersionZeroPrefixSize
+)
+
+var (
+	// NamespaceVersionZeroPrefix is the prefix of a namespace ID for version 0.
+	NamespaceVersionZeroPrefix = bytes.Repeat([]byte{0}, NamespaceVersionZeroPrefixSize)
+
+	// TxNamespace is the namespace reserved for transaction data.
+	TxNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 1})
+
+	// IntermediateStateRootsNamespace is the namespace reserved for
+	// intermediate state root data.
+	IntermediateStateRootsNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 2})
+
+	// PayForBlobNamespace is the namespace reserved for PayForBlobs transactions.
+	PayForBlobNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 4})
+
+	// ReservedPaddingNamespace is the namespace used for padding after all
+	// reserved namespaces. In practice this padding is after transactions
+	// (ordinary and PFBs) but before blobs.
+	ReservedPaddingNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 255})
+
+	// MaxReservedNamespace is lexicographically the largest namespace that is
+	// reserved for protocol use.
+	MaxReservedNamespace = MustNewV0([]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 255})
+
+	// TailPaddingNamespace is the namespace reserved for tail padding. All data
+	// with this namespace will be ignored.
+	TailPaddingNamespace = Namespace{
+		Version: math.MaxUint8,
+		ID:      append(bytes.Repeat([]byte{0xFF}, NamespaceIDSize-1), 0xFE),
+	}
+
+	// ParitySharesNamespace is the namespace reserved for erasure coded data.
+ ParitySharesNamespace = Namespace{ + Version: math.MaxUint8, + ID: bytes.Repeat([]byte{0xFF}, NamespaceIDSize), + } +) diff --git a/namespace/namespace.go b/namespace/namespace.go new file mode 100644 index 0000000000..0d4c95d080 --- /dev/null +++ b/namespace/namespace.go @@ -0,0 +1,130 @@ +package namespace + +import ( + "bytes" + "fmt" +) + +type Namespace struct { + Version uint8 + ID []byte +} + +// New returns a new namespace with the provided version and id. +func New(version uint8, id []byte) (Namespace, error) { + err := validateVersion(version) + if err != nil { + return Namespace{}, err + } + + err = validateID(version, id) + if err != nil { + return Namespace{}, err + } + + return Namespace{ + Version: version, + ID: id, + }, nil +} + +// MustNew returns a new namespace with the provided version and id. It panics +// if the provided version or id are not supported. +func MustNew(version uint8, id []byte) Namespace { + ns, err := New(version, id) + if err != nil { + panic(err) + } + return ns +} + +// MustNewV0 returns a new namespace with version 0 and the provided id. This +// function panics if the provided id is not exactly NamespaceVersionZeroIDSize bytes. +func MustNewV0(id []byte) Namespace { + if len(id) != NamespaceVersionZeroIDSize { + panic(fmt.Sprintf("invalid namespace id length: %v must be %v", len(id), NamespaceVersionZeroIDSize)) + } + + ns, err := New(NamespaceVersionZero, append(NamespaceVersionZeroPrefix, id...)) + if err != nil { + panic(err) + } + return ns +} + +// From returns a namespace from the provided byte slice. +func From(b []byte) (Namespace, error) { + if len(b) != NamespaceSize { + return Namespace{}, fmt.Errorf("invalid namespace length: %v must be %v", len(b), NamespaceSize) + } + rawVersion := b[0] + rawNamespace := b[1:] + return New(rawVersion, rawNamespace) +} + +// Bytes returns this namespace as a byte slice. +func (n Namespace) Bytes() []byte { + return append([]byte{n.Version}, n.ID...) 
+} + +// ValidateBlobNamespace returns an error if this namespace is not a valid blob namespace. +func (n Namespace) ValidateBlobNamespace() error { + if n.IsReserved() { + return fmt.Errorf("invalid blob namespace: %v cannot use a reserved namespace ID, want > %v", n.Bytes(), MaxReservedNamespace.Bytes()) + } + + if n.IsParityShares() { + return fmt.Errorf("invalid blob namespace: %v cannot use parity shares namespace ID", n.Bytes()) + } + + if n.IsTailPadding() { + return fmt.Errorf("invalid blob namespace: %v cannot use tail padding namespace ID", n.Bytes()) + } + + return nil +} + +// validateVersion returns an error if the version is not supported. +func validateVersion(version uint8) error { + if version != NamespaceVersionZero && version != NamespaceVersionMax { + return fmt.Errorf("unsupported namespace version %v", version) + } + return nil +} + +// validateID returns an error if the provided id does not meet the requirements +// for the provided version. +func validateID(version uint8, id []byte) error { + if len(id) != NamespaceIDSize { + return fmt.Errorf("unsupported namespace id length: id %v must be %v bytes but it was %v bytes", id, NamespaceIDSize, len(id)) + } + + if version == NamespaceVersionZero && !bytes.HasPrefix(id, NamespaceVersionZeroPrefix) { + return fmt.Errorf("unsupported namespace id with version %v. 
ID %v must start with %v leading zeros", version, id, len(NamespaceVersionZeroPrefix)) + } + return nil +} + +func (n Namespace) IsReserved() bool { + return bytes.Compare(n.Bytes(), MaxReservedNamespace.Bytes()) < 1 +} + +func (n Namespace) IsParityShares() bool { + return bytes.Equal(n.Bytes(), ParitySharesNamespace.Bytes()) +} + +func (n Namespace) IsTailPadding() bool { + return bytes.Equal(n.Bytes(), TailPaddingNamespace.Bytes()) +} + +func (n Namespace) IsReservedPadding() bool { + return bytes.Equal(n.Bytes(), ReservedPaddingNamespace.Bytes()) +} + +func (n Namespace) IsTx() bool { + return bytes.Equal(n.Bytes(), TxNamespace.Bytes()) +} + +func (n Namespace) IsPayForBlob() bool { + return bytes.Equal(n.Bytes(), PayForBlobNamespace.Bytes()) +} diff --git a/namespace/namespace_test.go b/namespace/namespace_test.go new file mode 100644 index 0000000000..712fb7073f --- /dev/null +++ b/namespace/namespace_test.go @@ -0,0 +1,151 @@ +package namespace + +import ( + "bytes" + "testing" + + "github.com/stretchr/testify/assert" +) + +var ( + validID = append(NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceVersionZeroIDSize)...) + tooShortID = append(NamespaceVersionZeroPrefix, []byte{1}...) + tooLongID = append(NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceSize)...) 
+ invalidPrefixID = bytes.Repeat([]byte{1}, NamespaceSize) +) + +func TestNew(t *testing.T) { + type testCase struct { + name string + version uint8 + id []byte + wantErr bool + want Namespace + } + + testCases := []testCase{ + { + name: "valid namespace", + version: NamespaceVersionZero, + id: validID, + wantErr: false, + want: Namespace{ + Version: NamespaceVersionZero, + ID: validID, + }, + }, + { + name: "unsupported version", + version: uint8(1), + id: validID, + wantErr: true, + }, + { + name: "unsupported id: too short", + version: NamespaceVersionZero, + id: tooShortID, + wantErr: true, + }, + { + name: "unsupported id: too long", + version: NamespaceVersionZero, + id: tooLongID, + wantErr: true, + }, + { + name: "unsupported id: invalid prefix", + version: NamespaceVersionZero, + id: invalidPrefixID, + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := New(tc.version, tc.id) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestFrom(t *testing.T) { + type testCase struct { + name string + bytes []byte + wantErr bool + want Namespace + } + validNamespace := []byte{} + validNamespace = append(validNamespace, NamespaceVersionZero) + validNamespace = append(validNamespace, NamespaceVersionZeroPrefix...) + validNamespace = append(validNamespace, bytes.Repeat([]byte{0x1}, NamespaceVersionZeroIDSize)...) 
+ parityNamespace := bytes.Repeat([]byte{0xFF}, NamespaceSize) + + testCases := []testCase{ + { + name: "valid namespace", + bytes: validNamespace, + wantErr: false, + want: Namespace{ + Version: NamespaceVersionZero, + ID: validID, + }, + }, + { + name: "parity namespace", + bytes: parityNamespace, + wantErr: false, + want: Namespace{ + Version: NamespaceVersionMax, + ID: bytes.Repeat([]byte{0xFF}, NamespaceIDSize), + }, + }, + { + name: "unsupported version", + bytes: append([]byte{1}, append(NamespaceVersionZeroPrefix, bytes.Repeat([]byte{1}, NamespaceSize-len(NamespaceVersionZeroPrefix))...)...), + wantErr: true, + }, + { + name: "unsupported id: too short", + bytes: append([]byte{NamespaceVersionZero}, tooShortID...), + wantErr: true, + }, + { + name: "unsupported id: too long", + bytes: append([]byte{NamespaceVersionZero}, tooLongID...), + wantErr: true, + }, + { + name: "unsupported id: invalid prefix", + bytes: append([]byte{NamespaceVersionZero}, invalidPrefixID...), + wantErr: true, + }, + } + + for _, tc := range testCases { + t.Run(tc.name, func(t *testing.T) { + got, err := From(tc.bytes) + if tc.wantErr { + assert.Error(t, err) + return + } + assert.NoError(t, err) + assert.Equal(t, tc.want, got) + }) + } +} + +func TestBytes(t *testing.T) { + namespace, err := New(NamespaceVersionZero, validID) + assert.NoError(t, err) + + want := append([]byte{NamespaceVersionZero}, validID...) 
+ got := namespace.Bytes() + + assert.Equal(t, want, got) +} diff --git a/namespace/random_blob.go b/namespace/random_blob.go new file mode 100644 index 0000000000..376534b8cf --- /dev/null +++ b/namespace/random_blob.go @@ -0,0 +1,28 @@ +package namespace + +import ( + tmrand "github.com/tendermint/tendermint/libs/rand" +) + +func RandomBlobNamespaceID() []byte { + return tmrand.Bytes(NamespaceVersionZeroIDSize) +} + +func RandomBlobNamespace() Namespace { + for { + id := RandomBlobNamespaceID() + namespace := MustNewV0(id) + err := namespace.ValidateBlobNamespace() + if err != nil { + continue + } + return namespace + } +} + +func RandomBlobNamespaces(count int) (namespaces []Namespace) { + for i := 0; i < count; i++ { + namespaces = append(namespaces, RandomBlobNamespace()) + } + return namespaces +} diff --git a/namespace/random_namespace.go b/namespace/random_namespace.go new file mode 100644 index 0000000000..79e52a6118 --- /dev/null +++ b/namespace/random_namespace.go @@ -0,0 +1,18 @@ +package namespace + +import tmrand "github.com/tendermint/tendermint/libs/rand" + +func RandomNamespace() Namespace { + for { + id := RandomVerzionZeroID() + namespace, err := New(NamespaceVersionZero, id) + if err != nil { + continue + } + return namespace + } +} + +func RandomVerzionZeroID() []byte { + return append(NamespaceVersionZeroPrefix, tmrand.Bytes(NamespaceVersionZeroIDSize)...) 
+} diff --git a/shares/compact_shares_test.go b/shares/compact_shares_test.go index 2c2f9c55ab..b00c71ef36 100644 --- a/shares/compact_shares_test.go +++ b/shares/compact_shares_test.go @@ -6,14 +6,41 @@ import ( "testing" "time" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - "github.com/celestiaorg/celestia-app/testutil/testfactory" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/testfactory" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" coretypes "github.com/tendermint/tendermint/types" ) +// ToTxs converts a raw slice of byte slices into a Txs type. +func ToTxs(txs [][]byte) coretypes.Txs { + txBzs := make(coretypes.Txs, len(txs)) + for i := 0; i < len(txs); i++ { + txBzs[i] = txs[i] + } + return txBzs +} + +func SplitTxs(txs coretypes.Txs) (txShares []Share, err error) { + txWriter := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) + + for _, tx := range txs { + err = txWriter.WriteTx(tx) + if err != nil { + return nil, err + } + } + + txShares, _, err = txWriter.Export(0) + if err != nil { + return nil, err + } + + return txShares, nil +} + func TestCompactShareSplitter(t *testing.T) { // note that this test is mainly for debugging purposes, the main round trip // tests occur in TestMerge and Test_processCompactShares @@ -27,7 +54,7 @@ func TestCompactShareSplitter(t *testing.T) { require.NoError(t, err) rawResTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) - resTxs := coretypes.ToTxs(rawResTxs) + resTxs := ToTxs(rawResTxs) require.NoError(t, err) assert.Equal(t, txs, resTxs) @@ -79,7 +106,7 @@ func Test_processCompactShares(t *testing.T) { t.Run(fmt.Sprintf("%s idendically sized", tc.name), func(t *testing.T) { txs := testfactory.GenerateRandomTxs(tc.txCount, tc.txSize) - shares, _, _, err := SplitTxs(txs) + shares, err := SplitTxs(txs) 
require.NoError(t, err) parsedTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) @@ -97,7 +124,7 @@ func Test_processCompactShares(t *testing.T) { t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) { txs := testfactory.GenerateRandomlySizedTxs(tc.txCount, tc.txSize) - txShares, _, _, err := SplitTxs(txs) + txShares, err := SplitTxs(txs) require.NoError(t, err) parsedTxs, err := parseCompactShares(txShares, appconsts.SupportedShareVersions) if err != nil { @@ -163,7 +190,7 @@ func Test_parseCompactSharesErrors(t *testing.T) { } txs := testfactory.GenerateRandomTxs(2, appconsts.ContinuationCompactShareContentSize*4) - txShares, _, _, err := SplitTxs(txs) + txShares, err := SplitTxs(txs) require.NoError(t, err) rawShares := ToBytes(txShares) diff --git a/shares/info_byte.go b/shares/info_byte.go index 3d2e5e877e..013344a83f 100644 --- a/shares/info_byte.go +++ b/shares/info_byte.go @@ -3,7 +3,7 @@ package shares import ( "fmt" - "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/rollkit/rollkit/appconsts" ) // InfoByte is a byte with the following structure: the first 7 bits are diff --git a/shares/non_interactive_defaults.go b/shares/non_interactive_defaults.go deleted file mode 100644 index 33dec00ed6..0000000000 --- a/shares/non_interactive_defaults.go +++ /dev/null @@ -1,95 +0,0 @@ -package shares - -import ( - "math" -) - -// FitsInSquare uses the non interactive default rules to see if blobs of -// some lengths will fit in a square of squareSize starting at share index -// cursor. Returns whether the blobs fit in the square and the number of -// shares used by blobs. 
See non-interactive default rules -// https://github.com/celestiaorg/celestia-specs/blob/master/src/rationale/message_block_layout.md#non-interactive-default-rules -// https://github.com/celestiaorg/celestia-app/blob/1b80b94a62c8c292f569e2fc576e26299985681a/docs/architecture/adr-009-non-interactive-default-rules-for-reduced-padding.md -func FitsInSquare(cursor, squareSize int, blobShareLens ...int) (bool, int) { - if len(blobShareLens) == 0 { - if cursor <= squareSize*squareSize { - return true, 0 - } - return false, 0 - } - firstBlobLen := 1 - if len(blobShareLens) > 0 { - firstBlobLen = blobShareLens[0] - } - // here we account for padding between the compact and sparse shares - cursor, _ = NextMultipleOfBlobMinSquareSize(cursor, firstBlobLen, squareSize) - sharesUsed, _ := BlobSharesUsedNonInteractiveDefaults(cursor, squareSize, blobShareLens...) - return cursor+sharesUsed <= squareSize*squareSize, sharesUsed -} - -// BlobSharesUsedNonInteractiveDefaults returns the number of shares used by a given set -// of blobs share lengths. It follows the non-interactive default rules and -// returns the share indexes for each blob. -func BlobSharesUsedNonInteractiveDefaults(cursor, squareSize int, blobShareLens ...int) (sharesUsed int, indexes []uint32) { - start := cursor - indexes = make([]uint32, len(blobShareLens)) - for i, blobLen := range blobShareLens { - cursor, _ = NextMultipleOfBlobMinSquareSize(cursor, blobLen, squareSize) - indexes[i] = uint32(cursor) - cursor += blobLen - } - return cursor - start, indexes -} - -// NextMultipleOfBlobMinSquareSize determines the next index in a square that is -// a multiple of the blob's minimum square size. This function returns false if -// the entire the blob cannot fit on the given row. Assumes that all args are -// non negative, and that squareSize is a power of two. 
-// https://github.com/celestiaorg/celestia-specs/blob/master/src/rationale/message_block_layout.md#non-interactive-default-rules -// https://github.com/celestiaorg/celestia-app/blob/1b80b94a62c8c292f569e2fc576e26299985681a/docs/architecture/adr-009-non-interactive-default-rules-for-reduced-padding.md -func NextMultipleOfBlobMinSquareSize(cursor, blobLen, squareSize int) (index int, fitsInRow bool) { - // if we're starting at the beginning of the row, then return as there are - // no cases where we don't start at 0. - if isStartOfRow(cursor, squareSize) { - return cursor, true - } - - blobMinSquareSize := MinSquareSize(blobLen) - startOfNextRow := ((cursor / squareSize) + 1) * squareSize - cursor = roundUpBy(cursor, blobMinSquareSize) - switch { - // the entire blob fits in this row - case cursor+blobLen <= startOfNextRow: - return cursor, true - // only a portion of the blob fits in this row - case cursor+blobMinSquareSize <= startOfNextRow: - return cursor, false - // none of the blob fits on this row, so return the start of the next row - default: - return startOfNextRow, false - } -} - -// roundUpBy rounds cursor up to the next multiple of v. If cursor is divisible -// by v, then it returns cursor -func roundUpBy(cursor, v int) int { - switch { - case cursor == 0: - return cursor - case cursor%v == 0: - return cursor - default: - return ((cursor / v) + 1) * v - } -} - -// MinSquareSize returns the minimum square size that can contain shareCount -// number of shares. 
-func MinSquareSize(shareCount int) int { - return RoundUpPowerOfTwo(int(math.Ceil(math.Sqrt(float64(shareCount))))) -} - -// isStartOfRow returns true if cursor is at the start of a row -func isStartOfRow(cursor, squareSize int) bool { - return cursor == 0 || cursor%squareSize == 0 -} diff --git a/shares/non_interactive_defaults_test.go b/shares/non_interactive_defaults_test.go deleted file mode 100644 index 1e0b77db67..0000000000 --- a/shares/non_interactive_defaults_test.go +++ /dev/null @@ -1,368 +0,0 @@ -package shares - -import ( - "fmt" - "testing" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/stretchr/testify/assert" -) - -func TestBlobSharesUsedNonInteractiveDefaults(t *testing.T) { - type test struct { - cursor, squareSize, expected int - blobLens []int - indexes []uint32 - } - tests := []test{ - {2, 4, 1, []int{1}, []uint32{2}}, - {2, 2, 1, []int{1}, []uint32{2}}, - {3, 4, 8, []int{3, 3}, []uint32{4, 8}}, - {0, 8, 8, []int{8}, []uint32{0}}, - {0, 8, 7, []int{7}, []uint32{0}}, - {0, 8, 7, []int{3, 3}, []uint32{0, 4}}, - {1, 8, 8, []int{3, 3}, []uint32{2, 6}}, - {1, 8, 32, []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, []uint32{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}}, - {3, 8, 16, []int{5, 7}, []uint32{4, 12}}, - {0, 8, 29, []int{5, 5, 5, 5}, []uint32{0, 8, 16, 24}}, - {0, 8, 10, []int{10}, []uint32{0}}, - {0, 8, 22, []int{10, 10}, []uint32{0, 12}}, - {1, 8, 25, []int{10, 10}, []uint32{4, 16}}, - {2, 8, 24, []int{10, 10}, []uint32{4, 16}}, - {0, 8, 55, []int{21, 31}, []uint32{0, 24}}, - {0, 8, 128, []int{64, 64}, []uint32{0, 64}}, - {0, appconsts.DefaultMaxSquareSize, 1000, []int{1000}, []uint32{0}}, - {0, appconsts.DefaultMaxSquareSize, appconsts.DefaultMaxSquareSize + 1, []int{appconsts.DefaultMaxSquareSize + 1}, []uint32{0}}, - {1, 128, 399, []int{128, 128, 128}, []uint32{16, 144, 272}}, - {1024, 
appconsts.DefaultMaxSquareSize, 32, []int{32}, []uint32{1024}}, - } - for i, tt := range tests { - res, indexes := BlobSharesUsedNonInteractiveDefaults(tt.cursor, tt.squareSize, tt.blobLens...) - test := fmt.Sprintf("test %d: cursor %d, squareSize %d", i, tt.cursor, tt.squareSize) - assert.Equal(t, tt.expected, res, test) - assert.Equal(t, tt.indexes, indexes, test) - } -} - -func TestFitsInSquare(t *testing.T) { - type test struct { - name string - blobs []int - start int - size int - fits bool - } - tests := []test{ - { - name: "1 blobs size 2 shares (2 blob shares, 2 compact, size 4)", - blobs: []int{2}, - start: 2, - size: 4, - fits: true, - }, - { - name: "10 blobs size 10 shares (100 blob shares, 0 compact, size 4)", - blobs: []int{10, 10, 10, 10, 10, 10, 10, 10, 10, 10}, - start: 0, - size: 4, - fits: false, - }, - { - name: "15 blobs size 1 share (15 blob shares, 0 compact, size 4)", - blobs: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, - start: 0, - size: 4, - fits: true, - }, - { - name: "15 blobs size 1 share starting at share 2 (15 blob shares, 2 compact, size 4)", - blobs: []int{1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1}, - start: 2, - size: 4, - fits: false, - }, - { - name: "8 blobs of various sizes (48 blob shares, 1 compact share, size 8)", - blobs: []int{3, 9, 3, 7, 8, 3, 7, 8}, - start: 1, - size: 8, - fits: true, - }, - { - // C = compact share - // P = padding share - // - // |C|C|C|C|C|C|P|P| - // |3|3|3|P|9|9|9|9| - // |9|9|9|9|9|P|P|P| - // |3|3|3|P|7|7|7|7| - // |7|7|7|P|8|8|8|8| - // |8|8|8|8|3|3|3|P| - // |7|7|7|7|7|7|7|P| - // |8|8|8|8|8|8|8|8| - name: "8 blobs of various sizes (48 blob shares, 6 compact, size 8)", - blobs: []int{3, 9, 3, 7, 8, 3, 7, 8}, - start: 6, - size: 8, - fits: true, - }, - { - name: "0 blobs (0 blob shares, 5 compact, size 2)", - blobs: []int{}, - start: 5, - size: 2, - fits: false, - }, - { - name: "0 blobs (0 blob shares, 4 compact, size 2)", - blobs: []int{}, - start: 4, - size: 2, - fits: true, - }, 
- { - name: "0 blobs. Cursor at the the max share index", - blobs: []int{}, - start: 16, - size: 4, - fits: true, - }, - { - name: "0 blobs. Cursor higher than max share index", - blobs: []int{}, - start: 17, - size: 4, - fits: false, - }, - { - name: "0 blobs. Cursor higher than max share index (again)", - blobs: []int{}, - start: 18, - size: 4, - fits: false, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - res, _ := FitsInSquare(tt.start, tt.size, tt.blobs...) - assert.Equal(t, tt.fits, res) - }) - } -} - -func TestNextMultipleOfBlobMinSquareSize(t *testing.T) { - type test struct { - name string - cursor, blobLen, squareSize int - expectedIndex int - fits bool - } - tests := []test{ - { - name: "whole row blobLen 4", - cursor: 0, - blobLen: 4, - squareSize: 4, - fits: true, - expectedIndex: 0, - }, - { - name: "half row blobLen 2 cursor 1", - cursor: 1, - blobLen: 2, - squareSize: 4, - fits: true, - expectedIndex: 2, - }, - { - name: "half row blobLen 2 cursor 2", - cursor: 2, - blobLen: 2, - squareSize: 4, - fits: true, - expectedIndex: 2, - }, - { - name: "half row blobLen 4 cursor 3", - cursor: 3, - blobLen: 4, - squareSize: 8, - fits: true, - expectedIndex: 4, - }, - { - name: "blobLen 5 cursor 3 size 8", - cursor: 3, - blobLen: 5, - squareSize: 8, - fits: false, - expectedIndex: 4, - }, - { - name: "blobLen 2 cursor 3 square size 8", - cursor: 3, - blobLen: 2, - squareSize: 8, - fits: true, - expectedIndex: 4, - }, - { - name: "cursor 3 blobLen 5 size 8", - cursor: 3, - blobLen: 5, - squareSize: 8, - fits: false, - expectedIndex: 4, - }, - { - name: "bloblen 12 cursor 1 size 16", - cursor: 1, - blobLen: 12, - squareSize: 16, - fits: true, - expectedIndex: 4, - }, - { - name: "edge case where there are many blobs with a single size", - cursor: 10291, - blobLen: 1, - squareSize: 128, - fits: true, - expectedIndex: 10291, - }, - { - name: "second row blobLen 2 cursor 11 square size 8", - cursor: 11, - blobLen: 2, - squareSize: 8, 
- fits: true, - expectedIndex: 12, - }, - { - // inspired by the diagram at https://github.com/celestiaorg/celestia-app/blob/1b80b94a62c8c292f569e2fc576e26299985681a/docs/architecture/adr-009-non-interactive-default-rules-for-reduced-padding.md?plain=1#L30 - name: "non-interactive default rules for reduced padding diagram", - cursor: 11, - blobLen: 11, - squareSize: 8, - fits: false, - expectedIndex: 12, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - res, fits := NextMultipleOfBlobMinSquareSize(tt.cursor, tt.blobLen, tt.squareSize) - assert.Equal(t, tt.fits, fits) - assert.Equal(t, tt.expectedIndex, res) - }) - } -} - -func Test_roundUpBy(t *testing.T) { - type test struct { - cursor, v int - expectedIndex int - } - tests := []test{ - { - cursor: 1, - v: 2, - expectedIndex: 2, - }, - { - cursor: 2, - v: 2, - expectedIndex: 2, - }, - { - cursor: 0, - v: 2, - expectedIndex: 0, - }, - { - cursor: 5, - v: 2, - expectedIndex: 6, - }, - { - cursor: 8, - v: 16, - expectedIndex: 16, - }, - { - cursor: 33, - v: 1, - expectedIndex: 33, - }, - { - cursor: 32, - v: 16, - expectedIndex: 32, - }, - { - cursor: 33, - v: 16, - expectedIndex: 48, - }, - } - for i, tt := range tests { - t.Run( - fmt.Sprintf( - "test %d: %d cursor %d v %d expectedIndex", - i, - tt.cursor, - tt.v, - tt.expectedIndex, - ), - func(t *testing.T) { - res := roundUpBy(tt.cursor, tt.v) - assert.Equal(t, tt.expectedIndex, res) - }) - } -} - -func TestMinSquareSize(t *testing.T) { - type testCase struct { - shareCount int - want int - } - testCases := []testCase{ - { - shareCount: 0, - want: 1, - }, - { - shareCount: 1, - want: 1, - }, - { - shareCount: 2, - want: 2, - }, - { - shareCount: 3, - want: 2, - }, - { - shareCount: 4, - want: 2, - }, - { - shareCount: 5, - want: 4, - }, - { - shareCount: 16, - want: 4, - }, - { - shareCount: 17, - want: 8, - }, - } - for _, tc := range testCases { - t.Run(fmt.Sprintf("shareCount %d", tc.shareCount), func(t *testing.T) { - got := 
MinSquareSize(tc.shareCount) - assert.Equal(t, tc.want, got) - }) - } -} diff --git a/shares/padding.go b/shares/padding.go deleted file mode 100644 index a755c40ccc..0000000000 --- a/shares/padding.go +++ /dev/null @@ -1,69 +0,0 @@ -package shares - -import ( - "bytes" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" -) - -// NamespacePaddingShare returns a share that acts as padding. Namespace padding -// shares follow a blob so that the next blob may start at an index that -// conforms to non-interactive default rules. The ns parameter provided should -// be the namespace of the blob that precedes this padding in the data square. -func NamespacePaddingShare(ns appns.Namespace) (Share, error) { - b, err := NewBuilder(ns, appconsts.ShareVersionZero, true).Init() - if err != nil { - return Share{}, err - } - if err := b.WriteSequenceLen(0); err != nil { - return Share{}, err - } - padding := bytes.Repeat([]byte{0}, appconsts.FirstSparseShareContentSize) - b.AddData(padding) - - share, err := b.Build() - if err != nil { - return Share{}, err - } - - return *share, nil -} - -// NamespacePaddingShares returns n namespace padding shares. -func NamespacePaddingShares(ns appns.Namespace, n int) ([]Share, error) { - var err error - shares := make([]Share, n) - for i := 0; i < n; i++ { - shares[i], err = NamespacePaddingShare(ns) - if err != nil { - return shares, err - } - } - return shares, nil -} - -// ReservedPaddingShare returns a share that acts as padding. Reserved padding -// shares follow all significant shares in the reserved namespace so that the -// first blob can start at an index that conforms to non-interactive default -// rules. -func ReservedPaddingShare() (Share, error) { - return NamespacePaddingShare(appns.ReservedPaddingNamespace) -} - -// ReservedPaddingShare returns n reserved padding shares. 
-func ReservedPaddingShares(n int) ([]Share, error) { - return NamespacePaddingShares(appns.ReservedPaddingNamespace, n) -} - -// TailPaddingShare is a share that is used to pad a data square to the desired -// square size. Tail padding shares follow the last blob share in the data -// square. -func TailPaddingShare() (Share, error) { - return NamespacePaddingShare(appns.TailPaddingNamespace) -} - -// TailPaddingShares returns n tail padding shares. -func TailPaddingShares(n int) ([]Share, error) { - return NamespacePaddingShares(appns.TailPaddingNamespace, n) -} diff --git a/shares/padding_test.go b/shares/padding_test.go deleted file mode 100644 index 865543e706..0000000000 --- a/shares/padding_test.go +++ /dev/null @@ -1,82 +0,0 @@ -package shares - -import ( - "bytes" - "testing" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -var ns1 = appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) - -var nsOnePadding, _ = zeroPadIfNecessary( - append( - ns1.Bytes(), - []byte{ - 1, // info byte - 0, 0, 0, 0, // sequence len - }..., - ), appconsts.ShareSize) - -var reservedPadding, _ = zeroPadIfNecessary( - append( - appns.ReservedPaddingNamespace.Bytes(), - []byte{ - 1, // info byte - 0, 0, 0, 0, // sequence len - }..., - ), appconsts.ShareSize) - -var tailPadding, _ = zeroPadIfNecessary( - append( - appns.TailPaddingNamespace.Bytes(), - []byte{ - 1, // info byte - 0, 0, 0, 0, // sequence len - }..., - ), appconsts.ShareSize) - -func TestNamespacePaddingShare(t *testing.T) { - got, err := NamespacePaddingShare(ns1) - assert.NoError(t, err) - assert.Equal(t, nsOnePadding, got.ToBytes()) -} - -func TestNamespacePaddingShares(t *testing.T) { - shares, err := NamespacePaddingShares(ns1, 2) - assert.NoError(t, err) - for _, share := range shares { - assert.Equal(t, nsOnePadding, share.ToBytes()) - } -} - 
-func TestReservedPaddingShare(t *testing.T) { - got, err := ReservedPaddingShare() - require.NoError(t, err) - assert.Equal(t, reservedPadding, got.ToBytes()) -} - -func TestReservedPaddingShares(t *testing.T) { - shares, err := ReservedPaddingShares(2) - require.NoError(t, err) - for _, share := range shares { - assert.Equal(t, reservedPadding, share.ToBytes()) - } -} - -func TestTailPaddingShare(t *testing.T) { - got, err := TailPaddingShare() - require.NoError(t, err) - assert.Equal(t, tailPadding, got.ToBytes()) -} - -func TestTailPaddingShares(t *testing.T) { - shares, err := TailPaddingShares(2) - require.NoError(t, err) - for _, share := range shares { - assert.Equal(t, tailPadding, share.ToBytes()) - } -} diff --git a/shares/parse.go b/shares/parse.go deleted file mode 100644 index 13b0f38d1e..0000000000 --- a/shares/parse.go +++ /dev/null @@ -1,81 +0,0 @@ -package shares - -import ( - "bytes" - "fmt" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - coretypes "github.com/tendermint/tendermint/types" -) - -// ParseTxs collects all of the transactions from the shares provided -func ParseTxs(shares []Share) (coretypes.Txs, error) { - // parse the shares - rawTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) - if err != nil { - return nil, err - } - - // convert to the Tx type - txs := make(coretypes.Txs, len(rawTxs)) - for i := 0; i < len(txs); i++ { - txs[i] = coretypes.Tx(rawTxs[i]) - } - - return txs, nil -} - -// ParseBlobs collects all blobs from the shares provided -func ParseBlobs(shares []Share) ([]coretypes.Blob, error) { - blobList, err := parseSparseShares(shares, appconsts.SupportedShareVersions) - if err != nil { - return []coretypes.Blob{}, err - } - - return blobList, nil -} - -func ParseShares(shares []Share) ([]ShareSequence, error) { - sequences := []ShareSequence{} - currentSequence := ShareSequence{} - - for _, share := range shares { - if err := share.Validate(); err != nil { - return sequences, err - } - 
isStart, err := share.IsSequenceStart() - if err != nil { - return sequences, err - } - ns, err := share.Namespace() - if err != nil { - return sequences, err - } - if isStart { - if len(currentSequence.Shares) > 0 { - sequences = append(sequences, currentSequence) - } - currentSequence = ShareSequence{ - Shares: []Share{share}, - Namespace: ns, - } - } else { - if !bytes.Equal(currentSequence.Namespace.Bytes(), ns.Bytes()) { - return sequences, fmt.Errorf("share sequence %v has inconsistent namespace IDs with share %v", currentSequence, share) - } - currentSequence.Shares = append(currentSequence.Shares, share) - } - } - - if len(currentSequence.Shares) > 0 { - sequences = append(sequences, currentSequence) - } - - for _, sequence := range sequences { - if err := sequence.validSequenceLen(); err != nil { - return sequences, err - } - } - - return sequences, nil -} diff --git a/shares/parse_sparse_shares.go b/shares/parse_sparse_shares.go deleted file mode 100644 index 22e899a3e5..0000000000 --- a/shares/parse_sparse_shares.go +++ /dev/null @@ -1,88 +0,0 @@ -package shares - -import ( - "bytes" - "fmt" - - coretypes "github.com/tendermint/tendermint/types" -) - -type sequence struct { - blob coretypes.Blob - sequenceLen uint32 -} - -// parseSparseShares iterates through rawShares and parses out individual -// blobs. It returns an error if a rawShare contains a share version that -// isn't present in supportedShareVersions. 
-func parseSparseShares(shares []Share, supportedShareVersions []uint8) (blobs []coretypes.Blob, err error) { - if len(shares) == 0 { - return nil, nil - } - sequences := make([]sequence, 0) - - for _, share := range shares { - version, err := share.Version() - if err != nil { - return nil, err - } - if !bytes.Contains(supportedShareVersions, []byte{version}) { - return nil, fmt.Errorf("unsupported share version %v is not present in supported share versions %v", version, supportedShareVersions) - } - - isPadding, err := share.IsPadding() - if err != nil { - return nil, err - } - if isPadding { - continue - } - - isStart, err := share.IsSequenceStart() - if err != nil { - return nil, err - } - - if isStart { - sequenceLen, err := share.SequenceLen() - if err != nil { - return nil, err - } - data, err := share.RawData() - if err != nil { - return nil, err - } - ns, err := share.Namespace() - if err != nil { - return nil, err - } - blob := coretypes.Blob{ - NamespaceID: ns.ID, - Data: data, - ShareVersion: version, - NamespaceVersion: ns.Version, - } - sequences = append(sequences, sequence{ - blob: blob, - sequenceLen: sequenceLen, - }) - } else { // continuation share - if len(sequences) == 0 { - return nil, fmt.Errorf("continuation share %v without a sequence start share", share) - } - prev := &sequences[len(sequences)-1] - data, err := share.RawData() - if err != nil { - return nil, err - } - prev.blob.Data = append(prev.blob.Data, data...) 
- } - } - for _, sequence := range sequences { - // trim any padding from the end of the sequence - sequence.blob.Data = sequence.blob.Data[:sequence.sequenceLen] - blobs = append(blobs, sequence.blob) - } - - return blobs, nil -} diff --git a/shares/parse_sparse_shares_test.go b/shares/parse_sparse_shares_test.go deleted file mode 100644 index db06bbe879..0000000000 --- a/shares/parse_sparse_shares_test.go +++ /dev/null @@ -1,154 +0,0 @@ -package shares - -import ( - "bytes" - "fmt" - "sort" - "testing" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/testutil/testfactory" - "github.com/celestiaorg/nmt/namespace" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - coretypes "github.com/tendermint/tendermint/types" -) - -func Test_parseSparseShares(t *testing.T) { - type test struct { - name string - blobSize int - blobCount int - } - - // each test is ran twice, once using blobSize as an exact size, and again - // using it as a cap for randomly sized leaves - tests := []test{ - { - name: "single small blob", - blobSize: 10, - blobCount: 1, - }, - { - name: "ten small blobs", - blobSize: 10, - blobCount: 10, - }, - { - name: "single big blob", - blobSize: appconsts.ContinuationSparseShareContentSize * 4, - blobCount: 1, - }, - { - name: "many big blobs", - blobSize: appconsts.ContinuationSparseShareContentSize * 4, - blobCount: 10, - }, - { - name: "single exact size blob", - blobSize: appconsts.FirstSparseShareContentSize, - blobCount: 1, - }, - } - - for _, tc := range tests { - // run the tests with identically sized blobs - t.Run(fmt.Sprintf("%s identically sized ", tc.name), func(t *testing.T) { - blobs := make([]coretypes.Blob, tc.blobCount) - for i := 0; i < tc.blobCount; i++ { - blobs[i] = testfactory.GenerateRandomBlob(tc.blobSize) - } - - sort.Sort(coretypes.BlobsByNamespace(blobs)) - - shares, _ := SplitBlobs(0, nil, blobs, false) - parsedBlobs, err := parseSparseShares(shares, 
appconsts.SupportedShareVersions) - if err != nil { - t.Error(err) - } - - // check that the namespaces and data are the same - for i := 0; i < len(blobs); i++ { - assert.Equal(t, blobs[i].NamespaceID, parsedBlobs[i].NamespaceID, "parsed blob namespace does not match") - assert.Equal(t, blobs[i].Data, parsedBlobs[i].Data, "parsed blob data does not match") - } - }) - - // run the same tests using randomly sized blobs with caps of tc.blobSize - t.Run(fmt.Sprintf("%s randomly sized", tc.name), func(t *testing.T) { - blobs := testfactory.GenerateRandomlySizedBlobs(tc.blobCount, tc.blobSize) - shares, _ := SplitBlobs(0, nil, blobs, false) - parsedBlobs, err := parseSparseShares(shares, appconsts.SupportedShareVersions) - if err != nil { - t.Error(err) - } - - // check that the namespaces and data are the same - for i := 0; i < len(blobs); i++ { - assert.Equal(t, blobs[i].NamespaceID, parsedBlobs[i].NamespaceID) - assert.Equal(t, blobs[i].Data, parsedBlobs[i].Data) - } - }) - } -} - -func Test_parseSparseSharesErrors(t *testing.T) { - type testCase struct { - name string - shares []Share - } - - unsupportedShareVersion := 5 - infoByte, _ := NewInfoByte(uint8(unsupportedShareVersion), true) - - rawShare := []byte{} - rawShare = append(rawShare, namespace.ID{1, 1, 1, 1, 1, 1, 1, 1}...) - rawShare = append(rawShare, byte(infoByte)) - rawShare = append(rawShare, bytes.Repeat([]byte{0}, appconsts.ShareSize-len(rawShare))...) 
- share, err := NewShare(rawShare) - if err != nil { - t.Fatal(err) - } - - tests := []testCase{ - { - "share with unsupported share version", - []Share{*share}, - }, - } - - for _, tt := range tests { - t.Run(tt.name, func(*testing.T) { - _, err := parseSparseShares(tt.shares, appconsts.SupportedShareVersions) - assert.Error(t, err) - }) - } -} - -func Test_parseSparseSharesWithNamespacedPadding(t *testing.T) { - sss := NewSparseShareSplitter() - randomSmallBlob := testfactory.GenerateRandomBlob(appconsts.ContinuationSparseShareContentSize / 2) - randomLargeBlob := testfactory.GenerateRandomBlob(appconsts.ContinuationSparseShareContentSize * 4) - blobs := []coretypes.Blob{ - randomSmallBlob, - randomLargeBlob, - } - sort.Sort(coretypes.BlobsByNamespace(blobs)) - - err := sss.Write(blobs[0]) - require.NoError(t, err) - - err = sss.WriteNamespacedPaddedShares(4) - require.NoError(t, err) - - err = sss.Write(blobs[1]) - require.NoError(t, err) - - err = sss.WriteNamespacedPaddedShares(10) - require.NoError(t, err) - - shares := sss.Export() - pblobs, err := parseSparseShares(shares, appconsts.SupportedShareVersions) - require.NoError(t, err) - require.Equal(t, blobs, pblobs) -} diff --git a/shares/parse_test.go b/shares/parse_test.go deleted file mode 100644 index 6a0639e9ec..0000000000 --- a/shares/parse_test.go +++ /dev/null @@ -1,196 +0,0 @@ -package shares - -import ( - "bytes" - "encoding/binary" - "math/rand" - "reflect" - "testing" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - tmrand "github.com/tendermint/tendermint/libs/rand" - "github.com/tendermint/tendermint/types" -) - -func TestParseShares(t *testing.T) { - type testCase struct { - name string - shares []Share - want []ShareSequence - expectErr bool - } - - start := true - ns1 := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) - 
namespaceTwo := appns.MustNewV0(bytes.Repeat([]byte{1}, appns.NamespaceVersionZeroIDSize)) - - txShares, _, _, err := SplitTxs(generateRandomTxs(2, 1000)) - require.NoError(t, err) - txShareStart := txShares[0] - txShareContinuation := txShares[1] - - blobOneShares, err := SplitBlobs(0, []uint32{}, []types.Blob{generateRandomBlobWithNamespace(ns1, 1000)}, false) - if err != nil { - t.Fatal(err) - } - blobOneStart := blobOneShares[0] - blobOneContinuation := blobOneShares[1] - - blobTwoShares, err := SplitBlobs(0, []uint32{}, []types.Blob{generateRandomBlobWithNamespace(namespaceTwo, 1000)}, false) - if err != nil { - t.Fatal(err) - } - blobTwoStart := blobTwoShares[0] - blobTwoContinuation := blobTwoShares[1] - - invalidShare := Share{data: append(generateRawShare(ns1, start, 1), []byte{0}...)} // invalidShare is now longer than the length of a valid share - - largeSequenceLen := 1000 // it takes more than one share to store a sequence of 1000 bytes - oneShareWithTooLargeSequenceLen := generateRawShare(ns1, start, uint32(largeSequenceLen)) - - shortSequenceLen := 0 - oneShareWithTooShortSequenceLen := generateRawShare(ns1, start, uint32(shortSequenceLen)) - - tests := []testCase{ - { - "empty", - []Share{}, - []ShareSequence{}, - false, - }, - { - "one transaction share", - []Share{txShareStart}, - []ShareSequence{{Namespace: appns.TxNamespace, Shares: []Share{txShareStart}}}, - false, - }, - { - "two transaction shares", - []Share{txShareStart, txShareContinuation}, - []ShareSequence{{Namespace: appns.TxNamespace, Shares: []Share{txShareStart, txShareContinuation}}}, - false, - }, - { - "one blob share", - []Share{blobOneStart}, - []ShareSequence{{Namespace: ns1, Shares: []Share{blobOneStart}}}, - false, - }, - { - "two blob shares", - []Share{blobOneStart, blobOneContinuation}, - []ShareSequence{{Namespace: ns1, Shares: []Share{blobOneStart, blobOneContinuation}}}, - false, - }, - { - "two blobs with two shares each", - []Share{blobOneStart, blobOneContinuation, 
blobTwoStart, blobTwoContinuation}, - []ShareSequence{ - {Namespace: ns1, Shares: []Share{blobOneStart, blobOneContinuation}}, - {Namespace: namespaceTwo, Shares: []Share{blobTwoStart, blobTwoContinuation}}, - }, - false, - }, - { - "one transaction, one blob", - []Share{txShareStart, blobOneStart}, - []ShareSequence{ - {Namespace: appns.TxNamespace, Shares: []Share{txShareStart}}, - {Namespace: ns1, Shares: []Share{blobOneStart}}, - }, - false, - }, - { - "one transaction, two blobs", - []Share{txShareStart, blobOneStart, blobTwoStart}, - []ShareSequence{ - {Namespace: appns.TxNamespace, Shares: []Share{txShareStart}}, - {Namespace: ns1, Shares: []Share{blobOneStart}}, - {Namespace: namespaceTwo, Shares: []Share{blobTwoStart}}, - }, - false, - }, - { - "one share with invalid size", - []Share{invalidShare}, - []ShareSequence{}, - true, - }, - { - "blob one start followed by blob two continuation", - []Share{blobOneStart, blobTwoContinuation}, - []ShareSequence{}, - true, - }, - { - "one share with too large sequence length", - []Share{{data: oneShareWithTooLargeSequenceLen}}, - []ShareSequence{}, - true, - }, - { - "one share with too short sequence length", - []Share{{data: oneShareWithTooShortSequenceLen}}, - []ShareSequence{}, - true, - }, - } - for _, tt := range tests { - t.Run(tt.name, func(t *testing.T) { - got, err := ParseShares(tt.shares) - if tt.expectErr { - assert.Error(t, err) - return - } - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("ParseShares() got %v, want %v", got, tt.want) - } - }) - } -} - -func generateRawShare(namespace appns.Namespace, isSequenceStart bool, sequenceLen uint32) (rawShare []byte) { - infoByte, _ := NewInfoByte(appconsts.ShareVersionZero, isSequenceStart) - - sequenceLenBuf := make([]byte, appconsts.SequenceLenBytes) - binary.BigEndian.PutUint32(sequenceLenBuf, sequenceLen) - - rawShare = append(rawShare, namespace.Bytes()...) - rawShare = append(rawShare, byte(infoByte)) - rawShare = append(rawShare, sequenceLenBuf...) 
- - return padWithRandomBytes(rawShare) -} - -func padWithRandomBytes(partialShare []byte) (paddedShare []byte) { - paddedShare = make([]byte, appconsts.ShareSize) - copy(paddedShare, partialShare) - rand.Read(paddedShare[len(partialShare):]) - return paddedShare -} - -func generateRandomTxs(count, size int) types.Txs { - txs := make(types.Txs, count) - for i := 0; i < count; i++ { - tx := make([]byte, size) - _, err := rand.Read(tx) - if err != nil { - panic(err) - } - txs[i] = tx - } - return txs -} - -func generateRandomBlobWithNamespace(namespace appns.Namespace, size int) types.Blob { - blob := types.Blob{ - NamespaceVersion: namespace.Version, - NamespaceID: namespace.ID, - Data: tmrand.Bytes(size), - ShareVersion: appconsts.ShareVersionZero, - } - return blob -} diff --git a/shares/powers_of_two.go b/shares/powers_of_two.go deleted file mode 100644 index d9c7bc1ff2..0000000000 --- a/shares/powers_of_two.go +++ /dev/null @@ -1,44 +0,0 @@ -package shares - -import ( - "fmt" - - "golang.org/x/exp/constraints" -) - -// RoundUpPowerOfTwo returns the next power of two greater than or equal to input. -func RoundUpPowerOfTwo[I constraints.Integer](input I) I { - var result I = 1 - for result < input { - result = result << 1 - } - return result -} - -// RoundDownPowerOfTwo returns the next power of two less than or equal to input. -func RoundDownPowerOfTwo[I constraints.Integer](input I) (I, error) { - if input <= 0 { - return 0, fmt.Errorf("input %v must be positive", input) - } - roundedUp := RoundUpPowerOfTwo(input) - if roundedUp == input { - return roundedUp, nil - } - return roundedUp / 2, nil -} - -// RoundUpPowerOfTwo returns the next power of two that is strictly greater than input. 
-func RoundUpPowerOfTwoStrict[I constraints.Integer](input I) I { - result := RoundUpPowerOfTwo(input) - - // round the result up to the next power of two if is equal to the input - if result == input { - return result * 2 - } - return result -} - -// IsPowerOfTwo returns true if input is a power of two. -func IsPowerOfTwo[I constraints.Integer](input I) bool { - return input&(input-1) == 0 && input != 0 -} diff --git a/shares/powers_of_two_test.go b/shares/powers_of_two_test.go deleted file mode 100644 index a8a4f4fd52..0000000000 --- a/shares/powers_of_two_test.go +++ /dev/null @@ -1,101 +0,0 @@ -package shares - -import ( - "testing" - - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" -) - -func TestRoundUpPowerOfTwo(t *testing.T) { - type testCase struct { - input int - want int - } - testCases := []testCase{ - {input: -1, want: 1}, - {input: 0, want: 1}, - {input: 1, want: 1}, - {input: 2, want: 2}, - {input: 4, want: 4}, - {input: 5, want: 8}, - {input: 8, want: 8}, - {input: 11, want: 16}, - {input: 511, want: 512}, - } - for _, tc := range testCases { - got := RoundUpPowerOfTwo(tc.input) - assert.Equal(t, tc.want, got) - } -} - -func TestRoundDownPowerOfTwo(t *testing.T) { - type testCase struct { - input int - want int - } - testCases := []testCase{ - {input: 1, want: 1}, - {input: 2, want: 2}, - {input: 4, want: 4}, - {input: 5, want: 4}, - {input: 8, want: 8}, - {input: 11, want: 8}, - {input: 511, want: 256}, - } - for _, tc := range testCases { - got, err := RoundDownPowerOfTwo(tc.input) - require.NoError(t, err) - assert.Equal(t, tc.want, got) - } -} - -func TestRoundUpPowerOfTwoStrict(t *testing.T) { - type testCase struct { - input int - want int - } - testCases := []testCase{ - {input: -1, want: 1}, - {input: 0, want: 1}, - {input: 1, want: 2}, - {input: 2, want: 4}, - {input: 4, want: 8}, - {input: 5, want: 8}, - {input: 8, want: 16}, - {input: 11, want: 16}, - {input: 511, want: 512}, - } - for _, tc := range 
testCases { - got := RoundUpPowerOfTwoStrict(tc.input) - assert.Equal(t, tc.want, got) - } -} - -func TestIsPowerOfTwoU(t *testing.T) { - type test struct { - input uint64 - want bool - } - tests := []test{ - // powers of two - {input: 1, want: true}, - {input: 2, want: true}, - {input: 4, want: true}, - {input: 8, want: true}, - {input: 16, want: true}, - {input: 32, want: true}, - {input: 64, want: true}, - {input: 128, want: true}, - {input: 256, want: true}, - // not powers of two - {input: 0, want: false}, - {input: 3, want: false}, - {input: 12, want: false}, - {input: 79, want: false}, - } - for _, tt := range tests { - got := IsPowerOfTwo(tt.input) - assert.Equal(t, tt.want, got) - } -} diff --git a/shares/reserved_bytes.go b/shares/reserved_bytes.go index 7240e2551d..4fabfcdeaf 100644 --- a/shares/reserved_bytes.go +++ b/shares/reserved_bytes.go @@ -4,7 +4,7 @@ import ( "encoding/binary" "fmt" - "github.com/celestiaorg/celestia-app/pkg/appconsts" + "github.com/rollkit/rollkit/appconsts" ) // NewReservedBytes returns a byte slice of length diff --git a/shares/share_builder.go b/shares/share_builder.go index b19f5cb7c8..7ed165ce43 100644 --- a/shares/share_builder.go +++ b/shares/share_builder.go @@ -4,8 +4,8 @@ import ( "encoding/binary" "errors" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" ) type Builder struct { diff --git a/shares/share_builder_test.go b/shares/share_builder_test.go index 8696e4d22c..6e1638471b 100644 --- a/shares/share_builder_test.go +++ b/shares/share_builder_test.go @@ -5,8 +5,8 @@ import ( "fmt" "testing" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) 
diff --git a/shares/share_sequence.go b/shares/share_sequence.go index bcf5c51250..ae93008abe 100644 --- a/shares/share_sequence.go +++ b/shares/share_sequence.go @@ -3,8 +3,8 @@ package shares import ( "fmt" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" ) // ShareSequence represents a contiguous sequence of shares that are part of the diff --git a/shares/share_sequence_test.go b/shares/share_sequence_test.go index 72e171e825..2f334b07dc 100644 --- a/shares/share_sequence_test.go +++ b/shares/share_sequence_test.go @@ -5,8 +5,8 @@ import ( "encoding/binary" "testing" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" "github.com/stretchr/testify/assert" ) diff --git a/shares/share_splitting.go b/shares/share_splitting.go deleted file mode 100644 index d416489a21..0000000000 --- a/shares/share_splitting.go +++ /dev/null @@ -1,168 +0,0 @@ -package shares - -import ( - "errors" - "fmt" - "sort" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - coretypes "github.com/tendermint/tendermint/types" - "golang.org/x/exp/maps" -) - -var ( - ErrIncorrectNumberOfIndexes = errors.New( - "number of indexes is not identical to the number of blobs", - ) - ErrUnexpectedFirstBlobShareIndex = errors.New( - "the first blob started at an unexpected index", - ) -) - -// Split converts block data into encoded shares, optionally using share indexes -// that are encoded as wrapped transactions. Most use cases out of this package -// should use these share indexes and therefore set useShareIndexes to true. 
-func Split(data coretypes.Data, useShareIndexes bool) ([]Share, error) { - if data.SquareSize == 0 || !isPowerOf2(data.SquareSize) { - return nil, fmt.Errorf("square size is not a power of two: %d", data.SquareSize) - } - wantShareCount := int(data.SquareSize * data.SquareSize) - currentShareCount := 0 - - txShares, pfbTxShares, _, err := SplitTxs(data.Txs) - if err != nil { - return nil, err - } - currentShareCount += len(txShares) + len(pfbTxShares) - // blobIndexes will be nil if we are working with a list of txs that do not - // have a blob index. This preserves backwards compatibility with old blocks - // that do not follow the non-interactive defaults - blobIndexes := ExtractShareIndexes(data.Txs) - sort.Slice(blobIndexes, func(i, j int) bool { return blobIndexes[i] < blobIndexes[j] }) - - var padding []Share - if len(data.Blobs) > 0 { - blobShareStart, _ := NextMultipleOfBlobMinSquareSize( - currentShareCount, - SparseSharesNeeded(uint32(len(data.Blobs[0].Data))), - int(data.SquareSize), - ) - // force blobSharesStart to be the first share index - if len(blobIndexes) != 0 && useShareIndexes { - blobShareStart = int(blobIndexes[0]) - } - if blobShareStart < currentShareCount { - panic(fmt.Sprintf("blobShareStart %v < currentShareCount %v", blobShareStart, currentShareCount)) - } - - padding, err = NamespacePaddingShares(appns.ReservedPaddingNamespace, blobShareStart-currentShareCount) - if err != nil { - return nil, err - } - } - currentShareCount += len(padding) - - if blobIndexes != nil && int(blobIndexes[0]) < currentShareCount { - return nil, ErrUnexpectedFirstBlobShareIndex - } - - blobShares, err := SplitBlobs(currentShareCount, blobIndexes, data.Blobs, useShareIndexes) - if err != nil { - return nil, err - } - currentShareCount += len(blobShares) - tailShares, err := TailPaddingShares(wantShareCount - currentShareCount) - if err != nil { - return nil, err - } - shares := make([]Share, 0, data.SquareSize*data.SquareSize) - shares = 
append(append(append(append(append( - shares, - txShares...), - pfbTxShares...), - padding...), - blobShares...), - tailShares...) - return shares, nil -} - -// ExtractShareIndexes iterates over the transactions and extracts the share -// indexes from wrapped transactions. It returns nil if the transactions are -// from an old block that did not have share indexes in the wrapped txs. -func ExtractShareIndexes(txs coretypes.Txs) []uint32 { - var shareIndexes []uint32 - for _, rawTx := range txs { - if indexWrappedTxs, isIndexWrapped := coretypes.UnmarshalIndexWrapper(rawTx); isIndexWrapped { - // Since share index == 0 is invalid, it indicates that we are - // attempting to extract share indexes from txs that do not have any - // due to them being old. here we return nil to indicate that we are - // attempting to extract indexes from a block that doesn't support - // it. It checks for 0 because if there is a message in the block, - // then there must also be a tx, which will take up at least one - // share. - if len(indexWrappedTxs.ShareIndexes) == 0 { - return nil - } - shareIndexes = append(shareIndexes, indexWrappedTxs.ShareIndexes...) 
- } - } - - return shareIndexes -} - -func SplitTxs(txs coretypes.Txs) (txShares []Share, pfbShares []Share, shareRanges map[coretypes.TxKey]ShareRange, err error) { - txWriter := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) - pfbTxWriter := NewCompactShareSplitter(appns.PayForBlobNamespace, appconsts.ShareVersionZero) - - for _, tx := range txs { - if _, isIndexWrapper := coretypes.UnmarshalIndexWrapper(tx); isIndexWrapper { - err = pfbTxWriter.WriteTx(tx) - } else { - err = txWriter.WriteTx(tx) - } - if err != nil { - return nil, nil, nil, err - } - } - - txShares, txMap, err := txWriter.Export(0) - if err != nil { - return nil, nil, nil, err - } - - pfbShares, pfbMap, err := pfbTxWriter.Export(len(txShares)) - if err != nil { - return nil, nil, nil, err - } - - return txShares, pfbShares, mergeMaps(txMap, pfbMap), nil -} - -func SplitBlobs(cursor int, indexes []uint32, blobs []coretypes.Blob, useShareIndexes bool) ([]Share, error) { - if useShareIndexes && len(indexes) != len(blobs) { - return nil, ErrIncorrectNumberOfIndexes - } - writer := NewSparseShareSplitter() - for i, blob := range blobs { - if err := writer.Write(blob); err != nil { - return nil, err - } - if useShareIndexes && len(indexes) > i+1 { - paddedShareCount := int(indexes[i+1]) - (writer.Count() + cursor) - if err := writer.WriteNamespacedPaddedShares(paddedShareCount); err != nil { - return nil, err - } - } - } - return writer.Export(), nil -} - -// mergeMaps merges two maps into a new map. If there are any duplicate keys, -// the value in the second map takes precedence. 
-func mergeMaps(mapOne, mapTwo map[coretypes.TxKey]ShareRange) map[coretypes.TxKey]ShareRange { - merged := make(map[coretypes.TxKey]ShareRange, len(mapOne)+len(mapTwo)) - maps.Copy(merged, mapOne) - maps.Copy(merged, mapTwo) - return merged -} diff --git a/shares/share_splitting_test.go b/shares/share_splitting_test.go deleted file mode 100644 index a4dbfc68e9..0000000000 --- a/shares/share_splitting_test.go +++ /dev/null @@ -1,387 +0,0 @@ -package shares - -import ( - "bytes" - "reflect" - "testing" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - coretypes "github.com/tendermint/tendermint/types" -) - -func TestSplitTxs_forTxShares(t *testing.T) { - smallTransactionA := coretypes.Tx{0xa} - smallTransactionB := coretypes.Tx{0xb} - largeTransaction := bytes.Repeat([]byte{0xc}, 512) - - type testCase struct { - name string - txs coretypes.Txs - want []Share - } - testCases := []testCase{ - { - name: "empty txs", - txs: coretypes.Txs{}, - want: []Share{}, - }, - { - name: "one small tx", - txs: coretypes.Txs{smallTransactionA}, - want: []Share{ - padShare(Share{ - data: append( - appns.TxNamespace.Bytes(), - []byte{ - 0x1, // info byte - 0x0, 0x0, 0x0, 0x2, // 1 byte (unit) + 1 byte (unit length) = 2 bytes sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 0x1, // unit length of first transaction - 0xa, // data of first transaction - }..., - ), - }, - ), - }, - }, - { - name: "two small txs", - txs: coretypes.Txs{smallTransactionA, smallTransactionB}, - want: []Share{ - padShare(Share{ - data: append( - appns.TxNamespace.Bytes(), - []byte{ - 0x1, // info byte - 0x0, 0x0, 0x0, 0x4, // 2 bytes (first transaction) + 2 bytes (second transaction) = 4 bytes sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 0x1, // unit length of first transaction - 0xa, // data of first transaction - 0x1, // unit length of 
second transaction - 0xb, // data of second transaction - }..., - ), - }, - ), - }, - }, - { - name: "one large tx that spans two shares", - txs: coretypes.Txs{largeTransaction}, - want: []Share{ - fillShare(Share{ - data: append( - appns.TxNamespace.Bytes(), - []byte{ - 0x1, // info byte - 0x0, 0x0, 0x2, 0x2, // 512 (unit) + 2 (unit length) = 514 sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 128, 4, // unit length of transaction is 512 - }..., - ), - }, - 0xc, // data of transaction - ), - padShare(Share{ - data: append( - append( - appns.TxNamespace.Bytes(), - []byte{ - 0x0, // info byte - 0x0, 0x0, 0x0, 0x0, // reserved bytes - }..., - ), - bytes.Repeat([]byte{0xc}, 44)..., // continuation data of transaction - ), - }, - ), - }, - }, - { - name: "one small tx then one large tx that spans two shares", - txs: coretypes.Txs{smallTransactionA, largeTransaction}, - want: []Share{ - fillShare(Share{ - data: append( - appns.TxNamespace.Bytes(), - []byte{ - 0x1, // info byte - 0x0, 0x0, 0x2, 0x4, // 2 bytes (first transaction) + 514 bytes (second transaction) = 516 bytes sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 1, // unit length of first transaction - 0xa, // data of first transaction - 128, 4, // unit length of second transaction is 512 - }..., - ), - }, - 0xc, // data of second transaction - ), - padShare(Share{ - data: append( - append( - appns.TxNamespace.Bytes(), - []byte{ - 0x0, // info byte - 0x0, 0x0, 0x0, 0x0, // reserved bytes - }..., - ), - bytes.Repeat([]byte{0xc}, 46)..., // continuation data of second transaction - ), - }, - ), - }, - }, - { - name: "one large tx that spans two shares then one small tx", - txs: coretypes.Txs{largeTransaction, smallTransactionA}, - want: []Share{ - fillShare(Share{ - data: append( - appns.TxNamespace.Bytes(), - []byte{ - 0x1, // info byte - 0x0, 0x0, 0x2, 0x4, // 514 bytes (first transaction) + 2 bytes (second transaction) = 516 bytes sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 
128, 4, // unit length of first transaction is 512 - }..., - ), - }, - 0xc, // data of first transaction - ), - padShare(Share{ - data: append( - appns.TxNamespace.Bytes(), - []byte{ - 0x0, // info byte - 0x0, 0x0, 0x0, 0x52, // reserved bytes - 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, // continuation data of first transaction - 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, // continuation data of first transaction - 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, 0xc, // continuation data of first transaction - 1, // unit length of second transaction - 0xa, // data of second transaction - }..., - ), - }, - ), - }, - }, - } - for _, tt := range testCases { - t.Run(tt.name, func(t *testing.T) { - got, _, _, err := SplitTxs(tt.txs) - require.NoError(t, err) - if !reflect.DeepEqual(got, tt.want) { - t.Errorf("SplitTxs()\n got %#v\n want %#v", got, tt.want) - } - }) - } -} - -func TestSplitTxs(t *testing.T) { - type testCase struct { - name string - txs coretypes.Txs - wantTxShares []Share - wantPfbShares []Share - wantMap map[coretypes.TxKey]ShareRange - } - - smallTx := coretypes.Tx{0xa} // spans one share - smallTxShares := []Share{ - padShare(Share{ - data: append(appns.TxNamespace.Bytes(), - []byte{ - 0x1, // info byte - 0x0, 0x0, 0x0, 0x2, // 1 byte (unit) + 1 byte (unit length) = 2 bytes sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 0x1, // unit length of first transaction - 0xa, // data of first transaction - }..., - ), - }, - ), - } - - pfbTx, err := coretypes.MarshalIndexWrapper(coretypes.Tx{0xb}, 10) // spans one share - require.NoError(t, err) - pfbTxShares := []Share{ - padShare(Share{ - data: append( - appns.PayForBlobNamespace.Bytes(), - []uint8{ - 0x1, // info byte - 0x0, 0x0, 0x0, 13, // 1 byte (unit) + 1 byte (unit length) = 2 bytes sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 12, // unit length of first transaction - 0xa, 0x1, 0xb, 0x12, 0x1, 
0xa, 0x1a, 0x4, 0x49, 0x4e, 0x44, 0x58, // data of first transaction - }..., - ), - }, - ), - } - - largeTx := coretypes.Tx(bytes.Repeat([]byte{0xc}, appconsts.ShareSize)) // spans two shares - largeTxShares := []Share{ - fillShare(Share{ - data: append(appns.TxNamespace.Bytes(), - []uint8{ - 0x1, // info byte - 0x0, 0x0, 0x2, 0x2, // 512 (unit) + 2 (unit length) = 514 sequence length - 0x0, 0x0, 0x0, 0x2a, // reserved bytes - 128, 4, // unit length of transaction is 512 - }..., - ), - }, - 0xc), // data of transaction - padShare(Share{ - data: append( - append( - appns.TxNamespace.Bytes(), - []uint8{ - 0x0, // info byte - 0x0, 0x0, 0x0, 0x0, // reserved bytes - }..., - ), - bytes.Repeat([]byte{0xc}, 44)..., // continuation data of transaction - ), - }, - ), - } - - testCases := []testCase{ - { - name: "empty", - txs: coretypes.Txs{}, - wantTxShares: []Share{}, - wantPfbShares: []Share{}, - wantMap: map[coretypes.TxKey]ShareRange{}, - }, - { - name: "smallTx", - txs: coretypes.Txs{smallTx}, - wantTxShares: smallTxShares, - wantPfbShares: []Share{}, - wantMap: map[coretypes.TxKey]ShareRange{ - smallTx.Key(): {0, 0}, - }, - }, - { - name: "largeTx", - txs: coretypes.Txs{largeTx}, - wantTxShares: largeTxShares, - wantPfbShares: []Share{}, - wantMap: map[coretypes.TxKey]ShareRange{ - largeTx.Key(): {0, 1}, - }, - }, - { - name: "pfbTx", - txs: coretypes.Txs{pfbTx}, - wantTxShares: []Share{}, - wantPfbShares: pfbTxShares, - wantMap: map[coretypes.TxKey]ShareRange{ - pfbTx.Key(): {0, 0}, - }, - }, - { - name: "largeTx then pfbTx", - txs: coretypes.Txs{largeTx, pfbTx}, - wantTxShares: largeTxShares, - wantPfbShares: pfbTxShares, - wantMap: map[coretypes.TxKey]ShareRange{ - largeTx.Key(): {0, 1}, - pfbTx.Key(): {2, 2}, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - txShares, pfbTxShares, gotMap, err := SplitTxs(tc.txs) - require.NoError(t, err) - assert.Equal(t, tc.wantTxShares, txShares) - assert.Equal(t, tc.wantPfbShares, 
pfbTxShares) - assert.Equal(t, tc.wantMap, gotMap) - }) - } -} - -// padShare returns a share padded with trailing zeros. -func padShare(share Share) (paddedShare Share) { - return fillShare(share, 0) -} - -// fillShare returns a share filled with filler so that the share length -// is equal to appconsts.ShareSize. -func fillShare(share Share, filler byte) (paddedShare Share) { - return Share{data: append(share.data, bytes.Repeat([]byte{filler}, appconsts.ShareSize-len(share.data))...)} -} - -func Test_mergeMaps(t *testing.T) { - type testCase struct { - name string - mapOne map[coretypes.TxKey]ShareRange - mapTwo map[coretypes.TxKey]ShareRange - want map[coretypes.TxKey]ShareRange - } - testCases := []testCase{ - { - name: "empty maps", - mapOne: map[coretypes.TxKey]ShareRange{}, - mapTwo: map[coretypes.TxKey]ShareRange{}, - want: map[coretypes.TxKey]ShareRange{}, - }, - { - name: "merges maps with one key each", - mapOne: map[coretypes.TxKey]ShareRange{ - {0x1}: {0, 1}, - }, - mapTwo: map[coretypes.TxKey]ShareRange{ - {0x2}: {2, 3}, - }, - want: map[coretypes.TxKey]ShareRange{ - {0x1}: {0, 1}, - {0x2}: {2, 3}, - }, - }, - { - name: "merges maps with multiple keys each", - mapOne: map[coretypes.TxKey]ShareRange{ - {0x1}: {0, 1}, - {0x2}: {2, 3}, - }, - mapTwo: map[coretypes.TxKey]ShareRange{ - {0x3}: {3, 3}, - {0x4}: {4, 4}, - }, - want: map[coretypes.TxKey]ShareRange{ - {0x1}: {0, 1}, - {0x2}: {2, 3}, - {0x3}: {3, 3}, - {0x4}: {4, 4}, - }, - }, - { - name: "merges maps with a duplicate key and the second map's value takes precedence", - mapOne: map[coretypes.TxKey]ShareRange{ - {0x1}: {0, 0}, - }, - mapTwo: map[coretypes.TxKey]ShareRange{ - {0x1}: {1, 1}, - }, - want: map[coretypes.TxKey]ShareRange{ - {0x1}: {1, 1}, - }, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - got := mergeMaps(tc.mapOne, tc.mapTwo) - assert.Equal(t, tc.want, got) - }) - } -} diff --git a/shares/shares.go b/shares/shares.go index 9653be5f43..5af9bbe269 
100644 --- a/shares/shares.go +++ b/shares/shares.go @@ -5,8 +5,8 @@ import ( "encoding/binary" "fmt" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" ) // Share contains the raw share data (including namespace ID). diff --git a/shares/shares_test.go b/shares/shares_test.go index 76758b484a..5316a0de59 100644 --- a/shares/shares_test.go +++ b/shares/shares_test.go @@ -4,44 +4,41 @@ import ( "bytes" "testing" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - tmrand "github.com/tendermint/tendermint/libs/rand" - coretypes "github.com/tendermint/tendermint/types" ) -// TestPadFirstIndexedBlob ensures that we are adding padding to the first share -// instead of calculating the value. -func TestPadFirstIndexedBlob(t *testing.T) { - tx := tmrand.Bytes(300) - blob := tmrand.Bytes(300) - index := 100 - indexedTx, err := coretypes.MarshalIndexWrapper(tx, 100) - require.NoError(t, err) +// // TestPadFirstIndexedBlob ensures that we are adding padding to the first share +// // instead of calculating the value. 
+// func TestPadFirstIndexedBlob(t *testing.T) { +// tx := tmrand.Bytes(300) +// blob := tmrand.Bytes(300) +// index := 100 +// indexedTx, err := coretypes.MarshalIndexWrapper(tx, 100) +// require.NoError(t, err) - bd := coretypes.Data{ - Txs: []coretypes.Tx{indexedTx}, - Blobs: []coretypes.Blob{ - { - NamespaceVersion: appns.RandomBlobNamespace().Version, - NamespaceID: appns.RandomBlobNamespace().ID, - Data: blob, - ShareVersion: appconsts.ShareVersionZero, - }, - }, - SquareSize: 64, - } +// bd := coretypes.Data{ +// Txs: []coretypes.Tx{indexedTx}, +// Blobs: []coretypes.Blob{ +// { +// NamespaceVersion: appns.RandomBlobNamespace().Version, +// NamespaceID: appns.RandomBlobNamespace().ID, +// Data: blob, +// ShareVersion: appconsts.ShareVersionZero, +// }, +// }, +// SquareSize: 64, +// } - shares, err := Split(bd, true) - require.NoError(t, err) +// shares, err := Split(bd, true) +// require.NoError(t, err) - resShare, err := shares[index].RawData() - require.NoError(t, err) +// resShare, err := shares[index].RawData() +// require.NoError(t, err) - require.True(t, bytes.Contains(resShare, blob)) -} +// require.True(t, bytes.Contains(resShare, blob)) +// } func TestSequenceLen(t *testing.T) { type testCase struct { @@ -255,71 +252,71 @@ func TestIsCompactShare(t *testing.T) { } } -func TestIsPadding(t *testing.T) { - type testCase struct { - name string - share Share - want bool - wantErr bool - } - emptyShare := Share{} - blobShare, _ := zeroPadIfNecessary( - append( - ns1.Bytes(), - []byte{ - 1, // info byte - 0, 0, 0, 1, // sequence len - 0xff, // data - }..., - ), - appconsts.ShareSize) +// func TestIsPadding(t *testing.T) { +// type testCase struct { +// name string +// share Share +// want bool +// wantErr bool +// } +// emptyShare := Share{} +// blobShare, _ := zeroPadIfNecessary( +// append( +// ns1.Bytes(), +// []byte{ +// 1, // info byte +// 0, 0, 0, 1, // sequence len +// 0xff, // data +// }..., +// ), +// appconsts.ShareSize) - nsPadding, err := 
NamespacePaddingShare(ns1) - require.NoError(t, err) +// nsPadding, err := NamespacePaddingShare(ns1) +// require.NoError(t, err) - tailPadding, err := TailPaddingShare() - require.NoError(t, err) +// tailPadding, err := TailPaddingShare() +// require.NoError(t, err) - reservedPaddingShare, err := ReservedPaddingShare() - require.NoError(t, err) +// reservedPaddingShare, err := ReservedPaddingShare() +// require.NoError(t, err) - testCases := []testCase{ - { - name: "empty share", - share: emptyShare, - wantErr: true, - }, - { - name: "blob share", - share: Share{data: blobShare}, - want: false, - }, - { - name: "namespace padding", - share: nsPadding, - want: true, - }, - { - name: "tail padding", - share: tailPadding, - want: true, - }, - { - name: "reserved padding", - share: reservedPaddingShare, - want: true, - }, - } +// testCases := []testCase{ +// { +// name: "empty share", +// share: emptyShare, +// wantErr: true, +// }, +// { +// name: "blob share", +// share: Share{data: blobShare}, +// want: false, +// }, +// { +// name: "namespace padding", +// share: nsPadding, +// want: true, +// }, +// { +// name: "tail padding", +// share: tailPadding, +// want: true, +// }, +// { +// name: "reserved padding", +// share: reservedPaddingShare, +// want: true, +// }, +// } - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - got, err := tc.share.IsPadding() - if tc.wantErr { - assert.Error(t, err) - return - } - require.NoError(t, err) - assert.Equal(t, tc.want, got) - }) - } -} +// for _, tc := range testCases { +// t.Run(tc.name, func(t *testing.T) { +// got, err := tc.share.IsPadding() +// if tc.wantErr { +// assert.Error(t, err) +// return +// } +// require.NoError(t, err) +// assert.Equal(t, tc.want, got) +// }) +// } +// } diff --git a/shares/sparse_shares_test.go b/shares/sparse_shares_test.go deleted file mode 100644 index 278cf432a7..0000000000 --- a/shares/sparse_shares_test.go +++ /dev/null @@ -1,86 +0,0 @@ -package shares - -import ( - 
"testing" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - "github.com/celestiaorg/celestia-app/testutil/testfactory" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - coretypes "github.com/tendermint/tendermint/types" -) - -func TestSparseShareContainsInfoByte(t *testing.T) { - blob := testfactory.GenerateRandomBlobOfShareCount(4) - - sequenceStartInfoByte, err := NewInfoByte(appconsts.ShareVersionZero, true) - require.NoError(t, err) - - sequenceContinuationInfoByte, err := NewInfoByte(appconsts.ShareVersionZero, false) - require.NoError(t, err) - - type testCase struct { - name string - shareIndex int - expected InfoByte - } - testCases := []testCase{ - { - name: "first share of blob", - shareIndex: 0, - expected: sequenceStartInfoByte, - }, - { - name: "second share of blob", - shareIndex: 1, - expected: sequenceContinuationInfoByte, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - sss := NewSparseShareSplitter() - err := sss.Write(blob) - assert.NoError(t, err) - shares := sss.Export() - got, err := shares[tc.shareIndex].InfoByte() - require.NoError(t, err) - assert.Equal(t, tc.expected, got) - }) - } -} - -func TestSparseShareSplitterCount(t *testing.T) { - type testCase struct { - name string - blob coretypes.Blob - expected int - } - testCases := []testCase{ - { - name: "one share", - blob: testfactory.GenerateRandomBlobOfShareCount(1), - expected: 1, - }, - { - name: "two shares", - blob: testfactory.GenerateRandomBlobOfShareCount(2), - expected: 2, - }, - { - name: "ten shares", - blob: testfactory.GenerateRandomBlobOfShareCount(10), - expected: 10, - }, - } - - for _, tc := range testCases { - t.Run(tc.name, func(t *testing.T) { - sss := NewSparseShareSplitter() - err := sss.Write(tc.blob) - assert.NoError(t, err) - got := sss.Count() - assert.Equal(t, tc.expected, got) - }) - } -} diff --git a/shares/split_compact_shares.go b/shares/split_compact_shares.go index 
b32561b671..d2bb1d2222 100644 --- a/shares/split_compact_shares.go +++ b/shares/split_compact_shares.go @@ -4,8 +4,8 @@ import ( "encoding/binary" "fmt" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" coretypes "github.com/tendermint/tendermint/types" ) diff --git a/shares/split_compact_shares_test.go b/shares/split_compact_shares_test.go index 596720789f..2e2b0c1980 100644 --- a/shares/split_compact_shares_test.go +++ b/shares/split_compact_shares_test.go @@ -4,13 +4,24 @@ import ( "bytes" "testing" - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" coretypes "github.com/tendermint/tendermint/types" ) +// fillShare returns a share filled with filler so that the share length +// is equal to appconsts.ShareSize. +func fillShare(share Share, filler byte) (paddedShare Share) { + return Share{data: append(share.data, bytes.Repeat([]byte{filler}, appconsts.ShareSize-len(share.data))...)} +} + +// padShare returns a share padded with trailing zeros. 
+func padShare(share Share) (paddedShare Share) { + return fillShare(share, 0) +} + func TestCount(t *testing.T) { type testCase struct { transactions []coretypes.Tx diff --git a/shares/split_sparse_shares.go b/shares/split_sparse_shares.go deleted file mode 100644 index 15788b5163..0000000000 --- a/shares/split_sparse_shares.go +++ /dev/null @@ -1,128 +0,0 @@ -package shares - -import ( - "errors" - "fmt" - - "github.com/celestiaorg/celestia-app/pkg/appconsts" - appns "github.com/celestiaorg/celestia-app/pkg/namespace" - coretypes "github.com/tendermint/tendermint/types" - "golang.org/x/exp/slices" -) - -// SparseShareSplitter lazily splits blobs into shares that will eventually be -// included in a data square. It also has methods to help progressively count -// how many shares the blobs written take up. -type SparseShareSplitter struct { - shares []Share -} - -func NewSparseShareSplitter() *SparseShareSplitter { - return &SparseShareSplitter{} -} - -// Write writes the provided blob to this sparse share splitter. It returns an -// error or nil if no error is encountered. 
-func (sss *SparseShareSplitter) Write(blob coretypes.Blob) error { - if !slices.Contains(appconsts.SupportedShareVersions, blob.ShareVersion) { - return fmt.Errorf("unsupported share version: %d", blob.ShareVersion) - } - - rawData := blob.Data - blobNamespace, err := appns.New(blob.NamespaceVersion, blob.NamespaceID) - if err != nil { - return err - } - - // First share - b, err := NewBuilder(blobNamespace, blob.ShareVersion, true).Init() - if err != nil { - return err - } - if err := b.WriteSequenceLen(uint32(len(rawData))); err != nil { - return err - } - - for rawData != nil { - - rawDataLeftOver := b.AddData(rawData) - if rawDataLeftOver == nil { - // Just call it on the latest share - b.ZeroPadIfNecessary() - } - - share, err := b.Build() - if err != nil { - return err - } - sss.shares = append(sss.shares, *share) - - b, err = NewBuilder(blobNamespace, blob.ShareVersion, false).Init() - if err != nil { - return err - } - rawData = rawDataLeftOver - } - - return nil -} - -// RemoveBlob will remove a blob from the underlying blob state. If -// there is namespaced padding after the blob, then that is also removed. -func (sss *SparseShareSplitter) RemoveBlob(i int) (int, error) { - j := 1 - initialCount := len(sss.shares) - if len(sss.shares) > i+1 { - sequenceLen, err := sss.shares[i+1].SequenceLen() - if err != nil { - return 0, err - } - // 0 means that there is padding after the share that we are about to - // remove. to remove this padding, we increase j by 1 - // with the blob - if sequenceLen == 0 { - j++ - } - } - copy(sss.shares[i:], sss.shares[i+j:]) - sss.shares = sss.shares[:len(sss.shares)-j] - newCount := len(sss.shares) - return initialCount - newCount, nil -} - -// WriteNamespacedPaddedShares adds empty shares using the namespace of the last written share. -// This is useful to follow the message layout rules. It assumes that at least -// one share has already been written, if not it panics. 
-func (sss *SparseShareSplitter) WriteNamespacedPaddedShares(count int) error { - if len(sss.shares) == 0 { - return errors.New("cannot write empty namespaced shares on an empty SparseShareSplitter") - } - if count < 0 { - return errors.New("cannot write negative namespaced shares") - } - if count == 0 { - return nil - } - lastBlob := sss.shares[len(sss.shares)-1] - lastBlobNs, err := lastBlob.Namespace() - if err != nil { - return err - } - nsPaddingShares, err := NamespacePaddingShares(lastBlobNs, count) - if err != nil { - return err - } - sss.shares = append(sss.shares, nsPaddingShares...) - - return nil -} - -// Export finalizes and returns the underlying shares. -func (sss *SparseShareSplitter) Export() []Share { - return sss.shares -} - -// Count returns the current number of shares that will be made if exporting. -func (sss *SparseShareSplitter) Count() int { - return len(sss.shares) -} diff --git a/shares/split_sparse_shares_test.go b/shares/split_sparse_shares_test.go deleted file mode 100644 index 550f0c0465..0000000000 --- a/shares/split_sparse_shares_test.go +++ /dev/null @@ -1 +0,0 @@ -package shares diff --git a/shares/utils.go b/shares/utils.go index 636f16bcc1..87b53c9023 100644 --- a/shares/utils.go +++ b/shares/utils.go @@ -3,10 +3,7 @@ package shares import ( "bytes" "encoding/binary" - "fmt" - "math" - core "github.com/tendermint/tendermint/proto/tendermint/types" coretypes "github.com/tendermint/tendermint/types" ) @@ -20,20 +17,20 @@ func isPowerOf2(v uint64) bool { return v&(v-1) == 0 && v != 0 } -func BlobsFromProto(blobs []core.Blob) ([]coretypes.Blob, error) { - result := make([]coretypes.Blob, len(blobs)) - for i, blob := range blobs { - if blob.ShareVersion > math.MaxUint8 { - return nil, fmt.Errorf("share version %d is too large to be a uint8", blob.ShareVersion) - } - result[i] = coretypes.Blob{ - NamespaceID: blob.NamespaceId, - Data: blob.Data, - ShareVersion: uint8(blob.ShareVersion), - } - } - return result, nil -} +// func 
BlobsFromProto(blobs []core.Blob) ([]coretypes.Blob, error) { +// result := make([]coretypes.Blob, len(blobs)) +// for i, blob := range blobs { +// if blob.ShareVersion > math.MaxUint8 { +// return nil, fmt.Errorf("share version %d is too large to be a uint8", blob.ShareVersion) +// } +// result[i] = coretypes.Blob{ +// NamespaceID: blob.NamespaceId, +// Data: blob.Data, +// ShareVersion: uint8(blob.ShareVersion), +// } +// } +// return result, nil +// } func TxsToBytes(txs coretypes.Txs) [][]byte { e := make([][]byte, len(txs)) diff --git a/shares/utils_test.go b/shares/utils_test.go index 49f30b9153..e639458d82 100644 --- a/shares/utils_test.go +++ b/shares/utils_test.go @@ -4,25 +4,23 @@ import ( "reflect" "testing" - "github.com/celestiaorg/celestia-app/testutil/testfactory" + "github.com/rollkit/rollkit/testfactory" "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - "github.com/tendermint/tendermint/types" ) -func FuzzBlobSharesUsed(f *testing.F) { - f.Add(uint32(1)) - f.Fuzz(func(t *testing.T, a uint32) { - if a < 1 { - t.Skip() - } - ml := SparseSharesNeeded(a) - blob := testfactory.GenerateRandomBlob(int(a)) - rawShares, err := SplitBlobs(0, nil, []types.Blob{blob}, false) - require.NoError(t, err) - require.Equal(t, len(rawShares), ml) - }) -} +// func FuzzBlobSharesUsed(f *testing.F) { +// f.Add(uint32(1)) +// f.Fuzz(func(t *testing.T, a uint32) { +// if a < 1 { +// t.Skip() +// } +// ml := SparseSharesNeeded(a) +// blob := testfactory.GenerateRandomBlob(int(a)) +// rawShares, err := SplitBlobs(0, nil, []types.Blob{blob}, false) +// require.NoError(t, err) +// require.Equal(t, len(rawShares), ml) +// }) +// } func Test_zeroPadIfNecessary(t *testing.T) { type args struct { diff --git a/testfactory/txs.go b/testfactory/txs.go new file mode 100644 index 0000000000..50da0937c2 --- /dev/null +++ b/testfactory/txs.go @@ -0,0 +1,32 @@ +package testfactory + +import ( + mrand "math/rand" + + "github.com/tendermint/tendermint/types" +) 
+ +func GenerateRandomlySizedTxs(count, maxSize int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + size := mrand.Intn(maxSize) + if size == 0 { + size = 1 + } + txs[i] = GenerateRandomTxs(1, size)[0] + } + return txs +} + +func GenerateRandomTxs(count, size int) types.Txs { + txs := make(types.Txs, count) + for i := 0; i < count; i++ { + tx := make([]byte, size) + _, err := mrand.Read(tx) + if err != nil { + panic(err) + } + txs[i] = tx + } + return txs +} From 0d479d89b0332496b355c37236f6896b9e59e098 Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Fri, 21 Apr 2023 17:29:46 -0400 Subject: [PATCH 3/8] Remove unused --- shares/share_sequence.go | 39 --------------------------------------- shares/utils.go | 6 +++--- 2 files changed, 3 insertions(+), 42 deletions(-) diff --git a/shares/share_sequence.go b/shares/share_sequence.go index ae93008abe..5b74a72383 100644 --- a/shares/share_sequence.go +++ b/shares/share_sequence.go @@ -42,45 +42,6 @@ func (s ShareSequence) SequenceLen() (uint32, error) { return firstShare.SequenceLen() } -// validSequenceLen extracts the sequenceLen written to the first share -// and returns an error if the number of shares needed to store a sequence of -// length sequenceLen doesn't match the number of shares in this share -// sequence. Returns nil if there is no error. -func (s ShareSequence) validSequenceLen() error { - if len(s.Shares) == 0 { - return fmt.Errorf("invalid sequence length because share sequence %v has no shares", s) - } - firstShare := s.Shares[0] - sharesNeeded, err := numberOfSharesNeeded(firstShare) - if err != nil { - return err - } - - if len(s.Shares) != sharesNeeded { - return fmt.Errorf("share sequence has %d shares but needed %d shares", len(s.Shares), sharesNeeded) - } - return nil -} - -// numberOfSharesNeeded extracts the sequenceLen written to the share -// firstShare and returns the number of shares needed to store a sequence of -// that length. 
-func numberOfSharesNeeded(firstShare Share) (sharesUsed int, err error) { - sequenceLen, err := firstShare.SequenceLen() - if err != nil { - return 0, err - } - - isCompact, err := firstShare.IsCompactShare() - if err != nil { - return 0, err - } - if isCompact { - return CompactSharesNeeded(int(sequenceLen)), nil - } - return SparseSharesNeeded(sequenceLen), nil -} - // CompactSharesNeeded returns the number of compact shares needed to store a // sequence of length sequenceLen. The parameter sequenceLen is the number // of bytes of transactions or intermediate state roots in a sequence. diff --git a/shares/utils.go b/shares/utils.go index 87b53c9023..90aeb5dcdd 100644 --- a/shares/utils.go +++ b/shares/utils.go @@ -13,9 +13,9 @@ func DelimLen(size uint64) int { return binary.PutUvarint(lenBuf, size) } -func isPowerOf2(v uint64) bool { - return v&(v-1) == 0 && v != 0 -} +// func isPowerOf2(v uint64) bool { +// return v&(v-1) == 0 && v != 0 +// } // func BlobsFromProto(blobs []core.Blob) ([]coretypes.Blob, error) { // result := make([]coretypes.Blob, len(blobs)) From 98f841ce6ba29221c29c8694220b98ce896e5ff9 Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Fri, 21 Apr 2023 17:36:15 -0400 Subject: [PATCH 4/8] lint --- shares/compact_shares_test.go | 7 ++++--- shares/share_builder_test.go | 5 +++-- shares/share_sequence_test.go | 3 ++- shares/shares_test.go | 3 ++- shares/split_compact_shares.go | 3 ++- shares/split_compact_shares_test.go | 5 +++-- shares/utils_test.go | 3 ++- 7 files changed, 18 insertions(+), 11 deletions(-) diff --git a/shares/compact_shares_test.go b/shares/compact_shares_test.go index b00c71ef36..edd11758d7 100644 --- a/shares/compact_shares_test.go +++ b/shares/compact_shares_test.go @@ -6,12 +6,13 @@ import ( "testing" "time" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" - "github.com/rollkit/rollkit/testfactory" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" coretypes 
"github.com/tendermint/tendermint/types" + + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/testfactory" ) // ToTxs converts a raw slice of byte slices into a Txs type. diff --git a/shares/share_builder_test.go b/shares/share_builder_test.go index 6e1638471b..5a6cf863b1 100644 --- a/shares/share_builder_test.go +++ b/shares/share_builder_test.go @@ -5,10 +5,11 @@ import ( "fmt" "testing" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" + + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" ) func TestShareBuilderIsEmptyShare(t *testing.T) { diff --git a/shares/share_sequence_test.go b/shares/share_sequence_test.go index 2f334b07dc..3eab5b8806 100644 --- a/shares/share_sequence_test.go +++ b/shares/share_sequence_test.go @@ -5,9 +5,10 @@ import ( "encoding/binary" "testing" + "github.com/stretchr/testify/assert" + "github.com/rollkit/rollkit/appconsts" appns "github.com/rollkit/rollkit/namespace" - "github.com/stretchr/testify/assert" ) func TestShareSequenceRawData(t *testing.T) { diff --git a/shares/shares_test.go b/shares/shares_test.go index 5316a0de59..28621a4c30 100644 --- a/shares/shares_test.go +++ b/shares/shares_test.go @@ -4,9 +4,10 @@ import ( "bytes" "testing" + "github.com/stretchr/testify/assert" + "github.com/rollkit/rollkit/appconsts" appns "github.com/rollkit/rollkit/namespace" - "github.com/stretchr/testify/assert" ) // // TestPadFirstIndexedBlob ensures that we are adding padding to the first share diff --git a/shares/split_compact_shares.go b/shares/split_compact_shares.go index d2bb1d2222..7a2d6f3dcf 100644 --- a/shares/split_compact_shares.go +++ b/shares/split_compact_shares.go @@ -4,9 +4,10 @@ import ( "encoding/binary" "fmt" + coretypes "github.com/tendermint/tendermint/types" + "github.com/rollkit/rollkit/appconsts" appns 
"github.com/rollkit/rollkit/namespace" - coretypes "github.com/tendermint/tendermint/types" ) type ShareRange struct { diff --git a/shares/split_compact_shares_test.go b/shares/split_compact_shares_test.go index 2e2b0c1980..9948b1d0c0 100644 --- a/shares/split_compact_shares_test.go +++ b/shares/split_compact_shares_test.go @@ -4,11 +4,12 @@ import ( "bytes" "testing" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" coretypes "github.com/tendermint/tendermint/types" + + "github.com/rollkit/rollkit/appconsts" + appns "github.com/rollkit/rollkit/namespace" ) // fillShare returns a share filled with filler so that the share length diff --git a/shares/utils_test.go b/shares/utils_test.go index e639458d82..bcb955b8d2 100644 --- a/shares/utils_test.go +++ b/shares/utils_test.go @@ -4,8 +4,9 @@ import ( "reflect" "testing" - "github.com/rollkit/rollkit/testfactory" "github.com/stretchr/testify/assert" + + "github.com/rollkit/rollkit/testfactory" ) // func FuzzBlobSharesUsed(f *testing.F) { From 5e53133712528cb01f52ceea5a9ff45148e1b448 Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Sun, 23 Apr 2023 11:27:34 -0400 Subject: [PATCH 5/8] Remove ToTxs with already existing method --- shares/compact_shares_test.go | 11 +---------- 1 file changed, 1 insertion(+), 10 deletions(-) diff --git a/shares/compact_shares_test.go b/shares/compact_shares_test.go index edd11758d7..50dd85cc2a 100644 --- a/shares/compact_shares_test.go +++ b/shares/compact_shares_test.go @@ -15,15 +15,6 @@ import ( "github.com/rollkit/rollkit/testfactory" ) -// ToTxs converts a raw slice of byte slices into a Txs type. 
-func ToTxs(txs [][]byte) coretypes.Txs { - txBzs := make(coretypes.Txs, len(txs)) - for i := 0; i < len(txs); i++ { - txBzs[i] = txs[i] - } - return txBzs -} - func SplitTxs(txs coretypes.Txs) (txShares []Share, err error) { txWriter := NewCompactShareSplitter(appns.TxNamespace, appconsts.ShareVersionZero) @@ -55,7 +46,7 @@ func TestCompactShareSplitter(t *testing.T) { require.NoError(t, err) rawResTxs, err := parseCompactShares(shares, appconsts.SupportedShareVersions) - resTxs := ToTxs(rawResTxs) + resTxs := TxsFromBytes(rawResTxs) require.NoError(t, err) assert.Equal(t, txs, resTxs) From 22aba0e8bdbcf71e2ef1666a15e5d4118842ba7b Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Mon, 24 Apr 2023 09:38:21 -0400 Subject: [PATCH 6/8] move to libs --- {appconsts => libs/appconsts}/appconsts.go | 0 {appconsts => libs/appconsts}/consensus_consts.go | 0 {namespace => libs/namespace}/consts.go | 0 {namespace => libs/namespace}/namespace.go | 0 {namespace => libs/namespace}/namespace_test.go | 0 {namespace => libs/namespace}/random_blob.go | 0 {namespace => libs/namespace}/random_namespace.go | 0 {shares => libs/shares}/compact_shares_test.go | 0 {shares => libs/shares}/doc.go | 0 {shares => libs/shares}/info_byte.go | 0 {shares => libs/shares}/info_byte_test.go | 0 {shares => libs/shares}/parse_compact_shares.go | 0 {shares => libs/shares}/reserved_bytes.go | 0 {shares => libs/shares}/reserved_bytes_test.go | 0 {shares => libs/shares}/share_builder.go | 0 {shares => libs/shares}/share_builder_test.go | 0 {shares => libs/shares}/share_sequence.go | 0 {shares => libs/shares}/share_sequence_test.go | 0 {shares => libs/shares}/shares.go | 0 {shares => libs/shares}/shares_test.go | 0 {shares => libs/shares}/split_compact_shares.go | 0 {shares => libs/shares}/split_compact_shares_test.go | 0 {shares => libs/shares}/testdata/sample-block.json | 0 {shares => libs/shares}/utils.go | 0 {shares => libs/shares}/utils_test.go | 0 {testfactory => libs/testfactory}/txs.go | 0 26 
files changed, 0 insertions(+), 0 deletions(-) rename {appconsts => libs/appconsts}/appconsts.go (100%) rename {appconsts => libs/appconsts}/consensus_consts.go (100%) rename {namespace => libs/namespace}/consts.go (100%) rename {namespace => libs/namespace}/namespace.go (100%) rename {namespace => libs/namespace}/namespace_test.go (100%) rename {namespace => libs/namespace}/random_blob.go (100%) rename {namespace => libs/namespace}/random_namespace.go (100%) rename {shares => libs/shares}/compact_shares_test.go (100%) rename {shares => libs/shares}/doc.go (100%) rename {shares => libs/shares}/info_byte.go (100%) rename {shares => libs/shares}/info_byte_test.go (100%) rename {shares => libs/shares}/parse_compact_shares.go (100%) rename {shares => libs/shares}/reserved_bytes.go (100%) rename {shares => libs/shares}/reserved_bytes_test.go (100%) rename {shares => libs/shares}/share_builder.go (100%) rename {shares => libs/shares}/share_builder_test.go (100%) rename {shares => libs/shares}/share_sequence.go (100%) rename {shares => libs/shares}/share_sequence_test.go (100%) rename {shares => libs/shares}/shares.go (100%) rename {shares => libs/shares}/shares_test.go (100%) rename {shares => libs/shares}/split_compact_shares.go (100%) rename {shares => libs/shares}/split_compact_shares_test.go (100%) rename {shares => libs/shares}/testdata/sample-block.json (100%) rename {shares => libs/shares}/utils.go (100%) rename {shares => libs/shares}/utils_test.go (100%) rename {testfactory => libs/testfactory}/txs.go (100%) diff --git a/appconsts/appconsts.go b/libs/appconsts/appconsts.go similarity index 100% rename from appconsts/appconsts.go rename to libs/appconsts/appconsts.go diff --git a/appconsts/consensus_consts.go b/libs/appconsts/consensus_consts.go similarity index 100% rename from appconsts/consensus_consts.go rename to libs/appconsts/consensus_consts.go diff --git a/namespace/consts.go b/libs/namespace/consts.go similarity index 100% rename from 
namespace/consts.go rename to libs/namespace/consts.go diff --git a/namespace/namespace.go b/libs/namespace/namespace.go similarity index 100% rename from namespace/namespace.go rename to libs/namespace/namespace.go diff --git a/namespace/namespace_test.go b/libs/namespace/namespace_test.go similarity index 100% rename from namespace/namespace_test.go rename to libs/namespace/namespace_test.go diff --git a/namespace/random_blob.go b/libs/namespace/random_blob.go similarity index 100% rename from namespace/random_blob.go rename to libs/namespace/random_blob.go diff --git a/namespace/random_namespace.go b/libs/namespace/random_namespace.go similarity index 100% rename from namespace/random_namespace.go rename to libs/namespace/random_namespace.go diff --git a/shares/compact_shares_test.go b/libs/shares/compact_shares_test.go similarity index 100% rename from shares/compact_shares_test.go rename to libs/shares/compact_shares_test.go diff --git a/shares/doc.go b/libs/shares/doc.go similarity index 100% rename from shares/doc.go rename to libs/shares/doc.go diff --git a/shares/info_byte.go b/libs/shares/info_byte.go similarity index 100% rename from shares/info_byte.go rename to libs/shares/info_byte.go diff --git a/shares/info_byte_test.go b/libs/shares/info_byte_test.go similarity index 100% rename from shares/info_byte_test.go rename to libs/shares/info_byte_test.go diff --git a/shares/parse_compact_shares.go b/libs/shares/parse_compact_shares.go similarity index 100% rename from shares/parse_compact_shares.go rename to libs/shares/parse_compact_shares.go diff --git a/shares/reserved_bytes.go b/libs/shares/reserved_bytes.go similarity index 100% rename from shares/reserved_bytes.go rename to libs/shares/reserved_bytes.go diff --git a/shares/reserved_bytes_test.go b/libs/shares/reserved_bytes_test.go similarity index 100% rename from shares/reserved_bytes_test.go rename to libs/shares/reserved_bytes_test.go diff --git a/shares/share_builder.go 
b/libs/shares/share_builder.go similarity index 100% rename from shares/share_builder.go rename to libs/shares/share_builder.go diff --git a/shares/share_builder_test.go b/libs/shares/share_builder_test.go similarity index 100% rename from shares/share_builder_test.go rename to libs/shares/share_builder_test.go diff --git a/shares/share_sequence.go b/libs/shares/share_sequence.go similarity index 100% rename from shares/share_sequence.go rename to libs/shares/share_sequence.go diff --git a/shares/share_sequence_test.go b/libs/shares/share_sequence_test.go similarity index 100% rename from shares/share_sequence_test.go rename to libs/shares/share_sequence_test.go diff --git a/shares/shares.go b/libs/shares/shares.go similarity index 100% rename from shares/shares.go rename to libs/shares/shares.go diff --git a/shares/shares_test.go b/libs/shares/shares_test.go similarity index 100% rename from shares/shares_test.go rename to libs/shares/shares_test.go diff --git a/shares/split_compact_shares.go b/libs/shares/split_compact_shares.go similarity index 100% rename from shares/split_compact_shares.go rename to libs/shares/split_compact_shares.go diff --git a/shares/split_compact_shares_test.go b/libs/shares/split_compact_shares_test.go similarity index 100% rename from shares/split_compact_shares_test.go rename to libs/shares/split_compact_shares_test.go diff --git a/shares/testdata/sample-block.json b/libs/shares/testdata/sample-block.json similarity index 100% rename from shares/testdata/sample-block.json rename to libs/shares/testdata/sample-block.json diff --git a/shares/utils.go b/libs/shares/utils.go similarity index 100% rename from shares/utils.go rename to libs/shares/utils.go diff --git a/shares/utils_test.go b/libs/shares/utils_test.go similarity index 100% rename from shares/utils_test.go rename to libs/shares/utils_test.go diff --git a/testfactory/txs.go b/libs/testfactory/txs.go similarity index 100% rename from testfactory/txs.go rename to 
libs/testfactory/txs.go From d48b1881d0251408cef77f51e5c340605e98b797 Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Mon, 24 Apr 2023 09:43:31 -0400 Subject: [PATCH 7/8] modify path to libs --- libs/namespace/consts.go | 2 +- libs/shares/compact_shares_test.go | 6 +++--- libs/shares/info_byte.go | 2 +- libs/shares/reserved_bytes.go | 2 +- libs/shares/share_builder.go | 4 ++-- libs/shares/share_builder_test.go | 4 ++-- libs/shares/share_sequence.go | 4 ++-- libs/shares/share_sequence_test.go | 5 ++--- libs/shares/shares.go | 4 ++-- libs/shares/shares_test.go | 4 ++-- libs/shares/split_compact_shares.go | 4 ++-- libs/shares/split_compact_shares_test.go | 4 ++-- libs/shares/utils_test.go | 2 +- 13 files changed, 23 insertions(+), 24 deletions(-) diff --git a/libs/namespace/consts.go b/libs/namespace/consts.go index 5f3d613223..c8b320238a 100644 --- a/libs/namespace/consts.go +++ b/libs/namespace/consts.go @@ -4,7 +4,7 @@ import ( "bytes" "math" - "github.com/rollkit/rollkit/appconsts" + "github.com/rollkit/rollkit/libs/appconsts" ) const ( diff --git a/libs/shares/compact_shares_test.go b/libs/shares/compact_shares_test.go index 50dd85cc2a..45ae135e71 100644 --- a/libs/shares/compact_shares_test.go +++ b/libs/shares/compact_shares_test.go @@ -10,9 +10,9 @@ import ( "github.com/stretchr/testify/require" coretypes "github.com/tendermint/tendermint/types" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" - "github.com/rollkit/rollkit/testfactory" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" + "github.com/rollkit/rollkit/libs/testfactory" ) func SplitTxs(txs coretypes.Txs) (txShares []Share, err error) { diff --git a/libs/shares/info_byte.go b/libs/shares/info_byte.go index 013344a83f..a57e295b7c 100644 --- a/libs/shares/info_byte.go +++ b/libs/shares/info_byte.go @@ -3,7 +3,7 @@ package shares import ( "fmt" - "github.com/rollkit/rollkit/appconsts" + 
"github.com/rollkit/rollkit/libs/appconsts" ) // InfoByte is a byte with the following structure: the first 7 bits are diff --git a/libs/shares/reserved_bytes.go b/libs/shares/reserved_bytes.go index 4fabfcdeaf..f5b1b0efd1 100644 --- a/libs/shares/reserved_bytes.go +++ b/libs/shares/reserved_bytes.go @@ -4,7 +4,7 @@ import ( "encoding/binary" "fmt" - "github.com/rollkit/rollkit/appconsts" + "github.com/rollkit/rollkit/libs/appconsts" ) // NewReservedBytes returns a byte slice of length diff --git a/libs/shares/share_builder.go b/libs/shares/share_builder.go index 7ed165ce43..02affdffea 100644 --- a/libs/shares/share_builder.go +++ b/libs/shares/share_builder.go @@ -4,8 +4,8 @@ import ( "encoding/binary" "errors" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) type Builder struct { diff --git a/libs/shares/share_builder_test.go b/libs/shares/share_builder_test.go index 5a6cf863b1..6187637bdf 100644 --- a/libs/shares/share_builder_test.go +++ b/libs/shares/share_builder_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) func TestShareBuilderIsEmptyShare(t *testing.T) { diff --git a/libs/shares/share_sequence.go b/libs/shares/share_sequence.go index 5b74a72383..cb9ac2e500 100644 --- a/libs/shares/share_sequence.go +++ b/libs/shares/share_sequence.go @@ -3,8 +3,8 @@ package shares import ( "fmt" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) // ShareSequence represents a contiguous sequence of shares that are part of the diff --git 
a/libs/shares/share_sequence_test.go b/libs/shares/share_sequence_test.go index 3eab5b8806..d0ce24ac1c 100644 --- a/libs/shares/share_sequence_test.go +++ b/libs/shares/share_sequence_test.go @@ -5,10 +5,9 @@ import ( "encoding/binary" "testing" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" "github.com/stretchr/testify/assert" - - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" ) func TestShareSequenceRawData(t *testing.T) { diff --git a/libs/shares/shares.go b/libs/shares/shares.go index 5af9bbe269..a8eccc067a 100644 --- a/libs/shares/shares.go +++ b/libs/shares/shares.go @@ -5,8 +5,8 @@ import ( "encoding/binary" "fmt" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) // Share contains the raw share data (including namespace ID). diff --git a/libs/shares/shares_test.go b/libs/shares/shares_test.go index 28621a4c30..412c2d57e5 100644 --- a/libs/shares/shares_test.go +++ b/libs/shares/shares_test.go @@ -6,8 +6,8 @@ import ( "github.com/stretchr/testify/assert" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) // // TestPadFirstIndexedBlob ensures that we are adding padding to the first share diff --git a/libs/shares/split_compact_shares.go b/libs/shares/split_compact_shares.go index 7a2d6f3dcf..41b80e5eae 100644 --- a/libs/shares/split_compact_shares.go +++ b/libs/shares/split_compact_shares.go @@ -6,8 +6,8 @@ import ( coretypes "github.com/tendermint/tendermint/types" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) type ShareRange struct { diff --git 
a/libs/shares/split_compact_shares_test.go b/libs/shares/split_compact_shares_test.go index 9948b1d0c0..2bb77a4c00 100644 --- a/libs/shares/split_compact_shares_test.go +++ b/libs/shares/split_compact_shares_test.go @@ -8,8 +8,8 @@ import ( "github.com/stretchr/testify/require" coretypes "github.com/tendermint/tendermint/types" - "github.com/rollkit/rollkit/appconsts" - appns "github.com/rollkit/rollkit/namespace" + "github.com/rollkit/rollkit/libs/appconsts" + appns "github.com/rollkit/rollkit/libs/namespace" ) // fillShare returns a share filled with filler so that the share length diff --git a/libs/shares/utils_test.go b/libs/shares/utils_test.go index bcb955b8d2..09eb2a113b 100644 --- a/libs/shares/utils_test.go +++ b/libs/shares/utils_test.go @@ -6,7 +6,7 @@ import ( "github.com/stretchr/testify/assert" - "github.com/rollkit/rollkit/testfactory" + "github.com/rollkit/rollkit/libs/testfactory" ) // func FuzzBlobSharesUsed(f *testing.F) { From 85671ec6e9c2e9e041b7ba695576acfc07945427 Mon Sep 17 00:00:00 2001 From: Manav Aggarwal Date: Mon, 24 Apr 2023 09:44:49 -0400 Subject: [PATCH 8/8] goimports rearrange --- libs/shares/share_sequence_test.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/libs/shares/share_sequence_test.go b/libs/shares/share_sequence_test.go index d0ce24ac1c..869433f289 100644 --- a/libs/shares/share_sequence_test.go +++ b/libs/shares/share_sequence_test.go @@ -5,9 +5,10 @@ import ( "encoding/binary" "testing" + "github.com/stretchr/testify/assert" + "github.com/rollkit/rollkit/libs/appconsts" appns "github.com/rollkit/rollkit/libs/namespace" - "github.com/stretchr/testify/assert" ) func TestShareSequenceRawData(t *testing.T) {