EigenDA x Optimism fork diff

diff: +2961 -2999 (ignored: +71 -4)

The original Optimism codebase can be found at github.com/ethereum-optimism/optimism, and our fork at github.com/Layr-Labs/optimism.

Modifications to op-batcher. The batcher's boolean UseBlobs channel-config field is replaced by a three-valued DaType (calldata, blob, alt-da); alt-da commitments are cached per channel and submitted to L1 in strict frame order per the Holocene rules; and a down da-server triggers a per-channel failover to ethDA calldata.

diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_config_provider_test.go layr-labs/optimism/op-batcher/batcher/channel_config_provider_test.go index 6f4ea7701c0b12048f03a659906c4c9a5865fd00..3931320c5edd17b6fdd22f91ce5e8b0760fce877 100644 --- ethereum-optimism/optimism/op-batcher/batcher/channel_config_provider_test.go +++ layr-labs/optimism/op-batcher/batcher/channel_config_provider_test.go @@ -31,11 +31,12 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) { calldataCfg := ChannelConfig{ MaxFrameSize: 120_000 - 1, TargetNumFrames: 1, + DaType: DaTypeCalldata, } blobCfg := ChannelConfig{ MaxFrameSize: eth.MaxBlobDataSize - 1, TargetNumFrames: 3, // gets closest to amortized fixed tx costs - UseBlobs: true, + DaType: DaTypeBlob, }   tests := []struct {
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_manager_test.go layr-labs/optimism/op-batcher/batcher/channel_manager_test.go index c8bce13262ee979c622fc9f82d119c5909ab89f0..359aafc373a205198ec53aaf6786eacf6ab84054 100644 --- ethereum-optimism/optimism/op-batcher/batcher/channel_manager_test.go +++ layr-labs/optimism/op-batcher/batcher/channel_manager_test.go @@ -298,11 +298,12 @@ calldataCfg := ChannelConfig{ MaxFrameSize: 120_000 - 1, TargetNumFrames: 1, + DaType: DaTypeCalldata, } blobCfg := ChannelConfig{ MaxFrameSize: eth.MaxBlobDataSize - 1, TargetNumFrames: 3, // gets closest to amortized fixed tx costs - UseBlobs: true, + DaType: DaTypeBlob, } calldataCfg.InitNoneCompressor() blobCfg.InitNoneCompressor() @@ -356,7 +357,7 @@ cfg := newFakeDynamicEthChannelConfig(l, 1000)   cfg.chooseBlobs = tc.chooseBlobsWhenChannelCreated m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig) - require.Equal(t, tc.chooseBlobsWhenChannelCreated, m.defaultCfg.UseBlobs) + require.Equal(t, tc.chooseBlobsWhenChannelCreated, m.defaultCfg.DaType == DaTypeBlob)   // Seed channel manager with a block rng := rand.New(rand.NewSource(99)) @@ -393,8 +394,8 @@ } }   require.Equal(t, tc.numExpectedAssessments, cfg.assessments) - require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, data.asBlob) - require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, m.defaultCfg.UseBlobs) + require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, data.daType == DaTypeBlob) + require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, m.defaultCfg.DaType == DaTypeBlob) }) }
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_test.go layr-labs/optimism/op-batcher/batcher/channel_test.go index 0e1365eceec9c8aa8cbf68b184e8305f557dd1f3..19759c40ef8de759ad7f6e5db57409fca2753fae 100644 --- ethereum-optimism/optimism/op-batcher/batcher/channel_test.go +++ layr-labs/optimism/op-batcher/batcher/channel_test.go @@ -131,7 +131,7 @@ require := require.New(t) const n = 6 lgr := testlog.Logger(t, log.LevelWarn) ch, err := newChannelWithChannelOut(lgr, metrics.NoopMetrics, ChannelConfig{ - UseBlobs: false, + DaType: DaTypeCalldata, TargetNumFrames: n, CompressorConfig: compressor.Config{ CompressionAlgo: derive.Zlib, @@ -172,7 +172,7 @@ require := require.New(t) const n = 6 lgr := testlog.Logger(t, log.LevelWarn) ch, err := newChannelWithChannelOut(lgr, metrics.NoopMetrics, ChannelConfig{ - UseBlobs: true, + DaType: DaTypeBlob, TargetNumFrames: n, CompressorConfig: compressor.Config{ CompressionAlgo: derive.Zlib,
diff --git ethereum-optimism/optimism/op-batcher/batcher/driver_test.go layr-labs/optimism/op-batcher/batcher/driver_test.go index 454ade01ee756ab315eac77e5876805f18c7b6fd..d5fa76e76e39aa233dd3905d315cbec524ae1b92 100644 --- ethereum-optimism/optimism/op-batcher/batcher/driver_test.go +++ layr-labs/optimism/op-batcher/batcher/driver_test.go @@ -4,6 +4,8 @@ import ( "context" "encoding/json" "errors" + "fmt" + "math/big" "net" "net/http" "net/http/httptest" @@ -11,14 +13,20 @@ "sync" "testing" "time"   + altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-batcher/batcher/throttler" + "github.com/ethereum-optimism/optimism/op-batcher/compressor" "github.com/ethereum-optimism/optimism/op-batcher/config" "github.com/ethereum-optimism/optimism/op-batcher/metrics" + "github.com/ethereum-optimism/optimism/op-node/rollup" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-service/dial" "github.com/ethereum-optimism/optimism/op-service/eth" "github.com/ethereum-optimism/optimism/op-service/testlog" "github.com/ethereum-optimism/optimism/op-service/testutils" "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" "github.com/ethereum/go-ethereum/log" "github.com/stretchr/testify/require" ) @@ -128,6 +136,7 @@ ep.rollupClientErr = errors.New("failed to resolve rollup client")   _, err := bs.safeL1Origin(context.Background()) + fmt.Println(err) require.Error(t, err) }   @@ -171,6 +180,224 @@ candidateOut := q.Load(txData.ID().String())   expectedFloorDataGas := uint64(21_000 + 12*10) require.GreaterOrEqual(t, candidateOut.GasLimit, expectedFloorDataGas) +} + +// ======= ALTDA TESTS ======= + +// fakeL1Client is just a dummy struct. All fault injection is done via the fakeTxMgr (which doesn't interact with this fakeL1Client). 
+type fakeL1Client struct { +} + +func (f *fakeL1Client) HeaderByNumber(ctx context.Context, number *big.Int) (*types.Header, error) { + if number == nil { + number = big.NewInt(0) + } + return &types.Header{ + Number: number, + ParentHash: common.Hash{}, + Time: 0, + }, nil +} +func (f *fakeL1Client) NonceAt(ctx context.Context, account common.Address, blockNumber *big.Int) (uint64, error) { + return 0, nil +} + +func altDASetup(t *testing.T, log log.Logger) (*BatchSubmitter, *mockL2EndpointProvider, *altda.MockDAClient, *testutils.FakeTxMgr) { + ep := newEndpointProvider() + + rollupCfg := &rollup.Config{ + Genesis: rollup.Genesis{L2: eth.BlockID{Number: 0}, L1: eth.BlockID{Number: genesisL1Origin}}, + L2ChainID: big.NewInt(1234), + } + batcherCfg := BatcherConfig{ + PollInterval: 10 * time.Millisecond, + UseAltDA: true, + } + + fakeTxMgr := testutils.NewFakeTxMgr(log.With("subsystem", "fake-txmgr"), common.Address{0}) + l1Client := &fakeL1Client{} + + channelCfg := ChannelConfig{ + // SeqWindowSize: 15, + // SubSafetyMargin: 4, + ChannelTimeout: 10, + MaxFrameSize: 300, + TargetNumFrames: 1, + BatchType: derive.SingularBatchType, + CompressorConfig: compressor.Config{ + Kind: compressor.NoneKind, + }, + DaType: DaTypeAltDA, + } + mockAltDAClient := altda.NewCountingGenericCommitmentMockDAClient(log.With("subsystem", "da-client")) + return NewBatchSubmitter(DriverSetup{ + Log: log, + Metr: metrics.NoopMetrics, + RollupConfig: rollupCfg, + ChannelConfig: channelCfg, + Config: batcherCfg, + EndpointProvider: ep, + Txmgr: fakeTxMgr, + L1Client: l1Client, + AltDA: mockAltDAClient, + }), ep, mockAltDAClient, fakeTxMgr +} + +func fakeSyncStatus(unsafeL2BlockNum uint64, L1BlockRef eth.L1BlockRef) *eth.SyncStatus { + return &eth.SyncStatus{ + UnsafeL2: eth.L2BlockRef{ + Number: unsafeL2BlockNum, + Hash: common.MaxHash, + L1Origin: eth.BlockID{ + Number: 0, + }, + }, + LocalSafeL2: eth.L2BlockRef{ + Number: 0, + Hash: common.MaxHash, + L1Origin: eth.BlockID{ + Number: 0, + }, + }, + HeadL1: L1BlockRef, + } +} + +// There are 4 failure cases (unhappy paths) that the op-batcher has to deal with. +// They are outlined in https://github.com/ethereum-optimism/optimism/tree/develop/op-batcher#happy-path +// This test suite covers these 4 cases in the context of AltDA. +func TestBatchSubmitter_AltDA_FailureCase1_L2Reorg(t *testing.T) { + t.Parallel() + log := testlog.Logger(t, log.LevelDebug) + bs, ep, mockAltDAClient, fakeTxMgr := altDASetup(t, log) + + L1Block0 := types.NewBlock(&types.Header{ + Number: big.NewInt(0), + }, nil, nil, nil, types.DefaultBlockConfig) + L1Block0Ref := eth.L1BlockRef{ + Hash: L1Block0.Hash(), + Number: L1Block0.NumberU64(), + } + // We return incremental syncStatuses to force the op-batcher to entirely process each L2 block one by one. + // To test multi channel behavior, we could return a sync status that is multiple blocks ahead of the current L2 block. 
+ ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(1, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(2, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(3, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(1, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(2, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Return(fakeSyncStatus(3, L1Block0Ref), nil) + + L2Block0 := newMiniL2BlockWithNumberParent(1, big.NewInt(0), common.HexToHash("0x0")) + L2Block1 := newMiniL2BlockWithNumberParent(1, big.NewInt(1), L2Block0.Hash()) + L2Block2 := newMiniL2BlockWithNumberParent(1, big.NewInt(2), L2Block1.Hash()) + L2Block2Prime := newMiniL2BlockWithNumberParentAndL1Information(1, big.NewInt(2), L2Block1.Hash(), 101, 0) + L2Block3Prime := newMiniL2BlockWithNumberParent(1, big.NewInt(3), L2Block2Prime.Hash()) + + // L2block0 is the genesis block which is considered safe, so never loaded into the state. + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(1)).Twice().Return(L2Block1, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(2)).Once().Return(L2Block2, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(2)).Once().Return(L2Block2Prime, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(3)).Twice().Return(L2Block3Prime, nil) + + err := bs.StartBatchSubmitting() + require.NoError(t, err) + time.Sleep(1 * time.Second) // 1 second is enough to process all blocks at 10ms poll interval + err = bs.StopBatchSubmitting(context.Background()) + require.NoError(t, err) + + // After the reorg, block 1 needs to be reprocessed, hence why we see 5 store calls: 1, 2, 1, 2', 3' + require.Equal(t, 5, mockAltDAClient.StoreCount) + require.Equal(t, uint64(5), fakeTxMgr.Nonce) + +} + +func TestBatchSubmitter_AltDA_FailureCase2_FailedL1Tx(t *testing.T) { + t.Parallel() + log := testlog.Logger(t, log.LevelDebug) + bs, ep, mockAltDAClient, fakeTxMgr := altDASetup(t, log) + + L1Block0 := types.NewBlock(&types.Header{ + Number: big.NewInt(0), + }, nil, nil, nil, types.DefaultBlockConfig) + L1Block0Ref := eth.L1BlockRef{ + Hash: L1Block0.Hash(), + Number: L1Block0.NumberU64(), + } + // We return incremental syncStatuses to force the op-batcher to entirely process each L2 block one by one. + // To test multi channel behavior, we could return a sync status that is multiple blocks ahead of the current L2 block. + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(1, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(2, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Times(10).Return(fakeSyncStatus(3, L1Block0Ref), nil) + ep.rollupClient.Mock.On("SyncStatus").Return(fakeSyncStatus(4, L1Block0Ref), nil) + + L2Block0 := newMiniL2BlockWithNumberParent(1, big.NewInt(0), common.HexToHash("0x0")) + L2Block1 := newMiniL2BlockWithNumberParent(1, big.NewInt(1), L2Block0.Hash()) + L2Block2 := newMiniL2BlockWithNumberParent(1, big.NewInt(2), L2Block1.Hash()) + L2Block3 := newMiniL2BlockWithNumberParent(1, big.NewInt(3), L2Block2.Hash()) + L2Block4 := newMiniL2BlockWithNumberParent(1, big.NewInt(4), L2Block3.Hash()) + + // L2block0 is the genesis block which is considered safe, so never loaded into the state. 
+ ep.ethClient.Mock.On("BlockByNumber", big.NewInt(1)).Once().Return(L2Block1, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(2)).Once().Return(L2Block2, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(3)).Once().Return(L2Block3, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(4)).Once().Return(L2Block4, nil) + + fakeTxMgr.ErrorEveryNthSend(2) + err := bs.StartBatchSubmitting() + require.NoError(t, err) + time.Sleep(1 * time.Second) // 1 second is enough to process all blocks at 10ms poll interval + err = bs.StopBatchSubmitting(context.Background()) + require.NoError(t, err) + + require.Equal(t, 4, mockAltDAClient.StoreCount) + // TODO: we should prob also check that the commitments are in order? + require.Equal(t, uint64(4), fakeTxMgr.Nonce) +} + +func TestBatchSubmitter_AltDA_FailureCase3_ChannelTimeout(t *testing.T) { + // This function is not implemented because the batcher channel logic makes it very difficult to inject faults. + // A version of this test was implemented here: https://github.com/Layr-Labs/optimism/blob/4b79c981a13bf096ae2984634d976956fbbfddff/op-batcher/batcher/driver_test.go#L300 + // However we opted to not merge it into the main branch because it has an external dependency on the https://github.com/pingcap/failpoint package, + // and requires a lot of custom test setup and failpoint code injection into the batcher's codebase. + // See https://github.com/ethereum-optimism/optimism/commit/4b79c981a13bf096ae2984634d976956fbbfddff for the full implementation. +} + +func TestBatchSubmitter_AltDA_FailureCase4_FailedBlobSubmission(t *testing.T) { + t.Parallel() + log := testlog.Logger(t, log.LevelDebug) + bs, ep, mockAltDAClient, fakeTxMgr := altDASetup(t, log) + + L1Block0 := types.NewBlock(&types.Header{ + Number: big.NewInt(0), + }, nil, nil, nil, types.DefaultBlockConfig) + L1Block0Ref := eth.L1BlockRef{ + Hash: L1Block0.Hash(), + Number: L1Block0.NumberU64(), + } + ep.rollupClient.Mock.On("SyncStatus").Return(fakeSyncStatus(4, L1Block0Ref), nil) + + L2Block0 := newMiniL2BlockWithNumberParent(1, big.NewInt(0), common.HexToHash("0x0")) + L2Block1 := newMiniL2BlockWithNumberParent(1, big.NewInt(1), L2Block0.Hash()) + L2Block2 := newMiniL2BlockWithNumberParent(1, big.NewInt(2), L2Block1.Hash()) + L2Block3 := newMiniL2BlockWithNumberParent(1, big.NewInt(3), L2Block2.Hash()) + L2Block4 := newMiniL2BlockWithNumberParent(1, big.NewInt(4), L2Block3.Hash()) + + // L2block0 is the genesis block which is considered safe, so never loaded into the state. + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(1)).Once().Return(L2Block1, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(2)).Once().Return(L2Block2, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(3)).Once().Return(L2Block3, nil) + ep.ethClient.Mock.On("BlockByNumber", big.NewInt(4)).Once().Return(L2Block4, nil) + + mockAltDAClient.DropEveryNthPut(2) + + err := bs.StartBatchSubmitting() + require.NoError(t, err) + time.Sleep(1 * time.Second) // 1 second is enough to process all blocks at 10ms poll interval + err = bs.StopBatchSubmitting(context.Background()) + require.NoError(t, err) + + require.Equal(t, 4, mockAltDAClient.StoreCount) + require.Equal(t, uint64(4), fakeTxMgr.Nonce) }   // createHTTPHandler creates a mock HTTP handler for testing, it accepts a callback which
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel.go layr-labs/optimism/op-batcher/batcher/channel.go index 8270977a68c57b374d03d20529e74b2a15c5e04d..850e6ca38c5158235c88b25a8bdaf11d3f459fc9 100644 --- ethereum-optimism/optimism/op-batcher/batcher/channel.go +++ layr-labs/optimism/op-batcher/batcher/channel.go @@ -3,6 +3,7 @@ import ( "math"   + altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-batcher/metrics" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -19,7 +20,16 @@ cfg ChannelConfig   // pending channel builder channelBuilder *ChannelBuilder - // Set of unconfirmed txID -> tx data. For tx resubmission + // Temporary cache for altDACommitments that are received potentially out of order from the da layer. + // Map: first frameNumber in txData -> txData (that contains an altDACommitment) + // Once the txData containing altDAFrameCursor is received, it will be pulled out of the + // channel on the next driver iteration, and sent to L1. + altDACommitments map[uint16]txData + // Points to the next frame number to send to L1 in order to maintain holocene strict ordering rules. + // When altDACommitments[altDAFrameCursor] is non-nil, it will be sent to L1. + altDAFrameCursor uint16 + // Set of unconfirmed txID -> tx data. For tx resubmission. + // Also used for altda for the entirity of the submission (data -> commitment -> tx). pendingTransactions map[string]txData // Set of confirmed txID -> inclusion block. For determining if the channel is timed out confirmedTransactions map[string]eth.BlockID @@ -37,26 +47,83 @@ log: log, metr: metr, cfg: cfg, channelBuilder: cb, + altDACommitments: make(map[uint16]txData), pendingTransactions: make(map[string]txData), confirmedTransactions: make(map[string]eth.BlockID), minInclusionBlock: math.MaxUint64, } }   +// CacheAltDACommitment caches the commitment received from the DA layer for the given txData. +// We cannot submit it directly to L1 yet, as we need to make sure the commitments are submitted in order, +// according to the holocene rules. Therefore, we cache the commitment and let the channelManager +// decide when to pull them out of the channel and send them to L1. +func (c *channel) CacheAltDACommitment(txData txData, commitment altda.CommitmentData) { + if commitment == nil { + panic("expected non-nil commitment") + } + if len(txData.frames) == 0 { + panic("expected txData to have frames") + } + txData.altDACommitment = commitment + c.log.Debug("caching altDA commitment", "frame", txData.frames[0].id.frameNumber, "commitment", commitment.String()) + c.altDACommitments[txData.frames[0].id.frameNumber] = txData +} + +func (c *channel) rewindAltDAFrameCursor(txData txData) { + if len(txData.frames) == 0 { + panic("expected txData to have frames") + } + c.altDAFrameCursor = txData.frames[0].id.frameNumber +} + +// AltDASubmissionFailed records an AltDA blob dispersal as having failed. +// It rewinds the channelBuilder's frameCursor to the first frame of the failed txData, +// so that the frames can be resubmitted. failoverToEthDA should be set to true when using altDA +// and altDA is down. This will switch the channel to submit frames to ethDA instead. +// TODO: add a metric for altDA submission failures. +func (c *channel) AltDASubmissionFailed(id string, failoverToEthDA bool) { + // We coopt TxFailed to rewind the frame cursor. 
+ // This will force a resubmit of all the following frames as well, + // even if they had already successfully been submitted and their commitment cached. + // Ideally we'd have another way but for simplicity and to not tangle the altda code + // too much with the non altda code, we reuse the FrameCursor feature. + // TODO: Is there a better abstraction for altda channels? FrameCursors are not well suited + // since frames do not have to be sent in order to the altda, only their commitment does. + c.TxFailed(id) + if failoverToEthDA { + // We failover to calldata txs because in altda mode the channel and channelManager + // are configured to use a calldataConfigManager, as opposed to DynamicEthChannelConfig + // which can use both calldata and blobs. Failover should happen extremely rarely, + // and is only used while the altDA is down, so we can afford to be inefficient here. + // TODO: figure out how to switch to blobs/auto instead. Might need to make + // batcherService.initChannelConfig function stateless so that we can reuse it. + c.log.Info("Failing over to calldata txs", "id", c.ID()) + c.cfg.DaType = DaTypeCalldata + } +} + // TxFailed records a transaction as failed. It will attempt to resubmit the data // in the failed transaction. func (c *channel) TxFailed(id string) { if data, ok := c.pendingTransactions[id]; ok { c.log.Trace("marked transaction as failed", "id", id) - // Rewind to the first frame of the failed tx - // -- the frames are ordered, and we want to send them - // all again. - c.channelBuilder.RewindFrameCursor(data.Frames()[0]) + if data.altDACommitment != nil { + // In altDA mode, we don't want to rewind the channelBuilder's frameCursor + // because that will lead to resubmitting the same data to the da layer. + // We simply need to rewind the altDAFrameCursor to the first frame of the failed txData, + // to force a resubmit of the cached altDACommitment. + c.rewindAltDAFrameCursor(data) + } else { + // Rewind to the first frame of the failed tx + // -- the frames are ordered, and we want to send them + // all again. + c.channelBuilder.RewindFrameCursor(data.Frames()[0]) + } delete(c.pendingTransactions, id) } else { c.log.Warn("unknown transaction marked as failed", "id", id) } - c.metr.RecordBatchTxFailed() }   @@ -88,7 +155,16 @@ // If this channel timed out, put the pending blocks back into the local saved blocks // and then reset this state so it can try to build a new channel. if c.isTimedOut() { c.metr.RecordChannelTimedOut(c.ID()) - c.log.Warn("Channel timed out", "id", c.ID(), "min_inclusion_block", c.minInclusionBlock, "max_inclusion_block", c.maxInclusionBlock) + var chanFirstL2BlockNum, chanLastL2BlockNum uint64 + if c.channelBuilder.blocks.Len() > 0 { + chanFirstL2Block, _ := c.channelBuilder.blocks.Peek() + chanLastL2Block, _ := c.channelBuilder.blocks.PeekN(c.channelBuilder.blocks.Len() - 1) + chanFirstL2BlockNum = chanFirstL2Block.NumberU64() + chanLastL2BlockNum = chanLastL2Block.NumberU64() + } + c.log.Warn("Channel timed out", "id", c.ID(), + "min_l1_inclusion_block", c.minInclusionBlock, "max_l1_inclusion_block", c.maxInclusionBlock, + "first_l2_block", chanFirstL2BlockNum, "last_l2_block", chanLastL2BlockNum) return true }   @@ -123,22 +199,44 @@ func (c *channel) ID() derive.ChannelID { return c.channelBuilder.ID() }   +// NextAltDACommitment checks if it has already received the altDA commitment +// of the txData whose first frame is altDAFrameCursor. If it has, it returns +// the txData and true. 
Otherwise, it returns an empty txData and false. +func (c *channel) NextAltDACommitment() (txData, bool) { + if txData, ok := c.altDACommitments[c.altDAFrameCursor]; ok { + if txData.altDACommitment == nil { + panic("expected altDACommitment to be non-nil") + } + if len(txData.frames) == 0 { + panic("expected txData to have frames") + } + // update altDAFrameCursor to the first frame of the next txData + lastFrame := txData.frames[len(txData.frames)-1] + c.altDAFrameCursor = lastFrame.id.frameNumber + 1 + // We also store it in pendingTransactions so that TxFailed can know + // that this tx's altDA commitment was already cached. + c.pendingTransactions[txData.ID().String()] = txData + return txData, true + } + return txData{}, false +} + // NextTxData dequeues the next frames from the channel and returns them encoded in a tx data packet. -// If cfg.UseBlobs is false, it returns txData with a single frame. -// If cfg.UseBlobs is true, it will read frames from its channel builder +// If cfg.DaType == DaTypeCalldata, it returns txData with a single frame. +// Else when cfg.DaType == DaTypeBlob or DaTypeAltDA, it will read frames from its channel builder // until it either doesn't have more frames or the target number of frames is reached. // // NextTxData should only be called after HasTxData returned true. func (c *channel) NextTxData() txData { nf := c.cfg.MaxFramesPerTx() - txdata := txData{frames: make([]frameData, 0, nf), asBlob: c.cfg.UseBlobs} + txdata := txData{frames: make([]frameData, 0, nf), daType: c.cfg.DaType} for i := 0; i < nf && c.channelBuilder.HasPendingFrame(); i++ { frame := c.channelBuilder.NextFrame() txdata.frames = append(txdata.frames, frame) }   id := txdata.ID().String() - c.log.Debug("returning next tx data", "id", id, "num_frames", len(txdata.frames), "as_blob", txdata.asBlob) + c.log.Debug("returning next tx data", "id", id, "num_frames", len(txdata.frames), "da_type", txdata.daType) c.pendingTransactions[id] = txdata   return txdata @@ -146,7 +244,7 @@ }   func (c *channel) HasTxData() bool { if c.IsFull() || // If the channel is full, we should start to submit it - !c.cfg.UseBlobs { // If using calldata, we only send one frame per tx + c.cfg.DaType == DaTypeCalldata { // If using calldata, we only send one frame per tx return c.channelBuilder.HasPendingFrame() } // Collect enough frames if channel is not full yet
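The ordering machinery above is easiest to see in isolation. Below is a minimal, self-contained sketch (not the fork's code) of the cache-and-cursor pattern channel.go introduces: commitments may arrive from the DA layer in any order, but are only released to L1 once the commitment at the cursor's frame number has arrived. Types are simplified; the real cache is keyed by a txData's first frame number and the cursor advances past that txData's last frame.

```go
package main

import "fmt"

// commitmentCache releases DA commitments strictly in frame order, even when
// the DA layer returns them out of order. Simplified stand-in for the
// channel's altDACommitments map and altDAFrameCursor fields.
type commitmentCache struct {
	byFirstFrame map[uint16][]byte // first frame number of a txData -> its commitment
	cursor       uint16            // next frame number allowed to go to L1
	framesPerTx  uint16            // assumed constant here; real txData carries its own frames
}

// cache stores a commitment received (possibly out of order) from the DA layer.
func (c *commitmentCache) cache(firstFrame uint16, commitment []byte) {
	c.byFirstFrame[firstFrame] = commitment
}

// next returns the commitment at the cursor, if it has arrived, and advances
// the cursor past the frames it covers.
func (c *commitmentCache) next() ([]byte, bool) {
	comm, ok := c.byFirstFrame[c.cursor]
	if !ok {
		return nil, false // the cursor's commitment hasn't arrived yet: hold everything
	}
	c.cursor += c.framesPerTx
	return comm, true
}

func main() {
	c := &commitmentCache{byFirstFrame: map[uint16][]byte{}, framesPerTx: 1}
	c.cache(1, []byte("comm-1")) // frame 1's commitment arrives first
	if _, ok := c.next(); !ok {
		fmt.Println("frame 0 not ready: holding frame 1's commitment")
	}
	c.cache(0, []byte("comm-0"))
	for comm, ok := c.next(); ok; comm, ok = c.next() {
		fmt.Printf("submit %s to L1\n", comm) // comm-0, then comm-1
	}
}
```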
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_config.go layr-labs/optimism/op-batcher/batcher/channel_config.go index 20ff60ea1a5a17349e1d3a5806ffe7d52e198a75..f82e8b2ea63fa5f632dc92d0ce5eaff045db262b 100644 --- ethereum-optimism/optimism/op-batcher/batcher/channel_config.go +++ layr-labs/optimism/op-batcher/batcher/channel_config.go @@ -46,9 +46,12 @@ // BatchType indicates whether the channel uses SingularBatch or SpanBatch. BatchType uint   - // UseBlobs indicates that this channel should be sent as a multi-blob - // transaction with one blob per frame. - UseBlobs bool + // DaType indicates how the frames in this channel should be sent to the L1. + DaType DaType +} + +func (cc ChannelConfig) UseBlobs() bool { + return cc.DaType == DaTypeBlob }   // ChannelConfig returns a copy of the receiver. @@ -93,7 +96,7 @@ ) }   func (cc *ChannelConfig) MaxFramesPerTx() int { - if !cc.UseBlobs { + if cc.DaType == DaTypeCalldata { return 1 } return cc.TargetNumFrames
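A compact illustration of what the new DaType buys over the old boolean, using a stand-in enum with the same three values as tx_data.go below: frame packing becomes a property of the DA target rather than a blob-specific special case.

```go
package main

import "fmt"

// DaType stand-in, mirroring the three values defined in tx_data.go.
type DaType int

const (
	DaTypeCalldata DaType = iota
	DaTypeBlob
	DaTypeAltDA
)

// maxFramesPerTx mirrors ChannelConfig.MaxFramesPerTx above: calldata txs
// carry exactly one frame, while blob and alt-da submissions batch up to the
// configured target number of frames.
func maxFramesPerTx(d DaType, targetNumFrames int) int {
	if d == DaTypeCalldata {
		return 1
	}
	return targetNumFrames
}

func main() {
	fmt.Println(maxFramesPerTx(DaTypeCalldata, 3)) // 1
	fmt.Println(maxFramesPerTx(DaTypeBlob, 3))     // 3: one blob per frame
	fmt.Println(maxFramesPerTx(DaTypeAltDA, 3))    // 3: one alt-da blob holds several frames
}
```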
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_manager.go layr-labs/optimism/op-batcher/batcher/channel_manager.go index b933de454f2f9f871ca678de33bb6d85cde903ad..01ca39f433019f266c77946464f8011aa4d145f2 100644 --- ethereum-optimism/optimism/op-batcher/batcher/channel_manager.go +++ layr-labs/optimism/op-batcher/batcher/channel_manager.go @@ -6,6 +6,7 @@ "fmt" "io" "math"   + altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-batcher/metrics" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" @@ -53,7 +54,7 @@ // channel to write new block data to currentChannel *channel // channels to read frame data from, for writing batches onchain channelQueue []*channel - // used to lookup channels by tx ID upon tx success / failure + // used to lookup channels by tx ID upon altda and tx success / failure txChannels map[string]*channel }   @@ -94,6 +95,41 @@ func (s *channelManager) pendingBlocks() int { return s.blocks.Len() - s.blockCursor }   +// CacheAltDACommitment caches the commitment received from the DA layer for the given txData. +// We cannot submit it directly to L1 yet, as we need to make sure the commitments are submitted in order, +// according to the holocene rules. Therefore, we cache them and let the channelManager decide when to submit them. +func (s *channelManager) CacheAltDACommitment(txData txData, commitment altda.CommitmentData) { + if len(txData.frames) == 0 { + panic("no frames in txData") + } + firstFrame, lastFrame := txData.frames[0], txData.frames[len(txData.frames)-1] + if firstFrame.id.chID != lastFrame.id.chID { + // The current implementation caches commitments inside channels, + // so it assumes that a txData only contains frames from a single channel. + // If this ever panics (hopefully in tests...) it shouldn't be too hard to fix. + panic("commitment spans multiple channels") + } + if channel, ok := s.txChannels[txData.ID().String()]; ok { + channel.CacheAltDACommitment(txData, commitment) + } else { + s.log.Warn("Trying to cache altda commitment for txData from unknown channel. Probably some state reset (from reorg?) happened.", "id", txData.ID()) + } +} + +// AltDASubmissionFailed marks a DA submission as having failed to be submitted to the DA layer. +// The frames will be pushed back into the corresponding channel such that they can be pulled again by the +// driver main loop and resent to the DA layer. failoverToEthDA should be set to true when using altDA +// and altDA is down. This will switch the channel to submit frames to ethDA instead. +func (s *channelManager) AltDASubmissionFailed(_id txID, failoverToEthDA bool) { + id := _id.String() + if channel, ok := s.txChannels[id]; ok { + delete(s.txChannels, id) + channel.AltDASubmissionFailed(id, failoverToEthDA) + } else { + s.log.Warn("transaction from unknown channel marked as failed", "id", id) + } +} + // TxFailed records a transaction as failed. It will attempt to resubmit the data // in the failed transaction. 
func (s *channelManager) TxFailed(_id txID) { @@ -216,6 +252,20 @@ s.txChannels[tx.ID().String()] = channel return tx, nil }   +func (s *channelManager) getNextAltDACommitment() (txData, bool) { + for _, channel := range s.channelQueue { + // if all frames have already been sent to altda, skip this channel + if int(channel.altDAFrameCursor) == channel.channelBuilder.TotalFrames() { + continue + } + if txData, ok := channel.NextAltDACommitment(); ok { + return txData, true + } + break // We need to send the commitments in order, so we can't skip to the next channel + } + return emptyTxData, false +} + // TxData returns the next tx data that should be submitted to L1. // // If the current channel is @@ -226,6 +276,10 @@ // It will decide whether to switch DA type automatically. // When switching DA type, the channelManager state will be rebuilt // with a new ChannelConfig. func (s *channelManager) TxData(l1Head eth.BlockID, isPectra, isThrottling, forcePublish bool) (txData, error) { + // if any altda commitment is ready, return it + if txdata, ok := s.getNextAltDACommitment(); ok { + return txdata, nil + } channel, err := s.getReadyChannel(l1Head, forcePublish) if err != nil { return emptyTxData, err @@ -240,16 +294,16 @@ // Call provider method to reassess optimal DA type newCfg := s.cfgProvider.ChannelConfig(isPectra, isThrottling)   // No change: - if newCfg.UseBlobs == s.defaultCfg.UseBlobs { + if newCfg.UseBlobs() == s.defaultCfg.UseBlobs() { s.log.Debug("Recomputing optimal ChannelConfig: no need to switch DA type", - "useBlobs", s.defaultCfg.UseBlobs) + "useBlobs", s.defaultCfg.UseBlobs()) return s.nextTxData(channel) }   // Change: s.log.Info("Recomputing optimal ChannelConfig: changing DA type and requeing blocks...", - "useBlobsBefore", s.defaultCfg.UseBlobs, - "useBlobsAfter", newCfg.UseBlobs) + "useBlobsBefore", s.defaultCfg.UseBlobs(), + "useBlobsAfter", newCfg.UseBlobs())   // Invalidate the channel so its blocks // get requeued: @@ -294,7 +348,7 @@ } }   dataPending := firstWithTxData != nil - s.log.Debug("Requested tx data", "l1Head", l1Head, "txdata_pending", dataPending, "blocks_pending", s.blocks.Len()) + s.log.Debug("Requested tx data", "l1Head", l1Head, "txdata_pending", dataPending, "blocks_pending", s.pendingBlocks())   // Short circuit if there is pending tx data or the channel manager is closed if dataPending { @@ -361,7 +415,7 @@ "batch_type", cfg.BatchType, "compression_algo", cfg.CompressorConfig.CompressionAlgo, "target_num_frames", cfg.TargetNumFrames, "max_frame_size", cfg.MaxFrameSize, - "use_blobs", cfg.UseBlobs, + "da_type", cfg.DaType.String(), ) s.metr.RecordChannelOpened(pc.ID(), s.pendingBlocks())
diff --git ethereum-optimism/optimism/op-batcher/batcher/driver.go layr-labs/optimism/op-batcher/batcher/driver.go index 4630df4a6f3bdd88aaa6e9a1f71ec43323b95150..908ba0503d09b99a79d1e4b61ed2f2e49d5077a9 100644 --- ethereum-optimism/optimism/op-batcher/batcher/driver.go +++ layr-labs/optimism/op-batcher/batcher/driver.go @@ -81,6 +81,10 @@ type RollupClient interface { SyncStatus(ctx context.Context) (*eth.SyncStatus, error) }   +type AltDAClient interface { + SetInput(ctx context.Context, data []byte) (altda.CommitmentData, error) +} + // DriverSetup is the collection of input/output interfaces and configuration that the driver operates on. type DriverSetup struct { Log log.Logger @@ -91,7 +95,7 @@ Txmgr txmgr.TxManager L1Client L1Client EndpointProvider dial.L2EndpointProvider ChannelConfig ChannelConfigProvider - AltDA *altda.DAClient + AltDA AltDAClient ChannelOutFactory ChannelOutFactory }   @@ -851,6 +855,12 @@ return err } l.Metr.RecordLatestL1Block(l1tip)   + // In AltDA mode, before pulling data out of the state, we make sure + // that the daGroup has not reached the maximum number of goroutines. + // This is to prevent blocking the main event loop when submitting the data to the DA Provider. + if l.Config.UseAltDA && !daGroup.TryGo(func() error { return nil }) { + return io.EOF + } _, params := l.throttleController.Load() // Collect next transaction data. This pulls data out of the channel, so we need to make sure // to put it back if ever da or txmgr requests fail, by calling l.recordFailedDARequest/recordFailedTx. @@ -911,19 +921,16 @@ l.Log.Warn("sending a cancellation transaction to unblock txpool", "blocked_blob", isBlockedBlob) l.sendTx(txData{}, true, candidate, queue, receiptsCh) }   -// publishToAltDAAndL1 posts the txdata to the DA Provider and then sends the commitment to L1. -func (l *BatchSubmitter) publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) { - // sanity checks - if nf := len(txdata.frames); nf != 1 { - l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) - } - if txdata.asBlob { - l.Log.Crit("Unexpected blob txdata with AltDA enabled") +// publishToAltDAAndStoreCommitment posts the txdata to the DA Provider and stores the returned commitment +// in the channelMgr. The commitment will later be sent to the L1 while making sure to follow holocene's strict ordering rules. +func (l *BatchSubmitter) publishToAltDAAndStoreCommitment(txdata txData, daGroup *errgroup.Group) { + if txdata.daType != DaTypeAltDA { + l.Log.Crit("publishToAltDAAndStoreCommitment called with non-AltDA txdata") }   // when posting txdata to an external DA Provider, we use a goroutine to avoid blocking the main loop // since it may take a while for the request to return. - goroutineSpawned := daGroup.TryGo(func() error { + daGroup.Go(func() error { // TODO: probably shouldn't be using the global shutdownCtx here, see https://go.dev/blog/context-and-structs // but sendTransaction receives l.killCtx as an argument, which currently is only canceled after waiting for the main loop // to exit, which would wait on this DA call to finish, which would take a long time. 
@@ -942,17 +949,12 @@ l.recordFailedDARequest(txdata.ID(), err) } return nil } - l.Log.Info("Set altda input", "commitment", comm, "tx", txdata.ID()) - candidate := l.calldataTxCandidate(comm.TxData()) - l.sendTx(txdata, false, candidate, queue, receiptsCh) + l.Log.Info("Sent txdata to altda layer and received commitment", "commitment", comm, "tx", txdata.ID()) + l.channelMgrMutex.Lock() + l.channelMgr.CacheAltDACommitment(txdata, comm) + l.channelMgrMutex.Unlock() return nil }) - if !goroutineSpawned { - // We couldn't start the goroutine because the errgroup.Group limit - // is already reached. Since we can't send the txdata, we have to - // return it for later processing. We use nil error to skip error logging. - l.recordFailedDARequest(txdata.ID(), nil) - } }   // sendTransaction creates & queues for sending a transaction to the batch inbox address with the given `txData`. @@ -960,16 +962,27 @@ // This call will block if the txmgr queue is at the max-pending limit. // The method will block if the queue's MaxPendingTransactions is exceeded. func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error { var err error - - // if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment. - if l.Config.UseAltDA { - l.publishToAltDAAndL1(txdata, queue, receiptsCh, daGroup) - // we return nil to allow publishStateToL1 to keep processing the next txdata - return nil - } + var candidate *txmgr.TxCandidate + switch txdata.daType { + case DaTypeAltDA: + if !l.Config.UseAltDA { + l.Log.Crit("Received AltDA type txdata without AltDA being enabled") + } + if txdata.altDACommitment == nil { + // This means the txdata was not sent to the DA Provider yet. + // This will send the txdata to the DA Provider and store the commitment in the channelMgr. + // Next time this txdata is requested, we will have the commitment and can send it to the L1 (else branch below). + l.publishToAltDAAndStoreCommitment(txdata, daGroup) + // We return here because publishToAltDA is an async operation; the commitment + // is not yet ready to be submitted to the L1. + return nil + } + // This means the txdata was already sent to the DA Provider and we have the commitment + // so we can send the commitment to the L1 + l.Log.Info("Sending altda commitment to L1", "commitment", txdata.altDACommitment, "tx", txdata.ID()) + candidate = l.calldataTxCandidate(txdata.altDACommitment.TxData())   - var candidate *txmgr.TxCandidate - if txdata.asBlob { + case DaTypeBlob: if candidate, err = l.blobTxCandidate(txdata); err != nil { // We could potentially fall through and try a calldata tx instead, but this would // likely result in the chain spending more in gas fees than it is tuned for, so best @@ -977,14 +990,18 @@ // to just fail. We do not expect this error to trigger unless there is a serious bug // or configuration issue. 
return fmt.Errorf("could not create blob tx candidate: %w", err) } - } else { + case DaTypeCalldata: // sanity check if nf := len(txdata.frames); nf != 1 { l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf) } candidate = l.calldataTxCandidate(txdata.CallData()) + default: + l.Log.Crit("Unknown DA type", "da_type", txdata.daType) } - + if candidate == nil { + l.Log.Crit("txcandidate should have been set by one of the three branches above.") + } l.sendTx(txdata, false, candidate, queue, receiptsCh) return nil } @@ -1004,7 +1021,7 @@ } else { candidate.GasLimit = floorDataGas }   - queue.Send(txRef{id: txdata.ID(), isCancel: isCancel, isBlob: txdata.asBlob}, *candidate, receiptsCh) + queue.Send(txRef{id: txdata.ID(), isCancel: isCancel, isBlob: txdata.daType == DaTypeBlob}, *candidate, receiptsCh) }   func (l *BatchSubmitter) blobTxCandidate(data txData) (*txmgr.TxCandidate, error) { @@ -1044,10 +1061,11 @@ func (l *BatchSubmitter) recordFailedDARequest(id txID, err error) { l.channelMgrMutex.Lock() defer l.channelMgrMutex.Unlock() + failover := errors.Is(err, altda.ErrAltDADown) if err != nil { - l.Log.Warn("DA request failed", logFields(id, err)...) + l.Log.Warn("DA request failed", append([]interface{}{"failoverToEthDA", failover}, logFields(id, err)...)...) } - l.channelMgr.TxFailed(id) + l.channelMgr.AltDASubmissionFailed(id, failover) }   func (l *BatchSubmitter) recordFailedTx(id txID, err error) {
diff --git ethereum-optimism/optimism/op-batcher/batcher/service.go layr-labs/optimism/op-batcher/batcher/service.go index 2f0ed8ae67e64d1168a68722d62a9a412362979a..5697a3b4575ec58a8b63ea14fad8f7df1fd45d0d 100644 --- ethereum-optimism/optimism/op-batcher/batcher/service.go +++ layr-labs/optimism/op-batcher/batcher/service.go @@ -266,34 +266,40 @@ MaxBlocksPerSpanBatch: cfg.MaxBlocksPerSpanBatch, TargetNumFrames: cfg.TargetNumFrames, SubSafetyMargin: cfg.SubSafetyMargin, BatchType: cfg.BatchType, + // DaType: set below }   - switch cfg.DataAvailabilityType { - case flags.BlobsType, flags.AutoType: - if !cfg.TestUseMaxTxSizeForBlobs { - // account for version byte prefix - cc.MaxFrameSize = eth.MaxBlobDataSize - 1 + if bs.UseAltDA { + if cfg.DataAvailabilityType == flags.CalldataType { + cc.DaType = DaTypeAltDA + } else { + return fmt.Errorf("altDA is currently only supported with calldata DA Type") } - cc.UseBlobs = true - case flags.CalldataType: // do nothing - default: - return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType) - } - - if bs.UseAltDA && cc.UseBlobs { - return fmt.Errorf("cannot use data availability type blobs or auto with Alt-DA") - } + if cc.MaxFrameSize > altda.MaxInputSize { + return fmt.Errorf("max frame size %d exceeds altDA max input size %d", cc.MaxFrameSize, altda.MaxInputSize) + } + } else {   - if bs.UseAltDA && cc.MaxFrameSize > altda.MaxInputSize { - return fmt.Errorf("max frame size %d exceeds altDA max input size %d", cc.MaxFrameSize, altda.MaxInputSize) + switch cfg.DataAvailabilityType { + case flags.BlobsType, flags.AutoType: + if !cfg.TestUseMaxTxSizeForBlobs { + // account for version byte prefix + cc.MaxFrameSize = eth.MaxBlobDataSize - 1 + } + cc.DaType = DaTypeBlob + case flags.CalldataType: // do nothing + cc.DaType = DaTypeCalldata + default: + return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType) + } }   cc.InitCompressorConfig(cfg.ApproxComprRatio, cfg.Compressor, cfg.CompressionAlgo)   - if cc.UseBlobs && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) { + if cc.UseBlobs() && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) { return errors.New("cannot use Blobs before Ecotone") } - if !cc.UseBlobs && bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) { + if !cc.UseBlobs() && bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) { bs.Log.Warn("Ecotone upgrade is active, but batcher is not configured to use Blobs!") }   @@ -325,7 +331,7 @@ // copy blobs config and use hardcoded calldata fallback config for now calldataCC := cc calldataCC.TargetNumFrames = 1 calldataCC.MaxFrameSize = 120_000 - calldataCC.UseBlobs = false + calldataCC.DaType = DaTypeCalldata calldataCC.ReinitCompressorConfig()   bs.ChannelConfig = NewDynamicEthChannelConfig(bs.Log, 10*time.Second, bs.TxManager, cc, calldataCC) @@ -423,10 +429,11 @@ }   func (bs *BatcherService) initAltDA(cfg *CLIConfig) error { config := cfg.AltDA - if err := config.Check(); err != nil { + daClient, err := config.NewDAClient() + if err != nil { return err } - bs.AltDA = config.NewDAClient() + bs.AltDA = daClient bs.UseAltDA = config.Enabled return nil }
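The service.go branch above condenses to a small decision function. The sketch below uses string literals in place of the flags.CalldataType/BlobsType/AutoType constants and returns the DaType names from tx_data.go; the real code additionally shrinks MaxFrameSize for blobs and enforces altda.MaxInputSize when alt-da is enabled.

```go
package main

import "fmt"

// resolveDaType maps the CLI data-availability type onto the channel's DA
// target. With alt-da enabled, only the calldata type is currently accepted,
// and frames are redirected to the alt-da server instead.
func resolveDaType(useAltDA bool, cliType string) (string, error) {
	if useAltDA {
		if cliType != "calldata" {
			return "", fmt.Errorf("altDA is currently only supported with calldata DA Type")
		}
		return "alt_da", nil
	}
	switch cliType {
	case "blobs", "auto":
		return "blob", nil
	case "calldata":
		return "calldata", nil
	default:
		return "", fmt.Errorf("unknown data availability type: %v", cliType)
	}
}

func main() {
	fmt.Println(resolveDaType(true, "calldata")) // alt_da <nil>
	fmt.Println(resolveDaType(true, "blobs"))    // error: alt-da requires calldata
	fmt.Println(resolveDaType(false, "auto"))    // blob <nil>
}
```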
diff --git ethereum-optimism/optimism/op-batcher/batcher/test_batch_submitter.go layr-labs/optimism/op-batcher/batcher/test_batch_submitter.go index 2b0b9649a36580cf075df0f45f608516889cd93a..7b901269a939e5d5aab762a01c744e907a17a47b 100644 --- ethereum-optimism/optimism/op-batcher/batcher/test_batch_submitter.go +++ layr-labs/optimism/op-batcher/batcher/test_batch_submitter.go @@ -28,7 +28,7 @@ } var candidate *txmgr.TxCandidate var err error cc := l.channelMgr.cfgProvider.ChannelConfig(true, false) - if cc.UseBlobs { + if cc.UseBlobs() { candidate = l.calldataTxCandidate([]byte{}) } else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil { return err
diff --git ethereum-optimism/optimism/op-batcher/batcher/tx_data.go layr-labs/optimism/op-batcher/batcher/tx_data.go index 0165f85f079ed61bda4874984b2f8169d83ffa99..42b7074985f124965fd1a1083deeff34df6d6965 100644 --- ethereum-optimism/optimism/op-batcher/batcher/tx_data.go +++ layr-labs/optimism/op-batcher/batcher/tx_data.go @@ -4,11 +4,37 @@ import ( "fmt" "strings"   + altda "github.com/ethereum-optimism/optimism/op-alt-da" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" "github.com/ethereum-optimism/optimism/op-service/eth" )   +// DaType determines how txData is submitted to L1. +type DaType int + +const ( + // DaTypeCalldata means that the (single) frame in the txData is submitted as calldata. + DaTypeCalldata DaType = iota + // DaTypeBlob means that the frame(s) in the txData are submitted as ethereum 4844 blobs. + DaTypeBlob + // DaTypeAltDA means that the frame(s) in the txData are submitted to an altda da-server. + DaTypeAltDA +) + +func (d DaType) String() string { + switch d { + case DaTypeCalldata: + return "calldata" + case DaTypeBlob: + return "blob" + case DaTypeAltDA: + return "alt_da" + default: + return fmt.Sprintf("unknown_da_type_%d", d) + } +} + // txData represents the data for a single transaction. // // Note: The batcher currently sends exactly one frame per transaction. This @@ -16,7 +42,12 @@ // might change in the future to allow for multiple frames from possibly // different channels. type txData struct { frames []frameData - asBlob bool // indicates whether this should be sent as blob + // daType represents the DA type which the frames data will be submitted to. + daType DaType + // altDACommitment is non-nil when the frames have been sent to the alt-da server, + // and the received commitment needs to be sent to the L1. + // Should only be present when daType is DaTypeAltDA. + altDACommitment altda.CommitmentData }   func singleFrameTxData(frame frameData) txData {
diff --git ethereum-optimism/optimism/op-batcher/flags/flags.go layr-labs/optimism/op-batcher/flags/flags.go index 86faf762f2838865708dc99c37e56537693b07d2..0fa463ae386399e147d73faa7821d1955d201c4a 100644 --- ethereum-optimism/optimism/op-batcher/flags/flags.go +++ layr-labs/optimism/op-batcher/flags/flags.go @@ -84,8 +84,10 @@ Usage: "Maximum number of blocks to add to a span batch. Default is 0 - no maximum.", EnvVars: prefixEnvVars("MAX_BLOCKS_PER_SPAN_BATCH"), } TargetNumFramesFlag = &cli.IntFlag{ - Name: "target-num-frames", - Usage: "The target number of frames to create per channel. Controls number of blobs per blob tx, if using Blob DA.", + Name: "target-num-frames", + Usage: "The target number of frames to create per channel. " + + "Controls number of blobs per blob tx, if using Blob DA, " + + "or number of frames per blob, if using altDA.", Value: 1, EnvVars: prefixEnvVars("TARGET_NUM_FRAMES"), }
diff --git ethereum-optimism/optimism/op-batcher/readme.md layr-labs/optimism/op-batcher/readme.md index b86fdff600333eeedc8b779999de58db03ff0578..d584f7d1c8474526089a0430359b33ba70133498 100644 --- ethereum-optimism/optimism/op-batcher/readme.md +++ layr-labs/optimism/op-batcher/readme.md @@ -49,7 +49,7 @@ 1. Waits for a signal from the `blockLoadingLoop` 2. Enqueues a new channel, if necessary. 3. Processes some unprocessed blocks into the current channel, triggers the compression of the block data and the creation of frames. -4. Sends frames from the channel queue to the DA layer as (e.g. to Ethereum L1 as calldata or blob transactions). +4. Sends frames from the channel queue to the DA layer (e.g. to Ethereum L1 as calldata or blob transactions). 5. If there is more transaction data to send, go to 2. Else go to 1.   The `receiptsLoop` which @@ -98,17 +98,25 @@ The `blockCursor` state variable tracks the next unprocessed block. In each channel, the `frameCursor` tracks the next unsent frame.   -### Reorgs +### Failure Cases   -When an L2 unsafe reorg is detected, the batch submitter will reset its state, and wait for any in flight transactions to be ingested by the verifier nodes before starting work again. +#### Reorgs   -### Tx Failed +When an L2 reorg (safe or unsafe) is detected, the batch submitter will reset its state, and wait for any in flight transactions to be ingested by the verifier nodes before starting work again. + +#### Tx Failed   When a Tx fails, an asynchronous receipts handler is triggered. The channel from whence the Tx's frames came has its `frameCursor` rewound, so that all the frames can be resubmitted in order.   -### Channel Times Out +> Note: there is an issue with this simple logic. See https://github.com/ethereum-optimism/optimism/issues/13283 + +#### Channel Times Out   When a Tx is confirmed, an asynchronous receipts handler is triggered. We only update the batcher's state if the channel timed out on chain. In that case, the `blockCursor` is rewound to the first block added to that channel, and the channel queue is cleared out. This allows the batcher to start fresh building a new channel starting from the same block -- it does not need to refetch blocks from the sequencer. + +#### AltDA Submission Fails + +When an AltDA submission fails, the frames get pushed back into their respective channel, and will be retried in the next tick. If the da-server returns a 503 HTTP error, then failover to ethDA-calldata is triggered for that specific channel. Each channel will independently always first try to submit to EigenDA.   ## Design Principles and Optimization Targets

Modifications to op-node. AltDA construction is now error-checked, and the derivation pipeline distinguishes EigenDA commitment errors: an invalid cert (DropEigenDACommitmentError) is dropped and derivation skips to the next input, while every other error stays retriable.

diff --git ethereum-optimism/optimism/op-node/node/node.go layr-labs/optimism/op-node/node/node.go index 5d7caadfa62dfef4acab51cc4d567e72d9fb8ce8..2287d1967e427f45393e639ac5ef545b33813794 100644 --- ethereum-optimism/optimism/op-node/node/node.go +++ layr-labs/optimism/op-node/node/node.go @@ -444,7 +444,10 @@ rpCfg, err := cfg.Rollup.GetOPAltDAConfig() if cfg.AltDA.Enabled && err != nil { return fmt.Errorf("failed to get altDA config: %w", err) } - altDA := altda.NewAltDA(n.log, cfg.AltDA, rpCfg, n.metrics.AltDAMetrics) + altDA, err := altda.NewAltDA(n.log, cfg.AltDA, rpCfg, n.metrics.AltDAMetrics) + if err != nil { + return fmt.Errorf("failed to create altDA: %w", err) + } if cfg.SafeDBPath != "" { n.log.Info("Safe head database enabled", "path", cfg.SafeDBPath) safeDB, err := safedb.NewSafeDB(n.log, cfg.SafeDBPath)
diff --git ethereum-optimism/optimism/op-node/rollup/derive/altda_data_source.go layr-labs/optimism/op-node/rollup/derive/altda_data_source.go index 2945a2a9e57b264df389e321f8daf8f25aab9f04..80f3a6e6106e997f760fd36b6130ccb031c08470 100644 --- ethereum-optimism/optimism/op-node/rollup/derive/altda_data_source.go +++ layr-labs/optimism/op-node/rollup/derive/altda_data_source.go @@ -40,8 +40,10 @@ // for the same origin and noop if the origin was already processed. It is also called if // there is not commitment in the current origin. if err := s.fetcher.AdvanceL1Origin(ctx, s.l1, s.id.ID()); err != nil { if errors.Is(err, altda.ErrReorgRequired) { + s.log.Warn("reorg required, resetting altDA L1 origin", "origin", s.id) return nil, NewResetError(errors.New("new expired challenge")) } + s.log.Warn("failed to advance altDA L1 origin", "err", err) return nil, NewTemporaryError(fmt.Errorf("failed to advance altDA L1 origin: %w", err)) }   @@ -58,6 +60,7 @@ } // If the tx data type is not altDA, we forward it downstream to let the next // steps validate and potentially parse it as L1 DA inputs. if data[0] != params.DerivationVersion1 { + s.log.Info("forwarding downstream non altDA data", "version_byte", data[0]) return data, nil }   @@ -72,6 +75,8 @@ s.comm = comm } // use the commitment to fetch the input from the AltDA provider. data, err := s.fetcher.GetInput(ctx, s.l1, s.comm, s.id) + var dropEigenDACommitmentError altda.DropEigenDACommitmentError + // ========================= vvv keccak commitment errors =========================== // GetInput may call for a reorg if the pipeline is stalled and the AltDA manager // continued syncing origins detached from the pipeline origin. if errors.Is(err, altda.ErrReorgRequired) { @@ -79,7 +84,7 @@ // challenge for a new previously derived commitment expired. return nil, NewResetError(err) } else if errors.Is(err, altda.ErrExpiredChallenge) { // this commitment was challenged and the challenge expired. - s.log.Warn("challenge expired, skipping batch", "comm", s.comm) + s.log.Warn("challenge expired, skipping batch", "comm", s.comm, "err", err) s.comm = nil // skip the input return s.Next(ctx) @@ -88,6 +93,15 @@ return nil, NewCriticalError(fmt.Errorf("data for comm %s not available: %w", s.comm, err)) } else if errors.Is(err, altda.ErrPendingChallenge) { // continue stepping without slowing down. return nil, NotEnoughData + // ========================= ^^^ keccak commitment errors =========================== + // ========================= vvv eigenDA commitment errors =========================== + } else if errors.As(err, &dropEigenDACommitmentError) { + // DropEigenDACommitmentError is the only error that can lead to a cert being dropped from the derivation pipeline. + // Any other error should be retried. + s.log.Warn("dropping invalid commitment", "comm", s.comm, "err", err) + s.comm = nil + return s.Next(ctx) // skip the input + // ========================= ^^^ eigenDA commitment errors =========================== } else if err != nil { // return temporary error so we can keep retrying. return nil, NewTemporaryError(fmt.Errorf("failed to fetch input data with comm %s from da service: %w", s.comm, err))
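The new branch makes DropEigenDACommitmentError the single error class that permanently discards a cert from derivation; everything else stays retriable, so a flaky DA read can never silently drop a batch. A sketch of the classification with a stand-in error type (the real one is defined in op-alt-da):

```go
package main

import (
	"errors"
	"fmt"
)

// DropCommitmentError stands in for altda.DropEigenDACommitmentError: the
// only error that causes the derivation pipeline to skip a commitment.
type DropCommitmentError struct{ Reason string }

func (e DropCommitmentError) Error() string { return "drop commitment: " + e.Reason }

// classify mirrors the error handling added to altda_data_source.go.
func classify(err error) string {
	var drop DropCommitmentError
	switch {
	case err == nil:
		return "ok: forward input downstream"
	case errors.As(err, &drop):
		return "skip: clear s.comm and advance to the next commitment"
	default:
		return "retry: temporary error, same commitment is fetched again"
	}
}

func main() {
	fmt.Println(classify(nil))
	fmt.Println(classify(fmt.Errorf("get input: %w", DropCommitmentError{"invalid cert"})))
	fmt.Println(classify(errors.New("network timeout")))
}
```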

Modifications to the op-alt-da client. NewDAClient now returns an error, GetInput takes the commitment's L1 inclusion block number, state.Prune returns the last pruned commitment instead of recording it on the state, and MockDAClient becomes goroutine-safe with fault injection (DropEveryNthPut) and deterministic counting commitments for ordering tests.

diff --git ethereum-optimism/optimism/op-alt-da/daclient_test.go layr-labs/optimism/op-alt-da/daclient_test.go index d9f7902aadee131b38a099bd61616f55e3ad4dfb..21bfe249abff0fd76d6e9bcc5290c76d9a8a8dde 100644 --- ethereum-optimism/optimism/op-alt-da/daclient_test.go +++ layr-labs/optimism/op-alt-da/daclient_test.go @@ -27,7 +27,8 @@ VerifyOnRead: true, } require.NoError(t, cfg.Check())   - client := cfg.NewDAClient() + client, err := cfg.NewDAClient() + require.NoError(t, err)   rng := rand.New(rand.NewSource(1234))   @@ -38,7 +39,7 @@ require.NoError(t, err)   require.Equal(t, comm, NewKeccak256Commitment(input))   - stored, err := client.GetInput(ctx, comm) + stored, err := client.GetInput(ctx, comm, 0) require.NoError(t, err)   require.Equal(t, input, stored) @@ -46,12 +47,12 @@ // set a bad commitment in the store require.NoError(t, store.Put(ctx, comm.Encode(), []byte("bad data")))   - _, err = client.GetInput(ctx, comm) + _, err = client.GetInput(ctx, comm, 0) require.ErrorIs(t, err, ErrCommitmentMismatch)   // test not found error comm = NewKeccak256Commitment(RandomData(rng, 32)) - _, err = client.GetInput(ctx, comm) + _, err = client.GetInput(ctx, comm, 0) require.ErrorIs(t, err, ErrNotFound)   // test storing bad data @@ -63,7 +64,7 @@ require.NoError(t, server.Stop()) _, err = client.SetInput(ctx, input) require.Error(t, err)   - _, err = client.GetInput(ctx, NewKeccak256Commitment(input)) + _, err = client.GetInput(ctx, NewKeccak256Commitment(input), 0) require.Error(t, err) }   @@ -85,7 +86,8 @@ GenericDA: false, } require.NoError(t, cfg.Check())   - client := cfg.NewDAClient() + client, err := cfg.NewDAClient() + require.NoError(t, err)   rng := rand.New(rand.NewSource(1234))   @@ -96,7 +98,7 @@ require.NoError(t, err)   require.Equal(t, comm.String(), NewKeccak256Commitment(input).String())   - stored, err := client.GetInput(ctx, comm) + stored, err := client.GetInput(ctx, comm, 0) require.NoError(t, err)   require.Equal(t, input, stored) @@ -105,12 +107,12 @@ // set a bad commitment in the store require.NoError(t, store.Put(ctx, comm.Encode(), []byte("bad data")))   // assert no error as generic commitments cannot be verified client side - _, err = client.GetInput(ctx, comm) + _, err = client.GetInput(ctx, comm, 0) require.NoError(t, err)   // test not found error comm = NewKeccak256Commitment(RandomData(rng, 32)) - _, err = client.GetInput(ctx, comm) + _, err = client.GetInput(ctx, comm, 0) require.ErrorIs(t, err, ErrNotFound)   // test storing bad data @@ -122,6 +124,6 @@ require.NoError(t, server.Stop()) _, err = client.SetInput(ctx, input) require.Error(t, err)   - _, err = client.GetInput(ctx, NewKeccak256Commitment(input)) + _, err = client.GetInput(ctx, NewKeccak256Commitment(input), 0) require.Error(t, err) }
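The signature changes above imply a new client surface: NewDAClient can now fail, and GetInput threads through an extra uint64. The interface sketch below is inferred from these call sites and from DAErrFaker's signature in damock.go (which names the parameter l1InclusionBlockNumber); the hunks shown here don't reveal how the value is used, and the real CommitmentData interface has more methods than shown.

```go
package altda

import "context"

// Trimmed, assumed shape of the commitment interface.
type CommitmentData interface {
	Encode() []byte
	String() string
}

// DAStorage as the forked call sites suggest it now looks.
type DAStorage interface {
	// GetInput also receives the commitment's L1 inclusion block number,
	// presumably so the EigenDA client can validate the commitment against
	// its L1 context (assumption: not shown in these hunks).
	GetInput(ctx context.Context, comm CommitmentData, l1InclusionBlockNumber uint64) ([]byte, error)
	SetInput(ctx context.Context, data []byte) (CommitmentData, error)
}
```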
diff --git ethereum-optimism/optimism/op-alt-da/damgr_test.go layr-labs/optimism/op-alt-da/damgr_test.go index b487fc85c98de323ec5f804850fdbb33b7387de1..9255134ed2cf16b8e00fe2ad80f140703fed9289 100644 --- ethereum-optimism/optimism/op-alt-da/damgr_test.go +++ layr-labs/optimism/op-alt-da/damgr_test.go @@ -53,12 +53,12 @@ require.Empty(t, state.expiredCommitments) require.NoError(t, state.ExpireCommitments(bID(8))) require.Empty(t, state.commitments)   - state.Prune(bID(bn1)) - require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) - state.Prune(bID(7)) - require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) - state.Prune(bID(8)) - require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment) + lastPrunedCommitment := state.Prune(bID(bn1)) + require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment) + lastPrunedCommitment = state.Prune(bID(7)) + require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment) + lastPrunedCommitment = state.Prune(bID(8)) + require.Equal(t, l1Ref(bn1), lastPrunedCommitment)   // Track a commitment, challenge it, & then resolve it c2 := RandomCommitment(rng) @@ -83,12 +83,12 @@ state.ExpireChallenges(bID(30)) require.Empty(t, state.challenges)   // Now finalize everything - state.Prune(bID(20)) - require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment) - state.Prune(bID(28)) - require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment) - state.Prune(bID(32)) - require.Equal(t, l1Ref(bn2), state.lastPrunedCommitment) + lastPrunedCommitment = state.Prune(bID(20)) + require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment) + lastPrunedCommitment = state.Prune(bID(28)) + require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment) + lastPrunedCommitment = state.Prune(bID(32)) + require.Equal(t, l1Ref(bn2), lastPrunedCommitment) }   // TestExpireChallenges expires challenges and prunes the state for longer windows @@ -175,8 +175,8 @@ err = state.ExpireCommitments(bID(11)) require.ErrorIs(t, err, ErrReorgRequired)   // pruning finalized block is safe. It should not prune any commitments yet. - state.Prune(bID(1)) - require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment) + lastPrunedCommitment := state.Prune(bID(1)) + require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment)   // Perform reorg back to bn2 state.ClearCommitments()
diff --git ethereum-optimism/optimism/op-alt-da/damock.go layr-labs/optimism/op-alt-da/damock.go index ad388d0b26535bee1dabbccb5aeeb9ed38ecae1e..f6cd00c92e56cb5f65f2137a34294434ea1a4be5 100644 --- ethereum-optimism/optimism/op-alt-da/damock.go +++ layr-labs/optimism/op-alt-da/damock.go @@ -2,7 +2,9 @@ package altda   import ( "context" + "encoding/binary" "errors" + "fmt" "io" "net/http" "sync" @@ -16,12 +18,19 @@ "github.com/ethereum/go-ethereum/log" )   // MockDAClient mocks a DA storage provider to avoid running an HTTP DA server -// in unit tests. +// in unit tests. MockDAClient is goroutine-safe. type MockDAClient struct { - CommitmentType CommitmentType - store ethdb.KeyValueStore - log log.Logger + mu sync.Mutex + CommitmentType CommitmentType + GenericCommitmentCount uint16 // next generic commitment (use counting commitment instead of hash to help with testing) + store ethdb.KeyValueStore + StoreCount int + log log.Logger + dropEveryNthPut uint // 0 means nothing gets dropped, 1 means every put errors, etc. + setInputRequestCount uint // number of put requests received, irrespective of whether they were successful } + +var _ DAStorage = (*MockDAClient)(nil)   func NewMockDAClient(log log.Logger) *MockDAClient { return &MockDAClient{ @@ -31,7 +40,30 @@ log: log, } }   -func (c *MockDAClient) GetInput(ctx context.Context, key CommitmentData) ([]byte, error) { +// NewCountingGenericCommitmentMockDAClient creates a MockDAClient that uses counting commitments. +// Its commitments are big-endian encoded uint16s of 0, 1, 2, etc. instead of actual hash or altda-layer related commitments. +// Used for testing to make sure we receive commitments in order following Holocene strict ordering rules. +func NewCountingGenericCommitmentMockDAClient(log log.Logger) *MockDAClient { + return &MockDAClient{ + CommitmentType: GenericCommitmentType, + store: memorydb.New(), + log: log, + } +} + +// Fakes a da server that drops/errors on every Nth put request. +// Useful for testing the batcher's error handling. +// 0 means nothing gets dropped, 1 means every put errors, etc. 
+func (c *MockDAClient) DropEveryNthPut(n uint) { + c.mu.Lock() + defer c.mu.Unlock() + c.dropEveryNthPut = n +} + +func (c *MockDAClient) GetInput(ctx context.Context, key CommitmentData, _ uint64) ([]byte, error) { + c.mu.Lock() + defer c.mu.Unlock() + c.log.Debug("Getting input", "key", key) bytes, err := c.store.Get(key.Encode()) if err != nil { return nil, ErrNotFound @@ -40,14 +72,50 @@ return bytes, nil }   func (c *MockDAClient) SetInput(ctx context.Context, data []byte) (CommitmentData, error) { - key := NewCommitmentData(c.CommitmentType, data) - return key, c.store.Put(key.Encode(), data) + c.mu.Lock() + defer c.mu.Unlock() + c.setInputRequestCount++ + var key CommitmentData + if c.CommitmentType == GenericCommitmentType { + countCommitment := make([]byte, 2) + binary.BigEndian.PutUint16(countCommitment, c.GenericCommitmentCount) + key = NewGenericCommitment(countCommitment) + } else { + key = NewKeccak256Commitment(data) + } + var action string = "put" + if c.dropEveryNthPut > 0 && c.setInputRequestCount%c.dropEveryNthPut == 0 { + action = "dropped" + } + c.log.Debug("Setting input", "action", action, "key", key, "data", fmt.Sprintf("%x", data)) + if action == "dropped" { + return nil, errors.New("put dropped") + } + err := c.store.Put(key.Encode(), data) + if err == nil { + c.GenericCommitmentCount++ + c.StoreCount++ + } + return key, err }   func (c *MockDAClient) DeleteData(key []byte) error { - return c.store.Delete(key) + c.mu.Lock() + defer c.mu.Unlock() + c.log.Debug("Deleting data", "key", key) + // memorydb.Delete() returns nil even when the key doesn't exist, so we need to check if the key exists + // before decrementing StoreCount. + var err error + if _, err = c.store.Get(key); err == nil { + if err = c.store.Delete(key); err == nil { + c.StoreCount-- + } + } + return err }   +// DAErrFaker is a DA client that can be configured to return errors on GetInput +// and SetInput calls. type DAErrFaker struct { Client *MockDAClient   @@ -55,12 +123,14 @@ getInputErr error setInputErr error }   -func (f *DAErrFaker) GetInput(ctx context.Context, key CommitmentData) ([]byte, error) { +var _ DAStorage = (*DAErrFaker)(nil) + +func (f *DAErrFaker) GetInput(ctx context.Context, key CommitmentData, l1InclusionBlockNumber uint64) ([]byte, error) { if err := f.getInputErr; err != nil { f.getInputErr = nil return nil, err } - return f.Client.GetInput(ctx, key) + return f.Client.GetInput(ctx, key, l1InclusionBlockNumber) }   func (f *DAErrFaker) SetInput(ctx context.Context, data []byte) (CommitmentData, error) { @@ -105,12 +175,26 @@ return ErrNotEnabled }   // FakeDAServer is a fake DA server for e2e tests. -// It is a small wrapper around DAServer that allows for setting request latencies, -// to mimic a DA service with slow responses (eg. eigenDA with 10 min batching interval). +// It is a small wrapper around DAServer that allows for setting: +// - request latencies, to mimic a DA service with slow responses +// (e.g. eigenDA with 10 min batching interval). +// - response status codes, to mimic a DA service that is down. +// +// We use this FakeDAServer as opposed to the DAErrFaker client in the op-e2e altda system tests +// because the batcher service only has a constructor to build from CLI flags (no dependency injection), +// meaning the da client is built from an RPC URL config instead of being injected. 
type FakeDAServer struct { *DAServer putRequestLatency time.Duration getRequestLatency time.Duration + // next failoverCount Put requests will return 503 status code for failover testing + failoverCount uint64 + // outOfOrderResponses is a flag that, when set, causes the server to send responses out of order. + // It will only respond to pairs of requests, returning the second response first, and waiting 1 second before sending the first response. + // This is used to test the batcher's ability to handle out of order responses, while still ensuring Holocene's strict ordering rules. + outOfOrderResponses bool + oooMu sync.Mutex + oooWaitChan chan struct{} }   func NewFakeDAServer(host string, port int, log log.Logger) *FakeDAServer { @@ -130,6 +214,26 @@ }   func (s *FakeDAServer) HandlePut(w http.ResponseWriter, r *http.Request) { time.Sleep(s.putRequestLatency) + if s.failoverCount > 0 { + w.WriteHeader(http.StatusServiceUnavailable) + s.failoverCount-- + return + } + if s.outOfOrderResponses { + s.oooMu.Lock() + if s.oooWaitChan == nil { + s.log.Info("Received put request while in out-of-order mode, waiting for next request") + s.oooWaitChan = make(chan struct{}) + s.oooMu.Unlock() + <-s.oooWaitChan + time.Sleep(1 * time.Second) + } else { + s.log.Info("Received second put request in out-of-order mode, responding to this one first, then the first one") + close(s.oooWaitChan) + s.oooWaitChan = nil + s.oooMu.Unlock() + } + } s.DAServer.HandlePut(w, r) }   @@ -147,11 +251,26 @@ return nil }   func (s *FakeDAServer) SetPutRequestLatency(latency time.Duration) { + s.log.Info("Setting put request latency", "latency", latency) s.putRequestLatency = latency }   func (s *FakeDAServer) SetGetRequestLatency(latency time.Duration) { + s.log.Info("Setting get request latency", "latency", latency) s.getRequestLatency = latency +} + +// SetPutFailoverForNRequests sets the next n Put requests to return a 503 status code. +func (s *FakeDAServer) SetPutFailoverForNRequests(n uint64) { + s.failoverCount = n +} + +// SetOutOfOrderResponses, when ooo=true, causes the server to send responses out of order. +// It will only respond to pairs of requests, returning the second response first, and waiting 1 second before sending the first response. +// This is used to test the batcher's ability to handle out of order responses, while still ensuring Holocene's strict ordering rules. +func (s *FakeDAServer) SetOutOfOrderResponses(ooo bool) { + s.log.Info("Setting out of order responses", "ooo", ooo) + s.outOfOrderResponses = ooo }   type MemStore struct {
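As an illustration (not part of the diff), a unit test along these lines could exercise the new DropEveryNthPut knob; the import paths are assumed from the op-alt-da and op-service packages used elsewhere in this fork:

```go
package altda_test

import (
	"context"
	"testing"

	altda "github.com/ethereum-optimism/optimism/op-alt-da"
	"github.com/ethereum-optimism/optimism/op-service/testlog"
	"github.com/ethereum/go-ethereum/log"
	"github.com/stretchr/testify/require"
)

// Hypothetical sketch: with DropEveryNthPut(2), every 2nd SetInput call is dropped,
// since the request counter is incremented before the modulo check.
func TestMockDAClient_DropEveryNthPut(t *testing.T) {
	client := altda.NewCountingGenericCommitmentMockDAClient(testlog.Logger(t, log.LevelDebug))
	client.DropEveryNthPut(2)

	ctx := context.Background()
	_, err := client.SetInput(ctx, []byte("frame-0")) // request #1: stored
	require.NoError(t, err)
	_, err = client.SetInput(ctx, []byte("frame-1")) // request #2: dropped
	require.Error(t, err)
	_, err = client.SetInput(ctx, []byte("frame-2")) // request #3: stored again
	require.NoError(t, err)
	require.Equal(t, 2, client.StoreCount)
}
```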
diff --git ethereum-optimism/optimism/op-alt-da/damock_test.go layr-labs/optimism/op-alt-da/damock_test.go new file mode 100644 index 0000000000000000000000000000000000000000..3d651e3bd9193e13ce8cf624f5a7cc5cd8cac4cf --- /dev/null +++ layr-labs/optimism/op-alt-da/damock_test.go @@ -0,0 +1,65 @@ +package altda + +import ( + "net/http/httptest" + "sync" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/log" +) + +func TestFakeDAServer_OutOfOrderResponses(t *testing.T) { + logger := testlog.Logger(t, log.LevelDebug) + daServer := NewFakeDAServer("localhost", 0, logger) + daServer.SetOutOfOrderResponses(true) + + // Channel to track completion order + completionOrder := make(chan int, 2) + + // Start two concurrent requests + var wg sync.WaitGroup + wg.Add(2) + + // First request + go func() { + defer wg.Done() + w := httptest.NewRecorder() + r := httptest.NewRequest("PUT", "/data", nil) + + daServer.HandlePut(w, r) + completionOrder <- 1 + }() + + // Small delay to ensure first request starts first + time.Sleep(100 * time.Millisecond) + + // Second request + go func() { + defer wg.Done() + w := httptest.NewRecorder() + r := httptest.NewRequest("PUT", "/data", nil) + + daServer.HandlePut(w, r) + completionOrder <- 2 + }() + + // Wait for both requests to complete + wg.Wait() + close(completionOrder) + + // Check completion order + var order []int + for n := range completionOrder { + order = append(order, n) + } + + // Second request should complete before first + if len(order) != 2 { + t.Fatalf("expected 2 requests to complete, got %d", len(order)) + } + if order[0] != 2 || order[1] != 1 { + t.Errorf("expected completion order [2,1], got %v", order) + } +}
diff --git ethereum-optimism/optimism/op-alt-da/cli.go layr-labs/optimism/op-alt-da/cli.go index 84364e47952a7c1710c3abb17a82b5c8520b3393..72c6fa5259539cad0a623f27f1e3b5370fbce2d4 100644 --- ethereum-optimism/optimism/op-alt-da/cli.go +++ layr-labs/optimism/op-alt-da/cli.go @@ -102,8 +102,12 @@ } return nil }   -func (c CLIConfig) NewDAClient() *DAClient { - return &DAClient{url: c.DAServerURL, verify: c.VerifyOnRead, precompute: !c.GenericDA, getTimeout: c.GetTimeout, putTimeout: c.PutTimeout} +func (c CLIConfig) NewDAClient() (*DAClient, error) { + err := c.Check() + if err != nil { + return nil, fmt.Errorf("check daclient CLIConfig: %w", err) + } + return &DAClient{url: c.DAServerURL, verify: c.VerifyOnRead, precompute: !c.GenericDA, getTimeout: c.GetTimeout, putTimeout: c.PutTimeout}, nil }   func ReadCLIConfig(c *cli.Context) CLIConfig {
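Since NewDAClient now validates the config and returns an error, call sites change shape accordingly (the test diff above shows the pattern). A minimal sketch, assuming the CLIConfig fields shown in the op-e2e setup diff further below; the URL is a placeholder:

```go
package example

import (
	"fmt"

	altda "github.com/ethereum-optimism/optimism/op-alt-da"
)

func newClient() (*altda.DAClient, error) {
	cfg := altda.CLIConfig{
		Enabled:     true,
		DAServerURL: "http://localhost:3100", // placeholder
		GenericDA:   true,
	}
	// NewDAClient now runs cfg.Check() before constructing the client.
	client, err := cfg.NewDAClient()
	if err != nil {
		return nil, fmt.Errorf("new DA client: %w", err)
	}
	return client, nil
}
```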
diff --git ethereum-optimism/optimism/op-alt-da/daclient.go layr-labs/optimism/op-alt-da/daclient.go index 9f0bdab11fbd98d632edc8cf5e148413a7484c8e..e18a72b052ff37a0750eb1a686dc67d8c8209bb3 100644 --- ethereum-optimism/optimism/op-alt-da/daclient.go +++ layr-labs/optimism/op-alt-da/daclient.go @@ -3,6 +3,7 @@ import ( "bytes" "context" + "encoding/json" "errors" "fmt" "io" @@ -10,11 +11,56 @@ "net/http" "time" )   +// =========== SetInput (PUT path) errors =========== + +// ErrInvalidInput is returned when the input is not valid for posting to the DA storage. +var ErrInvalidInput = errors.New("invalid input") + +// ErrAltDADown is returned when the alt DA returns a 503 status code. +// It is used to signify that the alt DA is down and the client should failover to the eth DA. +// See https://github.com/ethereum-optimism/specs/issues/434 +var ErrAltDADown = errors.New("alt DA is down: failover to eth DA") + +// =========== GetInput (GET path) errors =========== + // ErrNotFound is returned when the server could not find the input. +// Note: this error only applies to keccak commitments, and not to EigenDA altda commitments, +// because a cert that parses correctly and passes the recency check by definition proves +// the availability of the blob that it certifies. +// See https://github.com/Layr-Labs/eigenda/blob/f4ef5cd5/docs/spec/src/integration/spec/6-secure-integration.md#derivation-process for more info. var ErrNotFound = errors.New("not found")   -// ErrInvalidInput is returned when the input is not valid for posting to the DA storage. -var ErrInvalidInput = errors.New("invalid input") +// DropEigenDACommitmentError is returned when the eigenda-proxy returns a 418 TEAPOT error, +// which signifies that the commitment should be dropped/skipped from the derivation pipeline, as either: +// 1. the cert in the commitment is invalid +// 2. the cert's blob cannot be decoded into a frame (it was not encoded according to one of the supported codecs, +// see https://github.com/Layr-Labs/eigenda/blob/f4ef5cd5/api/clients/codecs/blob_codec.go#L7-L15) +// +// See https://github.com/Layr-Labs/eigenda/blob/f4ef5cd5/docs/spec/src/integration/spec/6-secure-integration.md#derivation-process for more info. +// +// This error is parsed from the json body of the 418 TEAPOT error response. +// DropEigenDACommitmentError is the only error that can lead to a cert being dropped from the derivation pipeline. +// It is needed to protect the rollup from liveness attacks (derivation pipeline stalled by malicious batcher). +type DropEigenDACommitmentError struct { + // The StatusCode field MUST be contained in the response body of the 418 TEAPOT error. + StatusCode int + // The Msg field is a human-readable string that explains the error. + // It is optional, but should ideally be set to a meaningful value. + Msg string +} + +func (e DropEigenDACommitmentError) Error() string { + return fmt.Sprintf("Invalid AltDA Commitment: cert verification failed with status code %v: %v", e.StatusCode, e.Msg) +} + +// Validate checks that the status code is an integer between 1 and 4, and panics if it is not. +func (e DropEigenDACommitmentError) Validate() { + if e.StatusCode < 1 || e.StatusCode > 4 { + panic(fmt.Sprintf("DropEigenDACommitmentError: invalid status code %d, must be between 1 and 4", e.StatusCode)) + } + // The Msg field should ideally be a human-readable string that explains the error, + // but we don't enforce it. +}   // DAClient is an HTTP client to communicate with a DA storage service. 
// It creates commitments and retrieves input data + verifies if needed. @@ -28,6 +74,8 @@ getTimeout time.Duration putTimeout time.Duration }   +var _ DAStorage = (*DAClient)(nil) + func NewDAClient(url string, verify bool, pc bool) *DAClient { return &DAClient{ url: url, @@ -37,8 +85,12 @@ } }   // GetInput returns the input data for the given encoded commitment bytes. -func (c *DAClient) GetInput(ctx context.Context, comm CommitmentData) ([]byte, error) { - req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/get/0x%x", c.url, comm.Encode()), nil) +// The l1InclusionBlock at which the commitment was included in the batcher-inbox is submitted +// to the DA server as a query parameter. +// It is used to discard old commitments whose blobs have a risk of not being available anymore. +// It is optional, and passing a 0 value will tell the DA server to skip the check. +func (c *DAClient) GetInput(ctx context.Context, comm CommitmentData, l1InclusionBlockNumber uint64) ([]byte, error) { + req, err := http.NewRequestWithContext(ctx, http.MethodGet, fmt.Sprintf("%s/get/0x%x?l1_inclusion_block_number=%d", c.url, comm.Encode(), l1InclusionBlockNumber), nil) if err != nil { return nil, fmt.Errorf("failed to create HTTP request: %w", err) } @@ -49,6 +101,21 @@ return nil, err } if resp.StatusCode == http.StatusNotFound { return nil, ErrNotFound + } + if resp.StatusCode == http.StatusTeapot { + defer resp.Body.Close() + // Limit the body to 5000 bytes to prevent being DDoSed with a large error message. + bytesLimitedBody := io.LimitReader(resp.Body, 5000) + bodyBytes, _ := io.ReadAll(bytesLimitedBody) + + var invalidCommitmentErr DropEigenDACommitmentError + if err := json.Unmarshal(bodyBytes, &invalidCommitmentErr); err != nil { + return nil, fmt.Errorf("failed to decode 418 TEAPOT HTTP error body into a DropEigenDACommitmentError. "+ + "Consider updating proxy to a more recent version that contains https://github.com/Layr-Labs/eigenda/pull/1736: "+ + "%w", err) + } + invalidCommitmentErr.Validate() + return nil, invalidCommitmentErr } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("failed to get preimage: %v", resp.StatusCode) @@ -131,6 +198,9 @@ if err != nil { return nil, err } defer resp.Body.Close() + if resp.StatusCode == http.StatusServiceUnavailable { + return nil, ErrAltDADown + } if resp.StatusCode != http.StatusOK { return nil, fmt.Errorf("failed to store data: %v", resp.StatusCode) }
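The GET path now distinguishes three failure classes with different derivation-pipeline consequences. A hedged sketch of the triage a caller might do (triageGet is hypothetical; only the error types come from the diff above):

```go
package example

import (
	"context"
	"errors"

	altda "github.com/ethereum-optimism/optimism/op-alt-da"
)

func triageGet(ctx context.Context, client *altda.DAClient, comm altda.CommitmentData, inclusionBlock uint64) ([]byte, error) {
	data, err := client.GetInput(ctx, comm, inclusionBlock)
	var dropErr altda.DropEigenDACommitmentError
	switch {
	case err == nil:
		return data, nil
	case errors.As(err, &dropErr):
		// 418: the commitment is provably invalid; drop it from derivation
		// instead of stalling the pipeline.
		return nil, err
	case errors.Is(err, altda.ErrNotFound):
		// 404: only possible for keccak commitments (see the ErrNotFound comment above).
		return nil, err
	default:
		// transient server/network failure: safe to retry.
		return nil, err
	}
}
```

On the PUT path, ErrAltDADown (503) is the signal that lets the batcher fail over to eth DA, as exercised by the failover tests further below.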
diff --git ethereum-optimism/optimism/op-alt-da/damgr.go layr-labs/optimism/op-alt-da/damgr.go index 15814263c4ffa84c0ed69b48c51ef4de39c37eec..5b3381c6278cd1ac5fa5cb2d8c60556e9ad2098e 100644 --- ethereum-optimism/optimism/op-alt-da/damgr.go +++ layr-labs/optimism/op-alt-da/damgr.go @@ -40,7 +40,10 @@ }   // DAStorage interface for calling the DA storage server. type DAStorage interface { - GetInput(ctx context.Context, key CommitmentData) ([]byte, error) + // L1InclusionBlockNumber is the block number at which the commitment was included in the batcher inbox. + // It is used to check if the commitment is expired, and should be sent as a query parameter + // to the DA server. It is optional, and passing a 0 value will tell the DA server to skip the check. + GetInput(ctx context.Context, key CommitmentData, L1InclusionBlockNumber uint64) ([]byte, error) SetInput(ctx context.Context, img []byte) (CommitmentData, error) }   @@ -78,8 +81,12 @@ finalizedHeadSignalHandler HeadSignalFn }   // NewAltDA creates a new AltDA instance with the given log and CLIConfig. -func NewAltDA(log log.Logger, cli CLIConfig, cfg Config, metrics Metricer) *DA { - return NewAltDAWithStorage(log, cfg, cli.NewDAClient(), metrics) +func NewAltDA(log log.Logger, cli CLIConfig, cfg Config, metrics Metricer) (*DA, error) { + daClient, err := cli.NewDAClient() + if err != nil { + return nil, fmt.Errorf("new DAClient: %w", err) + } + return NewAltDAWithStorage(log, cfg, daClient, metrics), nil }   // NewAltDAWithStorage creates a new AltDA instance with the given log and DAStorage interface. @@ -117,8 +124,18 @@ // It is called by the Finalize function, as it has an L1 finalized head to use. func (d *DA) updateFinalizedHead(l1Finalized eth.L1BlockRef) { d.l1FinalizedHead = l1Finalized // Prune the state to the finalized head - d.state.Prune(l1Finalized.ID()) - d.finalizedHead = d.state.lastPrunedCommitment + lastPrunedCommIncBlock := d.state.Prune(l1Finalized.ID()) + d.log.Debug("updateFinalizedHead", + "currFinalizedHead", d.finalizedHead.Number, + "lastPrunedCommIncBlock", lastPrunedCommIncBlock.Number, + "l1Finalized", l1Finalized.Number) + // If a commitment was pruned, set the finalized head to that commitment's inclusion block + // When no commitments are left to be pruned (one example is if we have failed over to ethda) + // then updateFinalizedFromL1 becomes the main driver of the finalized head. + // Note that updateFinalizedFromL1 is only called when d.state.NoCommitments() is true. + if lastPrunedCommIncBlock != (eth.L1BlockRef{}) { + d.finalizedHead = lastPrunedCommIncBlock + } }   // updateFinalizedFromL1 updates the finalized head based on the challenge window. @@ -133,6 +150,7 @@ ref, err := l1.L1BlockRefByNumber(ctx, d.l1FinalizedHead.Number-d.cfg.ChallengeWindow) if err != nil { return err } + d.log.Debug("updateFinalizedFromL1", "currFinalizedHead", d.finalizedHead.Number, "newFinalizedHead", ref.Number, "l1FinalizedHead", d.l1FinalizedHead.Number, "challengeWindow", d.cfg.ChallengeWindow) d.finalizedHead = ref return nil } @@ -208,12 +226,11 @@ d.state.TrackCommitment(comm, blockId) d.log.Info("getting input", "comm", comm, "status", status)   // Fetch the input from the DA storage. 
- data, err := d.storage.GetInput(ctx, comm) + data, err := d.storage.GetInput(ctx, comm, blockId.Number) notFound := errors.Is(ErrNotFound, err) if err != nil && !notFound { d.log.Error("failed to get preimage", "err", err) // the storage client request failed for some other reason - // in which case derivation pipeline should be retried return nil, err }   @@ -413,6 +430,7 @@ continue } for _, log := range rec.Logs { if log.Address == d.cfg.DAChallengeContractAddress && len(log.Topics) > 0 && log.Topics[0] == ChallengeStatusEventABIHash { + d.log.Info("found challenge event", "block", block.Number, "log", log.Index) logs = append(logs, log) } }
diff --git ethereum-optimism/optimism/op-alt-da/dastate.go layr-labs/optimism/op-alt-da/dastate.go index 66a2aee1f31ef24e00c89a6079db5f46d8d01501..5d26841ec51180c0fa420bbe9e817f302b318b26 100644 --- ethereum-optimism/optimism/op-alt-da/dastate.go +++ layr-labs/optimism/op-alt-da/dastate.go @@ -52,15 +52,14 @@ // Challenges and Commitments can be pruned when they are beyond a certain block number (e.g. when they are finalized). // In the special case of a L2 reorg, challenges are still tracked but commitments are removed. // This will allow the altDA fetcher to find the expired challenge. type State struct { - commitments []Commitment // commitments where the challenge/resolve period has not expired yet - expiredCommitments []Commitment // commitments where the challenge/resolve period has expired but not finalized - challenges []*Challenge // challenges ordered by L1 inclusion - expiredChallenges []*Challenge // challenges ordered by L1 inclusion - challengesMap map[string]*Challenge // challenges by serialized comm + block number for easy lookup - lastPrunedCommitment eth.L1BlockRef // the last commitment to be pruned - cfg Config - log log.Logger - metrics Metricer + commitments []Commitment // commitments where the challenge/resolve period has not expired yet + expiredCommitments []Commitment // commitments where the challenge/resolve period has expired but not finalized + challenges []*Challenge // challenges ordered by L1 inclusion + expiredChallenges []*Challenge // challenges ordered by L1 inclusion + challengesMap map[string]*Challenge // challenges by serialized comm + block number for easy lookup + cfg Config + log log.Logger + metrics Metricer }   func NewState(log log.Logger, m Metricer, cfg Config) *State { @@ -207,15 +206,18 @@ } }   // Prune removes challenges & commitments which have an expiry block number beyond the given block number. -func (s *State) Prune(origin eth.BlockID) { +// It returns the last pruned commitment's inclusion block number, or eth.L1BlockRef{} if no commitments were pruned. +func (s *State) Prune(origin eth.BlockID) eth.L1BlockRef { // Commitments rely on challenges, so we prune commitments first. - s.pruneCommitments(origin) + lastPrunedCommIncBlock := s.pruneCommitments(origin) s.pruneChallenges(origin) + return lastPrunedCommIncBlock }   // pruneCommitments removes commitments which are beyond a given block number. // It will remove commitments in order of inclusion until it finds a commitment which is not beyond the given block number. -func (s *State) pruneCommitments(origin eth.BlockID) { +func (s *State) pruneCommitments(origin eth.BlockID) eth.L1BlockRef { + var lastPrunedCommIncBlock eth.L1BlockRef for len(s.expiredCommitments) > 0 { c := s.expiredCommitments[0] challenge, ok := s.GetChallenge(c.data, c.inclusionBlock.Number) @@ -236,8 +238,9 @@ // Remove the commitment s.expiredCommitments = s.expiredCommitments[1:]   // Record the latest inclusion block to be returned - s.lastPrunedCommitment = c.inclusionBlock + lastPrunedCommIncBlock = c.inclusionBlock } + return lastPrunedCommIncBlock }   // pruneChallenges removes challenges which are beyond a given block number.
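The state no longer caches lastPrunedCommitment; the caller owns the returned value. A hypothetical fragment (placed in package altda for illustration) restating the contract that damgr.go's updateFinalizedHead above relies on:

```go
package altda

import "github.com/ethereum-optimism/optimism/op-service/eth"

// advanceFinalized is illustrative only: the zero L1BlockRef means nothing was pruned.
func advanceFinalized(state *State, origin eth.BlockID, finalizedHead eth.L1BlockRef) eth.L1BlockRef {
	last := state.Prune(origin)
	if last == (eth.L1BlockRef{}) {
		return finalizedHead // nothing pruned: keep the current finalized head
	}
	return last // the pruned commitment's L1 inclusion block drives the finalized head
}
```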
diff --git ethereum-optimism/optimism/op-e2e/actions/altda/altda_test.go layr-labs/optimism/op-e2e/actions/altda/altda_test.go index c1363e07386fa7cad959f8a5e72df00b368dea69..5e249ca08d15a84a7dd8778a9f0ebc37ec26a70d 100644 --- ethereum-optimism/optimism/op-e2e/actions/altda/altda_test.go +++ layr-labs/optimism/op-e2e/actions/altda/altda_test.go @@ -1,6 +1,7 @@ package altda   import ( + "log/slog" "math/big" "math/rand" "testing" @@ -49,6 +50,12 @@ }   type AltDAParam func(p *e2eutils.TestParams)   +func WithLogLevel(level slog.Level) AltDAParam { + return func(p *e2eutils.TestParams) { + p.LogLevel = level + } +} + func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA { p := &e2eutils.TestParams{ MaxSequencerDrift: 40, @@ -57,11 +64,12 @@ ChannelTimeout: 12, L1BlockTime: 12, UseAltDA: true, AllocType: config.AllocTypeAltDA, + LogLevel: log.LevelDebug, } for _, apply := range params { apply(p) } - log := testlog.Logger(t, log.LvlDebug) + log := testlog.Logger(t, p.LogLevel)   dp := e2eutils.MakeDeployParams(t, p) sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc) @@ -75,14 +83,13 @@ jwtPath := e2eutils.WriteDefaultJWT(t) engine := helpers.NewL2Engine(t, log, sd.L2Cfg, jwtPath) engCl := engine.EngineClient(t, sd.RollupCfg)   - storage := &altda.DAErrFaker{Client: altda.NewMockDAClient(log)} - l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic)) require.NoError(t, err)   altDACfg, err := sd.RollupCfg.GetOPAltDAConfig() require.NoError(t, err)   + storage := &altda.DAErrFaker{Client: altda.NewMockDAClient(log)} daMgr := altda.NewAltDAWithStorage(log, altDACfg, storage, &altda.NoopMetrics{})   sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, sd.DependencySet, 0) @@ -177,6 +184,34 @@ a.lastCommBn = a.miner.L1Chain().CurrentBlock().Number.Uint64() }   +// ActNewL2TxFinalized sends a new L2 transaction, submits a batch containing it to L1 +// and finalizes the L1 and L2 chains (including advancing enough to clear the altda challenge window). +// +// TODO: understand why (notation is l1unsafe/l1safe/l1finalized-l2unsafe/l2safe/l2finalized): +// - the first call advances heads by (0/0/17-71/71/1) +// - second call advances by 0/0/17-204/204/82, +// - but all subsequent calls advance status by exactly 0/0/17-204/204/204. +// +// 17 makes sense because challengeWindow=16 and we create 1 extra block before that, +// and 204 L2blocks = 17 L1blocks * 12 L2blocks/L1block (L1blocktime=12s, L2blocktime=1s) +func (a *L2AltDA) ActNewL2TxFinalized(t helpers.Testing) { + // Include a new l2 batcher transaction, submitting an input commitment to the l1. 
+ a.ActNewL2Tx(t) + // Create ChallengeWindow empty blocks so the above batcher blocks can finalize (can't be challenged anymore) + a.ActL1Blocks(t, a.altDACfg.ChallengeWindow) + // Finalize the L1 chain and the L2 chain (by draining all events and running through derivation pipeline) + // TODO: understand why we need to drain the pipeline before AND after ActL1Finalized + a.sequencer.ActL2PipelineFull(t) + a.ActL1Finalized(t) + a.sequencer.ActL2PipelineFull(t) + + // Uncomment the below code to observe the behavior described in the TODO above + // syncStatus := a.sequencer.SyncStatus() + // a.log.Info("Sync status after ActNewL2TxFinalized", + // "unsafeL1", syncStatus.HeadL1.Number, "safeL1", syncStatus.SafeL1.Number, "finalizedL1", syncStatus.FinalizedL1.Number, + // "unsafeL2", syncStatus.UnsafeL2.Number, "safeL2", syncStatus.SafeL2.Number, "finalizedL2", syncStatus.FinalizedL2.Number) +} + func (a *L2AltDA) ActDeleteLastInput(t helpers.Testing) { require.NoError(t, a.storage.Client.DeleteData(a.lastComm)) } @@ -235,7 +270,7 @@ }   func (a *L2AltDA) ActResolveLastChallenge(t helpers.Testing) { // remove derivation byte prefix - input, err := a.storage.GetInput(t.Ctx(), altda.Keccak256Commitment(a.lastComm[1:])) + input, err := a.storage.GetInput(t.Ctx(), altda.Keccak256Commitment(a.lastComm[1:]), 0) require.NoError(t, err)   a.ActResolveInput(t, a.lastComm, input, a.lastCommBn) @@ -363,7 +398,7 @@ require.Equal(t, syncStatus.SafeL2, verifSyncStatus.SafeL2) }   // DA storage service goes offline while sequencer keeps making blocks. When storage comes back online, it should be able to catch up. -func TestAltDA_StorageError(gt *testing.T) { +func TestAltDA_StorageGetError(gt *testing.T) { t := helpers.NewDefaultTesting(gt) harness := NewL2AltDA(t)   @@ -441,7 +476,7 @@ a.ActNewL2Tx(t)   // keep track of the related commitment comm1 := a.lastComm - input1, err := a.storage.GetInput(t.Ctx(), altda.Keccak256Commitment(comm1[1:])) + input1, err := a.storage.GetInput(t.Ctx(), altda.Keccak256Commitment(comm1[1:]), 0) bn1 := a.lastCommBn require.NoError(t, err)   @@ -490,7 +525,7 @@ }, 100, false)   // keep track of the second commitment comm2 := a.lastComm - _, err = a.storage.GetInput(t.Ctx(), altda.Keccak256Commitment(comm2[1:])) + _, err = a.storage.GetInput(t.Ctx(), altda.Keccak256Commitment(comm2[1:]), 0) require.NoError(t, err) a.lastCommBn = a.miner.L1Chain().CurrentBlock().Number.Uint64()   @@ -528,11 +563,12 @@ func TestAltDA_Finalization(gt *testing.T) { t := helpers.NewDefaultTesting(gt) a := NewL2AltDA(t)   - // build L1 block #1 + // Notation everywhere below is l1unsafe/l1safe/l1finalized:l2unsafe/l2safe/l2finalized + // build L1 block #1: 0/0/0:0/0/0 -> 1/1/0:0/0/0 a.ActL1Blocks(t, 1) a.miner.ActL1SafeNext(t)   - // Fill with l2 blocks up to the L1 head + // Fill with l2 blocks up to the L1 head: 1/1/0:0/0/0 -> 1/1/0:1/1/0 a.sequencer.ActL1HeadSignal(t) a.sequencer.ActBuildToL1Head(t)   @@ -540,7 +576,7 @@ a.sequencer.ActL2PipelineFull(t) a.sequencer.ActL1SafeSignal(t) require.Equal(t, uint64(1), a.sequencer.SyncStatus().SafeL1.Number)   - // add L1 block #2 + // add L1 block #2: 1/1/0:1/1/0 -> 2/2/1:2/1/0 a.ActL1Blocks(t, 1) a.miner.ActL1SafeNext(t) a.miner.ActL1FinalizeNext(t) @@ -552,7 +588,7 @@ a.sequencer.ActL2PipelineFull(t) a.sequencer.ActL1FinalizedSignal(t) a.sequencer.ActL1SafeSignal(t)   - // commit all the l2 blocks to L1 + // commit all the l2 blocks to L1: 2/2/1:2/1/0 -> 3/2/1:2/1/0 a.batcher.ActSubmitAll(t) a.miner.ActL1StartBlock(12)(t) 
a.miner.ActL1IncludeTx(a.dp.Addresses.Batcher)(t) @@ -561,31 +597,31 @@ // verify a.sequencer.ActL2PipelineFull(t)   - // fill with more unsafe L2 blocks + // fill with more unsafe L2 blocks: 3/2/1:2/1/0 -> 3/2/1:3/1/0 a.sequencer.ActL1HeadSignal(t) a.sequencer.ActBuildToL1Head(t)   - // submit those blocks too, block #4 + // submit those blocks too, block #4: 3/2/1:3/1/0 -> 4/2/1:3/1/0 a.batcher.ActSubmitAll(t) a.miner.ActL1StartBlock(12)(t) a.miner.ActL1IncludeTx(a.dp.Addresses.Batcher)(t) a.miner.ActL1EndBlock(t)   - // add some more L1 blocks #5, #6 + // add some more L1 blocks #5, #6: 4/2/1:3/1/0 -> 6/2/1:3/1/0 a.miner.ActEmptyBlock(t) a.miner.ActEmptyBlock(t)   - // and more unsafe L2 blocks + // and more unsafe L2 blocks: 6/2/1:3/1/0 -> 6/2/1:6/1/0 a.sequencer.ActL1HeadSignal(t) a.sequencer.ActBuildToL1Head(t)   - // move safe/finalize markers: finalize the L1 chain block with the first batch, but not the second + // move safe/finalize markers: 6/2/1:6/1/0 -> 6/4/3:6/1/0 a.miner.ActL1SafeNext(t) // #2 -> #3 a.miner.ActL1SafeNext(t) // #3 -> #4 a.miner.ActL1FinalizeNext(t) // #1 -> #2 a.miner.ActL1FinalizeNext(t) // #2 -> #3   - // L1 safe and finalized as expected + // L1 safe and finalized as expected: a.sequencer.ActL2PipelineFull(t) a.sequencer.ActL1FinalizedSignal(t) a.sequencer.ActL1SafeSignal(t) @@ -607,3 +643,64 @@ // given 12s l1 time and 1s l2 time, l2 should be 12 * 3 = 36 blocks finalized require.Equal(t, uint64(36), a.sequencer.SyncStatus().FinalizedL2.Number) } + +// This test exercises the altDA -> ethDA -> altDA finalization behavior, simulating a temporary altDA failure. +func TestAltDA_FinalizationAfterEthDAFailover(gt *testing.T) { + t := helpers.NewDefaultTesting(gt) + // run at debug level so we can see the status logs + harness := NewL2AltDA(t, WithLogLevel(log.LevelDebug)) + + // We first call this twice because the first 2 times are irregular. + // See ActNewL2TxFinalized's TODO comment. + harness.ActNewL2TxFinalized(t) + harness.ActNewL2TxFinalized(t) + + // ActNewL2TxFinalized advances L1 by (1+ChallengeWindow) L1 blocks, and there are 12 L2 blocks per L1 block. + diffL2Blocks := (1 + harness.altDACfg.ChallengeWindow) * 12 + + for i := 0; i < 5; i++ { + ssBefore := harness.sequencer.SyncStatus() + harness.ActNewL2TxFinalized(t) + ssAfter := harness.sequencer.SyncStatus() + // Finalized head should advance normally in altda mode + require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number) + } + + // We swap out altda batcher for ethda batcher + harness.batcher.ActAltDAFailoverToEthDA(t) + + for i := 0; i < 3; i++ { + ssBefore := harness.sequencer.SyncStatus() + harness.ActNewL2TxFinalized(t) + if i == 0 { + // TODO: figure out why we need to act twice for the first time after failover. + // I think it's because the L1 driven finalizedHead is set to L1FinalizedHead-ChallengeWindow (see damgr.go updateFinalizedFromL1), + // so it trails behind by an extra challenge_window when we switch over to ethDA. 
+ harness.ActNewL2TxFinalized(t) + } + ssAfter := harness.sequencer.SyncStatus() + // Even after failover, the finalized head should continue advancing normally + require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number) + } + + // Revert to the altda batcher (simulating that altda's temporary outage is resolved) + harness.batcher.ActAltDAFallbackToAltDA(t) + + for i := 0; i < 3; i++ { + ssBefore := harness.sequencer.SyncStatus() + harness.ActNewL2TxFinalized(t) + ssAfter := harness.sequencer.SyncStatus() + + // Even after fallback to altda, the finalized head should continue advancing normally + if i == 0 { + // This is the opposite of the altda->ethda direction. In this case, the first time we fall back to altda, + // the finalized head will advance by 2*diffL2Blocks: in ethda mode when driven by L1 finalization, + // the head is set to L1FinalizedHead-ChallengeWindow. After sending an altda commitment, the finalized head + // is now driven by the finalization of the altda commitment. + require.Equal(t, ssBefore.FinalizedL2.Number+2*diffL2Blocks, ssAfter.FinalizedL2.Number) + } else { + require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number) + } + + } +}
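For reference, the 204-block step asserted in this test falls directly out of the harness parameters (ChallengeWindow=16 per the TODO comments above, 12s L1 blocks vs 1s L2 blocks):

```go
package example

const (
	challengeWindow    = 16 // harness DAChallengeWindow, assumed from the TODO comments above
	l2BlocksPerL1Block = 12 // L1BlockTime = 12s, L2 block time = 1s
)

// L2 blocks finalized per ActNewL2TxFinalized call: one batcher block plus the challenge window.
var diffL2Blocks = (1 + challengeWindow) * l2BlocksPerL1Block // = 17 * 12 = 204
```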
diff --git ethereum-optimism/optimism/op-e2e/config/init.go layr-labs/optimism/op-e2e/config/init.go index 04aa0fd6209255e73448ed50431e546060c61ada..e694e78bb9db31e107621114b742df58120e3701 100644 --- ethereum-optimism/optimism/op-e2e/config/init.go +++ layr-labs/optimism/op-e2e/config/init.go @@ -50,6 +50,7 @@ type AllocType string   const ( AllocTypeAltDA AllocType = "alt-da" + AllocTypeAltDAGeneric AllocType = "alt-da-generic" AllocTypeMTCannon AllocType = "mt-cannon" AllocTypeMTCannonNext AllocType = "mt-cannon-next"   @@ -65,14 +66,14 @@ }   func (a AllocType) UsesProofs() bool { switch a { - case AllocTypeMTCannon, AllocTypeMTCannonNext, AllocTypeAltDA: + case AllocTypeMTCannon, AllocTypeMTCannonNext, AllocTypeAltDA, AllocTypeAltDAGeneric: return true default: return false } }   -var allocTypes = []AllocType{AllocTypeAltDA, AllocTypeMTCannon, AllocTypeMTCannonNext} +var allocTypes = []AllocType{AllocTypeAltDA, AllocTypeAltDAGeneric, AllocTypeMTCannon, AllocTypeMTCannonNext}   var ( // All of the following variables are set in the init function
diff --git ethereum-optimism/optimism/op-e2e/system/altda/concurrent_test.go layr-labs/optimism/op-e2e/system/altda/concurrent_test.go index ef11a879dc70d59a7f381b0002b39f314df5ed2a..45918db78fd33b8241ea12ffbbc8317bc3686be3 100644 --- ethereum-optimism/optimism/op-e2e/system/altda/concurrent_test.go +++ layr-labs/optimism/op-e2e/system/altda/concurrent_test.go @@ -7,19 +7,21 @@ "testing" "time"   op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum/go-ethereum/common/hexutil"   "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-e2e/config" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" - "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/stretchr/testify/require" )   +// TestBatcherConcurrentAltDARequests tests that the batcher can submit parallel requests +// to the alt-da server. It does not check that the requests are correctly ordered and interpreted +// by op nodes. func TestBatcherConcurrentAltDARequests(t *testing.T) { op_e2e.InitParallel(t) - - numL1TxsExpected := int64(10)   cfg := e2esys.DefaultSystemConfig(t) // Manually configure these since the alt-DA values aren't @@ -32,11 +34,9 @@ cfg.DeployConfig.DAResolveWindow = 16 cfg.DeployConfig.DABondSize = 1000000 cfg.DeployConfig.DAResolverRefundPercentage = 0 cfg.BatcherMaxPendingTransactions = 0 // no limit on parallel txs - // ensures that batcher txs are as small as possible - cfg.BatcherMaxL1TxSizeBytes = derive.FrameV0OverHeadSize + 1 /*version bytes*/ + 1 cfg.BatcherBatchType = 0 cfg.DataAvailabilityType = flags.CalldataType - cfg.BatcherMaxConcurrentDARequest = uint64(numL1TxsExpected) + cfg.BatcherMaxConcurrentDARequest = 2   // disable batcher because we start it manually below cfg.DisableBatcher = true @@ -46,14 +46,15 @@ t.Cleanup(func() { sys.Close() })   - // make every request take 5 seconds, such that only concurrent requests will be able to make progress fast enough + // make every request take 5 seconds, such that only if 2 altda requests are made + // concurrently will 2 batcher txs be able to land in a single L1 block sys.FakeAltDAServer.SetPutRequestLatency(5 * time.Second)   l1Client := sys.NodeClient("l1") l2Seq := sys.NodeClient("sequencer")   - // we wait for numL1TxsExpected L2 blocks to have been produced, just to make sure the sequencer is working properly - _, err = geth.WaitForBlock(big.NewInt(numL1TxsExpected), l2Seq) + // we wait for 10 L2 blocks to have been produced, just to make sure the sequencer is working properly + _, err = geth.WaitForBlock(big.NewInt(10), l2Seq) require.NoError(t, err, "Waiting for L2 blocks") ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) defer cancel() @@ -65,15 +66,14 @@ driver := sys.BatchSubmitter.TestDriver() err = driver.StartBatchSubmitting() require.NoError(t, err)   - // Iterate over up to 10 blocks. The number of transactions sent by the batcher should - // exceed the number of blocks. 
+ // We make sure that some block has more than 1 batcher tx checkBlocks := 10 for i := 0; i < checkBlocks; i++ { block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+int64(i)), l1Client) require.NoError(t, err, "Waiting for l1 blocks") // there are possibly other services (proposer/challenger) in the background sending txs // so we only count the batcher txs - batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress) + batcherTxCount, err := transactions.TransactionsBySenderCount(block, cfg.DeployConfig.BatchSenderAddress) require.NoError(t, err) if batcherTxCount > 1 { return @@ -82,3 +82,58 @@ }   t.Fatalf("did not find more than 1 batcher tx per block in %d blocks", checkBlocks) } + +// The Holocene fork enforced a new strict batch ordering rule, see https://specs.optimism.io/protocol/holocene/derivation.html +// This test makes sure that concurrent requests to the alt-da server that are responded to out of order +// are submitted to the L1 chain in the correct order by the batcher. +func TestBatcherCanHandleOutOfOrderDAServerResponses(t *testing.T) { + op_e2e.InitParallel(t) + // Not sure whether WithAllocType is needed here, as the tests pass even without it + // (see mslipper's comments for the TestBatcherConcurrentAltDARequests test above) + // TODO: understand how the DeployConfigs are related to the AllocTypes + // I asked here https://discord.com/channels/1244729134312198194/1332175015180767265/1332456541067935834 but have yet to get an answer. + cfg := e2esys.HoloceneSystemConfig(t, new(hexutil.Uint64), e2esys.WithAllocType(config.AllocTypeAltDAGeneric)) + cfg.DeployConfig.UseAltDA = true + cfg.DeployConfig.DACommitmentType = "GenericCommitment" + // TODO: figure out why the below are needed even in GenericCommitment mode which doesn't use the DAChallenge Contract + cfg.DeployConfig.DAChallengeWindow = 16 + cfg.DeployConfig.DAResolveWindow = 16 + cfg.DeployConfig.DABondSize = 1000000 + cfg.DeployConfig.DAResolverRefundPercentage = 0 + cfg.BatcherMaxPendingTransactions = 0 // no limit on parallel txs + cfg.BatcherBatchType = 0 + cfg.DataAvailabilityType = flags.CalldataType + cfg.BatcherMaxConcurrentDARequest = 2 + cfg.BatcherMaxL1TxSizeBytes = 150 // enough to fit a single compressed empty L1 block, but not 2 + cfg.Nodes["sequencer"].SafeDBPath = t.TempDir() // needed for SafeHeadAtL1Block() below + + sys, err := cfg.Start(t) + require.NoError(t, err, "Error starting up system") + t.Cleanup(func() { + sys.Close() + }) + sys.FakeAltDAServer.SetOutOfOrderResponses(true) + + l1Client := sys.NodeClient("l1") + l2SeqCL := sys.RollupClient("sequencer") + + checkBlocksL1 := int64(15) + l2SafeHeadMovedCount := 0 + l2SafeHeadMovedCountExpected := 3 + l2SafeHeadCur := uint64(0) + for i := int64(0); i < checkBlocksL1; i++ { + _, err := geth.WaitForBlock(big.NewInt(i), l1Client, geth.WithNoChangeTimeout(5*time.Minute)) + require.NoError(t, err, "Waiting for l1 blocks") + newL2SafeHead, err := l2SeqCL.SafeHeadAtL1Block(context.Background(), uint64(i)) + require.NoError(t, err) + if newL2SafeHead.SafeHead.Number > l2SafeHeadCur { + l2SafeHeadMovedCount++ + l2SafeHeadCur = newL2SafeHead.SafeHead.Number + } + if l2SafeHeadMovedCount == l2SafeHeadMovedCountExpected { + return + } + } + t.Fatalf("L2SafeHead only advanced %d times (expected >= %d) in %d L1 blocks", l2SafeHeadMovedCount, l2SafeHeadMovedCountExpected, checkBlocksL1) + +}
diff --git ethereum-optimism/optimism/op-e2e/system/altda/failover_test.go layr-labs/optimism/op-e2e/system/altda/failover_test.go new file mode 100644 index 0000000000000000000000000000000000000000..9e250c8f0b6c616133642774b1486afa2d024e5b --- /dev/null +++ layr-labs/optimism/op-e2e/system/altda/failover_test.go @@ -0,0 +1,80 @@ +package altda + +import ( + "math/big" + "testing" + + op_e2e "github.com/ethereum-optimism/optimism/op-e2e" + "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params" + "github.com/ethereum/go-ethereum/log" + + "github.com/ethereum-optimism/optimism/op-batcher/flags" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" + "github.com/ethereum-optimism/optimism/op-e2e/system/e2esys" + "github.com/stretchr/testify/require" +) + +// TestBatcher_FailoverToEthDA_FallbackToAltDA tests that the batcher will fail over to ethDA +// if the da-server returns 503. It also tests that the batcher successfully returns to normal +// behavior of posting batches to altda once it becomes available again +// (i.e. the da-server doesn't return 503 anymore). +func TestBatcher_FailoverToEthDA_FallbackToAltDA(t *testing.T) { + op_e2e.InitParallel(t) + + nChannelsFailover := uint64(2) + + cfg := e2esys.DefaultSystemConfig(t, e2esys.WithLogLevel(log.LevelCrit)) + cfg.DeployConfig.UseAltDA = true + cfg.DeployConfig.DACommitmentType = "GenericCommitment" + cfg.DeployConfig.DAChallengeWindow = 16 + cfg.DeployConfig.DAResolveWindow = 16 + cfg.DeployConfig.DABondSize = 1000000 + cfg.DeployConfig.DAResolverRefundPercentage = 0 + // Default cfg.BatcherMaxChannelDuration is 1, which means at least one channel is sent per L1 block. + // Furthermore, by setting cfg.BatcherMaxPendingTransactions = 1, + // we make sure the batcher posts a single commitment per L1 block. + // This way it's easy to trigger failover and observe the commitment changing on the next L1 block. + cfg.BatcherMaxPendingTransactions = 1 + cfg.BatcherMaxConcurrentDARequest = 1 + cfg.BatcherBatchType = 0 + // currently altda commitments can only be sent as calldata + cfg.DataAvailabilityType = flags.CalldataType + + sys, err := cfg.Start(t) + require.NoError(t, err, "Error starting up system") + defer sys.Close() + l1Client := sys.NodeClient("l1") + + startBlockL1, err := geth.WaitForBlockWithTxFromSender(cfg.DeployConfig.BatchSenderAddress, l1Client, 10) + require.NoError(t, err) + + // Simulate altda server returning 503 + sys.FakeAltDAServer.SetPutFailoverForNRequests(nChannelsFailover) + + countEthDACommitment := uint64(0) + + // There is some nondeterministic timing behavior that affects whether the batcher has already + // posted batches before seeing the above SetPutFailoverForNRequests behavior change. + // Most likely, the sequence of blocks will be: altDA, ethDA, ethDA, altDA, altDA, altDA. + // 2 ethDA are expected (and checked for) because nChannelsFailover=2, so da-server will return 503 for 2 requests only, + // and the batcher always tries altda first for a new channel, and fails over to ethDA only if altda returns 503. 
+ for blockNumL1 := startBlockL1.NumberU64(); blockNumL1 < startBlockL1.NumberU64()+6; blockNumL1++ { + blockL1, err := geth.WaitForBlock(big.NewInt(0).SetUint64(blockNumL1), l1Client) + require.NoError(t, err) + batcherTxs, err := transactions.TransactionsBySender(blockL1, cfg.DeployConfig.BatchSenderAddress) + require.NoError(t, err) + require.Equal(t, 1, len(batcherTxs)) // sanity check: ensure BatcherMaxPendingTransactions=1 is working + batcherTx := batcherTxs[0] + if batcherTx.Data()[0] == byte(params.DerivationVersion0) { + countEthDACommitment++ + t.Log("blockL1", blockNumL1, "batcherTxType", "ethda") + } else if batcherTx.Data()[0] == byte(params.DerivationVersion1) { + t.Log("blockL1", blockNumL1, "batcherTxType", "altda") + } else { + t.Fatalf("unexpected batcherTxType: %v", batcherTx.Data()[0]) + } + } + require.Equal(t, nChannelsFailover, countEthDACommitment, "Expected %v ethDA commitments, got %v", nChannelsFailover, countEthDACommitment) + +}
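The test above classifies batcher txs by the leading calldata byte, i.e. the derivation version. A small hypothetical helper capturing the same check (daLayerOf is not part of the fork; only the params constants come from the diff):

```go
package example

import "github.com/ethereum-optimism/optimism/op-node/rollup/derive/params"

func daLayerOf(calldata []byte) string {
	if len(calldata) == 0 {
		return "unknown"
	}
	switch calldata[0] {
	case byte(params.DerivationVersion0):
		return "ethda" // frame data posted directly to L1
	case byte(params.DerivationVersion1):
		return "altda" // an alt-DA commitment (e.g. an EigenDA cert) follows
	default:
		return "unknown"
	}
}
```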
diff --git ethereum-optimism/optimism/op-e2e/system/da/multi_test.go layr-labs/optimism/op-e2e/system/da/multi_test.go index 461270282008b931e8ec4ca4760221a4c0e9e297..e8b7ea6ff2664cc8245909221d21e581ac69a671 100644 --- ethereum-optimism/optimism/op-e2e/system/da/multi_test.go +++ layr-labs/optimism/op-e2e/system/da/multi_test.go @@ -52,7 +52,7 @@ for i := startBlock; i <= headNum; i++ { block, err := l1Client.BlockByNumber(ctx, big.NewInt(int64(i))) require.NoError(t, err)   - batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress) + batcherTxCount, err := transactions.TransactionsBySenderCount(block, cfg.DeployConfig.BatchSenderAddress) require.NoError(t, err) totalBatcherTxsCount += batcherTxCount
diff --git ethereum-optimism/optimism/op-e2e/system/e2esys/setup.go layr-labs/optimism/op-e2e/system/e2esys/setup.go index 3b4588065b3e649b5b47b7536b2e92e5c176eea6..932cdb6946bafd9d06a849c36ab1093232a85761 100644 --- ethereum-optimism/optimism/op-e2e/system/e2esys/setup.go +++ layr-labs/optimism/op-e2e/system/e2esys/setup.go @@ -6,6 +6,7 @@ "crypto/ecdsa" "crypto/rand" "errors" "fmt" + "log/slog" "math/big" "net" "os" @@ -95,6 +96,7 @@ )   type SystemConfigOpts struct { AllocType config.AllocType + LogLevel slog.Level }   type SystemConfigOpt func(s *SystemConfigOpts) @@ -102,12 +104,19 @@ func WithAllocType(allocType config.AllocType) SystemConfigOpt { return func(s *SystemConfigOpts) { s.AllocType = allocType + } +} + +func WithLogLevel(level slog.Level) SystemConfigOpt { + return func(s *SystemConfigOpts) { + s.LogLevel = level } }   func DefaultSystemConfig(t testing.TB, opts ...SystemConfigOpt) SystemConfig { sco := &SystemConfigOpts{ AllocType: config.DefaultAllocType, + LogLevel: slog.LevelInfo, } for _, opt := range opts { opt(sco) @@ -118,7 +127,7 @@ deployConfig := config.DeployConfig(sco.AllocType) require.Nil(t, deployConfig.L2GenesisJovianTimeOffset, "jovian not supported yet") deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix()) e2eutils.ApplyDeployConfigForks(deployConfig) - require.NoError(t, deployConfig.Check(testlog.Logger(t, log.LevelInfo)), + require.NoError(t, deployConfig.Check(testlog.Logger(t, sco.LogLevel).New("role", "config-check")), "Deploy config is invalid, do you need to run make devnet-allocs?") l1Deployments := config.L1Deployments(sco.AllocType) require.NoError(t, l1Deployments.Check(deployConfig)) @@ -182,11 +191,12 @@ Sync: sync.Config{SyncMode: sync.CLSync}, }, }, Loggers: map[string]log.Logger{ - RoleVerif: testlog.Logger(t, log.LevelInfo).New("role", RoleVerif), - RoleSeq: testlog.Logger(t, log.LevelInfo).New("role", RoleSeq), - "batcher": testlog.Logger(t, log.LevelInfo).New("role", "batcher"), - "proposer": testlog.Logger(t, log.LevelInfo).New("role", "proposer"), - "da-server": testlog.Logger(t, log.LevelInfo).New("role", "da-server"), + RoleVerif: testlog.Logger(t, sco.LogLevel).New("role", RoleVerif), + RoleSeq: testlog.Logger(t, sco.LogLevel).New("role", RoleSeq), + "batcher": testlog.Logger(t, sco.LogLevel).New("role", "batcher"), + "proposer": testlog.Logger(t, sco.LogLevel).New("role", "proposer"), + "da-server": testlog.Logger(t, sco.LogLevel).New("role", "da-server"), + "config-check": testlog.Logger(t, sco.LogLevel).New("role", "config-check"), }, GethOptions: map[string][]geth.GethOption{}, P2PTopology: nil, // no P2P connectivity by default @@ -616,7 +626,7 @@ // sanity-check the deploy config require.Nil(t, cfg.DeployConfig.L2GenesisJovianTimeOffset, "Jovian is not supported in op-e2e tests yet")   - if err := cfg.DeployConfig.Check(testlog.Logger(t, log.LevelInfo)); err != nil { + if err := cfg.DeployConfig.Check(cfg.Loggers["config-check"]); err != nil { return nil, err }   @@ -856,6 +866,24 @@ } } }   + // The altDACLIConfig is shared by the batcher and rollup nodes. 
+ var altDACLIConfig altda.CLIConfig + if cfg.DeployConfig.UseAltDA { + fakeAltDAServer := altda.NewFakeDAServer("127.0.0.1", 0, sys.Cfg.Loggers["da-server"]) + if err := fakeAltDAServer.Start(); err != nil { + return nil, fmt.Errorf("failed to start fake altDA server: %w", err) + } + sys.FakeAltDAServer = fakeAltDAServer + + altDACLIConfig = altda.CLIConfig{ + Enabled: cfg.DeployConfig.UseAltDA, + DAServerURL: fakeAltDAServer.HttpEndpoint(), + VerifyOnRead: true, + GenericDA: true, + MaxConcurrentRequests: cfg.BatcherMaxConcurrentDARequest, + } + } + // Rollup nodes   // Ensure we are looping through the nodes in alphabetical order @@ -870,7 +898,7 @@ c.Rollup = makeRollupConfig() if err := c.LoadPersisted(cfg.Loggers[name]); err != nil { return nil, err } - + c.AltDA = altDACLIConfig if p, ok := p2pNodes[name]; ok { c.P2P = p   @@ -972,22 +1000,6 @@ if batcherTargetNumFrames == 0 { batcherTargetNumFrames = 1 }   - var batcherAltDACLIConfig altda.CLIConfig - if cfg.DeployConfig.UseAltDA { - fakeAltDAServer := altda.NewFakeDAServer("127.0.0.1", 0, sys.Cfg.Loggers["da-server"]) - if err := fakeAltDAServer.Start(); err != nil { - return nil, fmt.Errorf("failed to start fake altDA server: %w", err) - } - sys.FakeAltDAServer = fakeAltDAServer - - batcherAltDACLIConfig = altda.CLIConfig{ - Enabled: cfg.DeployConfig.UseAltDA, - DAServerURL: fakeAltDAServer.HttpEndpoint(), - VerifyOnRead: true, - GenericDA: true, - MaxConcurrentRequests: cfg.BatcherMaxConcurrentDARequest, - } - } batcherCLIConfig := &bss.CLIConfig{ L1EthRpc: sys.EthInstances[RoleL1].UserRPC().RPC(), L2EthRpc: []string{sys.EthInstances[RoleSeq].UserRPC().RPC()}, @@ -1010,7 +1022,7 @@ BatchType: cfg.BatcherBatchType, MaxBlocksPerSpanBatch: cfg.BatcherMaxBlocksPerSpanBatch, DataAvailabilityType: sys.Cfg.DataAvailabilityType, CompressionAlgo: derive.Zlib, - AltDA: batcherAltDACLIConfig, + AltDA: altDACLIConfig, }   // Apply batcher cli modifications
diff --git ethereum-optimism/optimism/kurtosis-devnet/tests/eigenda/README.md layr-labs/optimism/kurtosis-devnet/tests/eigenda/README.md new file mode 100644 index 0000000000000000000000000000000000000000..ad8f36c34d97ec848b5cf378fb33846c68a19fc2 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/tests/eigenda/README.md @@ -0,0 +1,12 @@ +# EigenDA tests + +All tests here are run against an eigenda kurtosis devnet, which is started using the justfile command `just eigenda-devnet-start`. +See the justfile for the available options. + +## Backends + +The devnet can be started with the proxy either in `memstore` mode, or in `holesky` mode where it connects to the eigenda holesky network. + +> Every test in this package MUST have a `_Memstore` or `_Holesky` suffix to indicate which backend it is testing. + +The testing commands in the justfile are `eigenda-devnet-test-memstore` and `eigenda-devnet-test-holesky`, which pattern-match the test names to run the correct tests.
diff --git ethereum-optimism/optimism/kurtosis-devnet/tests/eigenda/batcher_logs_test.go layr-labs/optimism/kurtosis-devnet/tests/eigenda/batcher_logs_test.go new file mode 100644 index 0000000000000000000000000000000000000000..b5503629b3bfe4308266fd1f57f69324e290fec2 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/tests/eigenda/batcher_logs_test.go @@ -0,0 +1,102 @@ +package eigenda + +import ( + "context" + "testing" + "time" + + "github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context" +) + +func TestBatcherFromLogs_Holesky(t *testing.T) { + deadline, ok := t.Deadline() + // !ok means no timeout was set, and hence uses golang's default 10min timeout. + if !ok || time.Until(deadline) < 15*time.Minute { + t.Logf("TestBatcherFromLogs_Holesky needs a timeout of at least 15 minutes to run.") + t.FailNow() + } + harness := NewHarness(t) + // Batching time on Holesky can be up to 10 minutes, so we need a long time to see a tx getting confirmed. + testBatcherFromLogs(t, harness, 15*time.Minute) +} + +func TestBatcherFromLogs_Memstore(t *testing.T) { + harness := NewHarness(t) + // 2 minutes is arbitrary here but should be long enough to observe interesting behavior using memstore. + // Also need to run the failover test which takes quite a while and can't run in parallel with these tests (or can it..?) + testBatcherFromLogs(t, harness, 2*time.Minute) +} + +// These tests are log-driven. The batcher doesn't expose an API to query its state outside of logs and metrics, +// so it's hard to do much better. We rely on some info logs appearing and some warning/error logs not appearing. +// These tests are not very sophisticated, but are at least a good sanity check... +// FIXME: one issue is that if op changes the log lines then our tests here might just silently pass and we won't know... +// A better approach might be to generate txs from inside the golang test instead of relying on the external tx-fuzzer. +// We could then increase traffic until the point where DA gets throttled, then change batcher parameters to increase blob size, etc. +// Updating the batcher params is currently hard to do however; see comments above the eigenda-devnet-restart-batcher command in the justfile. +func testBatcherFromLogs(t *testing.T, harness *Harness, testTimeout time.Duration) { + // We stream logs for testTimeout, and run all the below tests in parallel (they read the same log outputs) + ctxWithTestTimeout, cancel := context.WithTimeout(context.Background(), testTimeout) + t.Cleanup(cancel) + + // Make sure that no channel is ever timing out (fails to be sent to L1 in a timely manner). + // Make sure the testTimeout is longer than max-channel-duration in the batcher config (found in the eigenda-template-values/ files). + // Currently max-channel-duration is set to 10 L1 blocks, meaning 10*6 seconds = 60 seconds. + t.Run("No channel timeout", func(t *testing.T) { + t.Parallel() + // Log output is from https://github.com/Layr-Labs/optimism/blob/a5709b435f39cab0d7f5dc879b65e07e2f90a548/op-batcher/batcher/channel.go#L102 + filter := kurtosis_context.NewDoesContainMatchRegexLogLineFilter("channel timed out") + c := harness.QueryBatcherLogs(ctxWithTestTimeout, true, filter) + + for { + select { + case <-ctxWithTestTimeout.Done(): + return + case logLine := <-c: + t.Logf("channel timed out on batcher... something went wrong. Log line: %v", logLine) + t.FailNow() + } + } + }) + + t.Run("No DA Throttling", func(t *testing.T) { + t.Parallel() + // Log output is from https://github.com/Layr-Labs/optimism/blob/a5709b435f39cab0d7f5dc879b65e07e2f90a548/op-batcher/batcher/driver.go#L540 + filter := kurtosis_context.NewDoesContainMatchRegexLogLineFilter("throttling DA") + c := harness.QueryBatcherLogs(ctxWithTestTimeout, true, filter) + + for { + select { + case <-ctxWithTestTimeout.Done(): + return + case logLine := <-c: + t.Logf("da got throttled... something went wrong. Log line: %v", logLine) + t.FailNow() + } + } + }) + + t.Run("Transactions are confirming", func(t *testing.T) { + t.Parallel() + // Log line from https://github.com/Layr-Labs/optimism/blob/a5709b435f39cab0d7f5dc879b65e07e2f90a548/op-batcher/batcher/driver.go#L921 + // Actually there's a duplicate log line: https://github.com/Layr-Labs/optimism/blob/a5709b435f39cab0d7f5dc879b65e07e2f90a548/op-service/txmgr/txmgr.go#L780 + // We should probably divide by 2 but leaving as is in case this duplicate gets removed in the future... + filter := kurtosis_context.NewDoesContainMatchRegexLogLineFilter("Transaction confirmed") + c := harness.QueryBatcherLogs(ctxWithTestTimeout, true, filter) + + confirmedTxsCount := 0 + for { + select { + case <-ctxWithTestTimeout.Done(): + if confirmedTxsCount == 0 { + t.Logf("no transactions confirmed... something went wrong.") + t.FailNow() + } + t.Logf("%d transactions confirmed", confirmedTxsCount) + return + case <-c: + confirmedTxsCount++ + } + } + }) +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/tests/eigenda/failover_test.go layr-labs/optimism/kurtosis-devnet/tests/eigenda/failover_test.go new file mode 100644 index 0000000000000000000000000000000000000000..4efe412461da04b312ce230c318fe9e6934fbdaa --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/tests/eigenda/failover_test.go @@ -0,0 +1,282 @@ +package eigenda + +import ( + "context" + "encoding/json" + "fmt" + "math/big" + "net/http" + "strconv" + "strings" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/ethereum/go-ethereum/common" + "github.com/stretchr/testify/require" +) + +// TODO: add test which sets other properties of memstore like latency, out of order, etc. + +// TestFailover tests the failover behavior of the batcher, in response to the proxy returning 503 errors. +// See https://github.com/Layr-Labs/eigenda-proxy?tab=readme-ov-file#failover-signals for proxy behavior. +// The proxy's memstore's failover behavior is toggled on and off by this test via a REST api. +// We then check that the batcher correctly interprets the 503 signals and starts submitting batches to EthDACalldata instead. +// The test then toggles the failover back off and checks that the batcher starts submitting EigenDA batches again. +// The batches inbox transactions are queried via geth's GraphQL API. +// +// Note: because this test relies on modifying the proxy's memstore config, it should be run in isolation. +// That is, if we ever implement more kurtosis tests, they would currently need to be run sequentially. +func TestFailoverToEthDACalldata_Memstore(t *testing.T) { + deadline, ok := t.Deadline() + if !ok { + deadline = time.Now().Add(10 * time.Minute) + } + ctxWithDeadline, cancel := context.WithDeadline(context.Background(), deadline) + defer cancel() + + harness := NewHarness(t) + t.Cleanup(func() { + // switch proxy back to normal mode, in case test gets cancelled + ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second) + defer cancel() + t.Logf("Cleanup; Failing back to eigenda... resetting enclave proxy to start posting to eigenda again") + err := harness.Clients.ProxyClients.Failback(ctx) + if err != nil { + t.Logf("Error failing back... you might need to reset proxy to normal mode manually: %v", err) + } + }) + + // Number of blocks to query for batcher txs, for each of the initial/failover/failback stages + // Test will look at batcher txs between blocks: + // - initial altda stage: [testStartL1BlockNum - l1BlocksQueriedForBatcherTxs, testStartL1BlockNum] + // - ethDACalldata stage: [afterFailoverFromBlockNum, afterFailoverFromBlockNum + l1BlocksQueriedForBatcherTxs] + // - altDA stage: [afterFailbackFromBlockNum, afterFailbackFromBlockNum + l1BlocksQueriedForBatcherTxs] + // + // Need to make sure each stage contains at least 2 commitments of the correct type. This can only happen if channels + // are being closed in time, which requires either: sending traffic with traffic-generator, or setting a low (e.g. 2 L1 blocks) channel-timeout. 
+ l1BlocksQueriedForBatcherTxs := uint64(15) + + // assume kurtosis is running and is at least at block numBlocksBetweenStages + require.GreaterOrEqual(t, harness.TestStartL1BlockNum, l1BlocksQueriedForBatcherTxs, "Test started too early in the chain") + fromBlock := harness.TestStartL1BlockNum - l1BlocksQueriedForBatcherTxs + + eigenDABackend, err := harness.Clients.ProxyClients.GetDispersalBackend(ctxWithDeadline) + require.NoError(t, err) + DALayerEigenDA, err := certVersionToDALayer(eigenDABackend) + require.NoError(t, err) + + // 1. Check that the original commitments are EigenDA + t.Logf("[Stage1: EigenDA] Checking that the initial commitments are EigenDA") + requireBatcherTxsToBeFromLayer(t, fromBlock, fromBlock+l1BlocksQueriedForBatcherTxs, DALayerEigenDA, harness.Endpoints.GethL1Endpoint, harness.BatchInboxAddr) + + // 2. Failover and check that the commitments are now EthDACalldata + t.Logf("[Stage2: EthDA-Calldata] Failing over... changing proxy's config to return 503 errors") + err = harness.Clients.ProxyClients.Failover(ctxWithDeadline) + require.NoError(t, err) + + afterFailoverFromBlockNum, err := harness.Clients.GethL1Client.BlockNumber(ctxWithDeadline) + require.NoError(t, err) + afterFailoverToBlockNum := afterFailoverFromBlockNum + l1BlocksQueriedForBatcherTxs + _, err = geth.WaitForBlock(big.NewInt(int64(afterFailoverToBlockNum)), harness.Clients.GethL1Client) + require.NoError(t, err) + + requireBatcherTxsToBeFromLayer(t, afterFailoverFromBlockNum, afterFailoverToBlockNum, DALayerEthCalldata, harness.Endpoints.GethL1Endpoint, harness.BatchInboxAddr) + + // We also check that the op-node is still finalizing blocks after the failover + syncStatus, err := harness.Clients.OpNodeClient.SyncStatus(ctxWithDeadline) + require.NoError(t, err) + afterFailoverFinalizedL2 := syncStatus.FinalizedL2 + t.Logf("[Finalization] Current finalized L2 block: %d. Waiting for next block to finalize to make sure finalization is still happening.", afterFailoverFinalizedL2.Number) + // On average would expect this to take half an epoch, aka 16 L1 blocks, which at 6 sec/block means 1.5 minutes. + // This generally takes longer (3-6 minutes), but I'm not quite sure why. + _, err = geth.WaitForBlockToBeFinalized(new(big.Int).SetUint64(afterFailoverFinalizedL2.Number+1), harness.Clients.OpGethClient, 6*time.Minute) + require.NoError(t, err, "op-node should still be finalizing blocks after failover") + + // 3. Failback and check that the commitments are EigenDA again + t.Logf("[Stage3: EigenDA] Failing back... changing proxy's config to start processing PUT requests normally again") + err = harness.Clients.ProxyClients.Failback(ctxWithDeadline) + require.NoError(t, err) + + afterFailbackFromBlockNum, err := harness.Clients.GethL1Client.BlockNumber(ctxWithDeadline) + require.NoError(t, err) + afterFailbackToBlockNum := afterFailbackFromBlockNum + l1BlocksQueriedForBatcherTxs + _, err = geth.WaitForBlock(big.NewInt(int64(afterFailbackToBlockNum)), harness.Clients.GethL1Client) + require.NoError(t, err) + + requireBatcherTxsToBeFromLayer(t, afterFailbackFromBlockNum, afterFailbackToBlockNum, DALayerEigenDA, harness.Endpoints.GethL1Endpoint, harness.BatchInboxAddr) + +} + +// requireBatcherTxsToBeFromLayer checks that the batcher transactions since startingFromBlockNum are all from the expectedLayer. +// It allows for up to 3 initial commitments to be of the wrong type, as the failover/failback might not have taken effect yet. 
+// It requires that at least 2 commitments of the expected type are present after the failover/failback. +func requireBatcherTxsToBeFromLayer(t *testing.T, fromBlockNum, toBlockNum uint64, expectedLayer DALayer, gethL1Endpoint string, batchInboxAddr common.Address) { + batcherTxs, err := fetchBatcherTxs(gethL1Endpoint, batchInboxAddr.String(), fromBlockNum, toBlockNum) + require.NoError(t, err) + t.Logf("Fetched %d batcher transactions since block %d", len(batcherTxs), fromBlockNum) + + // We allow first 3 commitments to be of the wrong DA layer, as the failover/failback might not have taken effect yet. + wrongCommitmentsToDiscard := 0 + for _, batcherTx := range batcherTxs { + if batcherTx.daLayer != expectedLayer { + t.Logf("Discarding batcher tx @ block %d with wrong commitment type %s (expected %s)", batcherTx.block, batcherTx.daLayer, expectedLayer) + wrongCommitmentsToDiscard++ + } + // as soon as we see a commitment from expectedLayer, we stop discarding. + if batcherTx.daLayer == expectedLayer { + break + } + } + batcherTxs = batcherTxs[wrongCommitmentsToDiscard:] + t.Logf("Discarded %d commitments. %d left which should all be %v", wrongCommitmentsToDiscard, len(batcherTxs), expectedLayer) + + // After potentially discarding some commitments from wrong da layer, we expect all future commitments (at least 2) to be of the expectedLayer + require.GreaterOrEqual(t, len(batcherTxs), 2, "Expected at least 2 %v commitments after failover/failback", expectedLayer) + for _, batcherTx := range batcherTxs { + require.Equal(t, expectedLayer, batcherTx.daLayer, + "Invalid commitment in block %d: expected %v, received commitment %s", batcherTx.block, expectedLayer, batcherTx.commitment) + } +} + +// See https://specs.optimism.io/experimental/alt-da.html#example-commitments +// Batcher only supports failing over to calldata txs right now, so this test doesn't test 4844 failover. 
+// Note that 4844 txs are completely different and don't use normal txs with a prefix in the calldata, +// see https://github.com/ethereum-optimism/optimism/blob/develop/op-node/rollup/derive/blob_data_source.go#L134-L137 +const ethDACalldataCommitmentPrefix = "0x00" +const eigenDAV1CommitmentPrefix = "0x01010000" +const eigenDAV2CommitmentPrefix = "0x01010001" + +type DALayer string + +const ( + DALayerEthCalldata DALayer = "ethda-calldata" + DALayerEigenDAV1 DALayer = "eigendaV1" + DALayerEigenDAV2 DALayer = "eigendaV2" +) + +func certVersionToDALayer(certVersion EigenDACertVersion) (DALayer, error) { + switch certVersion { + case EigenDACertVersionV1: + return DALayerEigenDAV1, nil + case EigenDACertVersionV2: + return DALayerEigenDAV2, nil + default: + return "", fmt.Errorf("unknown EigenDA cert version: %s", certVersion) + } +} + +type BatcherTx struct { + commitment string + daLayer DALayer // commitment starts with respective prefix + block uint64 +} + +// HexUint64 is a custom type that can unmarshal from a hex string +type HexUint64 uint64 + +// UnmarshalJSON implements the json.Unmarshaler interface +func (h *HexUint64) UnmarshalJSON(data []byte) error { + // Remove quotes from the JSON string + hexStr := string(data) + hexStr = strings.Trim(hexStr, "\"") + + // Check if it's a hex string + if !strings.HasPrefix(hexStr, "0x") { + return fmt.Errorf("not a hex string: %s", hexStr) + } + + // Parse the hex string (without the 0x prefix) + val, err := strconv.ParseUint(hexStr[2:], 16, 64) + if err != nil { + return err + } + + *h = HexUint64(val) + return nil +} + +// Fetches all the batch-inbox posted commitments from blockNum (inclusive) to current block. +// We rely on geth's GraphQL API to fetch the batcher transactions. +// We could possibly have reused op-node's L1Retriever, but the API felt very derivation-pipeline specific, +// and there doesn't seem to be a way to reuse it easily for constructing a custom derivation-pipeline with a subset of stages +// like what we need here. Could consider migrating in the future if we need more complex logic. 
+func fetchBatcherTxs(gethL1Endpoint string, batchInbox string, fromBlockNum, toBlockNum uint64) ([]BatcherTx, error) { + // We use standard HTTP for GraphQL as it's not directly supported by the rpc package + // Visit gethL1Endpoint/graphql/ui to see the schema and test queries + query := fmt.Sprintf(` + { + "query": "query txInfo { blocks(from:%v, to:%v) { transactions { to { address } inputData block { number } } } }" + }`, fromBlockNum, toBlockNum) + + // Make GraphQL request + req, err := http.NewRequest("POST", gethL1Endpoint+"/graphql", strings.NewReader(query)) + if err != nil { + return nil, err + } + + req.Header.Set("Content-Type", "application/json") + httpClient := &http.Client{} + resp, err := httpClient.Do(req) + if err != nil { + return nil, err + } + defer resp.Body.Close() + + // Parse the response + type GraphQLResponse struct { + Data struct { + Blocks []struct { + Transactions []struct { + To struct { + Address string `json:"address"` + } `json:"to"` + InputData string `json:"inputData"` + Block struct { + // we use HexUint64 to properly parse the hex strings returned + Number HexUint64 `json:"number"` + } `json:"block"` + } `json:"transactions"` + } `json:"blocks"` + } `json:"data"` + } + var graphQLResp GraphQLResponse + if err := json.NewDecoder(resp.Body).Decode(&graphQLResp); err != nil { + return nil, err + } + if len(graphQLResp.Data.Blocks) == 0 { + // Assume that this is a graphQL query error, that would have returned something like + // "errors": [ + // { + // "message": "syntax error: unexpected \"\", expecting Ident", + // } + // ] + // TODO: prob should just switch to a proper graphql client that can handle these properly + return nil, fmt.Errorf("no blocks returned in GraphQL response") + } + + // Filter transactions to the batcher address + var batcherTxs []BatcherTx + for _, block := range graphQLResp.Data.Blocks { + for _, tx := range block.Transactions { + if strings.EqualFold(tx.To.Address, batchInbox) { + batcherTx := BatcherTx{ + commitment: tx.InputData, + block: uint64(tx.Block.Number), + } + if strings.HasPrefix(tx.InputData, eigenDAV1CommitmentPrefix) { + batcherTx.daLayer = DALayerEigenDAV1 + } else if strings.HasPrefix(tx.InputData, eigenDAV2CommitmentPrefix) { + batcherTx.daLayer = DALayerEigenDAV2 + } else if strings.HasPrefix(tx.InputData, ethDACalldataCommitmentPrefix) { + batcherTx.daLayer = DALayerEthCalldata + } else { + return nil, fmt.Errorf("unknown commitment prefix: %s", tx.InputData) + } + batcherTxs = append(batcherTxs, batcherTx) + } + } + } + + return batcherTxs, nil +}
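For reference, here is the prefix matching from fetchBatcherTxs pulled out on its own, with a byte-by-byte reading of the commitment prefixes. The annotations are my interpretation of the alt-da spec linked above, not taken from the diff, and should be treated as a hedged sketch:

```go
package eigenda

import "strings"

// classifyCommitment mirrors the prefix matching inside fetchBatcherTxs.
// The byte-level annotations below are my reading of the alt-da encoding:
//
//	0x00 ...                 frame data posted directly as calldata (derivation version byte 0)
//	0x01 0x01 0x00 0x00 ...  alt-da tx | generic commitment | da layer byte | cert version byte (V1)
//	0x01 0x01 0x00 0x01 ...  same, with cert version byte 0x01 (V2)
func classifyCommitment(inputData string) (DALayer, bool) {
	switch {
	case strings.HasPrefix(inputData, eigenDAV1CommitmentPrefix): // "0x01010000"
		return DALayerEigenDAV1, true
	case strings.HasPrefix(inputData, eigenDAV2CommitmentPrefix): // "0x01010001"
		return DALayerEigenDAV2, true
	case strings.HasPrefix(inputData, ethDACalldataCommitmentPrefix): // "0x00"
		return DALayerEthCalldata, true
	default:
		return "", false
	}
}
```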
diff --git ethereum-optimism/optimism/kurtosis-devnet/tests/eigenda/harness.go layr-labs/optimism/kurtosis-devnet/tests/eigenda/harness.go new file mode 100644 index 0000000000000000000000000000000000000000..e027198a350496778fde0d8ed6a725c6e4b22c81 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/tests/eigenda/harness.go @@ -0,0 +1,354 @@ +package eigenda + +import ( + "bytes" + "context" + "encoding/json" + "fmt" + "log/slog" + "net/http" + "reflect" + "strings" + "testing" + "time" + + "github.com/Layr-Labs/eigenda-proxy/clients/memconfig_client" + "github.com/ethereum-optimism/optimism/op-service/client" + "github.com/ethereum-optimism/optimism/op-service/dial" + "github.com/ethereum-optimism/optimism/op-service/sources" + "github.com/ethereum-optimism/optimism/op-service/testlog" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/ethclient" + "github.com/ethereum/go-ethereum/log" + "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves" + "github.com/kurtosis-tech/kurtosis/api/golang/core/lib/services" + "github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context" + "github.com/stretchr/testify/require" +) + +// The below are hardcoded constants that are specific to the eigenda-devnet enclave. +// They could be fetched dynamically from the enclave if/when needed, but for now it's easier to just hardcode them here, +// since we don't plan on changing them anytime soon. + +// All tests are run in the context of the eigenda-devnet enclave. +// We assume that this enclave is already running. +const enclaveName = "eigenda-devnet" + +// Test Harness, which contains all the state needed to run the tests. +// Harness also defines some higher-level "require" methods that are used in the tests. +type Harness struct { + t *testing.T + Logger log.Logger + Endpoints *EnclaveServicePublicEndpoints + Clients *EnclaveServiceClients + BatcherUUID services.ServiceUUID // used to query for batcher logs + BatchInboxAddr common.Address + TestStartL1BlockNum uint64 +} + +func NewHarness(t *testing.T) *Harness { + logger := testlog.Logger(t, slog.LevelInfo) + + // We leave 20 seconds to build the entire testHarness. 
+ ctxWithTimeout, cancel := context.WithTimeout(context.Background(), 20*time.Second) + defer cancel() + + // Create a Kurtosis context + kurtosisCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine() + require.NoError(t, err) + + // Get the `enclaveName` enclave context (assuming it's already running) + enclaveCtx, err := kurtosisCtx.GetEnclaveContext(ctxWithTimeout, enclaveName) + require.NoError(t, err, "Error getting enclave context: is enclave %v running?", enclaveName) + + endpoints, err := getPublicEndpointsFromKurtosis(enclaveCtx) + require.NoError(t, err) + t.Logf("Endpoints: %+v", endpoints) + + clients, err := getClientsFromEndpoints(ctxWithTimeout, logger, endpoints) + require.NoError(t, err) + + // Get the batch inbox address from the rollup config + rollupConfig, err := clients.OpNodeClient.RollupConfig(ctxWithTimeout) + require.NoError(t, err) + + // Get the current L1 block number + testStartL1BlockNum, err := clients.GethL1Client.BlockNumber(ctxWithTimeout) + require.NoError(t, err) + + batcherCtx, err := enclaveCtx.GetServiceContext("op-batcher-2151908-op-kurtosis") + require.NoError(t, err) + + return &Harness{ + t: t, + Logger: logger, + Endpoints: endpoints, + Clients: clients, + BatcherUUID: batcherCtx.GetServiceUUID(), + BatchInboxAddr: rollupConfig.BatchInboxAddress, + TestStartL1BlockNum: testStartL1BlockNum, + } +} + +func (h *Harness) QueryBatcherLogs(ctx context.Context, shouldFollowLogs bool, logLineFilter *kurtosis_context.LogLineFilter) <-chan string { + outC := make(chan string) + kurtosisCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine() + require.NoError(h.t, err) + + uuidMap := map[services.ServiceUUID]bool{ + h.BatcherUUID: true, + } + logsC, _, err := kurtosisCtx.GetServiceLogs(ctx, enclaveName, uuidMap, shouldFollowLogs, true, 1000, logLineFilter) + require.NoError(h.t, err) + + go func() { + for logContent := range logsC { + logs := logContent.GetServiceLogsByServiceUuids()[h.BatcherUUID] + for _, log := range logs { + outC <- log.GetContent() + } + } + }() + return outC + +} + +// Localhost endpoints for the different services in the enclave +// that we need to interact with. We store the public localhost endpoints instead +// of the private enclave endpoints because we need to interact with the services +// using external shell commands like `cast rpc ...` and `cast geth ...`. +// The public endpoints are the ones that are exposed to the host machine. +type EnclaveServicePublicEndpoints struct { + OpNodeEndpoint string `kurtosis:"op-cl-2151908-node0-op-node,rpc"` + OpGethEndpoint string `kurtosis:"op-el-2151908-node0-op-geth,rpc"` + GethL1Endpoint string `kurtosis:"el-1-geth-teku,rpc"` + EigendaProxyEndpoint string `kurtosis:"op-da-da-server-2151908-op-kurtosis,http"` + // Adding new endpoints is as simple as adding a new field with a kurtosis tag + // NewServiceEndpoint string `kurtosis:"new-service-name,port-name"` +} + +// Constructor for EnclaveServiceEndpoints struct, which assumes a running kurtosis enclave +// and queries the needed services for their public (localhost) ports, and constructs +// the struct with the endpoints. +// +// This function uses reflection to parse the `kurtosis` tags in the struct fields to get the service name and port name. +// See the comments in the EnclaveServicePublicEndpoints struct for more details on adding a new endpoint. 
+func getPublicEndpointsFromKurtosis(enclaveCtx *enclaves.EnclaveContext) (*EnclaveServicePublicEndpoints, error) { + endpoints := &EnclaveServicePublicEndpoints{} + + // Get the type of the struct to iterate over fields + t := reflect.TypeOf(endpoints).Elem() + v := reflect.ValueOf(endpoints).Elem() + + // Iterate over all fields in the struct + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + // Get the kurtosis tag + tag := field.Tag.Get("kurtosis") + if tag == "" { + return nil, fmt.Errorf("field %s doesn't have a kurtosis tag", field.Name) + } + + // Parse the tag to get service name and port name + parts := strings.Split(tag, ",") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid kurtosis tag format for field %s: %s", field.Name, tag) + } + + serviceName := parts[0] + portName := parts[1] + + // Get the service context + serviceCtx, err := enclaveCtx.GetServiceContext(serviceName) + if err != nil { + return nil, fmt.Errorf("GetServiceContext for %s: %w", serviceName, err) + } + + // Get the port + port, ok := serviceCtx.GetPublicPorts()[portName] + if !ok { + return nil, fmt.Errorf("service %s doesn't expose %s port", serviceName, portName) + } + + // Set the endpoint URL in the struct field + endpoint := fmt.Sprintf("http://localhost:%d", port.GetNumber()) + v.Field(i).SetString(endpoint) + } + + return endpoints, nil +} + +type EnclaveServiceClients struct { + // opNode and opGeth are the L2 clients for the rollup. + OpNodeClient *sources.RollupClient + // opGeth is the client for the L2 execution layer client. + OpGethClient *ethclient.Client + // gethL1 is the client for the L1 chain execution layer client. + GethL1Client *ethclient.Client + // ProxyClients is the client for the eigenda-proxy's APIs: admin and memstore config. + // It allows us to toggle the proxy's failover behavior, + // as well as toggle the dispersal backend between V1 and V2. + ProxyClients *ProxyClients +} + +func getClientsFromEndpoints(ctx context.Context, logger log.Logger, endpoints *EnclaveServicePublicEndpoints) (*EnclaveServiceClients, error) { + opts := []client.RPCOption{ + client.WithCallTimeout(10 * time.Second), + } + opNodeClient, err := dial.DialRollupClientWithTimeout(ctx, logger, endpoints.OpNodeEndpoint, opts...) + if err != nil { + return nil, fmt.Errorf("dial.DialRollupClientWithTimeout: %w", err) + } + + opGethClient, err := dial.DialEthClientWithTimeout(ctx, 10*time.Second, logger, endpoints.OpGethEndpoint) + if err != nil { + return nil, fmt.Errorf("dial.DialEthClientWithTimeout: %w", err) + } + + // TODO: prob also change to use dial.DialEthClient? 
+ gethL1Client, err := ethclient.Dial(endpoints.GethL1Endpoint) + if err != nil { + return nil, fmt.Errorf("ethclient.Dial: %w", err) + } + + proxyClients := NewProxyClients(endpoints.EigendaProxyEndpoint) + + return &EnclaveServiceClients{ + OpNodeClient: opNodeClient, + OpGethClient: opGethClient, + GethL1Client: gethL1Client, + ProxyClients: proxyClients, + }, nil +} + +// ProxyClients is a wrapper around the memconfig client that adds a Failover method +// TODO: we should upstream this to eigenda-proxy repo +type ProxyClients struct { + *memconfig_client.Client + *ProxyAdminAPIClient +} + +func NewProxyClients(proxyEndpoint string) *ProxyClients { + return &ProxyClients{ + Client: memconfig_client.New(&memconfig_client.Config{URL: proxyEndpoint}), + ProxyAdminAPIClient: &ProxyAdminAPIClient{ + proxyEndpoint: proxyEndpoint, + }, + } +} + +// Update the proxy's memstore config to start returning 503 errors +// Note: we have to GetConfig, update it and then UpdateConfig because the client doesn't implement a "patch" method, +// even though the API does support it. +func (c *ProxyClients) Failover(ctx context.Context) error { + memConfig, err := c.GetConfig(ctx) + if err != nil { + return fmt.Errorf("GetConfig: %w", err) + } + memConfig.PutReturnsFailoverError = true + _, err = c.UpdateConfig(ctx, memConfig) + if err != nil { + return fmt.Errorf("UpdateConfig: %w", err) + } + return nil +} +func (c *ProxyClients) Failback(ctx context.Context) error { + memConfig, err := c.GetConfig(ctx) + if err != nil { + return fmt.Errorf("GetConfig: %w", err) + } + memConfig.PutReturnsFailoverError = false + _, err = c.UpdateConfig(ctx, memConfig) + if err != nil { + return fmt.Errorf("UpdateConfig: %w", err) + } + return nil +} + +type EigenDACertVersion string + +const ( + EigenDACertVersionV1 EigenDACertVersion = "V1" + EigenDACertVersionV2 EigenDACertVersion = "V2" +) + +// Simple REST client for the proxy's admin API routes: +// https://github.com/Layr-Labs/eigenda-proxy?tab=readme-ov-file#admin-routes +// TODO: this should prob live in proxy repo? +type ProxyAdminAPIClient struct { + proxyEndpoint string // e.g. 
http://localhost:3100 +} + +func (c *ProxyAdminAPIClient) GetDispersalBackend(ctx context.Context) (EigenDACertVersion, error) { + // URL to send the request to + url := c.proxyEndpoint + "/admin/eigenda-dispersal-backend" + + // Create a new request + req, err := http.NewRequestWithContext(ctx, "GET", url, nil) + if err != nil { + return "", fmt.Errorf("error creating request: %w", err) + } + + // Create HTTP client + client := &http.Client{} + + // Send the request + resp, err := client.Do(req) + if err != nil { + return "", fmt.Errorf("error sending request: %w", err) + } + defer resp.Body.Close() + + // Check response status + if resp.StatusCode != http.StatusOK { + return "", fmt.Errorf("error response from server: %s", resp.Status) + } + + // Read and parse the response body + var response struct { + EigenDADispersalBackend string `json:"eigenDADispersalBackend"` + } + + if err := json.NewDecoder(resp.Body).Decode(&response); err != nil { + return "", fmt.Errorf("error decoding response: %w", err) + } + + switch response.EigenDADispersalBackend { + case "V1": + return EigenDACertVersionV1, nil + case "V2": + return EigenDACertVersionV2, nil + default: + return "", fmt.Errorf("unknown backend version received from proxy: %s", response.EigenDADispersalBackend) + } +} + +func (c *ProxyAdminAPIClient) SetDispersalBackend(ctx context.Context, version EigenDACertVersion) error { + // URL to send the request to + url := c.proxyEndpoint + "/admin/eigenda-dispersal-backend" + + // body is json containing backend version + jsonData := []byte(fmt.Sprintf(`{"eigenDADispersalBackend": "%s"}`, version)) + + req, err := http.NewRequest("PUT", url, bytes.NewBuffer(jsonData)) + if err != nil { + return fmt.Errorf("error creating request: %w", err) + } + req.Header.Set("Content-Type", "application/json") + + // Create HTTP client + client := &http.Client{} + + // Send the request + resp, err := client.Do(req) + if err != nil { + return fmt.Errorf("error sending request: %w", err) + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return fmt.Errorf("error response from server: %s", resp.Status) + } + return nil +}
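A minimal sketch of what consuming this harness from a new test looks like. It assumes, as every test in this package does, that the eigenda-devnet enclave is already running; the test itself is illustrative only:

```go
package eigenda

import (
	"context"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

// TestSketch_HarnessUsage is illustrative, not part of the diff: build the
// harness, then use the dialed clients and rollup metadata it exposes.
func TestSketch_HarnessUsage(t *testing.T) {
	h := NewHarness(t)

	ctx, cancel := context.WithTimeout(context.Background(), 30*time.Second)
	defer cancel()

	// The harness exposes ready-to-use clients for L1, L2, op-node and proxy.
	head, err := h.Clients.GethL1Client.BlockNumber(ctx)
	require.NoError(t, err)
	t.Logf("L1 head %d; batch inbox %s; test started at L1 block %d",
		head, h.BatchInboxAddr, h.TestStartL1BlockNum)
}
```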
diff --git ethereum-optimism/optimism/kurtosis-devnet/tests/eigenda/v2_migration_test.go layr-labs/optimism/kurtosis-devnet/tests/eigenda/v2_migration_test.go new file mode 100644 index 0000000000000000000000000000000000000000..0bbf5684dc090ec89090d100e82991696f0c4d43 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/tests/eigenda/v2_migration_test.go @@ -0,0 +1,109 @@ +package eigenda + +import ( + "context" + "math/big" + "testing" + "time" + + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth" + "github.com/stretchr/testify/require" +) + +func TestEigenDAV2Migration_Memstore(t *testing.T) { + // 20 is enough in memstore mode since certs finalize instantly and thus land + // in batcher-inbox very fast. + testEigenDAV2Migration(t, 20) +} + +func TestEigenDAV2Migration_Holesky(t *testing.T) { + // We set to 160 = 16 mins (160 L1Blocks * 6sec/L1Block). + // This is unfortunately required because of the holocene strict ordering rules. + // After switching proxy from v1 to v2, the v2 certs, even though they finalize very quickly + // and are returned to op-batcher, op-batcher cannot send them to the batch-inbox until + // the v1 certs have been sent. But v1 certs are still waiting to be bridged onchain. + // So we need to wait at least 10-15 mins for the v1 certs to be returned + // from proxy to op-batcher and submitted to batch-inbox before any v2 cert lands onchain. + testEigenDAV2Migration(t, 160) +} + +// TestEigenDAV2Migration tests a rollup migration from eigenDA V1 to V2. +// We use the proxy's admin REST API to change the dispersal backend. +// See https://github.com/Layr-Labs/eigenda-proxy?tab=readme-ov-file#on-the-fly-migration for details. +// This test simply checks that the batcher txs have the correct version byte. +// The batches inbox transactions are queried via geth's GraphQL API. +// +// l1BlocksQueriedForBatcherTxs is the number of blocks to query for batcher txs, for v1 and v2 stages. +// Need to make sure each stage contains at least 2 commitments of the correct type. This can only happen if channels +// are being closed in time, which requires either: sending traffic with traffic-generator, or setting a low (e.g. 2 L1 blocks) channel-timeout. +// +// Note: because this test modifies the proxy's state config, it should be run in isolation (sequentially). +func testEigenDAV2Migration(t *testing.T, v2StageL1BlocksQueriedForBatcherTxs uint64) { + // we assume that kurtosis devnet has already been running for a while, + // and check the 20 previous blocks to make sure that the batcher txs are from the correct layer. 
+ v1StageL1BlocksQueriedForBatcherTxs := uint64(40) + + l1BlockTime := 6 * time.Second + v1StageTimeRequired := time.Duration(v1StageL1BlocksQueriedForBatcherTxs) * l1BlockTime + v2StageTimeRequired := time.Duration(v2StageL1BlocksQueriedForBatcherTxs) * l1BlockTime + opNodeFinalizationStageTime := 8 * time.Minute // we leave 8 mins for op-node finalization + testTimeout := v1StageTimeRequired + v2StageTimeRequired + opNodeFinalizationStageTime + ctxWithTestTimeout, cancel := context.WithTimeout(context.Background(), testTimeout) + defer cancel() + + harness := NewHarness(t) + + // explicitly set to v1 since kurtosis can also be started with proxy in v2 dispersal mode + ctxWithTimeout, cancel := context.WithTimeout(ctxWithTestTimeout, 5*time.Second) + defer cancel() + certVersion, err := harness.Clients.ProxyClients.GetDispersalBackend(ctxWithTimeout) + require.NoError(t, err) + if certVersion != EigenDACertVersionV1 { + // V1 certs take a long time to confirm because of batching+bridging, + // so we only run this test if the proxy is already in V1 mode, + // meaning it has a pipeline of certs that are waiting to be confirmed. + t.Skip("skipping test because proxy is not in EigenDA V1 mode") + } + + // 1. Check that the original commitments are EigenDA V1 + t.Logf("[Stage1: EigenDA V1] Checking that the initial commitments are EigenDA V1") + + stage1FromBlockNum := harness.TestStartL1BlockNum + stage1ToBlockNum := stage1FromBlockNum + v1StageL1BlocksQueriedForBatcherTxs + _, err = geth.WaitForBlock(big.NewInt(int64(stage1ToBlockNum)), harness.Clients.GethL1Client, + geth.WithAbsoluteTimeout(v1StageTimeRequired+1*time.Minute)) // add an extra minute to make sure we don't timeout + require.NoError(t, err) + + requireBatcherTxsToBeFromLayer(t, stage1FromBlockNum, stage1ToBlockNum, DALayerEigenDAV1, harness.Endpoints.GethL1Endpoint, harness.BatchInboxAddr) + + // 2. Change dispersal backend to EigenDA V2 and check that the new commitments are EigenDA V2 + t.Logf("[Stage2] Changing proxy's dispersal backend to submit to EigenDA V2") + ctxWithTimeout, cancel = context.WithTimeout(ctxWithTestTimeout, 5*time.Second) + defer cancel() + err = harness.Clients.ProxyClients.SetDispersalBackend(ctxWithTimeout, EigenDACertVersionV2) + require.NoError(t, err) + + ctxWithTimeout, cancel = context.WithTimeout(ctxWithTestTimeout, 5*time.Second) + defer cancel() + stage2FromBlockNum, err := harness.Clients.GethL1Client.BlockNumber(ctxWithTimeout) + require.NoError(t, err) + stage2ToBlockNum := stage2FromBlockNum + v2StageL1BlocksQueriedForBatcherTxs + _, err = geth.WaitForBlock(big.NewInt(int64(stage2ToBlockNum)), harness.Clients.GethL1Client, + geth.WithAbsoluteTimeout(v2StageTimeRequired+1*time.Minute)) // add an extra minute to make sure we don't timeout + require.NoError(t, err) + + requireBatcherTxsToBeFromLayer(t, stage2FromBlockNum, stage2ToBlockNum, DALayerEigenDAV2, harness.Endpoints.GethL1Endpoint, harness.BatchInboxAddr) + + // We also check that the op-node is still finalizing blocks after the migration to v2 + t.Logf("[Stage3] Check that op-node is still finalizing blocks after v2 migration") + ctxWithTimeout, cancel = context.WithTimeout(ctxWithTestTimeout, 5*time.Second) + defer cancel() + syncStatus, err := harness.Clients.OpNodeClient.SyncStatus(ctxWithTimeout) + require.NoError(t, err) + afterFailoverFinalizedL2 := syncStatus.FinalizedL2 + t.Logf("[Finalization] Current finalized L2 block: %d. 
Waiting for next block to finalize to make sure finalization is still happening.", afterFailoverFinalizedL2.Number) + // On average would expect this to take half an epoch, aka 16 L1 blocks, which at 6 sec/block means 1.5 minutes. + // This generally takes longer (3-6 minutes), but I'm not quite sure why. + _, err = geth.WaitForBlockToBeFinalized(new(big.Int).SetUint64(afterFailoverFinalizedL2.Number+1), harness.Clients.OpGethClient, 6*time.Minute) + require.NoError(t, err, "op-node should still be finalizing blocks after failover") +}
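The proxy-side switch that this migration test drives reduces to one admin-API round trip. A sketch of that round trip using the clients defined in harness.go; the wrapper function is hypothetical, but the methods it calls are the ones from the diffs above:

```go
package eigenda

import (
	"context"
	"fmt"
)

// switchBackendToV2 is a hypothetical wrapper around the admin-API calls the
// migration test makes: read the current dispersal backend, flip it to V2,
// and confirm the change took effect.
func switchBackendToV2(ctx context.Context, clients *ProxyClients) error {
	before, err := clients.GetDispersalBackend(ctx)
	if err != nil {
		return fmt.Errorf("GetDispersalBackend: %w", err)
	}
	if before == EigenDACertVersionV2 {
		return nil // already dispersing to V2
	}
	if err := clients.SetDispersalBackend(ctx, EigenDACertVersionV2); err != nil {
		return fmt.Errorf("SetDispersalBackend: %w", err)
	}
	after, err := clients.GetDispersalBackend(ctx)
	if err != nil {
		return fmt.Errorf("GetDispersalBackend: %w", err)
	}
	if after != EigenDACertVersionV2 {
		return fmt.Errorf("backend did not switch, still %s", after)
	}
	return nil
}
```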

Replaced op’s CircleCI configuration with GitHub Actions workflows relevant to testing our changes.

diff --git ethereum-optimism/optimism/.circleci/config.yml layr-labs/optimism/.circleci/config.yml deleted file mode 100644 index 742cfc39714abdafd52ec232035b07905003e0f0..0000000000000000000000000000000000000000 --- ethereum-optimism/optimism/.circleci/config.yml +++ /dev/null @@ -1,2676 +0,0 @@ -version: 2.1 - -parameters: - default_docker_image: - type: string - default: cimg/base:2024.01 - base_image: - type: string - default: default - # The dispatch parameters are used to manually dispatch pipelines that normally only run post-merge on develop - # from the CircleCI UI. Example configuration: - # when: - # or: - # - equal: [ "develop", <<pipeline.git.branch>> ] - # - equal: [ true, <<pipeline.parameters.main_dispatch>> ] - # Add a new `*_dispatch` parameter for any pipeline you want manual dispatch for. - main_dispatch: - type: boolean - default: true # default to running main in case the manual run cancelled an automatic run - fault_proofs_dispatch: - type: boolean - default: false - reproducibility_dispatch: - type: boolean - default: false - diff_asterisc_bytecode_dispatch: - type: boolean - default: false - kontrol_dispatch: - type: boolean - default: false - cannon_full_test_dispatch: - type: boolean - default: false - sdk_dispatch: - type: boolean - default: false - docker_publish_dispatch: - type: boolean - default: false - publish_contract_artifacts_dispatch: - type: boolean - default: false - stale_check_dispatch: - type: boolean - default: false - contracts_coverage_dispatch: - type: boolean - default: false - acceptance_tests_dispatch: - type: boolean - default: false - github-event-type: - type: string - default: "__not_set__" - github-event-action: - type: string - default: "__not_set__" - github-event-base64: - type: string - default: "__not_set__" - devnet-metrics-collect: - type: boolean - default: false - -orbs: - go: circleci/go@1.8.0 - gcp-cli: circleci/gcp-cli@3.0.1 - slack: circleci/slack@5.1.1 - shellcheck: circleci/shellcheck@3.2.0 - codecov: codecov/codecov@5.0.3 - utils: ethereum-optimism/circleci-utils@1.0.20 - docker: circleci/docker@2.8.2 - github-cli: circleci/github-cli@2.7.0 - -commands: - gcp-oidc-authenticate: - description: "Authenticate with GCP using a CircleCI OIDC token." 
- parameters: - project_id: - type: env_var_name - default: GCP_PROJECT_ID - workload_identity_pool_id: - type: env_var_name - default: GCP_WIP_ID - workload_identity_pool_provider_id: - type: env_var_name - default: GCP_WIP_PROVIDER_ID - service_account_email: - type: env_var_name - default: GCP_SERVICE_ACCOUNT_EMAIL - gcp_cred_config_file_path: - type: string - default: /home/circleci/gcp_cred_config.json - oidc_token_file_path: - type: string - default: /home/circleci/oidc_token.json - steps: - - run: - name: "Create OIDC credential configuration" - command: | - # Store OIDC token in temp file - echo $CIRCLE_OIDC_TOKEN > << parameters.oidc_token_file_path >> - # Create a credential configuration for the generated OIDC ID Token - gcloud iam workload-identity-pools create-cred-config \ - "projects/${<< parameters.project_id >>}/locations/global/workloadIdentityPools/${<< parameters.workload_identity_pool_id >>}/providers/${<< parameters.workload_identity_pool_provider_id >>}"\ - --output-file="<< parameters.gcp_cred_config_file_path >>" \ - --service-account="${<< parameters.service_account_email >>}" \ - --credential-source-file=<< parameters.oidc_token_file_path >> - - run: - name: "Authenticate with GCP using OIDC" - command: | - # Configure gcloud to leverage the generated credential configuration - gcloud auth login --brief --cred-file "<< parameters.gcp_cred_config_file_path >>" - # Configure ADC - echo "export GOOGLE_APPLICATION_CREDENTIALS='<< parameters.gcp_cred_config_file_path >>'" | tee -a "$BASH_ENV" - - check-changed: - description: "Conditionally halts a step if certain modules change" - parameters: - patterns: - type: string - description: "Comma-separated list of dependencies" - no_go_deps: - type: string - default: "" - description: "If set, does not trigger on `go.mod` / `go.sum` changes." - steps: - - run: - name: "Check for changes" - environment: - CHECK_CHANGED_NO_GO_DEPS: "<<parameters.no_go_deps>>" - command: | - cd ops/check-changed - pip3 install -r requirements.txt - python3 main.py "<<parameters.patterns>>" - - install-contracts-dependencies: - description: "Install the dependencies for the smart contracts" - parameters: - solc_versions: - description: "The versions of solc to install" - type: string - default: "0.8.15,0.8.19,0.8.25,0.8.28" - steps: - - restore_cache: - keys: - # use mise.toml to anchor on the underlying forge/svm versions - - svm-cache-{{ checksum "mise.toml" }}-<<parameters.solc_versions>> - - run: - name: Install solc compilers - command: | - for version in $(echo "<<parameters.solc_versions>>" | tr ',' '\n'); do - svm which $version || svm install $version - done - - save_cache: - key: svm-cache-{{ checksum "mise.toml" }}-<<parameters.solc_versions>> - paths: - - ~/.svm - - run: - name: Install dependencies - command: | - # Manually craft the submodule update command in order to take advantage - # of the -j parameter, which speeds it up a lot. 
- git submodule update --init --recursive --force -j 8 - working_directory: packages/contracts-bedrock - - # Notifies us on Slack a build fails on develop - notify-failures-on-develop: - description: "Notify Slack" - parameters: - channel: - type: string - default: C03N11M0BBN - mentions: - type: string - default: "" - steps: - - slack/notify: - channel: << parameters.channel >> - event: fail - template: basic_fail_1 - branch_pattern: develop - mentions: "<< parameters.mentions >>" - - # Notifies us on Discord when a build fails on develop - # For Discord to properly trigger notifications, mentions need to be in the format: - # User mentions: <@USER_ID> - # Role mentions: <@&ROLE_ID> - # Example: <@&1346448413172170807> is how we'd tag the Protocol DevX Pod - discord-notification-failures-on-develop: - description: "Notify Discord" - parameters: - message: - type: string - default: "" - mentions: - type: string - default: "" - steps: - - run: - name: "Notify Discord" - command: | - if [ "${CIRCLE_BRANCH}" == "develop" ]; then - # Format message for Discord with better structure and formatting - DISCORD_MESSAGE="🚨 **CI Failure Detected** 🚨\n" - DISCORD_MESSAGE="${DISCORD_MESSAGE}> **Repository:** \`${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}\`\n" - DISCORD_MESSAGE="${DISCORD_MESSAGE}> **Branch:** \`${CIRCLE_BRANCH}\`\n" - DISCORD_MESSAGE="${DISCORD_MESSAGE}> **Job:** \`${CIRCLE_JOB}\`\n" - DISCORD_MESSAGE="${DISCORD_MESSAGE}> **Build Link:** ${CIRCLE_BUILD_URL}" - - # Add failure reason if provided - if [ ! -z "<< parameters.message >>" ]; then - DISCORD_MESSAGE="${DISCORD_MESSAGE}\n\n**Failure message:** << parameters.message >>" - fi - - # Add mentions if provided - if [ ! -z "<< parameters.mentions >>" ]; then - DISCORD_MESSAGE="${DISCORD_MESSAGE}\n\n**Attention:** << parameters.mentions >>" - fi - - # Post to Discord webhook - curl -X POST -H "Content-Type: application/json" \ - -d "{\"content\": \"${DISCORD_MESSAGE}\"}" "${notify_ci}" - fi - when: on_fail - - - get-target-branch: - description: "Determine the PR target branch and export TARGET_BRANCH for subsequent steps" - steps: - - run: - name: Determine target branch for this pipeline - command: | - TARGET_BRANCH="" - if [ -n "${CIRCLE_PULL_REQUEST:-}" ]; then - TARGET_BRANCH=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${CIRCLE_PULL_REQUEST##*/}" | jq -r .base.ref) - fi - - # Fallbacks when not a PR or API did not return a branch - if [ -z "$TARGET_BRANCH" ] || [ "$TARGET_BRANCH" = "null" ]; then - TARGET_BRANCH="<< pipeline.git.branch >>" - fi - - echo "Resolved TARGET_BRANCH=$TARGET_BRANCH" - echo "export TARGET_BRANCH=$TARGET_BRANCH" >> "$BASH_ENV" - - run-contracts-check: - parameters: - command: - description: Just command that runs the check - type: string - steps: - - run: - name: <<parameters.command>> - command: | - git reset --hard - git clean -df - just <<parameters.command>> - git status --porcelain - [ -z "$(git status --porcelain)" ] || exit 1 - working_directory: packages/contracts-bedrock - when: always - environment: - FOUNDRY_PROFILE: ci - - checkout-from-workspace: - steps: - - attach_workspace: - at: "." - - utils/install-mise - - -jobs: - initialize: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - utils/checkout-with-mise - - install-contracts-dependencies - - persist_to_workspace: - root: "." - paths: - - "." 
- - cannon-go-lint-and-test: - machine: true - resource_class: ethereum-optimism/latitude-1 - parameters: - skip_slow_tests: - type: boolean - default: false - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 10m - notify: - description: Whether to notify on failure - type: boolean - default: false - steps: - - checkout-from-workspace - - check-changed: - patterns: cannon,packages/contracts-bedrock/src/cannon,op-preimage,go.mod - - run: - name: prep Cannon results dir - command: | - mkdir -p ./tmp/test-results - mkdir -p ./tmp/testlogs - - run: - name: build Cannon example binaries - command: make elf # only compile ELF binaries with Go, we do not have MIPS GCC for creating the debug-dumps. - working_directory: cannon/testdata - - run: - name: Cannon Go lint - command: | - make lint - working_directory: cannon - - run: - name: Cannon Go 64-bit tests - no_output_timeout: <<parameters.no_output_timeout>> - command: | - export SKIP_SLOW_TESTS="<<parameters.skip_slow_tests>>" - TIMEOUT="10m" - if [ "$SKIP_SLOW_TESTS" = "false" ]; then - TIMEOUT="45m" - fi - gotestsum --format=testname --junitfile=../tmp/test-results/cannon-64.xml --jsonfile=../tmp/testlogs/log-64.json \ - -- -timeout=$TIMEOUT -parallel=$(nproc) -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage-64.out ./... - working_directory: cannon - - codecov/upload: - disable_search: true - files: ./cannon/coverage-64.out - flags: cannon-go-tests-64 - - store_test_results: - path: ./tmp/test-results - - store_artifacts: - path: ./tmp/testlogs - when: always - - when: - condition: <<parameters.notify>> - steps: - - notify-failures-on-develop: - mentions: "@proofs-team" - - diff-asterisc-bytecode: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - run: - name: Check `RISCV.sol` bytecode - working_directory: packages/contracts-bedrock - command: | - # Clone asterisc @ the pinned version to fetch remote `RISCV.sol` - ASTERISC_REV="v$(yq '.tools.asterisc' ../../mise.toml)" - REMOTE_ASTERISC_PATH="./src/vendor/asterisc/RISCV_Remote.sol" - git clone https://github.com/ethereum-optimism/asterisc \ - -b $ASTERISC_REV && \ - cp ./asterisc/rvsol/src/RISCV.sol $REMOTE_ASTERISC_PATH - - # Replace import paths - sed -i -e 's/@optimism\///' $REMOTE_ASTERISC_PATH - # Replace legacy interface paths - sed -i -e 's/src\/cannon\/interfaces\//interfaces\/cannon\//g' $REMOTE_ASTERISC_PATH - sed -i -e 's/src\/dispute\/interfaces\//interfaces\/dispute\//g' $REMOTE_ASTERISC_PATH - # Replace contract name - sed -i -e 's/contract RISCV/contract RISCV_Remote/' $REMOTE_ASTERISC_PATH - - # Install deps - forge install - - # Diff bytecode, with both contracts compiled in the local environment. - REMOTE_ASTERISC_CODE="$(forge inspect RISCV_Remote bytecode | tr -d '\n')" - LOCAL_ASTERISC_CODE="$(forge inspect RISCV bytecode | tr -d '\n')" - if [ "$REMOTE_ASTERISC_CODE" != "$LOCAL_ASTERISC_CODE" ]; then - echo "Asterisc bytecode mismatch. Local version does not match remote. Diff:" - diff <(echo "$REMOTE_ASTERISC_CODE") <(echo "$LOCAL_ASTERISC_CODE") - else - echo "Asterisc version up to date." 
- fi - - notify-failures-on-develop: - mentions: "@clabby @proofs-team" - - contracts-bedrock-build: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - parameters: - build_args: - description: Forge build arguments - type: string - default: "" - profile: - description: Profile to use for building - type: string - default: ci - steps: - - checkout-from-workspace - - run: - name: Print forge version - command: forge --version - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Build contracts - command: forge build <<parameters.build_args>> - environment: - FOUNDRY_PROFILE: <<parameters.profile>> - working_directory: packages/contracts-bedrock - - run: - name: "Copy artifacts into deployer" - command: | - just copy-contract-artifacts - working_directory: op-deployer - - persist_to_workspace: - root: "." - paths: - - "packages/contracts-bedrock/cache" - - "packages/contracts-bedrock/artifacts" - - "packages/contracts-bedrock/forge-artifacts" - - "op-deployer/pkg/deployer/artifacts/forge-artifacts" - - notify-failures-on-develop - - check-kontrol-build: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." } - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Run Kontrol build - command: just kontrol-summary-full - working_directory: packages/contracts-bedrock - - run: - name: Build Kontrol summary files - command: forge build ./test/kontrol/proofs - working_directory: packages/contracts-bedrock - - notify-failures-on-develop - - docker-build: - environment: - DOCKER_BUILDKIT: 1 - parameters: - docker_tags: - description: Docker image tags, comma-separated - type: string - docker_name: - description: "Docker buildx bake target" - type: string - default: "" - registry: - description: Docker registry - type: string - default: "us-docker.pkg.dev" - repo: - description: Docker repo - type: string - default: "oplabs-tools-artifacts/images" - save_image_tag: - description: Save docker image with given tag - type: string - default: "" - platforms: - description: Platforms to build for, comma-separated - type: string - default: "linux/amd64" - publish: - description: Publish the docker image (multi-platform, all tags) - type: boolean - default: false - release: - description: Run the release script - type: boolean - default: false - resource_class: - description: Docker resource class - type: string - default: medium - machine: - image: <<pipeline.parameters.base_image>> - resource_class: "<<parameters.resource_class>>" - docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages - steps: - - checkout-from-workspace - - run: - command: mkdir -p /tmp/docker_images - - when: - condition: - or: - - "<<parameters.publish>>" - - "<<parameters.release>>" - steps: - - gcp-cli/install - - when: - condition: - or: - - "<<parameters.publish>>" - - "<<parameters.release>>" - steps: - - gcp-oidc-authenticate - - run: - name: Build - command: | - # Check to see if DOCKER_HUB_READ_ONLY_TOKEN is set (i.e. we are in repo) before attempting to use secrets. - # Building should work without this read only login, but may get rate limited. 
- if [[ -v DOCKER_HUB_READ_ONLY_TOKEN ]]; then - echo "$DOCKER_HUB_READ_ONLY_TOKEN" | docker login -u "$DOCKER_HUB_READ_ONLY_USER" --password-stdin - fi - - export REGISTRY="<<parameters.registry>>" - export REPOSITORY="<<parameters.repo>>" - export IMAGE_TAGS="$(echo -ne "<<parameters.docker_tags>>" | sed "s/[^a-zA-Z0-9\n,]/-/g")" - export GIT_COMMIT="$(git rev-parse HEAD)" - export GIT_DATE="$(git show -s --format='%ct')" - export PLATFORMS="<<parameters.platforms>>" - - echo "Checking git tags pointing at $GIT_COMMIT:" - tags_at_commit=$(git tag --points-at $GIT_COMMIT) - echo "Tags at commit:\n$tags_at_commit" - - filtered_tags=$(echo "$tags_at_commit" | grep "^<<parameters.docker_name>>/" || true) - echo "Filtered tags: $filtered_tags" - - if [ -z "$filtered_tags" ]; then - export GIT_VERSION="untagged" - else - sorted_tags=$(echo "$filtered_tags" | sed "s/<<parameters.docker_name>>\///" | sort -V) - echo "Sorted tags: $sorted_tags" - - # prefer full release tag over "-rc" release candidate tag if both exist - full_release_tag=$(echo "$sorted_tags" | grep -v -- "-rc" || true) - if [ -z "$full_release_tag" ]; then - export GIT_VERSION=$(echo "$sorted_tags" | tail -n 1) - else - export GIT_VERSION=$(echo "$full_release_tag" | tail -n 1) - fi - fi - - echo "Setting GIT_VERSION=$GIT_VERSION" - - # Create, start (bootstrap) and use a *named* docker builder - # This allows us to cross-build multi-platform, - # and naming allows us to use the DLC (docker-layer-cache) - docker buildx create --driver=docker-container --name=buildx-build --bootstrap --use - - DOCKER_OUTPUT_DESTINATION="" - if [ "<<parameters.publish>>" == "true" ]; then - gcloud auth configure-docker <<parameters.registry>> - echo "Building for platforms $PLATFORMS and then publishing to registry" - DOCKER_OUTPUT_DESTINATION="--push" - if [ "<<parameters.save_image_tag>>" != "" ]; then - echo "ERROR: cannot save image to docker when publishing to registry" - exit 1 - fi - else - if [ "<<parameters.save_image_tag>>" == "" ]; then - echo "Running $PLATFORMS build without destination (cache warm-up)" - DOCKER_OUTPUT_DESTINATION="" - elif [[ $PLATFORMS == *,* ]]; then - echo "ERROR: cannot perform multi-arch (platforms: $PLATFORMS) build while also loading the result into regular docker" - exit 1 - else - echo "Running single-platform $PLATFORMS build and loading into docker" - DOCKER_OUTPUT_DESTINATION="--load" - fi - fi - - # Let them cook! 
- docker buildx bake \ - --progress plain \ - --builder=buildx-build \ - -f docker-bake.hcl \ - $DOCKER_OUTPUT_DESTINATION \ - <<parameters.docker_name>> - - no_output_timeout: 45m - - when: - condition: "<<parameters.publish>>" - steps: - - notify-failures-on-develop - - when: - condition: "<<parameters.save_image_tag>>" - steps: - - run: - name: Save - command: | - IMAGE_NAME="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>:<<parameters.save_image_tag>>" - docker save -o /tmp/docker_images/<<parameters.docker_name>>.tar $IMAGE_NAME - - persist_to_workspace: - root: /tmp/docker_images - paths: # only write the one file, to avoid concurrent workspace-file additions - - "<<parameters.docker_name>>.tar" - - when: - condition: "<<parameters.release>>" - steps: - - run: - name: Tag - command: | - ./ops/scripts/ci-docker-tag-op-stack-release.sh <<parameters.registry>>/<<parameters.repo>> $CIRCLE_TAG $CIRCLE_SHA1 - - when: - condition: - or: - - and: - - "<<parameters.publish>>" - - "<<parameters.release>>" - - and: - - "<<parameters.publish>>" - - equal: [develop, << pipeline.git.branch >>] - steps: - - gcp-oidc-authenticate: - service_account_email: GCP_SERVICE_ATTESTOR_ACCOUNT_EMAIL - - run: - name: Sign - command: | - VER=$(yq '.tools.binary_signer' mise.toml) - wget -O - "https://github.com/ethereum-optimism/binary_signer/archive/refs/tags/v${VER}.tar.gz" | tar xz - cd "binary_signer-${VER}/signer" - - IMAGE_PATH="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>:<<pipeline.git.revision>>" - echo $IMAGE_PATH - pip3 install -r requirements.txt - - python3 ./sign_image.py --command="sign"\ - --attestor-project-name="$ATTESTOR_PROJECT_NAME"\ - --attestor-name="$ATTESTOR_NAME"\ - --image-path="$IMAGE_PATH"\ - --signer-logging-level="INFO"\ - --attestor-key-id="//cloudkms.googleapis.com/v1/projects/$ATTESTOR_PROJECT_NAME/locations/global/keyRings/$ATTESTOR_NAME-key-ring/cryptoKeys/$ATTESTOR_NAME-key/cryptoKeyVersions/1" - - # Verify newly published images (built on AMD machine) will run on ARM - check-cross-platform: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: arm.medium - parameters: - registry: - description: Docker registry - type: string - default: "us-docker.pkg.dev" - repo: - description: Docker repo - type: string - default: "oplabs-tools-artifacts/images" - op_component: - description: "Name of op-stack component (e.g. op-node)" - type: string - default: "" - docker_tag: - description: "Tag of docker image" - type: string - default: "<<pipeline.git.revision>>" - steps: - - setup_remote_docker - - run: - name: "Verify Image Platform" - command: | - image_name="<<parameters.registry>>/<<parameters.repo>>/<<parameters.op_component>>:<<parameters.docker_tag>>" - echo "Retrieving Docker image manifest: $image_name" - MANIFEST=$(docker manifest inspect $image_name) - - echo "Verifying 'linux/arm64' is supported..." 
- SUPPORTED_PLATFORM=$(echo "$MANIFEST" | jq -r '.manifests[] | select(.platform.architecture == "arm64" and .platform.os == "linux")') - echo $SUPPORT_PLATFORM - if [ -z "$SUPPORTED_PLATFORM" ]; then - echo "Platform 'linux/arm64' not supported by this image" - exit 1 - fi - - run: - name: "Pull and run docker image" - command: | - image_name="<<parameters.registry>>/<<parameters.repo>>/<<parameters.op_component>>:<<parameters.docker_tag>>" - docker pull $image_name || exit 1 - docker run $image_name <<parameters.op_component>> --version || exit 1 - - - contracts-bedrock-frozen-code: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - check-changed: - patterns: contracts-bedrock - - get-target-branch - - run: - name: Check if target branch is develop - command: | - # If the target branch is not develop, do not run this check - if [ "${TARGET_BRANCH}" != "develop" ]; then - echo "Target branch is not develop, skipping frozen files check" - circleci-agent step halt - fi - - run: - name: Check if PR has exempt label - command: | - # Get PR number from CIRCLE_PULL_REQUEST - PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | rev | cut -d/ -f1 | rev) - - # Use GitHub API to get labels - LABELS=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${PR_NUMBER}" | jq -r .labels) - - # If the PR has the "M-exempt-frozen-files" label, do not run this check - if echo $LABELS | jq -e 'any(.[]; .name == "M-exempt-frozen-files")' > /dev/null; then - echo "Skipping frozen files check, PR has exempt label" - circleci-agent step halt - fi - - run: - name: Check frozen files - command: just check-frozen-code - working_directory: packages/contracts-bedrock - - contracts-bedrock-tests: - circleci_ip_ranges: true - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - parameters: - test_list: - description: List of test files to run - type: string - test_command: - description: Test command to execute (test or coverage) - type: string - default: test - test_flags: - description: Additional flags to pass to the test command - type: string - default: "" - test_timeout: - description: Timeout for running tests - type: string - default: 15m - test_profile: - description: Profile to use for testing - type: string - default: ci - check_changed_patterns: - description: List of changed files to run tests on - type: string - default: contracts-bedrock - steps: - - checkout-from-workspace - - run: - name: Check if test list is empty - command: | - TEST_FILES=$(<<parameters.test_list>>) - if [ -z "$TEST_FILES" ]; then - echo "No test files to run. Exiting early." 
- circleci-agent step halt - fi - working_directory: packages/contracts-bedrock - - check-changed: - patterns: <<parameters.check_changed_patterns>> - - run: - name: Print dependencies - command: just dep-status - working_directory: packages/contracts-bedrock - - run: - name: Print forge version - command: forge --version - working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Build go-ffi - command: just build-go-ffi - working_directory: packages/contracts-bedrock - - run: - name: Run tests - command: | - TEST_FILES=$(<<parameters.test_list>>) - TEST_FILES=$(echo "$TEST_FILES" | circleci tests split --split-by=timings) - TEST_FILES=$(echo "$TEST_FILES" | sed 's|^test/||') - MATCH_PATH="./test/{$(echo "$TEST_FILES" | paste -sd "," -)}" - forge <<parameters.test_command>> <<parameters.test_flags>> --match-path "$MATCH_PATH" - environment: - FOUNDRY_PROFILE: <<parameters.test_profile>> - working_directory: packages/contracts-bedrock - no_output_timeout: <<parameters.test_timeout>> - - run: - name: Print failed test traces - command: just test-rerun - environment: - FOUNDRY_PROFILE: ci - working_directory: packages/contracts-bedrock - when: on_fail - - run: - name: Lint forge test names - command: just lint-forge-tests-check-no-build - working_directory: packages/contracts-bedrock - - save_cache: - name: Save Go build cache - key: golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} - paths: - - "/root/.cache/go-build" - - notify-failures-on-develop - - contracts-bedrock-coverage: - circleci_ip_ranges: true - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: 2xlarge - parameters: - test_flags: - description: Additional flags to pass to the test command - type: string - default: "" - test_timeout: - description: Timeout for running tests - type: string - default: 15m - test_profile: - description: Profile to use for testing - type: string - default: ci - steps: - - checkout-from-workspace - - check-changed: - patterns: contracts-bedrock - - run: - name: Print dependencies - command: just dep-status - working_directory: packages/contracts-bedrock - - run: - name: Print forge version - command: forge --version - working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Install lcov - command: | - sudo apt-get update - sudo apt-get install -y lcov - - run: - name: Write pinned block number for cache key - command: | - just print-pinned-block-number > ./pinnedBlockNumber.txt - cat pinnedBlockNumber.txt - working_directory: packages/contracts-bedrock - - restore_cache: - name: Restore forked state - key: forked-state-contracts-bedrock-tests-upgrade-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - - run: - name: Run coverage tests - command: just coverage-lcov-all <<parameters.test_flags>> - environment: - FOUNDRY_PROFILE: <<parameters.test_profile>> - ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io - working_directory: packages/contracts-bedrock - no_output_timeout: <<parameters.test_timeout>> - - run: - name: Print failed test traces - command: | - just test-rerun | tee failed-test-traces.log - environment: - FOUNDRY_PROFILE: <<parameters.test_profile>> - ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io - working_directory: packages/contracts-bedrock - when: 
on_fail - - codecov/upload: - disable_search: true - files: ./packages/contracts-bedrock/lcov-all.info - flags: contracts-bedrock-tests - - store_artifacts: - path: packages/contracts-bedrock/failed-test-traces.log - when: on_fail - - notify-failures-on-develop - - contracts-bedrock-tests-upgrade: - circleci_ip_ranges: true - parameters: - fork_op_chain: - description: Fork OP Chain - type: string - default: "op" - fork_base_chain: - description: Fork Base Chain - type: string - default: "mainnet" - fork_base_rpc: - description: Fork Base RPC - type: string - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - checkout-from-workspace - - check-changed: - patterns: contracts-bedrock - - run: - name: Print dependencies - command: just dep-status - working_directory: packages/contracts-bedrock - - run: - name: Print forge version - command: forge --version - working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Write pinned block number for cache key - command: | - just print-pinned-block-number > ./pinnedBlockNumber.txt - cat pinnedBlockNumber.txt - environment: - FORK_BASE_CHAIN: <<parameters.fork_base_chain>> - working_directory: packages/contracts-bedrock - - restore_cache: - name: Restore forked state - key: forked-state-contracts-bedrock-tests-upgrade-<<parameters.fork_op_chain>>-<<parameters.fork_base_chain>>-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - - run: - name: Run tests - command: just test-upgrade - environment: - FOUNDRY_FUZZ_SEED: 42424242 - FOUNDRY_FUZZ_RUNS: 1 - FOUNDRY_PROFILE: ci - ETH_RPC_URL: <<parameters.fork_base_rpc>> - FORK_OP_CHAIN: <<parameters.fork_op_chain>> - FORK_BASE_CHAIN: <<parameters.fork_base_chain>> - working_directory: packages/contracts-bedrock - no_output_timeout: 15m - - run: - name: Print failed test traces - command: | - just test-upgrade-rerun | tee failed-test-traces.log - environment: - FOUNDRY_FUZZ_SEED: 42424242 - FOUNDRY_FUZZ_RUNS: 1 - FOUNDRY_PROFILE: ci - ETH_RPC_URL: <<parameters.fork_base_rpc>> - FORK_OP_CHAIN: <<parameters.fork_op_chain>> - FORK_BASE_CHAIN: <<parameters.fork_base_chain>> - working_directory: packages/contracts-bedrock - when: on_fail - - save_cache: - name: Save Go build cache - key: golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} - paths: - - "/root/.cache/go-build" - - save_cache: - name: Save forked state - key: forked-state-contracts-bedrock-tests-upgrade-<<parameters.fork_op_chain>>-<<parameters.fork_base_chain>>-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - when: always - paths: - - "/root/.foundry/cache" - - store_artifacts: - path: packages/contracts-bedrock/failed-test-traces.log - when: on_fail - - notify-failures-on-develop - - contracts-bedrock-checks: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - check-changed: - patterns: contracts-bedrock - - get-target-branch - - run: - name: print forge version - command: forge --version - - run-contracts-check: - command: check-kontrol-summaries-unchanged - - run-contracts-check: - command: semgrep-test-validity-check - - run-contracts-check: - command: semgrep - - run-contracts-check: - command: semver-lock-no-build - - run-contracts-check: - command: semver-diff-check-no-build - - run-contracts-check: - command: validate-deploy-configs - - run-contracts-check: - 
command: lint - - run-contracts-check: - command: snapshots-check-no-build - - run-contracts-check: - command: interfaces-check-no-build - - run-contracts-check: - command: reinitializer-check-no-build - - run-contracts-check: - command: size-check - - run-contracts-check: - command: unused-imports-check-no-build - - run-contracts-check: - command: validate-spacers-no-build - - run-contracts-check: - command: opcm-upgrade-checks-no-build - - todo-issues: - parameters: - check_closed: - type: boolean - default: true - machine: - image: <<pipeline.parameters.base_image>> - steps: - - utils/checkout-with-mise - - run: - name: Install ripgrep - command: sudo apt-get install -y ripgrep - - run: - name: Check TODO issues - command: ./ops/scripts/todo-checker.sh --verbose --strict <<#parameters.check_closed>> --check-closed <</parameters.check_closed>> - - notify-failures-on-develop - - fuzz-golang: - parameters: - package_name: - description: Go package name - type: string - on_changes: - description: changed pattern to fire fuzzer on - type: string - uses_artifacts: - description: should load in foundry artifacts - type: boolean - default: false - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - checkout-from-workspace - - check-changed: - patterns: "<<parameters.package_name>>" - - when: - condition: <<parameters.uses_artifacts>> - steps: - - attach_workspace: - at: "." - - run: - name: Fuzz - no_output_timeout: 15m - command: | - make fuzz - working_directory: "<<parameters.package_name>>" - - run: - name: Copy fuzz artifacts - command: | - mkdir -p fuzzdata - find ./<<parameters.package_name>> -type d -name "fuzz" -exec sh -c 'cp -r "{}"/* fuzzdata/ 2>/dev/null || true' \; - when: always - - store_artifacts: - path: ./fuzzdata - when: always - - go-lint: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - checkout-from-workspace - - restore_cache: - key: golangci-v1-{{ checksum ".golangci.yaml" }} - - run: - name: run Go linter - command: | - make lint-go - - save_cache: - key: golangci-v1-{{ checksum ".golangci.yaml" }} - paths: - - "/home/circleci/.cache/golangci-lint" - - go-tests: - parameters: - notify: - description: Whether to notify on failure - type: boolean - default: false - mentions: - description: Slack user or group to mention when notifying of failures - type: string - default: "" - resource_class: - description: Machine resource class - type: string - default: ethereum-optimism/latitude-1-go-e2e - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 60m - test_timeout: - description: Timeout for running tests - type: string - default: 10m - environment_overrides: - description: Environment overrides - type: string - default: "" - rule: - description: Rule to run the tests - type: string - default: "go-tests-short-ci" - machine: true - resource_class: <<parameters.resource_class>> - steps: - - checkout-from-workspace - - run: - name: Run all Go tests via Makefile - no_output_timeout: <<parameters.no_output_timeout>> - command: | - <<parameters.environment_overrides>> - export TEST_TIMEOUT=<<parameters.test_timeout>> - make <<parameters.rule>> - - codecov/upload: - disable_search: true - files: ./coverage.out - - store_test_results: - path: ./tmp/test-results - - run: - name: Compress test logs - command: tar -czf testlogs.tar.gz -C ./tmp testlogs - when: always - - store_artifacts: - path: testlogs.tar.gz - when: always - - when: -
condition: "<<parameters.notify>>" - steps: - - notify-failures-on-develop: - mentions: "<<parameters.mentions>>" - - go-tests-with-fault-proof-deps: - parameters: - notify: - description: Whether to notify on failure - type: boolean - default: false - mentions: - description: Slack user or group to mention when notifying of failures - type: string - default: "" - resource_class: - description: Machine resource class - type: string - default: ethereum-optimism/latitude-1-go-e2e - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 60m - test_timeout: - description: Timeout for running tests - type: string - default: 10m - environment_overrides: - description: Environment overrides - type: string - default: "" - machine: true - resource_class: <<parameters.resource_class>> - steps: - - checkout-from-workspace - - run: - name: build op-program-client - command: make op-program-client - working_directory: op-program - - run: - name: build op-program-host - command: make op-program-host - working_directory: op-program - - run: - name: build cannon - command: make cannon - - run: - name: run tests - no_output_timeout: <<parameters.no_output_timeout>> - command: | - <<parameters.environment_overrides>> - export TEST_TIMEOUT=<<parameters.test_timeout>> - make go-tests-fraud-proofs-ci - - codecov/upload: - disable_search: true - files: ./coverage.out - - store_test_results: - path: ./tmp/test-results - - run: - name: Compress test logs - command: tar -czf testlogs.tar.gz -C ./tmp testlogs - when: always - - store_artifacts: - path: testlogs.tar.gz - when: always - - when: - condition: "<<parameters.notify>>" - steps: - - notify-failures-on-develop: - mentions: "<<parameters.mentions>>" - - op-acceptance-tests: - parameters: - devnet: - description: | - The name of the pre-defined kurtosis devnet to run the acceptance tests against - (e.g. 'simple', 'isthmus', 'interop'). Empty string uses - in-process testing (sysgo orchestrator). Named devnets use - external testing (sysext orchestrator) and must have a - recipe defined in kurtosis-devnet/Justfile. - type: string - default: "" - gate: - description: The gate to run the acceptance tests against. This gate should be defined in op-acceptance-tests/acceptance-tests.yaml. - type: string - default: "" - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 30m - machine: - image: ubuntu-2404:current - docker_layer_caching: true # Since we are building docker images for components, we'll cache the layers for faster builds - resource_class: xlarge - steps: - - checkout-from-workspace - - run: - name: Setup Kurtosis (if needed) - command: | - if [[ "<<parameters.devnet>>" != "" ]]; then - echo "Setting up Kurtosis for external devnet testing..." - - # Print Kurtosis version - echo "Using Kurtosis from: $(which kurtosis || echo 'not found')" - kurtosis version - - # Start Kurtosis engine - echo "Starting Kurtosis engine..." - kurtosis engine start || true - - # Clean old instances - echo "Cleaning old instances..." 
- kurtosis clean -a || true - - # Check engine status - kurtosis engine status || true - - echo "Kurtosis setup complete" - else - echo "Using in-process testing (sysgo orchestrator) - no Kurtosis setup needed" - fi - # Notify us of a setup failure - - when: - condition: on_fail - steps: - - discord-notification-failures-on-develop: - mentions: "<@&1346448413172170807>" # Protocol DevX Pod - message: "Devnet <<parameters.devnet>>-devnet failed to start" - - run: - name: Stop the job if the devnet failed to start - command: circleci-agent step halt - when: on_fail - # Restore cached Go modules - - restore_cache: - keys: - - go-mod-v1-{{ checksum "go.sum" }} - - go-mod-v1- - # Download Go dependencies - - run: - name: Download Go dependencies - working_directory: op-acceptance-tests - command: go mod download - # Prepare the test environment - - run: - name: Prepare test environment (compile tests and cache build results) - working_directory: op-acceptance-tests - command: go test -v -c -o /dev/null $(go list -f '{{if .TestGoFiles}}{{.ImportPath}}{{end}}' ./tests/...) - # Run the acceptance tests (if the devnet is running) - - run: - name: Run acceptance tests (gate=<<parameters.gate>>) - working_directory: op-acceptance-tests - no_output_timeout: 1h - environment: - GOFLAGS: "-mod=mod" - GO111MODULE: "on" - GOGC: "0" - command: | - # Run the tests - LOG_LEVEL=debug just acceptance-test "<<parameters.devnet>>" "<<parameters.gate>>" - - run: - name: Print results (summary) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/summary.log" || true - - run: - name: Print results (failures) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR"/failed/*.log || true - when: on_fail - - run: - name: Print results (all) - working_directory: op-acceptance-tests - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - cat "$LOG_DIR/all.log" || true - - run: - name: Generate JUnit XML test report for CircleCI - working_directory: op-acceptance-tests - when: always - command: | - LOG_DIR=$(ls -td -- logs/* | head -1) - gotestsum --junitfile results/results.xml --raw-command cat $LOG_DIR/raw_go_events.log || true - # Save the module cache for future runs - - save_cache: - key: go-mod-v1-{{ checksum "go.sum" }} - paths: - - "/go/pkg/mod" - # Store test results and artifacts - - when: - condition: always - steps: - - store_test_results: - path: ./op-acceptance-tests/results - - when: - condition: always - steps: - - store_artifacts: - path: ./op-acceptance-tests/logs - # Dump kurtosis logs if external devnet was used - - run: - name: Dump kurtosis logs (if external devnet was used) - when: on_fail - command: | - if [[ "<<parameters.devnet>>" != "" ]]; then - # Dump logs & specs - kurtosis dump ./.kurtosis-dump - - # Remove spec.json files - rm -rf ./.kurtosis-dump/enclaves/**/*.json - - # Remove all unnecessary logs - rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-api--* - rm -rf ./.kurtosis-dump/enclaves/*/kurtosis-logs-collector--* - rm -rf ./.kurtosis-dump/enclaves/*/task-* - else - echo "In-process testing was used - no kurtosis logs to dump" - fi - - when: - condition: always - steps: - - store_artifacts: - path: ./.kurtosis-dump/enclaves - destination: op-acceptance-tests/kurtosis-logs - - when: - condition: on_fail - steps: - - discord-notification-failures-on-develop: - mentions: "Platforms (<@225161927351992320>) & Protocol (<@590878816004603924>)" # stefano, changwan - message: "Acceptance
tests failed for gate <<parameters.gate>> on devnet <<parameters.devnet>>" - - sanitize-op-program: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - checkout-from-workspace - - run: - name: Install tools - command: | - sudo apt-get update - sudo apt-get install -y binutils-mips-linux-gnu - - run: - name: Build cannon - command: make cannon - - run: - name: Build op-program - command: make op-program - - run: - name: Sanitize op-program client - command: make sanitize-program GUEST_PROGRAM=../op-program/bin/op-program-client64.elf - working_directory: cannon - - cannon-prestate-quick: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - restore_cache: - name: Restore cannon prestate cache - key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} - - run: - name: Build prestates - command: make cannon-prestates - - save_cache: - key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} - name: Save Cannon prestate to cache - paths: - - "op-program/bin/prestate*.bin.gz" - - "op-program/bin/meta*.json" - - "op-program/bin/prestate-proof*.json" - - persist_to_workspace: - root: . - paths: - - "op-program/bin/prestate*" - - "op-program/bin/meta*" - - "op-program/bin/op-program" - - "op-program/bin/op-program-client" - - "cannon/bin" - - cannon-prestate: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - checkout-from-workspace - - setup_remote_docker - - run: - name: Build prestates - command: make reproducible-prestate - - persist_to_workspace: - root: . - paths: - - "op-program/bin/prestate*" - - "op-program/bin/meta*" - - publish-cannon-prestates: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - attach_workspace: - at: "." 
- - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - - run: - name: Upload cannon prestates - command: | - # Use the actual hash for tags (hash can be found by reading releases.json) - PRESTATE_MT64_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-mt64.json) - PRESTATE_MT64NEXT_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-mt64Next.json) - PRESTATE_INTEROP_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-interop.json) - PRESTATE_INTEROP_NEXT_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-interopNext.json) - - BRANCH_NAME=$(echo "<< pipeline.git.branch >>" | tr '/' '-') - echo "Publishing ${PRESTATE_MT64_HASH}, ${PRESTATE_MT64NEXT_HASH}, ${PRESTATE_INTEROP_HASH}, ${PRESTATE_INTEROP_NEXT_HASH} as ${BRANCH_NAME}" - if [[ "" != "<< pipeline.git.branch >>" ]] - then - # Upload the git commit info for each prestate since this won't be recorded in releases.json - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64NEXT_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64Next.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interop.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_NEXT_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interopNext.bin.gz.txt" - - - # Use the branch name for branches to provide a consistent URL - PRESTATE_MT64_HASH="${BRANCH_NAME}-mt64" - PRESTATE_MT64NEXT_HASH="${BRANCH_NAME}-mt64Next" - PRESTATE_INTEROP_HASH="${BRANCH_NAME}-interop" - PRESTATE_INTEROP_NEXT_HASH="${BRANCH_NAME}-interopNext" - fi - gsutil cp ./op-program/bin/prestate-mt64.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT64_HASH}.bin.gz" - - gsutil cp ./op-program/bin/prestate-mt64Next.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT64NEXT_HASH}.bin.gz" - - gsutil cp ./op-program/bin/prestate-interop.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_INTEROP_HASH}.bin.gz" - - gsutil cp ./op-program/bin/prestate-interopNext.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_INTEROP_NEXT_HASH}.bin.gz" - - notify-failures-on-develop: - mentions: "@proofs-team" - - preimage-reproducibility: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - utils/checkout-with-mise - - setup_remote_docker - - run: - name: Verify reproducibility - command: make -C op-program verify-reproducibility - - store_artifacts: - path: ./op-program/temp/logs - when: always - - notify-failures-on-develop: - mentions: "@proofs-team" - - cannon-stf-verify: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - utils/checkout-with-mise - - setup_remote_docker - - run: - name: Build cannon - command: make cannon - - run: - name: Verify the Cannon STF - command: make -C ./cannon cannon-stf-verify - - notify-failures-on-develop: - mentions: "@proofs-team" - - semgrep-scan: - parameters: - diff_branch: - type: string - default: develop - scan_command: - type: string - default: semgrep ci --timeout=100 - environment: - 
TEMPORARY_BASELINE_REF: << parameters.diff_branch >> - SEMGREP_REPO_URL: << pipeline.project.git_url >> - SEMGREP_BRANCH: << pipeline.git.branch >> - SEMGREP_COMMIT: << pipeline.git.revision >> - docker: - - image: returntocorp/semgrep - resource_class: xlarge - steps: - - checkout # no need to use mise here since the docker image contains the only dependency - - unless: - condition: - equal: ["develop", << pipeline.git.branch >>] - steps: - - run: - # Scan changed files in PRs, block on new issues only (existing issues ignored) - # Do a full scan when scanning develop, otherwise do an incremental scan. - name: "Conditionally set BASELINE env var" - command: | - echo 'export SEMGREP_BASELINE_REF=${TEMPORARY_BASELINE_REF}' >> $BASH_ENV - - run: - name: "Set environment variables" # for PR comments and in-app hyperlinks to findings - command: | - echo 'export SEMGREP_PR_ID=${CIRCLE_PULL_REQUEST##*/}' >> $BASH_ENV - echo 'export SEMGREP_JOB_URL=$CIRCLE_BUILD_URL' >> $BASH_ENV - echo 'export SEMGREP_REPO_NAME=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME' >> $BASH_ENV - - run: - name: "Semgrep scan" - # --timeout (in seconds) limits the time per rule and file. - # SEMGREP_TIMEOUT is the same, but docs have conflicting defaults (5s in CLI flag, 1800 in some places) - # https://semgrep.dev/docs/troubleshooting/semgrep-app#if-the-job-is-aborted-due-to-taking-too-long - command: << parameters.scan_command >> - # If semgrep hangs, stop the scan after 20m, to prevent a useless 5h job - no_output_timeout: 20m - - notify-failures-on-develop - - bedrock-go-tests: # just a helper, that depends on all the actual test jobs - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: medium - steps: - - run: echo Done - - analyze-op-program-client: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - checkout-from-workspace - - setup_remote_docker - - run: - name: Run Analyzer - command: | - make run-vm-compat - working_directory: op-program - - op-program-compat: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - run: - name: Verify Compatibility - command: | - make verify-compat - working_directory: op-program - - check-generated-mocks-op-node: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - check-changed: - patterns: op-node - - run: - name: check-generated-mocks - command: make generate-mocks-op-node && git diff --exit-code - - check-generated-mocks-op-service: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - checkout-from-workspace - - check-changed: - patterns: op-service - - run: - name: check-generated-mocks - command: make generate-mocks-op-service && git diff --exit-code - - kontrol-tests: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - utils/checkout-with-mise - - install-contracts-dependencies - - check-changed: - no_go_deps: "true" - patterns: contracts-bedrock/test/kontrol,contracts-bedrock/src/L1/OptimismPortal\.sol,contracts-bedrock/src/L1/OptimismPortal2\.sol,contracts-bedrock/src/L1/L1CrossDomainMessenger\.sol,contracts-bedrock/src/L1/L1ERC721Bridge\.sol,contracts-bedrock/src/L1/L1StandardBridge\.sol,contracts-bedrock/src/L1/ResourceMetering\.sol,contracts-bedrock/src/universal/StandardBridge\.sol,contracts-bedrock/src/universal/ERC721Bridge\.sol,contracts-bedrock/src/universal/CrossDomainMessenger\.sol - - 
setup_remote_docker: - docker_layer_caching: true - - run: - name: Run Kontrol Tests - command: | - curl -X POST \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer $RV_COMPUTE_TOKEN" \ - https://api.github.com/repos/runtimeverification/optimism-ci/actions/workflows/optimism-ci.yaml/dispatches \ - -d '{ - "ref": "master", - "inputs": { - "branch_name": "<<pipeline.git.branch>>", - "extra_args": "script", - "statuses_sha": "<< pipeline.git.revision >>", - "org": "ethereum-optimism", - "repository": "optimism" - } - }' - working_directory: ./packages/contracts-bedrock - - notify-failures-on-develop - - publish-contract-artifacts: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - project_id: GCP_TOOLS_ARTIFACTS_PROJECT_ID - service_account_email: GCP_CONTRACTS_PUBLISHER_SERVICE_ACCOUNT_EMAIL - - utils/checkout-with-mise - - install-contracts-dependencies - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Build contracts - environment: - FOUNDRY_PROFILE: ci - command: just forge-build - working_directory: packages/contracts-bedrock - - run: - name: Publish artifacts - command: bash scripts/ops/publish-artifacts.sh - working_directory: packages/contracts-bedrock - - go-release: - parameters: - module: - description: Go Module Name - type: string - filename: - description: Goreleaser config file - default: .goreleaser.yaml - type: string - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - setup_remote_docker: - docker_layer_caching: true - - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - - utils/checkout-with-mise - - attach_workspace: { at: "." } - - run: - name: Configure Docker - command: | - gcloud auth configure-docker us-docker.pkg.dev - - run: - name: Run goreleaser - command: | - goreleaser release --clean -f ./<<parameters.module>>/<<parameters.filename>> - - - diff-fetcher-forge-artifacts: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: medium - steps: - - checkout-from-workspace - - run: - name: Build contracts - command: | - just build-contracts - working_directory: op-fetcher - - run: - name: Compare forge artifacts - command: | - if ! diff -qr "packages/contracts-bedrock/forge-artifacts/FetchChainInfo.s.sol" \ - "op-fetcher/pkg/fetcher/fetch/forge-artifacts/FetchChainInfo.s.sol"; then - echo "ERROR: The checked-in forge artifacts for FetchChainInfo.s.sol do not match the ci build." - echo "Please run 'just build-contracts' in the op-fetcher directory and commit the changes." - exit 1 - fi - - echo "✅ Checked-in forge artifacts match the ci build" - - - stale-check: - machine: - image: ubuntu-2204:2024.08.1 - steps: - - utils/github-stale: - stale-issue-message: 'This issue has been automatically marked as stale and will be closed in 5 days if there are no updates' - stale-pr-message: 'This PR has been automatically marked as stale and will be closed in 5 days if there are no updates' - close-issue-message: 'This issue was closed as stale. Please reopen if this is a mistake' - close-pr-message: 'This PR was closed as stale.
Please reopen if this is a mistake' - days-before-issue-stale: 999 - days-before-pr-stale: 14 - days-before-issue-close: 5 - days-before-pr-close: 5 - - close-issue: - machine: - image: ubuntu-2204:2024.08.1 - parameters: - label_name: - type: string - message: - type: string - steps: - - github-cli/install - - utils/github-event-handler-setup: - github_event_base64: << pipeline.parameters.github-event-base64 >> - env_prefix: "github_" - - run: - name: Close issue if label is added - command: | - if [ ! -z "$github_pull_request_number" ] && [ "$github_label_name" = "$LABEL_NAME" ]; then - echo "Closing issue $github_pull_request_number as label $LABEL_NAME is added on repository ${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME} " - export GH_TOKEN=$GITHUB_TOKEN_GOVERNANCE - gh issue close --repo "${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}" "$github_pull_request_number" --comment "$MESSAGE" - fi - environment: - MESSAGE: << parameters.message >> - LABEL_NAME: << parameters.label_name >> - - devnet-metrics-collect-authorship: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - utils/checkout-with-mise - - run: - name: Collect devnet metrics for op-acceptance-tests - command: | - ./devnet-sdk/scripts/metrics-collect-authorship.sh op-acceptance-tests/tests > .metrics--authorship--op-acceptance-tests - echo "Wrote file .metrics--authorship--op-acceptance-tests" - - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - - run: - name: Store artifact in Bucket - command: | - CURRENT_DATE=$(date '+%Y-%m-%d') - FOLDER_NAME="dt=$CURRENT_DATE" - - # Upload to the date-partitioned folder structure - gsutil cp .metrics--authorship--op-acceptance-tests gs://oplabs-tools-data-public-metrics/metrics-authorship/$FOLDER_NAME/metrics-$CIRCLE_SHA1.csv - - - generate-flaky-report: - machine: true - resource_class: medium - steps: - - utils/checkout-with-mise - - run: - name: Generate flaky acceptance tests report - command: | - # Create reports directory - mkdir -p ./op-acceptance-tests/reports - - # Make the script executable - chmod +x ./op-acceptance-tests/scripts/generate-flaky-tests-report.sh - - # Run the script - ./op-acceptance-tests/scripts/generate-flaky-tests-report.sh \ - --branch "${CIRCLE_BRANCH:-develop}" \ - --org "${CIRCLE_PROJECT_USERNAME}" \ - --repo "${CIRCLE_PROJECT_REPONAME}" \ - --token "${CIRCLE_API_TOKEN}" \ - --output-dir "./op-acceptance-tests/reports" - - # Store the flaky test reports - - store_artifacts: - path: ./op-acceptance-tests/reports - destination: flaky-test-reports - - -workflows: - main: - when: - or: - - equal: ["webhook",<< pipeline.trigger_source >>] - - and: - - equal: [true, <<pipeline.parameters.main_dispatch>>] - - equal: ["api",<< pipeline.trigger_source >>] - - equal: [<< pipeline.parameters.github-event-type >>, "__not_set__"] #this is to prevent triggering this workflow as the default value is always set for main_dispatch - jobs: - - initialize: - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: - name: contracts-bedrock-build - # Build with just core + script contracts. 
- build_args: --deny-warnings --skip test - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - check-kontrol-build: - requires: - - contracts-bedrock-build - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-tests: - # Test everything except PreimageOracle.t.sol since it's slow. - name: contracts-bedrock-tests - test_list: find test -name "*.t.sol" -not -name "PreimageOracle.t.sol" - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - check_changed_patterns: contracts-bedrock,op-node - - contracts-bedrock-tests: - # PreimageOracle test is slow, run it separately to unblock CI. - name: contracts-bedrock-tests-preimage-oracle - test_list: find test -name "PreimageOracle.t.sol" - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - contracts-bedrock-tests: - # Heavily fuzz any fuzz tests within added or modified test files. - name: contracts-bedrock-tests-heavy-fuzz-modified - test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' - test_timeout: 1h - test_profile: ciheavy - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - contracts-bedrock-coverage: - # Generate coverage reports. - name: contracts-bedrock-coverage - test_timeout: 1h - test_profile: cicoverage - # need this requires to ensure that all FFI JSONs exist - requires: - - contracts-bedrock-build - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade - fork_op_chain: op - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade base-mainnet - fork_op_chain: base - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade ink-mainnet - fork_op_chain: ink - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade unichain-mainnet - fork_op_chain: unichain - fork_base_chain: mainnet - fork_base_rpc: https://ci-mainnet-l1-archive.optimism.io - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - contracts-bedrock-checks: - requires: - - contracts-bedrock-build - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-frozen-code: - requires: - - contracts-bedrock-build - context: - - circleci-repo-readonly-authenticated-github-token - - diff-fetcher-forge-artifacts: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - contracts-bedrock-build - - diff-asterisc-bytecode: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - semgrep-scan: - name: semgrep-scan-local - scan_command: semgrep scan --timeout=100 --config .semgrep/rules/ --error . 
- context: - - circleci-repo-readonly-authenticated-github-token - - semgrep-scan: - name: semgrep-test - scan_command: semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ - context: - - circleci-repo-readonly-authenticated-github-token - - go-lint: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - fuzz-golang: - name: fuzz-golang-<<matrix.package_name>> - on_changes: <<matrix.package_name>> - matrix: - parameters: - package_name: - - op-challenger - - op-node - - op-service - - op-chain-ops - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - fuzz-golang: - name: cannon-fuzz - package_name: cannon - on_changes: cannon,packages/contracts-bedrock/src/cannon - uses_artifacts: true - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - contracts-bedrock-build - - fuzz-golang: - name: op-e2e-fuzz - package_name: op-e2e - on_changes: op-e2e,packages/contracts-bedrock/src - uses_artifacts: true - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - contracts-bedrock-build - - go-tests: - name: go-tests-short - no_output_timeout: 19m - test_timeout: 20m - requires: - - contracts-bedrock-build - - cannon-prestate-quick - context: - - circleci-repo-readonly-authenticated-github-token - filters: - branches: - ignore: develop # Run on all branches EXCEPT develop (PR branches only) - - go-tests: - name: go-tests-full - rule: "go-tests-ci" # Run full test suite instead of short - no_output_timeout: 89m # Longer timeout for full tests - test_timeout: 90m - notify: true - filters: - branches: - only: develop # Only runs on develop branch (post-merge) - requires: - - contracts-bedrock-build - - cannon-prestate-quick - context: - - circleci-repo-readonly-authenticated-github-token - - slack - - analyze-op-program-client: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - op-program-compat: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - bedrock-go-tests: - requires: - - go-lint - - cannon-go-lint-and-test - - check-generated-mocks-op-node - - check-generated-mocks-op-service - - op-program-compat - # Not needed for the devnet but we want to make sure they build successfully - - cannon-docker-build - - op-dispute-mon-docker-build - - op-program-docker-build - - op-supervisor-docker-build - - op-test-sequencer-docker-build - - go-tests-short - - sanitize-op-program - context: - - circleci-repo-readonly-authenticated-github-token - - docker-build: - name: <<matrix.docker_name>>-docker-build - docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> - save_image_tag: <<pipeline.git.revision>> - matrix: - parameters: - docker_name: - - op-node - - op-batcher - - op-faucet - - op-program - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - da-server - - op-supervisor - - op-test-sequencer - - cannon - - op-dripper - - op-interop-mon - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - cannon-prestate-quick: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - sanitize-op-program: - requires: - - cannon-prestate-quick - context: - - circleci-repo-readonly-authenticated-github-token - - check-generated-mocks-op-node: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - check-generated-mocks-op-service: - context: - -
circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - cannon-go-lint-and-test: - requires: - - contracts-bedrock-build - skip_slow_tests: true - notify: true - context: - - circleci-repo-readonly-authenticated-github-token - - todo-issues: - name: todo-issues-check - check_closed: false - context: - - circleci-repo-readonly-authenticated-github-token - - shellcheck/check: - name: shell-check - # We don't need the `exclude` key as the orb detects the `.shellcheckrc` - dir: . - ignore-dirs: ./packages/contracts-bedrock/lib - context: - - circleci-repo-readonly-authenticated-github-token - - go-release-op-deployer: - jobs: - - initialize: - filters: - tags: - only: /^op-deployer.*/ - branches: - ignore: /.*/ - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: - name: build-contracts-go-release-op-deployer - filters: - tags: - only: /^op-deployer.*/ - branches: - ignore: /.*/ - build_args: --skip test - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - go-release: - filters: - tags: - only: /^op-deployer.*/ - branches: - ignore: /.*/ - module: op-deployer - context: - - oplabs-gcr-release - - circleci-repo-readonly-authenticated-github-token - requires: - - build-contracts-go-release-op-deployer - - go-release-op-up: - jobs: - - initialize: - filters: - tags: - only: /^op-up.*/ - branches: - ignore: /.*/ - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: - name: build-contracts-go-release-op-up - filters: - tags: - only: /^op-up.*/ - branches: - ignore: /.*/ - build_args: --skip test - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - go-release: - filters: - tags: - only: /^op-up.*/ - branches: - ignore: /.*/ - module: op-up - context: - - oplabs-gcr-release - - circleci-repo-readonly-authenticated-github-token - requires: - - build-contracts-go-release-op-up - - release: - when: - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - # Wait for approval on the release - - hold: - type: approval - filters: - tags: - only: /^(da-server|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ - branches: - ignore: /.*/ - - initialize: - requires: - - hold - context: - - circleci-repo-readonly-authenticated-github-token - filters: - tags: - only: /^(da-server|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ - branches: - ignore: /.*/ - # Standard (medium) cross-platform docker images go here - - docker-build: - matrix: - parameters: - docker_name: - - op-node - - op-batcher - - op-faucet - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - da-server - - op-ufm - - op-supervisor - - op-test-sequencer - - op-deployer - - cannon - - op-dripper - - op-interop-mon - name: <<matrix.docker_name>>-docker-release - docker_tags: <<pipeline.git.revision>> - platforms: "linux/amd64,linux/arm64" - publish: true - release: true - filters: - tags: - only: /^<<matrix.docker_name>>\/v.*/ - branches: - ignore: /.*/ - context: - - oplabs-gcr-release - requires: - - initialize - # Checks for cross-platform images go here - - check-cross-platform: - matrix: - parameters: - op_component: - - op-node - - op-batcher - - op-faucet - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - da-server - - op-ufm - - op-supervisor - - op-test-sequencer - - op-deployer - - cannon - - op-dripper - - op-interop-mon - name: <<matrix.op_component>>-cross-platform - requires: - - 
op-node-docker-release - - op-batcher-docker-release - - op-faucet-docker-release - - op-proposer-docker-release - - op-challenger-docker-release - - op-dispute-mon-docker-release - - op-conductor-docker-release - - da-server-docker-release - - op-ufm-docker-release - - op-supervisor-docker-release - - op-test-sequencer-docker-release - - cannon-docker-release - - op-dripper-docker-release - - op-interop-mon-docker-release - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-prestate: - filters: - tags: - only: /^op-program\/v.*/ - branches: - ignore: /.*/ - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - publish-cannon-prestates: - context: - - circleci-repo-readonly-authenticated-github-token - - slack - - oplabs-network-optimism-io-bucket - requires: - - cannon-prestate - filters: - tags: - only: /^op-program\/v.*/ - branches: - ignore: /.*/ - - scheduled-todo-issues: - when: - equal: [build_four_hours, <<pipeline.schedule.name>>] - jobs: - - todo-issues: - name: todo-issue-checks - context: - - slack - - circleci-repo-readonly-authenticated-github-token - - develop-publish-contract-artifacts: - when: - or: - - and: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: ["webhook",<< pipeline.trigger_source >>] - - and: - - equal: [true, <<pipeline.parameters.publish_contract_artifacts_dispatch>>] - - equal: ["api",<< pipeline.trigger_source >>] - jobs: - - publish-contract-artifacts: - context: - - circleci-repo-readonly-authenticated-github-token - - develop-fault-proofs: - when: - or: - - and: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: ["webhook",<< pipeline.trigger_source >>] - - and: - - equal: [true, <<pipeline.parameters.fault_proofs_dispatch>>] - - equal: ["api",<< pipeline.trigger_source >>] - jobs: - - initialize: - context: - - circleci-repo-readonly-authenticated-github-token - - cannon-prestate: - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - cannon-stf-verify: - context: - - slack - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: - build_args: --deny-warnings --skip test - context: - - slack - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - go-tests-with-fault-proof-deps: - name: op-e2e-cannon-tests - notify: true - mentions: "@proofs-team" - no_output_timeout: 90m - test_timeout: 480m - resource_class: ethereum-optimism/latitude-fps-1 - context: - - slack - - circleci-repo-readonly-authenticated-github-token - requires: - - contracts-bedrock-build - - cannon-prestate - - publish-cannon-prestates: - context: - - slack - - oplabs-network-optimism-io-bucket - - circleci-repo-readonly-authenticated-github-token - requires: - - cannon-prestate - - op-e2e-cannon-tests - - develop-kontrol-tests: - when: - or: - - and: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: ["webhook",<< pipeline.trigger_source >>] - - and: - - equal: [true, <<pipeline.parameters.kontrol_dispatch>>] - - equal: ["api",<< pipeline.trigger_source >>] - jobs: - - kontrol-tests: - context: - - slack - - runtimeverification - - circleci-repo-readonly-authenticated-github-token - - scheduled-cannon-full-tests: - when: - or: - - equal: [build_four_hours, <<pipeline.schedule.name>>] - - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] - jobs: - - initialize: - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: - build_args: --deny-warnings 
--skip test - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - cannon-go-lint-and-test: - requires: - - contracts-bedrock-build - skip_slow_tests: false - no_output_timeout: 30m - notify: true - context: - - slack - - circleci-repo-readonly-authenticated-github-token - - scheduled-docker-publish: - when: - or: - - equal: [build_daily, <<pipeline.schedule.name>>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.docker_publish_dispatch >>] - jobs: - - initialize: - context: - - circleci-repo-readonly-authenticated-github-token - - docker-build: - matrix: - parameters: - docker_name: - - op-node - - op-batcher - - op-faucet - - op-program - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - op-supervisor - - op-test-sequencer - - cannon - - op-dripper - - op-interop-mon - name: <<matrix.docker_name>>-docker-publish - docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> - platforms: "linux/amd64,linux/arm64" - publish: true - context: - - oplabs-gcr - - slack - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - check-cross-platform: - matrix: - parameters: - op_component: - - op-node - - op-batcher - - op-faucet - - op-program - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - op-supervisor - - op-test-sequencer - - cannon - - op-dripper - - op-interop-mon - name: <<matrix.op_component>>-cross-platform - requires: - - <<matrix.op_component>>-docker-publish - context: - - circleci-repo-readonly-authenticated-github-token - - scheduled-preimage-reproducibility: - when: - or: - - equal: [build_daily, <<pipeline.schedule.name>>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.reproducibility_dispatch >>] - jobs: - - preimage-reproducibility: - context: - - slack - - circleci-repo-readonly-authenticated-github-token - - scheduled-stale-check: - when: - or: - - equal: [build_daily, <<pipeline.schedule.name>>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.stale_check_dispatch >>] - jobs: - - stale-check: - context: - - circleci-repo-optimism - - # Acceptance tests (post-merge to develop) - acceptance-tests: - when: - or: - - and: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: ["webhook",<< pipeline.trigger_source >>] - - and: - - equal: [true, <<pipeline.parameters.acceptance_tests_dispatch>>] - - equal: ["api",<< pipeline.trigger_source >>] - jobs: - - initialize: - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - cannon-prestate-quick: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - # IN-PROCESS (base) - - op-acceptance-tests: - # Acceptance Testing params - name: memory-base - gate: base - # CircleCI params - no_output_timeout: 10m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - contracts-bedrock-build - - cannon-prestate-quick - # KURTOSIS (Simple) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-simple - devnet: simple - gate: base - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - initialize - # KURTOSIS (Isthmus) - - 
op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-isthmus - devnet: isthmus - gate: isthmus - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - initialize - # KURTOSIS (Interop) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-interop - devnet: interop - gate: interop - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - initialize - # Generate flaky test report - - generate-flaky-report: - name: generate-flaky-tests-report - context: - - circleci-repo-readonly-authenticated-github-token - - circleci-api-token - - # Acceptance tests (pre-merge to develop) - acceptance-tests-pr: - when: - not: - equal: [<< pipeline.git.branch >>, "develop"] - jobs: - - initialize: - context: - - circleci-repo-readonly-authenticated-github-token - - contracts-bedrock-build: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - - cannon-prestate-quick: # needed for sysgo tests - context: - - circleci-repo-readonly-authenticated-github-token - requires: - - initialize - # IN-PROCESS (base) - - op-acceptance-tests: - # Acceptance Testing params - name: memory-base - gate: base - # CircleCI params - no_output_timeout: 10m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - contracts-bedrock-build - - cannon-prestate-quick - # KURTOSIS (Simple) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-simple - devnet: simple - gate: base - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - initialize - # KURTOSIS (Isthmus) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-isthmus - devnet: isthmus - gate: isthmus - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - initialize - # KURTOSIS (Interop) - - op-acceptance-tests: - # Acceptance Testing params - name: kurtosis-interop - devnet: interop - gate: interop - # CircleCI params - no_output_timeout: 30m - context: - - circleci-repo-readonly-authenticated-github-token - - discord - requires: - - initialize - # Generate flaky test report - - generate-flaky-report: - name: generate-flaky-tests-report - context: - - circleci-repo-readonly-authenticated-github-token - - circleci-api-token - - close-issue-workflow: - when: - and: - - equal: [<< pipeline.trigger_source >>, "api"] - - equal: [<< pipeline.parameters.github-event-type >>, "pull_request"] - - equal: [<< pipeline.parameters.github-event-action >>, "labeled"] - jobs: - - close-issue: - label_name: "auto-close-trivial-contribution" - message: "Thank you for your interest in contributing! - At this time, we are not accepting contributions that primarily fix spelling, stylistic, or grammatical errors in documentation, code, or elsewhere. - Please check our [contribution guidelines](https://github.com/ethereum-optimism/optimism/blob/develop/CONTRIBUTING.md#contributions-related-to-spelling-and-grammar) for more information. - This issue will be closed now." 
- context: - - circleci-repo-optimism - - devnet-metrics-collect: - when: - or: - - equal: [<< pipeline.trigger_source >>, "webhook"] - - and: - - equal: [true, << pipeline.parameters.devnet-metrics-collect >>] - - equal: [<< pipeline.trigger_source >>, "api"] - jobs: - - devnet-metrics-collect-authorship: - context: - - circleci-repo-readonly-authenticated-github-token - - oplabs-tools-data-public-metrics-bucket
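Among the removed CircleCI jobs above, check-cross-platform verifies that a published multi-arch image really carries a linux/arm64 variant by piping docker manifest inspect output through a jq select. A minimal standalone sketch of that pattern, runnable outside CircleCI (the image reference is a placeholder, not a real registry path):

    # Fail if the image's manifest list has no linux/arm64 entry.
    # "example.registry/org/op-node:latest" is a hypothetical image reference.
    MANIFEST=$(docker manifest inspect example.registry/org/op-node:latest)
    SUPPORTED_PLATFORM=$(echo "$MANIFEST" | jq -r '.manifests[] | select(.platform.architecture == "arm64" and .platform.os == "linux")')
    if [ -z "$SUPPORTED_PLATFORM" ]; then
      echo "Platform 'linux/arm64' not supported by this image"
      exit 1
    fi
    echo "linux/arm64 variant found"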
diff --git ethereum-optimism/optimism/.circleci/github-event-handler.yml layr-labs/optimism/.circleci/github-event-handler.yml deleted file mode 100644 index b4ed1dab058eca1e83fd234805078890e8a698c4..0000000000000000000000000000000000000000 --- ethereum-optimism/optimism/.circleci/github-event-handler.yml +++ /dev/null @@ -1,9 +0,0 @@ -version: 1 -listen-for-events: - pull_request: - #https://docs.github.com/en/webhooks/webhook-events-and-payloads#issues - types: [labeled] - event-to-parameters-mappings: - - pull_request_number: .pull_request.number - - label_name: .label.name -
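The deleted handler config above maps labeled pull_request webhooks onto pipeline parameters using jq-style paths (.pull_request.number, .label.name). A rough sketch of the equivalent extraction from a raw webhook payload, assuming the payload has been saved as event.json (a hypothetical filename):

    # Pull the PR number and label name out of a pull_request "labeled" payload.
    pull_request_number=$(jq -r '.pull_request.number' event.json)
    label_name=$(jq -r '.label.name' event.json)
    echo "PR #${pull_request_number} was labeled '${label_name}'"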
diff --git ethereum-optimism/optimism/.github/workflows/kurtosis-devnet.yml layr-labs/optimism/.github/workflows/kurtosis-devnet.yml new file mode 100644 index 0000000000000000000000000000000000000000..df63a43671f081606727565da424101e81f6a832 --- /dev/null +++ layr-labs/optimism/.github/workflows/kurtosis-devnet.yml @@ -0,0 +1,91 @@ +name: Kurtosis Devnet + +on: + push: + branches: [eigenda-develop] + pull_request: + # Workflow dispatch allows you to trigger this workflow manually from the Actions tab. + # We use this to trigger the workflow on release branches. + workflow_dispatch: + +env: + MISE_VERSION: 2024.12.14 + +jobs: + # Tests that run against a kurtosis devnet that uses the proxy in memstore mode. + run_op_eigenda_memstore_devnet: + strategy: + fail-fast: false # run all matrix jobs even if one fails + matrix: + # This list should be kept up-to-date with the files in eigenda-template-values + valuesFile: + [ + "memstore-sequential-small-blobs.json", + "memstore-concurrent-large-blobs.json", + ] + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + experimental: true + - run: just eigenda-devnet-start eigenda-template-values/${{ matrix.valuesFile }} + working-directory: kurtosis-devnet + env: + # Temporary fix for error (see https://github.com/Layr-Labs/optimism/actions/runs/14325605690/job/40150440399?pr=42) + # 2025/04/08 05:13:48 Error: [...]: Error response from daemon: client version 1.47 is too new. Maximum supported API version is 1.45 + # This should only be needed until 2025-05-09 when latest ubuntu image's docker version will be bumped. + # See https://github.com/actions/runner-images/blob/main/images/ubuntu/Ubuntu2404-Readme.md + # and https://github.com/docker/cli/issues/613 + DOCKER_API_VERSION: 1.45 + - run: just eigenda-devnet-test-memstore -v + working-directory: kurtosis-devnet + + # This is an optimism devnet which talks to the eigenda holesky testnet via an eigenda-proxy. + # TODO: we should connect this to an eigenda kurtosis devnet instead of using our holesky testnet. + run_op_eigenda_holesky_devnet: + runs-on: ubuntu-latest + strategy: + fail-fast: false # run all matrix jobs even if one fails + matrix: + # This list should be kept up-to-date with the files in eigenda-template-values + valuesFile: + [ + # v1 is too slow for sequential large blobs. + # Could send a single massive blob every few minutes, but then migration test + # takes too long waiting for the few certs to land onchain, so opted to just not run it. + "holesky-concurrent-small-blobs.json", + "holesky-v2-sequential-large-blobs.json", + "holesky-v2-concurrent-small-blobs.json", + ] + steps: + - uses: actions/checkout@v4 + - uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + experimental: true + # This config gets injected into the eigenda.yaml kurtosis template config file + - name: Inject secrets into values config files + run: | + jq --argjson newconfig '{ + "eigenda-proxy": { + "secrets": { + "eigenda.signer-private-key-hex": "${{ secrets.EIGENDA_V1_HOLESKY_TESTNET_SIGNER_KEY }}", + "eigenda.v2.signer-payment-key-hex": "${{ secrets.EIGENDA_V2_HOLESKY_TESTNET_SIGNER_KEY }}", + "eigenda.eth_rpc": "https://ethereum-holesky-rpc.publicnode.com" + } + } + }' '. 
*= $newconfig' eigenda-template-values/${{ matrix.valuesFile }} > holesky-config-with-secrets.json + working-directory: kurtosis-devnet + - run: just eigenda-devnet-start holesky-config-with-secrets.json + working-directory: kurtosis-devnet + env: + # Temporary fix for error (see https://github.com/Layr-Labs/optimism/actions/runs/14325605690/job/40150440399?pr=42) + # 2025/04/08 05:13:48 Error: [...]: Error response from daemon: client version 1.47 is too new. Maximum supported API version is 1.45 + # This should only be needed until 2025-05-09 when latest ubuntu image's docker version will be bumped. + # See https://github.com/actions/runner-images/blob/main/images/ubuntu/Ubuntu2404-Readme.md + # and https://github.com/docker/cli/issues/613 + DOCKER_API_VERSION: 1.45 + - run: just eigenda-devnet-test-holesky -v + working-directory: kurtosis-devnet
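The secret-injection step in this workflow leans on jq's recursive merge-assignment ('. *= $newconfig'), which deep-merges the injected object into the template values rather than overwriting whole top-level keys. A small self-contained demonstration of the operator (file contents and keys here are illustrative, not taken from the repo):

    # values.json contains: {"eigenda-proxy": {"image": "x", "secrets": {}}}
    jq --argjson newconfig '{"eigenda-proxy": {"secrets": {"eigenda.eth_rpc": "https://example-rpc"}}}' \
      '. *= $newconfig' values.json
    # => {"eigenda-proxy": {"image": "x", "secrets": {"eigenda.eth_rpc": "https://example-rpc"}}}

A plain assignment would have replaced the whole "eigenda-proxy" object and dropped the "image" key; the recursive merge keeps it.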
diff --git ethereum-optimism/optimism/.github/workflows/pages.yml layr-labs/optimism/.github/workflows/pages.yml new file mode 100644 index 0000000000000000000000000000000000000000..5bd173a3aca1c373180531d9994340a94d2b454a --- /dev/null +++ layr-labs/optimism/.github/workflows/pages.yml @@ -0,0 +1,48 @@ +name: Build & publish forkdiff github-pages +permissions: + contents: read + pages: write + id-token: write +on: + workflow_dispatch: + push: + branches: + - eigenda + +jobs: + build: + concurrency: ci-${{ github.ref }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 1000 # make sure to fetch the old commit we diff against + + - name: Build forkdiff + uses: "docker://protolambda/forkdiff:0.1.0" + with: + args: -repo=/github/workspace -fork=/github/workspace/fork.yaml -out=/github/workspace/index.html + + - name: Setup Pages + uses: actions/configure-pages@v5 + + - name: Build with Jekyll + uses: actions/jekyll-build-pages@v1 + with: + source: ./ + destination: ./_site + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4
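This pages workflow renders the fork diff with the published forkdiff container; the fetch-depth of 1000 ensures the base commit being diffed against is present in history. The same page can be generated locally with the arguments the workflow passes, under the assumption that a fork.yaml sits at the repository root:

    # Mount the repo into the forkdiff container and render the diff to index.html.
    docker run -v "$PWD":/workspace protolambda/forkdiff:0.1.0 \
      -repo=/workspace -fork=/workspace/fork.yaml -out=/workspace/index.html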
diff --git ethereum-optimism/optimism/.github/workflows/push-ghcr.yml layr-labs/optimism/.github/workflows/push-ghcr.yml new file mode 100644 index 0000000000000000000000000000000000000000..f99dbeade0ed8bc8300a789b830075effda15ca4 --- /dev/null +++ layr-labs/optimism/.github/workflows/push-ghcr.yml @@ -0,0 +1,32 @@ +name: Push Images to ghcr + +on: + push: + branches: + - eigenda-develop + tags: + - "op-*/v*-eigenda.*" # Will match tags like op-node/v1.11.1-eigenda.1, op-batcher/v1.12.3-eigenda.2, etc. + +jobs: + build-and-push: + runs-on: ubuntu-latest + permissions: + contents: read + packages: write + steps: + - uses: actions/checkout@v3 + + - name: Set up Docker Buildx + uses: docker/setup-buildx-action@v2 + + - name: Login to GitHub Container Registry + uses: docker/login-action@v2 + with: + registry: ghcr.io + username: ${{ github.actor }} + password: ${{ secrets.GITHUB_TOKEN }} + + - name: Setup multi-arch docker builder + run: make docker-builder + + - run: make layr-labs-docker-push
diff --git ethereum-optimism/optimism/.github/workflows/test-golang.yml layr-labs/optimism/.github/workflows/test-golang.yml new file mode 100644 index 0000000000000000000000000000000000000000..2fa67da27452b3becf0c9679d7320f4b52dc357b --- /dev/null +++ layr-labs/optimism/.github/workflows/test-golang.yml @@ -0,0 +1,99 @@ +name: Go + +on: + push: + branches: [eigenda-develop] + pull_request: + # Workflow dispatch allows you to trigger this workflow manually from the Actions tab. + # We use this to trigger the workflow on release branches. + workflow_dispatch: + +jobs: + go-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: jdx/mise-action@v2 + with: + version: 2024.12.14 # [default: latest] mise version to install + install: true # [default: true] run `mise install` + cache: true # [default: true] cache mise using GitHub's cache + experimental: true # [default: false] enable experimental features + - run: golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./... + + build-and-cache-contracts: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: jdx/mise-action@v2 + with: + version: 2024.12.14 # [default: latest] mise version to install + install: true # [default: true] run `mise install` + cache: true # [default: true] cache mise using GitHub's cache + experimental: true # [default: false] enable experimental features + - uses: actions/cache@v3 + id: cache-artifacts + with: + path: packages/contracts-bedrock/forge-artifacts + # If any of the contracts files change, the cache key will change, forcing a rebuild of the forge artifacts + key: ${{ runner.os }}-forge-${{ hashFiles('packages/contracts-bedrock/src/**/*.sol') }} + - name: Build contracts if cache miss + if: steps.cache-artifacts.outputs.cache-hit != 'true' + run: make build-contracts + + go-tests: + needs: [build-and-cache-contracts] + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + packages: + - op-batcher + - op-node + - op-alt-da + - op-e2e/system/altda + - op-e2e/actions/altda + steps: + - uses: actions/checkout@v4 + + - uses: jdx/mise-action@v2 + with: + version: 2024.12.14 # [default: latest] mise version to install + install: true # [default: true] run `mise install` + cache: true # [default: true] cache mise using GitHub's cache + experimental: true # [default: false] enable experimental features + + - name: Restore cached forge artifacts + uses: actions/cache@v3 + id: cache-restore + with: + path: packages/contracts-bedrock/forge-artifacts + key: ${{ runner.os }}-forge-${{ hashFiles('packages/contracts-bedrock/src/**/*.sol') }} + + # Cache has been stored in the build-and-cache-contracts job, so if this fails there's a problem + - name: Check cache restore + if: steps.cache-restore.outputs.cache-hit != 'true' + run: | + echo "Cache restore failed" + exit 1 + + # We use mise to install golang instead of the setup-go action, + # so we need to do the cache setup ourselves + - name: Go Module Cache + uses: actions/cache@v3 + id: go-cache + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + # Add explicit download on cache miss + # go test runs `go mod download` implicitly, but this separation makes it easy to see how long downloading vs running tests takes + - name: Download Go modules + if: steps.go-cache.outputs.cache-hit != 'true' + run: go mod download + + - name: Run tests + run: | + go test -timeout=10m
./${{ matrix.packages }}/...
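To reproduce these checks locally, you can run the same commands the workflow runs. A minimal sketch, assuming mise has installed the pinned Go and golangci-lint toolchain and that `make build-contracts` has been run once to produce the forge artifacts:

```sh
# Lint with the same extra linters and exclusions as CI.
golangci-lint run -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint \
  --timeout 5m -e "errors.As" -e "errors.Is" ./...

# Run one of the matrix packages (op-batcher here) with the CI timeout.
go test -timeout=10m ./op-batcher/...
```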

Added kurtosis devnet yaml files to spin up an op chain that uses altda, with an eigenda-proxy running either in memstore mode (to simulate interactions with an EigenDA network) or connected to the EigenDA holesky testnet.
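For reference, a typical local flow with the memstore devnet looks like the following sketch, using the `eigenda` recipes from the justfile diff further down:

```sh
cd kurtosis-devnet
# Spin up the devnet with one of the memstore values files.
just eigenda-devnet-start eigenda-template-values/memstore-sequential-small-blobs.json
# Run the tests that assume a memstore-backed proxy.
just eigenda-devnet-test-memstore -v
# Tear the enclave down when done.
just eigenda-devnet-clean
```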

diff --git ethereum-optimism/optimism/kurtosis-devnet/README.md layr-labs/optimism/kurtosis-devnet/README.md index e6bc14a760448d5b66c3bc51ec0a04a2afaa1313..44d17930ad80e5c81ac3a5ddb465c81006c2b726 100644 --- ethereum-optimism/optimism/kurtosis-devnet/README.md +++ layr-labs/optimism/kurtosis-devnet/README.md @@ -14,6 +14,7 @@ To see available devnets, consult the `justfile` to see what `.*-devnet` targets exist, currently - `simple-devnet` - `interop-devnet` - `user-devnet` +- `eigenda-devnet` (see the `eigenda` group recipes)   You can read over the referenced `yaml` files located in this directory to see the network definition which would be deployed. Mini and Simple are example network definitions, and User expects a provided network definition.
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-template-values/holesky-concurrent-small-blobs.json layr-labs/optimism/kurtosis-devnet/eigenda-template-values/holesky-concurrent-small-blobs.json new file mode 100644 index 0000000000000000000000000000000000000000..03744ce22c36f9f58182a924432f7a4afd11c032 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-template-values/holesky-concurrent-small-blobs.json @@ -0,0 +1,18 @@ +{ + "eigenda-proxy": { + "useMemstore": false, + "dispersalBackend": "v1", + "secrets": { + "eigenda.signer-private-key-hex": "", + "eigenda.v2.signer-payment-key-hex": "", + "eigenda.eth_rpc": "" + } + }, + "batcher": { + "max-channel-duration": 10, + "altda.max-concurrent-da-requests": 100, + "target-num-frames": 10, + "max-l1-tx-size-bytes": 1000, + "batch-type": 1 + } +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-template-values/holesky-v2-concurrent-small-blobs.json layr-labs/optimism/kurtosis-devnet/eigenda-template-values/holesky-v2-concurrent-small-blobs.json new file mode 100644 index 0000000000000000000000000000000000000000..506f7ac4000914a903e731c07ad00e48bf396ff4 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-template-values/holesky-v2-concurrent-small-blobs.json @@ -0,0 +1,18 @@ +{ + "eigenda-proxy": { + "useMemstore": false, + "dispersalBackend": "v2", + "secrets": { + "eigenda.signer-private-key-hex": "", + "eigenda.v2.signer-payment-key-hex": "", + "eigenda.eth_rpc": "" + } + }, + "batcher": { + "max-channel-duration": 10, + "altda.max-concurrent-da-requests": 100, + "target-num-frames": 10, + "max-l1-tx-size-bytes": 1000, + "batch-type": 1 + } +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-template-values/holesky-v2-sequential-large-blobs.json layr-labs/optimism/kurtosis-devnet/eigenda-template-values/holesky-v2-sequential-large-blobs.json new file mode 100644 index 0000000000000000000000000000000000000000..7ac6d1b32072978a1bab53dce8b237729181566d --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-template-values/holesky-v2-sequential-large-blobs.json @@ -0,0 +1,18 @@ +{ + "eigenda-proxy": { + "useMemstore": false, + "dispersalBackend": "v2", + "secrets": { + "eigenda.signer-private-key-hex": "", + "eigenda.v2.signer-payment-key-hex": "", + "eigenda.eth_rpc": "" + } + }, + "batcher": { + "max-channel-duration": 10, + "altda.max-concurrent-da-requests": 1, + "target-num-frames": 1000, + "max-l1-tx-size-bytes": 1000, + "batch-type": 1 + } +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-template-values/memstore-concurrent-large-blobs.json layr-labs/optimism/kurtosis-devnet/eigenda-template-values/memstore-concurrent-large-blobs.json new file mode 100644 index 0000000000000000000000000000000000000000..a0ca6c695eb755b883675907d6bb6fd69850fdca --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-template-values/memstore-concurrent-large-blobs.json @@ -0,0 +1,12 @@ +{ + "eigenda-proxy": { + "useMemstore": true + }, + "batcher": { + "max-channel-duration": 10, + "altda.max-concurrent-da-requests": 10, + "target-num-frames": 5, + "max-l1-tx-size-bytes": 1000, + "batch-type": 1 + } +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-template-values/memstore-sequential-small-blobs.json layr-labs/optimism/kurtosis-devnet/eigenda-template-values/memstore-sequential-small-blobs.json new file mode 100644 index 0000000000000000000000000000000000000000..3c6243209ae52a1071ea15b6cdb68cea2eca4a72 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-template-values/memstore-sequential-small-blobs.json @@ -0,0 +1,12 @@ +{ + "eigenda-proxy": { + "useMemstore": true + }, + "batcher": { + "max-channel-duration": 10, + "altda.max-concurrent-da-requests": 1, + "target-num-frames": 1, + "max-l1-tx-size-bytes": 1000, + "batch-type": 1 + } +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda.yaml layr-labs/optimism/kurtosis-devnet/eigenda.yaml new file mode 100644 index 0000000000000000000000000000000000000000..ffd784aa9b58499ea69264dbaa35d051c27da550 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda.yaml @@ -0,0 +1,144 @@ +# This devnet uses an eigenda-proxy to interact with the eigenda holesky testnet. +# As a requirement, you must first create and populate the eigenda-secrets.json file +# 1. cp eigenda-secrets.example.json eigenda-secrets.json +# 2. Populate the file with the required values +# TODO: Connect this with an eigenda v1 kurtosis devnet instead of using our holesky testnet. +# See https://github.com/Layr-Labs/avs-devnet/blob/main/examples/eigenda.yaml +{{- $context := or . (dict)}} +# TODO: I hate yaml templating... should really move to something more reasonable. CUE perhaps? +# use the memstore proxy by default, otherwise connect to the eigenda holesky backend +{{- $useProxyMemstore := dig "eigenda-proxy" "useMemstore" true $context }} +{{- $proxyDispersalBackend := dig "eigenda-proxy" "dispersalBackend" "v1" $context }} +{{- $eigendaV1SignerKey := dig "eigenda-proxy" "secrets" "eigenda.signer-private-key-hex" "" $context }} +{{- $eigendaV2SignerKey := dig "eigenda-proxy" "secrets" "eigenda.v2.signer-payment-key-hex" "" $context }} +{{- $eigendaBackendEthRpc := dig "eigenda-proxy" "secrets" "eigenda.eth_rpc" "" $context }} +{{- if not $useProxyMemstore}} + # make sure the secrets are populated when using the eigenda-holesky backend + {{- if or (eq $eigendaV1SignerKey "") (eq $eigendaV2SignerKey "") (eq $eigendaBackendEthRpc "") }} + {{- fail "Missing required eigenda-proxy eigenda-backend secrets." }} + {{- end }} +{{- end }} +--- +optimism_package: + altda_deploy_config: + use_altda: true + # We use the generic commitment which means that the da challenge contract won't get deployed. + # We align with l2beat's analysis of the da challenge contract not being economically viable, + # so even if a rollup fails over to keccak commitments, not using the da challenge contract is fine + # (has same security as using it). + # See https://l2beat.com/scaling/projects/redstone#da-layer-risk-analysis and + # https://discord.com/channels/1244729134312198194/1260612364865245224/1290294353688002562 for + # an economic analysis of the da challenge contract. 
+ da_commitment_type: GenericCommitment + da_challenge_window: 16 + da_resolve_window: 16 + da_bond_size: 0 + da_resolver_refund_percentage: 0 + chains: + op-kurtosis: + participants: + node0: + el: + type: op-geth + # latest tag is currently broken until the next stable release, see https://github.com/ethereum-optimism/op-geth/pull/515 + # Also see discussion in https://discord.com/channels/1244729134312198194/1260624141497798706/1342556343495692320 + image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:optimism" + log_level: "" + extra_env_vars: {} + extra_labels: {} + extra_params: [] + cl: + type: op-node + image: {{ localDockerImage "op-node" }} + log_level: "debug" + extra_env_vars: {} + extra_labels: {} + extra_params: [] + network_params: + network: "kurtosis" + network_id: "2151908" + seconds_per_slot: 2 + fjord_time_offset: 0 + granite_time_offset: 0 + holocene_time_offset: 0 + fund_dev_accounts: true + batcher_params: + image: {{ localDockerImage "op-batcher" }} + extra_params: + - --max-channel-duration + - '{{ dig "batcher" "max-channel-duration" 2 $context }}' + - --altda.max-concurrent-da-requests + - '{{ dig "batcher" "altda.max-concurrent-da-requests" 1 $context }}' + # max-pending-tx should be same as altda.max-concurrent-da-requests given that each commitment is sent as a single l1 tx right now. + # See https://github.com/ethereum-optimism/SUPs/pull/1 for progress on batching commitments. + - --max-pending-tx + - '{{ dig "batcher" "altda.max-concurrent-da-requests" 1 $context }}' + - --target-num-frames + - '{{ dig "batcher" "target-num-frames" 1 $context }}' + - --max-l1-tx-size-bytes + - '{{ dig "batcher" "max-l1-tx-size-bytes" 1000 $context }}' + - --batch-type + - '{{ dig "batcher" "batch-type" 1 $context }}' + proposer_params: + image: {{ localDockerImage "op-proposer" }} + extra_params: [] + game_type: 1 + proposal_interval: 10m + da_params: + enabled: true + image: ghcr.io/layr-labs/eigenda-proxy:v1.7.0 + cmd: + - --addr=0.0.0.0 + - --port=3100 + - --storage.backends-to-enable=V1,V2 + - --storage.dispersal-backend={{ $proxyDispersalBackend }} #v1 or v2 + - --api-enabled=admin # to enable changing dispersal-backend to v2 without restarting + - --eigenda.v2.max-blob-length=1MiB # makes startup faster by only loading 1MiB/2MiB of G1/G2 SRS points. 
+ {{- if $useProxyMemstore }} + - --memstore.enabled + - --memstore.expiration=30m + {{- else }} # connect to eigenda holesky backend + # V1 flags + - --eigenda.disperser-rpc=disperser-holesky.eigenda.xyz:443 + - --eigenda.svc-manager-addr=0xD4A7E1Bd8015057293f0D0A557088c286942e84b + # The two params below are loaded from the eigenda-secrets.json file + - --eigenda.signer-private-key-hex={{ $eigendaV1SignerKey }} + - --eigenda.eth-rpc={{ $eigendaBackendEthRpc }} + # V2 flags + - --eigenda.v2.disperser-rpc=disperser-holesky.eigenda.xyz:443 + - --eigenda.v2.cert-verifier-addr=0xFe52fE1940858DCb6e12153E2104aD0fDFbE1162 + - --eigenda.v2.signer-payment-key-hex={{ $eigendaV2SignerKey }} + - --eigenda.v2.eth-rpc={{ $eigendaBackendEthRpc }} + {{- end }} + + challengers: + challenger: + # TODO: reenable once we start testing secure integrations + enabled: false + image: {{ localDockerImage "op-challenger" }} + participants: "*" + cannon_prestates_url: {{ localPrestate.URL }} + cannon_trace_types: ["cannon", "permissioned"] + extra_params: [] + global_log_level: "info" + global_node_selectors: {} + global_tolerations: [] + persistent: false +ethereum_package: + participants: + - el_type: geth + el_extra_params: + - --graphql # needed to query for batcher-inbox txs to test failover working correctly + cl_type: teku + network_params: + preset: minimal + genesis_delay: 5 + additional_preloaded_contracts: | + { + "0x4e59b44847b379578588920cA78FbF26c0B4956C": { + "balance": "0ETH", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "storage": {}, + "nonce": "1" + } + }
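To run this template against the holesky backend locally, the secrets have to be injected into a values file first. A sketch mirroring what the kurtosis-devnet CI workflow above does with jq (the signer keys are placeholders you must supply yourself):

```sh
cd kurtosis-devnet
# Merge the secrets into one of the holesky values files.
jq --argjson newconfig '{
  "eigenda-proxy": {
    "secrets": {
      "eigenda.signer-private-key-hex": "<v1-signer-key>",
      "eigenda.v2.signer-payment-key-hex": "<v2-payment-key>",
      "eigenda.eth_rpc": "https://ethereum-holesky-rpc.publicnode.com"
    }
  }
}' '. *= $newconfig' eigenda-template-values/holesky-v2-concurrent-small-blobs.json > holesky-config-with-secrets.json
# Start the devnet from the secrets-populated file.
just eigenda-devnet-start holesky-config-with-secrets.json
```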
diff --git ethereum-optimism/optimism/kurtosis-devnet/justfile layr-labs/optimism/kurtosis-devnet/justfile index c2ff0f3bd2e4f7bc848145b7956ed1d31f7ce639..8d0bb65eef45d5ab2c3c42dfb070805caae9ccc9 100644 --- ethereum-optimism/optimism/kurtosis-devnet/justfile +++ layr-labs/optimism/kurtosis-devnet/justfile @@ -51,6 +51,8 @@ just op-program-svc/op-program-svc {{TAG}}   # Devnet template recipe +# Note: Make sure that TEMPLATE_FILE is in the kurtosis-devnet/ directory. +# Cannot use a file from the kurtosis-devnet/templates/ directory for example. devnet TEMPLATE_FILE DATA_FILE="" NAME="" PACKAGE=KURTOSIS_PACKAGE: _prerequisites #!/usr/bin/env bash export DEVNET_NAME={{NAME}} @@ -81,6 +83,88 @@ --show-enclave-inspect=false \ ./tests/ "$ARGS"   # Devnet recipes + +##### These are the main eigenda commands used by CI and automated tests ##### + +# Start an EigenDA devnet with the given values file (see options in eigenda-template-values/). +# The devnet will be named "eigenda-devnet" by default, but you can override this with the NAME argument. +# We also start a tx-fuzzer separately, since the optimism-package doesn't currently have that configurable as part of its package. +[group('eigenda')] +eigenda-devnet-start VALUES_FILE="eigenda-template-values/memstore-concurrent-large-blobs.json" ENCLAVE_PREFIX="eigenda": (devnet "eigenda.yaml" VALUES_FILE ENCLAVE_PREFIX) + just eigenda-devnet-add-tx-fuzzer {{ENCLAVE_PREFIX}}-devnet +[group('eigenda')] +eigenda-devnet-clean ENCLAVE_NAME="eigenda-devnet": + kurtosis enclave rm {{ENCLAVE_NAME}} --force +# Runs all tests that end with the `_Memstore` suffix, assuming the devnet was started with the memstore proxy, +# meaning with a config file in eigenda-template-values/memstore-*. +[group('eigenda')] +eigenda-devnet-test-memstore *ARGS="": + go test ./tests/eigenda/... -run "_Memstore$" -v -timeout 30m {{ARGS}} +# Runs all tests that end with the `_Holesky` suffix, assuming the devnet was started with the proxy using the holesky backend. +# To start the devnet with the holesky backend, the files in eigenda-template-values/holesky-* need to have secrets injected. +# Take a look at how CI does it in .github/workflows/kurtosis-devnet.yml. +[group('eigenda')] +eigenda-devnet-test-holesky *ARGS="": + go test ./tests/eigenda/... -run "_Holesky$" -v -timeout 40m {{ARGS}} +[group('eigenda')] +eigenda-devnet-add-tx-fuzzer ENCLAVE_NAME="eigenda-devnet" *ARGS="": + kurtosis service add {{ENCLAVE_NAME}} tx-fuzz ethpandaops/tx-fuzz:master --cmd \ "spam \ --sk 0xac0974bec39a17e36ba4a6b4d238ff944bacb478cbed5efcae784d7bf4f2ff80 \ --rpc http://op-el-2151908-node0-op-geth:8545 \ --slot-time 2 \ --accounts 100 --txcount 1" \ {{ARGS}} + +##### EigenDA Commands below are for debugging and manual testing ##### + +# Cause proxy to start returning 503 errors to batcher, as a signal +# to fail over to ethDA. Use `eigenda-devnet-failback` to revert. 
+[group('eigenda')] +eigenda-devnet-failover ENCLAVE_NAME="eigenda-devnet": + #!/usr/bin/env bash + PROXY_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} da-server-op-kurtosis http) + curl -X PATCH $PROXY_ENDPOINT/memstore/config -d '{"PutReturnsFailoverError": true}' +[group('eigenda')] +eigenda-devnet-failback ENCLAVE_NAME="eigenda-devnet": + #!/usr/bin/env bash + PROXY_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} da-server-op-kurtosis http) + curl -X PATCH $PROXY_ENDPOINT/memstore/config -d '{"PutReturnsFailoverError": false}' +[group('eigenda')] +eigenda-devnet-grafana ENCLAVE_NAME="eigenda-devnet": + #!/usr/bin/env bash + GRAFANA_URL=$(kurtosis port print {{ENCLAVE_NAME}} grafana http) + open $GRAFANA_URL +[group('eigenda')] +eigenda-devnet-sync-status ENCLAVE_NAME="eigenda-devnet": + #!/usr/bin/env bash + OPNODE_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} op-cl-2151908-node0-op-node rpc) + cast rpc optimism_syncStatus --rpc-url $OPNODE_ENDPOINT | jq +[group('eigenda')] +eigenda-devnet-configs ENCLAVE_NAME="eigenda-devnet": + #!/usr/bin/env bash + echo "OP-NODE ROLLUP CONFIG:" + OPNODE_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} op-cl-2151908-node0-op-node http) + cast rpc optimism_rollupConfig --rpc-url $OPNODE_ENDPOINT | jq + echo "TEKU L1-CL SPEC:" + TEKU_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} cl-1-teku-geth http) + curl $TEKU_ENDPOINT/eth/v1/config/spec | jq + echo "PROXY MEMSTORE CONFIG:" + PROXY_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} da-server-op-kurtosis http) + curl $PROXY_ENDPOINT/memstore/config | jq + curl $PROXY_ENDPOINT/admin/eigenda-dispersal-backend | jq +[group('eigenda')] +eigenda-devnet-change-proxy-backend backend="V1" ENCLAVE_NAME="eigenda-devnet": + #!/usr/bin/env bash + PROXY_ENDPOINT=$(kurtosis port print {{ENCLAVE_NAME}} da-server-op-kurtosis http) + curl -X PUT $PROXY_ENDPOINT/admin/eigenda-dispersal-backend \ + -H "Content-Type: application/json" \ + -d '{"eigenDADispersalBackend": "{{backend}}"}' +# See https://docs.kurtosis.com/service-update/ +[group('eigenda')] +eigenda-devnet-restart-batcher ENCLAVE_NAME="eigenda-devnet": + kurtosis service update {{ENCLAVE_NAME}} op-batcher-2151908-op-kurtosis +   # Simple devnet simple-devnet: (devnet "simple.yaml")
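A quick way to exercise the failover path end to end on a memstore devnet is to chain the debugging recipes above; a sketch:

```sh
cd kurtosis-devnet
# Make the proxy return 503s so the batcher fails over to ethDA.
just eigenda-devnet-failover
# Confirm the chain keeps progressing while on ethDA.
just eigenda-devnet-sync-status
# Revert the proxy so the batcher fails back to EigenDA.
just eigenda-devnet-failback
```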
diff --git ethereum-optimism/optimism/Makefile layr-labs/optimism/Makefile index d228d9f92fe902b6d930f2b65a9fd952c357d3b5..7d72424203124cbbdd1bc20c3068698f689d3564 100644 --- ethereum-optimism/optimism/Makefile +++ layr-labs/optimism/Makefile @@ -41,6 +41,28 @@ -f docker-bake.hcl \ op-node op-batcher op-proposer op-challenger op-dispute-mon op-supervisor .PHONY: golang-docker   +# Make sure to run `make docker-builder` first, to set up a multi-arch builder. +layr-labs-docker-push: ## Builds and pushes op-batcher/node Docker images to ghcr.io + @GIT_TAG=$$(git tag --points-at HEAD 2>/dev/null | grep -E 'op-' | head -n1); \ + if [ -n "$$GIT_TAG" ]; then \ + IMAGE_TAGS="$$(echo $$GIT_TAG | tr '/' '-'),latest"; \ + else \ + IMAGE_TAGS="$$(git rev-parse HEAD),latest"; \ + fi; \ + echo "Using tags: $$IMAGE_TAGS"; \ + GIT_COMMIT=$$(git rev-parse HEAD) \ + GIT_DATE=$$(git show -s --format='%ct') \ + IMAGE_TAGS=$$IMAGE_TAGS \ + REGISTRY=ghcr.io \ + REPOSITORY=layr-labs/optimism \ + PLATFORMS="linux/amd64,linux/arm64" \ + docker buildx bake \ + --progress plain \ + --push \ + -f docker-bake.hcl \ + op-batcher op-node +.PHONY: layr-labs-docker-push + docker-builder-clean: ## Removes the Docker buildx builder docker buildx rm buildx-build .PHONY: docker-builder-clean
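The release flow this target supports is then just two commands; a sketch, following the Makefile comment above:

```sh
# One-time setup: create the multi-arch buildx builder.
make docker-builder
# Build linux/amd64 + linux/arm64 images for op-batcher and op-node
# and push them to ghcr.io/layr-labs/optimism, tagged from the git tag or commit.
make layr-labs-docker-push
```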
diff --git ethereum-optimism/optimism/README.md layr-labs/optimism/README.md index e89b10785624a90c70a5bd4b9201444a52e02356..3319e14ec66688638b40d526c7d74febd4466bfc 100644 --- ethereum-optimism/optimism/README.md +++ layr-labs/optimism/README.md @@ -1,134 +1,102 @@ -<div align="center"> - <br /> - <br /> - <a href="https://optimism.io"><img alt="Optimism" src="https://raw.githubusercontent.com/ethereum-optimism/brand-kit/main/assets/svg/OPTIMISM-R.svg" width=600></a> - <br /> - <h3><a href="https://optimism.io">Optimism</a> is Ethereum, scaled.</h3> - <br /> -</div> +![](./assets/EigenDA_TextLogo_White.svg)   -**Table of Contents** +# EigenDA powered Optimism-Fork   -<!--TOC--> +[![golang](https://github.com/Layr-Labs/optimism/actions/workflows/test-golang.yml/badge.svg)](https://github.com/Layr-Labs/optimism/actions/workflows/test-golang.yml) +[![kurtosis](https://github.com/Layr-Labs/optimism/actions/workflows/kurtosis-devnet.yml/badge.svg)](https://github.com/Layr-Labs/optimism/actions/workflows/kurtosis-devnet.yml)   -- [What is Optimism?](#what-is-optimism) -- [Documentation](#documentation) -- [Specification](#specification) -- [Community](#community) -- [Contributing](#contributing) -- [Security Policy and Vulnerability Reporting](#security-policy-and-vulnerability-reporting) -- [Directory Structure](#directory-structure) -- [Development and Release Process](#development-and-release-process) - - [Overview](#overview) - - [Production Releases](#production-releases) - - [Development branch](#development-branch) -- [License](#license) +[ForkDiff](https://layr-labs.github.io/optimism)   -<!--TOC--> +This repo contains our fork of [optimism](https://github.com/ethereum-optimism/optimism) to support EigenDA.   -## What is Optimism? +- [EigenDA powered Optimism-Fork](#eigenda-powered-optimism-fork) + - [EigenDA Proxy](#eigenda-proxy) + - [Fork Features](#fork-features) + - [1. High Throughput (large parallel blobs)](#1-high-throughput-large-parallel-blobs) + - [2. Failover (for Liveness)](#2-failover-for-liveness) + - [3. Security (for Safety)](#3-security-for-safety) + - [Testing](#testing) + - [CI](#ci) + - [Unit Tests](#unit-tests) + - [op-e2e Tests](#op-e2e-tests) + - [Kurtosis Devnet Tests](#kurtosis-devnet-tests) + - [Releases and Branching Strategy](#releases-and-branching-strategy)   -[Optimism](https://www.optimism.io/) is a project dedicated to scaling Ethereum's technology and expanding its ability to coordinate people from across the world to build effective decentralized economies and governance systems. The [Optimism Collective](https://www.optimism.io/vision) builds open-source software that powers scalable blockchains and aims to address key governance and economic challenges in the wider Ethereum ecosystem. Optimism operates on the principle of **impact=profit**, the idea that individuals who positively impact the Collective should be proportionally rewarded with profit. **Change the incentives and you change the world.** +## EigenDA Proxy   -In this repository you'll find numerous core components of the OP Stack, the decentralized software stack maintained by the Optimism Collective that powers Optimism and forms the backbone of blockchains like [OP Mainnet](https://explorer.optimism.io/) and [Base](https://base.org). The OP Stack is designed to be aggressively open-source — you are welcome to explore, modify, and extend this code. 
+OP's altda spec has both op-batcher and op-nodes interface with AltDA layers via a [da-server](https://specs.optimism.io/experimental/alt-da.html#da-server). EigenDA's implementation of the da-server is called the [EigenDA Proxy](https://github.com/Layr-Labs/eigenda-proxy). The proxy hides EigenDA's async grpc API behind a simple POST/GET sync (blocking) REST API.   -## Documentation +## Fork Features   -- If you want to build on top of OP Mainnet, refer to the [Optimism Documentation](https://docs.optimism.io) -- If you want to build your own OP Stack based blockchain, refer to the [OP Stack Guide](https://docs.optimism.io/stack/getting-started) and make sure to understand this repository's [Development and Release Process](#development-and-release-process) +There are 3 important features for any rollup: +1. Performance +2. Liveness +3. Safety   -## Specification +The upstream code in optimism's repo currently does not support these features for altda rollups. The goal of our fork is to provide these for downstream altda rollups. We will try to upstream as many changes as possible, but the op-team has stopped being receptive to our PRs since the pectra upgrade.   -Detailed specifications for the OP Stack can be found within the [OP Stack Specs](https://github.com/ethereum-optimism/specs) repository. +We describe below the current feature-set of the upstream altda code. See release notes for the latest features.   -## Community +### 1. High Throughput (large parallel blobs)   -General discussion happens most frequently on the [Optimism discord](https://discord.gg/optimism). -Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/). +Because POSTs to the EigenDA Proxy are blocking (see [EigenDA Proxy](#eigenda-proxy) section), the throughput which a rollup can achieve is limited by the number and size of parallel blobs it can submit. The upstream code supports [parallel blobs submissions](https://github.com/ethereum-optimism/optimism/pull/11698) pre-holocene, but the [Holocene strict ordering rules](https://specs.optimism.io/protocol/holocene/derivation.html) have broken that implementation.   -## Contributing - -The OP Stack is a collaborative project. By collaborating on free, open software and shared standards, the Optimism Collective aims to prevent siloed software development and rapidly accelerate the development of the Ethereum ecosystem. Come contribute, build the future, and redefine power, together. - -[CONTRIBUTING.md](./CONTRIBUTING.md) contains a detailed explanation of the contributing process for this repository. Make sure to use the [Developer Quick Start](./CONTRIBUTING.md#development-quick-start) to properly set up your development environment. +We will implement a new parallel blobs submission mechanism which is compatible with the Holocene strict ordering rules, and also enable submitting large blobs (EigenDA allows blobs up to 16MiB currently).   -[Good First Issues](https://github.com/ethereum-optimism/optimism/issues?q=is:open+is:issue+label:D-good-first-issue) are a great place to look for tasks to tackle if you're not sure where to start, and see [CONTRIBUTING.md](./CONTRIBUTING.md) for info on larger projects. - -## Security Policy and Vulnerability Reporting +### 2. Failover (for Liveness)   -Please refer to the canonical [Security Policy](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md) document for detailed information about how to report vulnerabilities in this codebase. 
-Bounty hunters are encouraged to check out the [Optimism Immunefi bug bounty program](https://immunefi.com/bounty/optimism/). -The Optimism Immunefi program offers up to $2,000,042 for in-scope critical vulnerabilities. +The upstream altda code does not support failover. If the EigenDA network goes down, the rollup will be stuck.   -## Directory Structure   +We will implement a failover mechanism to allow the rollup to continue processing transactions even if the EigenDA network is down.   -<pre> -├── <a href="./docs">docs</a>: A collection of documents including audits and post-mortems -├── <a href="./kurtosis-devnet">kurtosis-devnet</a>: OP-Stack Kurtosis devnet -├── <a href="./op-batcher">op-batcher</a>: L2-Batch Submitter, submits bundles of batches to L1 -├── <a href="./op-chain-ops">op-chain-ops</a>: State surgery utilities -├── <a href="./op-challenger">op-challenger</a>: Dispute game challenge agent -├── <a href="./op-e2e">op-e2e</a>: End-to-End testing of all bedrock components in Go -├── <a href="./op-node">op-node</a>: rollup consensus-layer client -├── <a href="./op-preimage">op-preimage</a>: Go bindings for Preimage Oracle -├── <a href="./op-program">op-program</a>: Fault proof program -├── <a href="./op-proposer">op-proposer</a>: L2-Output Submitter, submits proposals to L1 -├── <a href="./op-service">op-service</a>: Common codebase utilities -├── <a href="./op-wheel">op-wheel</a>: Database utilities -├── <a href="./ops">ops</a>: Various operational packages -├── <a href="./packages">packages</a> -│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: OP Stack smart contracts -├── <a href="./.semgrep">semgrep</a>: Semgrep rules and tests -</pre> +### 3. Security (for Safety)   -## Development and Release Process +The upstream derivation pipeline and challenger code do not currently support the EigenDA security model.   -### Overview +Because making altda fraud proofs secure is very involved, we have opted to first secure zk integrations like [op-succinct](https://github.com/succinctlabs/op-succinct) and [risc0-kailua](https://github.com/risc0/kailua) by using [op-rs](https://op-rs.github.io/kona/)'s stack. See our [Hokulea](https://github.com/Layr-Labs/hokulea) repo for the latest on this.   -Please read this section carefully if you're planning to fork or make frequent PRs into this repository. +## Testing   -### Production Releases +### CI   -Production releases are always tags, versioned as `<component-name>/v<semver>`. -For example, an `op-node` release might be versioned as `op-node/v1.1.2`, and smart contract releases might be versioned as `op-contracts/v1.0.0`. -Release candidates are versioned in the format `op-node/v1.1.2-rc.1`. -We always start with `rc.1` rather than `rc`. +OP uses circleci for CI, but we migrated to github actions for this fork. The unit and op-e2e tests are purely golang and so run as part of the [test-golang.yml](./.github/workflows/test-golang.yml) github workflow, whereas the kurtosis tests are run as part of the [kurtosis-devnet.yml](./.github/workflows/kurtosis-devnet.yml) workflow.   -For contract releases, refer to the GitHub release notes for a given release which will list the specific contracts being released. Not all contracts are considered production ready within a release and many are under active development. +### Unit Tests   -Tags of the form `v<semver>`, such as `v1.1.4`, indicate releases of all Go code only, and **DO NOT** include smart contracts. -This naming scheme is required by Golang. 
-In the above list, this means these `v<semver>` releases contain all `op-*` components and exclude all `contracts-*` components. +For each feature we add simple unit tests where applicable.   -`op-geth` embeds upstream geth’s version inside its own version as follows: `vMAJOR.GETH_MAJOR GETH_MINOR GETH_PATCH.PATCH`. -Basically, geth’s version is our minor version. -For example if geth is at `v1.12.0`, the corresponding op-geth version would be `v1.101200.0`. -Note that we pad out to three characters for the geth minor version and two characters for the geth patch version. -Since we cannot left-pad with zeroes, the geth major version is not padded. +### op-e2e Tests   -See the [Node Software Releases](https://docs.optimism.io/builders/node-operators/releases) page of the documentation for more information about releases for the latest node components. +We also add integration tests using op-e2e's framework. These tests are very useful as they are run purely in golang in a single process with very fast block times, but they are limited in that the proxy is not spun up and the batcher available there is only a fake.   -The full set of components that have releases are: +### Kurtosis Devnet Tests   -- `op-batcher` -- `op-contracts` -- `op-challenger` -- `op-node` -- `op-proposer` +For full e2e tests we leverage optimism's [kurtosis devnet](./kurtosis-devnet/README.md). See the [eigenda.yaml](./kurtosis-devnet/eigenda.yaml) template and the values files in [eigenda-template-values/](./kurtosis-devnet/eigenda-template-values/) to spin up a devnet with the eigenda-proxy in memstore mode or connected to the holesky testnet, as well as the available eigenda group commands in the [justfile](./kurtosis-devnet/justfile): +```sh +$ just --list +Available recipes: + ...   + [eigenda] + eigenda-devnet-start VALUES_FILE ENCLAVE_PREFIX # Start an EigenDA devnet with the given values file (see options in eigenda-template-values/) + eigenda-devnet-clean ENCLAVE_NAME + eigenda-devnet-test-memstore *ARGS # Runs all tests that end with the `_Memstore` suffix + eigenda-devnet-test-holesky *ARGS # Runs all tests that end with the `_Holesky` suffix + eigenda-devnet-add-tx-fuzzer ENCLAVE_NAME *ARGS + eigenda-devnet-failover ENCLAVE_NAME # Cause proxy to return 503 errors to batcher, as a signal to fail over to ethDA + eigenda-devnet-failback ENCLAVE_NAME + eigenda-devnet-grafana ENCLAVE_NAME + eigenda-devnet-sync-status ENCLAVE_NAME + eigenda-devnet-configs ENCLAVE_NAME + eigenda-devnet-change-proxy-backend backend ENCLAVE_NAME + eigenda-devnet-restart-batcher ENCLAVE_NAME # Restart batcher with new flags or image +```   -### Development branch +## Releases and Branching Strategy   -The primary development branch is [`develop`](https://github.com/ethereum-optimism/optimism/tree/develop/). -`develop` contains the most up-to-date software that remains backwards compatible with the latest experimental [network deployments](https://docs.optimism.io/chain/networks). -If you're making a backwards compatible change, please direct your pull request towards `develop`. +Our main development branch, `eigenda-develop`, contains a linear history with new feature work and fixes, as well as upstream merges. We maintain this branch to be able to track the entire history of our fork changes. It might also be useful for some teams who want to use our fork directly as their upstream, so that they can just merge/rebase our latest changes (which will incorporate the OP changes as well).   
-**Changes to contracts within `packages/contracts-bedrock/src` are usually NOT considered backwards compatible.** -Some exceptions to this rule exist for cases in which we absolutely must deploy some new contract after a tag has already been fully deployed. -If you're changing or adding a contract and you're unsure about which branch to make a PR into, default to using a feature branch. -Feature branches are typically used when there are conflicts between 2 projects touching the same code, to avoid conflicts from merging both into `develop`. +For teams that want/need more flexibility in how they manage their own fork, we also create release-specific branches which contain cleaned up history of commits on top of a specific upstream release. For example, the second eigenda-fork release in the picture below would be named `op-batcher/v1.11.2-eigenda.2`, and will consist of a cleaned-up history of commits (one per feature/service pair) on top of the upstream [op-batcher/v1.11.2](https://github.com/ethereum-optimism/optimism/releases/tag/op-batcher%2Fv1.11.2) release. We will strive to make our releases on top of op [production releases](https://github.com/ethereum-optimism/optimism?tab=readme-ov-file#production-releases), unless an urgent fix is needed.   -## License +![](./assets/fork-branching-and-releases.png)   -All other files within this repository are licensed under the [MIT License](https://github.com/ethereum-optimism/optimism/blob/master/LICENSE) unless stated otherwise. +Fork developers should consult the [Fork Release Runbook](./docs/handbook/fork-release-runbook.md) for more details on how to make a new release.
diff --git ethereum-optimism/optimism/assets/EigenDA_TextLogo_White.svg layr-labs/optimism/assets/EigenDA_TextLogo_White.svg new file mode 100644 index 0000000000000000000000000000000000000000..9037e313f9cf60be953f733db175a930c981d73b --- /dev/null +++ layr-labs/optimism/assets/EigenDA_TextLogo_White.svg @@ -0,0 +1,18 @@ +<svg width="1101" height="482" viewBox="0 0 1101 482" fill="none" xmlns="http://www.w3.org/2000/svg"> +<g clip-path="url(#clip0_344_225)"> +<path fill-rule="evenodd" clip-rule="evenodd" d="M142.241 10.5986V57.4135H188.725V10.5986H142.241Z" fill="white"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M326.301 291.488V197.858H233.349V104.228H186.865V197.858H93.9131V10.5986H0.945488V197.858H93.9131V385.117H186.865H233.349V478.747H279.833V385.117H324.74H326.301H372.785V291.488H326.301Z" fill="white"/> +<path d="M434.29 234.71L433.266 226.719C447.058 221.612 449.47 214.582 450.494 200.521C451.519 189.976 451.519 175.916 451.519 164.74V79.7801C451.519 68.6045 451.519 54.5285 450.494 43.9991C449.47 29.9231 447.042 22.9245 433.266 17.8017L434.29 9.81006C447.751 10.7558 457.036 10.7558 465.642 10.7558H529.055C543.178 10.7558 553.881 10.7558 567.311 9.81006C569.722 26.0928 571.094 37.9147 572.15 55.49L562.14 57.082C557.664 45.2601 553.849 38.561 548.679 33.4224C543.509 28.2996 535.58 25.746 529.023 24.7845C520.401 23.492 512.819 23.1925 499.39 23.1925C485.96 23.1925 481.121 25.746 480.096 38.5295C479.072 48.4284 479.072 59.935 479.072 71.426V112.314C490.799 112.314 514.932 111.668 525.934 110.722C541.444 109.13 546.283 104.023 551.453 89.3165L559.744 90.9085C558.72 103.377 558.72 110.391 558.72 118.367C558.72 126.343 558.72 133.704 559.744 146.172L551.453 147.764C546.283 133.073 541.444 128.282 525.934 126.359C514.9 125.066 490.436 124.766 479.072 124.766V167.893C479.072 179.069 479.765 196.344 480.096 202.728C480.79 218.396 486.653 221.249 502.164 221.249C517.674 221.249 529.402 220.934 538.686 219.657C550.744 218.065 556.261 213.573 561.431 206.874C565.577 200.789 569.013 194.09 573.489 182.268L582.805 183.86C581.087 203.028 578.329 218.696 574.514 235.294C549.357 234.001 518.005 233.702 473.854 233.702C429.703 233.702 447.673 233.702 434.211 234.663L434.274 234.71H434.29Z" fill="white"/> +<path d="M623.473 134.713C628.312 122.576 629.336 116.491 629.336 113.307C629.336 110.123 628.312 110.423 626.578 110.423C623.126 110.423 616.237 115.53 600.396 132.79L593.839 127.352C613.495 100.839 627.618 88.7019 638.983 88.7019C650.348 88.7019 652.775 94.7862 652.775 104.37C652.775 113.954 647.274 128.644 642.104 141.759L619.705 200.537C615.922 210.121 615.229 212.675 615.229 213.967C615.229 215.26 616.616 217.167 618.681 217.167C622.464 217.167 629.352 213.668 646.249 191.3L653.138 195.777C634.869 228.674 618.996 237.627 606.938 237.627C594.879 237.627 592.121 231.243 592.121 222.936C592.121 214.629 594.532 208.56 602.792 187.785L623.473 134.76V134.729V134.713ZM650.017 0.872803C660.357 0.872803 668.286 8.21816 668.286 17.8018C668.286 27.3855 660.357 34.7308 650.017 34.7308C639.676 34.7308 631.748 27.3855 631.748 17.8018C631.748 8.21816 639.676 0.872803 650.017 0.872803Z" fill="white"/> +<path d="M781.366 31.9882C789.642 40.6261 792.731 51.4707 792.731 63.2927C792.731 94.5971 767.905 116.964 737.578 116.964C707.251 116.964 710.703 112.819 700.693 105.142C690.022 112.819 687.595 120.164 687.595 127.509C687.595 134.855 695.523 142.846 711.034 143.477L752.726 144.769C786.174 145.715 802.346 164.252 802.346 188.211C802.346 212.17 771.656 242.198 723.754 242.198C675.851 242.198 665.149 
226.23 665.149 204.494C665.149 182.757 673.077 177.65 692.717 165.198C677.901 160.406 667.891 149.861 667.891 134.839C667.891 119.817 674.086 109.587 691.693 97.1507C683.764 87.2518 679.966 77.0219 679.966 63.2927C679.966 33.2492 705.816 9.62109 736.143 9.62109C766.471 9.62109 761.3 16.0049 780.956 16.0049C800.612 16.0049 801.637 15.6897 813.695 14.7124L816.454 20.1505L813.695 31.9724H781.287H781.35L781.366 31.9882ZM746.562 169.028C731.052 169.028 718.647 169.028 704.508 168.082C692.449 178.612 687.61 189.157 687.61 199.087C687.61 209.018 700.362 224.339 729.302 224.339C758.242 224.339 783.431 208.671 783.431 193.681C783.431 178.69 772.397 169.075 746.547 169.075L746.578 169.044L746.562 169.028ZM706.589 61.3539C706.589 89.4585 718.316 104.181 737.609 104.181C756.903 104.181 766.581 93.0051 766.581 64.8847C766.581 36.7642 754.522 22.4045 735.56 22.4045C716.598 22.4045 706.589 34.5417 706.589 61.3854V61.3539Z" fill="white"/> +<path d="M883.003 90.9877C916.089 90.9877 941.262 113.654 941.262 144.344C941.262 175.034 941.262 149.136 940.568 151.689H834.407C833.714 155.535 833.714 159.035 833.714 163.827C833.714 196.723 855.088 219.09 882.672 219.09C910.257 219.09 918.17 212.06 932.293 193.224L940.584 198.016C930.244 221.975 909.894 241.788 879.236 241.788C848.578 241.788 807.548 207.93 807.548 168.949C807.548 129.969 840.302 91.0035 883.019 91.0035V90.9719L883.003 90.9877ZM910.919 138.922C910.588 117.201 894.715 105.379 875.421 105.379C856.128 105.379 840.949 120.07 836.803 138.922H910.919Z" fill="white"/> +<path d="M1084.31 92.4532C1084.31 102.352 1084.31 113.528 1085 123.442C1086.03 136.525 1088.12 141.664 1100.84 146.456L1099.82 153.801C1087.76 152.855 1080.16 152.855 1072.25 152.855C1064.34 152.855 1056.74 152.855 1044.68 153.801L1043.66 146.456C1056.42 141.664 1058.14 136.557 1059.17 123.442C1060.19 113.543 1060.19 102.368 1060.19 92.4532V58.5952C1060.19 40.7204 1048.46 28.5517 1030.19 28.5517C1011.93 28.5517 991.576 43.5734 991.576 64.0017V92.4532C991.576 102.352 991.576 113.528 992.269 123.442C993.294 136.525 995.39 141.664 1008.11 146.456L1007.09 153.801C995.028 152.855 987.43 152.855 979.502 152.855C971.573 152.855 963.991 152.855 951.933 153.801L950.908 146.456C963.66 141.664 965.394 136.557 966.419 123.442C967.443 113.543 967.443 102.368 967.443 92.4532V84.4616C967.443 74.5627 967.443 64.3327 966.419 54.4181C965.394 41.3352 961.942 38.4506 949.521 36.1966V29.4975C965.363 22.4674 973.985 17.3603 986.043 7.13037L993.278 9.68391C992.254 19.9138 991.56 28.5517 991.213 36.5276L991.544 36.8428C1000.86 20.5601 1017.06 7.77664 1037.74 7.77664C1058.42 7.77664 1068.77 15.122 1077.06 27.5902C1081.53 34.9355 1084.29 42.9272 1084.29 55.6949V92.4374H1084.32L1084.31 92.4532Z" fill="white"/> +<path d="M675.82 481.111C646.88 481.111 627.587 478.558 612.77 478.558C597.953 478.558 595.194 478.558 581.765 479.519L580.724 471.528C594.501 466.405 596.913 459.375 597.953 445.299C598.993 434.753 598.993 420.677 598.993 409.486V324.415C598.993 313.224 598.993 299.148 597.953 288.603C596.913 274.527 594.501 267.497 580.724 262.374L581.765 254.382C595.194 255.344 604.51 255.344 613.117 255.344C634.128 255.344 649.985 253.736 673.408 253.736C743.363 253.736 805.719 292.118 805.719 367.92C805.719 443.722 752.994 481.143 675.82 481.143V481.111ZM673.393 466.074C737.136 466.074 774.005 430.576 774.005 368.204C774.005 305.831 729.555 268.096 670.634 268.096C611.714 268.096 628.595 270.334 627.571 283.133C626.53 293.048 626.53 304.555 626.53 316.077V412.67C626.53 423.862 626.877 441.137 627.571 447.537C629.289 
463.851 638.258 466.089 673.393 466.089V466.074Z" fill="white"/> +<path d="M895.85 249.559H902.391L966.135 409.47C970.265 419.7 976.475 434.738 981.646 445.283C988.187 459.359 994.051 466.074 1005.42 471.512L1004.38 479.503C990.945 478.542 984.735 478.542 976.129 478.542C967.522 478.542 954.77 478.542 941.325 479.503L940.285 471.512C949.584 467.681 953.383 464.797 953.383 458.728C953.383 452.66 948.213 438.899 936.848 409.47L934.09 403.071H850.012L847.601 409.47C835.542 438.899 833.477 451.052 833.477 458.397C833.477 465.742 836.583 467.997 846.576 471.512L845.536 479.503C832.09 478.542 825.202 478.542 816.596 478.542C807.989 478.542 803.166 478.542 791.092 479.503L790.051 471.512C801.416 466.074 807.28 459.359 813.821 445.283C818.992 434.738 825.186 419.7 829.332 409.47L895.834 249.559H895.85ZM893.438 298.801L855.876 389.31H928.92L893.422 298.801H893.438Z" fill="white"/> +</g> +<defs> +<clipPath id="clip0_344_225"> +<rect width="1100.07" height="480.443" fill="white" transform="translate(0.803467 0.77832)"/> +</clipPath> +</defs> +</svg>
diff --git ethereum-optimism/optimism/assets/fork-branching-and-releases.png layr-labs/optimism/assets/fork-branching-and-releases.png new file mode 100644 index 0000000000000000000000000000000000000000..ba05f2a7b7ab5b8d00762fa780840eba4ac786ef Binary files /dev/null and layr-labs/optimism/assets/fork-branching-and-releases.png differ
diff --git ethereum-optimism/optimism/docs/handbook/fork-release-runbook.md layr-labs/optimism/docs/handbook/fork-release-runbook.md new file mode 100644 index 0000000000000000000000000000000000000000..1e5bb63e65832d829d7393a56aab3ad958e11554 --- /dev/null +++ layr-labs/optimism/docs/handbook/fork-release-runbook.md @@ -0,0 +1,37 @@ +# Fork Release Runbook + +This document describes the process for releasing a new version of the EigenDA powered Optimism-Fork. It adds details to the explanation in the main [README](../../README.md#releases-and-branching-strategy). + +![](../../assets/fork-branching-and-releases.png) + +## Example Release For op-batcher/v1.11.2-eigenda.2 + +First we update the eigenda-develop branch: + +```bash +git checkout eigenda-develop +git checkout -b merge-op-batcher/v1.11.2 +git merge op-batcher/v1.11.2 +# fix any conflicts or new issues after the merge +git commit -a -m "chore: fix issues after merging op-batcher/v1.11.2" +git push +# Create a PR to get review on the fixes/new stuff - make sure to merge with a merge commit +``` + +Then we make the cleaned-up release branch/tag: + +```bash +git checkout op-node/v1.11.1-eigenda.1 +git checkout -b op-batcher/v1.11.2-eigenda.2 +git rebase op-batcher/v1.11.2 +# cherry pick all the new commits (including the fixes after the merge) +# Can also do it manually: `git cherry-pick <fixA> <featC> <fixB>` +git cherry-pick op-node/v1.11.1-eigenda.1^..eigenda-develop +# cleanup history +git rebase -i op-batcher/v1.11.2 +# tag the release +git tag op-batcher/v1.11.2-eigenda.2 +git push --tags +``` + +You can also do the rebase first if that is preferred. In any case, once both are done, make sure that they contain the same content by checking that `git diff eigenda-develop op-batcher/v1.11.2-eigenda.2` is empty.
diff --git ethereum-optimism/optimism/fork.yaml layr-labs/optimism/fork.yaml new file mode 100644 index 0000000000000000000000000000000000000000..a4ed777106874c1ce3c6b2c5cc62c58bfed25c3b --- /dev/null +++ layr-labs/optimism/fork.yaml @@ -0,0 +1,92 @@ +title: "layr-labs/optimism" # Define the HTML page title +logo: "logo.png" +footer: | # define the footer with markdown + EigenDA's [OP-Fork](https://github.com/layr-labs/optimism) fork overview - created with [Forkdiff](https://github.com/protolambda/forkdiff) +base: + name: ethereum-optimism/optimism + url: https://github.com/ethereum-optimism/optimism + # ref: refs/tags/op-batcher/v1.15.0 + hash: 061043e7ef21343f709768651261fa43f5714f6b +fork: + name: layr-labs/optimism + url: https://github.com/layr-labs/optimism + ref: refs/heads/eigenda-develop +def: + title: "EigenDA x Optimism fork diff" + description: | # description in markdown + The original optimism codebase can be found at [`github.com/ethereum-optimism/optimism`](https://github.com/ethereum-optimism/optimism). + And our fork at [`github.com/Layr-Labs/optimism`](https://github.com/Layr-Labs/optimism). + + sub: + - title: "OP Batcher" + description: | + Modifications to op-batcher. + sub: + # Adding this test section is the easy way to remove the tests from the main batcher section, so we can focus on the meaningful code changes. + - title: "Tests" + globs: + - "op-batcher/**/*_test.go" + - title: "Batcher" + globs: + - "op-batcher/**" + + - title: "OP Node" + description: | + Modifications to op-node. + sub: + # Adding this test section is the easy way to remove the tests from the main node section, so we can focus on the meaningful code changes. + - title: "Tests" + globs: + - "op-node/**/*_test.go" + - title: "Node" + globs: + - "op-node/**" + + - title: "OP AltDA Client" + description: | + Modifications to op-alt-da client. + sub: + # Adding this test section is the easy way to remove the tests from the main altda client section, so we can focus on the meaningful code changes. + - title: "Tests" + globs: + - "op-alt-da/**/*_test.go" + - "op-alt-da/**/*mock.go" + - title: "AltDA Client" + globs: + - "op-alt-da/**" + + - title: "Testing" + sub: + - title: "OP E2E Tests" + globs: + - "op-e2e/**" + - title: "Kurtosis Tests" + globs: + - "kurtosis-devnet/tests/**" + + - title: "CI/CD" + description: | + Replaced op's circleci with github actions relevant to testing our changes. + sub: + - title: "Github workflows" + globs: + - ".github/**" + - ".circleci/**" + + - title: "Kurtosis Devnet" + description: | + Added kurtosis devnet yaml files to spin up an op chain that uses altda, with an eigenda-proxy running either in memstore mode (to simulate interactions with an EigenDA network) or connected to the EigenDA holesky testnet. + sub: + - title: "Kurtosis Devnet" + globs: + - "kurtosis-devnet/**" +# files can be ignored globally, these will be listed in a separate grayed-out section, +# and do not count towards the total line count. +ignore: + - "**/go.mod" + - "**/go.sum" + - "**/.gitignore" + # Not very useful to see utils added for testing, so prefer to remove to reduce noise + # and focus on actually meaningful test changes/additions. + - "**/e2eutils/**" + - "op-e2e/actions/helpers/**"
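To rebuild the forkdiff page locally against this config, the same container image that the pages workflow uses can be invoked directly. A sketch, assuming the usual docker volume-mount pattern (the flags are the ones passed in pages.yml, and the resulting index.html is git-ignored):

```sh
# Needs enough local git history to reach the base hash pinned in fork.yaml.
docker run --rm -v "$PWD":/workspace protolambda/forkdiff:0.1.0 \
  -repo=/workspace -fork=/workspace/fork.yaml -out=/workspace/index.html
```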
diff --git ethereum-optimism/optimism/op-chain-ops/genesis/config.go layr-labs/optimism/op-chain-ops/genesis/config.go index df8a0efbd19dd4f157faf93ab5bbceb95406d059..9764e290219b50446087762b78239a20a67987ab 100644 --- ethereum-optimism/optimism/op-chain-ops/genesis/config.go +++ layr-labs/optimism/op-chain-ops/genesis/config.go @@ -1227,7 +1227,7 @@ if deployConfig.UseFaultProofs && (name == "OptimismPortal" || name == "L2OutputOracle" || name == "L2OutputOracleProxy") { continue } - if !deployConfig.UseAltDA && + if (!deployConfig.UseAltDA || deployConfig.DACommitmentType == altda.GenericCommitmentString) && (name == "DataAvailabilityChallenge" || name == "DataAvailabilityChallengeProxy") { continue
diff --git ethereum-optimism/optimism/op-service/testutils/fake_txmgr.go layr-labs/optimism/op-service/testutils/fake_txmgr.go new file mode 100644 index 0000000000000000000000000000000000000000..22a98c5218987ad1888a8d10d523f760a177553d --- /dev/null +++ layr-labs/optimism/op-service/testutils/fake_txmgr.go @@ -0,0 +1,81 @@ +package testutils + +import ( + "context" + "errors" + "math/big" + + "github.com/ethereum-optimism/optimism/op-service/eth" + "github.com/ethereum-optimism/optimism/op-service/txmgr" + "github.com/ethereum/go-ethereum/common" + "github.com/ethereum/go-ethereum/core/types" + "github.com/ethereum/go-ethereum/log" + "github.com/ethereum/go-ethereum/rpc" +) + +// FakeTxMgr is a fake txmgr.TxManager for testing the op-batcher. +type FakeTxMgr struct { + log log.Logger + FromAddr common.Address + Closed bool + Nonce uint64 + errorEveryNthSend uint // 0 means never error, 1 means every send errors, etc. + sendCount uint +} + +var _ txmgr.TxManager = (*FakeTxMgr)(nil) + +func NewFakeTxMgr(log log.Logger, from common.Address) *FakeTxMgr { + return &FakeTxMgr{ + log: log, + FromAddr: from, + } +} + +func (f *FakeTxMgr) ErrorEveryNthSend(n uint) { + f.errorEveryNthSend = n +} + +func (f *FakeTxMgr) Send(ctx context.Context, candidate txmgr.TxCandidate) (*types.Receipt, error) { + // We currently only use the FakeTxMgr to test the op-batcher, which only uses SendAsync. + // Send would make it harder to track failures and manage nonces (prob need to add a mutex, etc). + // We can implement this if/when it's needed. + panic("FakeTxMgr does not implement Send") +} +func (f *FakeTxMgr) SendAsync(ctx context.Context, candidate txmgr.TxCandidate, ch chan txmgr.SendResponse) { + f.log.Debug("SendingAsync tx", "nonce", f.Nonce) + f.sendCount++ + var sendResponse txmgr.SendResponse + if f.errorEveryNthSend != 0 && f.sendCount%f.errorEveryNthSend == 0 { + sendResponse.Err = errors.New("errorEveryNthSend") + } else { + sendResponse.Receipt = &types.Receipt{ + BlockHash: common.Hash{}, + BlockNumber: big.NewInt(0), + } + sendResponse.Nonce = f.Nonce + f.Nonce++ + } + ch <- sendResponse +} +func (f *FakeTxMgr) From() common.Address { + return f.FromAddr +} +func (f *FakeTxMgr) BlockNumber(ctx context.Context) (uint64, error) { + return 0, nil +} +func (f *FakeTxMgr) API() rpc.API { + return rpc.API{} +} +func (f *FakeTxMgr) Close() { + f.Closed = true +} +func (f *FakeTxMgr) IsClosed() bool { + return f.Closed +} +func (f *FakeTxMgr) SuggestGasPriceCaps(ctx context.Context) (tipCap *big.Int, baseFee *big.Int, blobBaseFee *big.Int, err error) { + return nil, nil, nil, nil +} +func (f *FakeTxMgr) ChainID() eth.ChainID { + return eth.ChainID{} +}
diff --git ethereum-optimism/optimism/.gitignore layr-labs/optimism/.gitignore index ea86d7c6834808e07eacd8a556ccf14dbe932ea8..c2f5b94e64a034eff7831295bcaccd3cd1dc9d90 100644 --- ethereum-optimism/optimism/.gitignore +++ layr-labs/optimism/.gitignore @@ -17,6 +17,9 @@ dist artifacts cache   +# forkdiff output +index.html + !op-chain-ops/foundry/testdata/srcmaps/cache !op-chain-ops/foundry/testdata/srcmaps/artifacts
diff --git ethereum-optimism/optimism/go.mod layr-labs/optimism/go.mod index d2df6e7cf5fdb2c6b0eda2dd75717fd2d567490f..14534618f77417d89c0b3ae0d72f186d6e925ea8 100644 --- ethereum-optimism/optimism/go.mod +++ layr-labs/optimism/go.mod @@ -6,6 +6,7 @@ toolchain go1.23.8   require ( github.com/BurntSushi/toml v1.5.0 + github.com/Layr-Labs/eigenda-proxy/clients v1.0.1 github.com/Masterminds/semver/v3 v3.3.1 github.com/andybalholm/brotli v1.1.0 github.com/bmatcuk/doublestar/v4 v4.8.1
diff --git ethereum-optimism/optimism/go.sum layr-labs/optimism/go.sum index e078da3a917b57791082766d2df1c00437b273e3..a14b1de9a50829009c07bc64665b94dd2e4142a7 100644 --- ethereum-optimism/optimism/go.sum +++ layr-labs/optimism/go.sum @@ -32,6 +32,8 @@ github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Layr-Labs/eigenda-proxy/clients v1.0.1 h1:62NFB1fUauwQPGvTiOXhz1HKaL0fRhGy34tI9EpKz6I= +github.com/Layr-Labs/eigenda-proxy/clients v1.0.1/go.mod h1:JbDNvSritUGHErvzwB5Tb1IrVk7kea9DSBLKEOkBebE= github.com/Masterminds/semver/v3 v3.3.1 h1:QtNSWtVZ3nBfk8mAOu/B6v7FMJ+NHTIgUPi7rj+4nv4= github.com/Masterminds/semver/v3 v3.3.1/go.mod h1:4V+yj/TJE1HU9XfppCwVMZq3I84lprf4nC11bSS5beM= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
diff --git ethereum-optimism/optimism/kurtosis-devnet/.gitignore layr-labs/optimism/kurtosis-devnet/.gitignore index 7b6543377da0ee8836cad7dbff871e820554597b..22bb51ab5f89b1e455ce588aab2b14e6835ed770 100644 --- ethereum-optimism/optimism/kurtosis-devnet/.gitignore +++ layr-labs/optimism/kurtosis-devnet/.gitignore @@ -1,3 +1,2 @@ *-user.json -fileserver/upload-content/* -cmd/__debug_bin* +eigenda-secrets.json
diff --git ethereum-optimism/optimism/op-e2e/actions/helpers/l2_batcher.go layr-labs/optimism/op-e2e/actions/helpers/l2_batcher.go index 6ca2924aab4cf83f5aa489fb50013f967e397ef9..f3799e79b7d8e2393af1f1a5558d4cdebc22762d 100644 --- ethereum-optimism/optimism/op-e2e/actions/helpers/l2_batcher.go +++ layr-labs/optimism/op-e2e/actions/helpers/l2_batcher.go @@ -331,6 +331,20 @@ return data.Bytes() }   +func (s *L2Batcher) ActAltDAFailoverToEthDA(t Testing) { + if !s.l2BatcherCfg.UseAltDA { + t.Fatalf("cannot failover to ethda when already using ethda") + } + s.l2BatcherCfg.UseAltDA = false +} + +func (s *L2Batcher) ActAltDAFallbackToAltDA(t Testing) { + if s.l2BatcherCfg.UseAltDA { + t.Fatalf("cannot fallback to altDA when already using altDA") + } + s.l2BatcherCfg.UseAltDA = true +} + // ActL2BatchSubmit constructs a batch tx from previous buffered L2 blocks, and submits it to L1 func (s *L2Batcher) ActL2BatchSubmit(t Testing, txOpts ...func(tx *types.DynamicFeeTx)) { s.ActL2BatchSubmitRaw(t, s.ReadNextOutputFrame(t), txOpts...)
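These two actions flip the batcher's DA mode mid-test, so action tests can script a full failover round-trip. A sketch of the intended usage, assuming a batcher (*helpers.L2Batcher) that was created with UseAltDA set; the surrounding setup and the comments are illustrative:

// batcher starts out posting altDA commitments.
batcher.ActL2BatchBuffer(t)
batcher.ActL2BatchSubmit(t) // submitted via altDA

batcher.ActAltDAFailoverToEthDA(t) // altDA outage: switch to ethDA
batcher.ActL2BatchBuffer(t)
batcher.ActL2BatchSubmit(t) // submitted directly to ethDA

batcher.ActAltDAFallbackToAltDA(t) // altDA recovered: switch back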
diff --git ethereum-optimism/optimism/op-e2e/e2eutils/geth/wait.go layr-labs/optimism/op-e2e/e2eutils/geth/wait.go
index 8356058afda753028fa52584a58b7000e029a636..ce3b1d377e298f0d662f4651d32c3dc6e54308a6 100644
--- ethereum-optimism/optimism/op-e2e/e2eutils/geth/wait.go
+++ layr-labs/optimism/op-e2e/e2eutils/geth/wait.go
@@ -8,6 +8,7 @@
 	"math/big"
 	"strings"
 	"time"
 
+	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
 	"github.com/ethereum-optimism/optimism/op-node/rollup"
 	"github.com/ethereum-optimism/optimism/op-node/rollup/derive"
 	"github.com/ethereum/go-ethereum"
@@ -84,6 +85,33 @@
 			return nil, fmt.Errorf("receipt for transaction %s not found. tip block number is %d: %w", hash.Hex(), tip.NumberU64(), errTimeout)
 		case <-ticker.C:
 		}
 	}
+}
+
+// WaitForBlockWithTxFromSender waits for a block with a transaction from a specific sender address.
+// It starts from the current block and checks up to the next nBlocks blocks.
+// As soon as it finds a block that contains a tx from sender, it returns that block.
+// If no such block is found in the next nBlocks blocks, it returns an error.
+func WaitForBlockWithTxFromSender(sender common.Address, client *ethclient.Client, nBlocks uint64) (*types.Block, error) {
+	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+	defer cancel()
+	blockNum, err := client.BlockNumber(ctx)
+	if err != nil {
+		return nil, err
+	}
+	for n := blockNum; n < blockNum+nBlocks; n++ {
+		blockL1, err := WaitForBlock(big.NewInt(0).SetUint64(n), client)
+		if err != nil {
+			return nil, err
+		}
+		batcherTxCount, err := transactions.TransactionsBySenderCount(blockL1, sender)
+		if err != nil {
+			return nil, err
+		}
+		if batcherTxCount > 0 {
+			return blockL1, nil
+		}
+	}
+	return nil, fmt.Errorf("no block with tx from sender %s found in the next %d blocks", sender.Hex(), nBlocks)
 }
 
 type waitForBlockOptions struct {
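A usage sketch for the new helper, e.g. to confirm the batcher landed a transaction on L1. The l1Client (*ethclient.Client) and batcherAddr variables are assumptions for illustration:

// Illustrative: find the next L1 block (scanning up to 10 blocks) that
// contains a transaction from the batcher's sender address.
block, err := geth.WaitForBlockWithTxFromSender(batcherAddr, l1Client, 10)
require.NoError(t, err)
t.Logf("batcher tx included in L1 block %d", block.NumberU64())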
diff --git ethereum-optimism/optimism/op-e2e/e2eutils/setup.go layr-labs/optimism/op-e2e/e2eutils/setup.go index bfd1d123b484158ec961e1633a878362d21b4284..cbd8d2250c26910933303c8d1409b29e4b76ce8f 100644 --- ethereum-optimism/optimism/op-e2e/e2eutils/setup.go +++ layr-labs/optimism/op-e2e/e2eutils/setup.go @@ -1,6 +1,7 @@ package e2eutils   import ( + "log/slog" "math/big" "os" "path" @@ -52,6 +53,7 @@ ChannelTimeout uint64 L1BlockTime uint64 UseAltDA bool AllocType config.AllocType + LogLevel slog.Level }   func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { @@ -67,7 +69,7 @@ deployConfig.L1BlockTime = tp.L1BlockTime deployConfig.UseAltDA = tp.UseAltDA ApplyDeployConfigForks(deployConfig)   - logger := log.NewLogger(log.DiscardHandler()) + logger := log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stdout, tp.LogLevel, true)) require.NoError(t, deployConfig.Check(logger)) require.Equal(t, addresses.Batcher, deployConfig.BatchSenderAddress) require.Equal(t, addresses.Proposer, deployConfig.L2OutputOracleProposer)
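The new LogLevel field makes the deploy-config check logging visible in e2e tests, instead of discarding it. A sketch of how a test might opt into debug output; fields other than LogLevel are illustrative, and callers that leave LogLevel unset get slog's zero value (info level):

// Illustrative TestParams; LogLevel is a slog.Level, set here via geth's log package.
tp := &e2eutils.TestParams{
	ChannelTimeout: 120,
	L1BlockTime:    12,
	LogLevel:       log.LevelDebug,
}
dp := e2eutils.MakeDeployParams(t, tp)
_ = dp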
diff --git ethereum-optimism/optimism/op-e2e/e2eutils/transactions/count.go layr-labs/optimism/op-e2e/e2eutils/transactions/count.go index 0f4d41fe04786da83030e0f3465f48f7c4fd812c..a7815b4aa4d847445179a5cd523f454b48872951 100644 --- ethereum-optimism/optimism/op-e2e/e2eutils/transactions/count.go +++ layr-labs/optimism/op-e2e/e2eutils/transactions/count.go @@ -5,7 +5,8 @@ "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" )   -func TransactionsBySender(block *types.Block, sender common.Address) (int64, error) { +// TransactionsBySenderCount returns the number of transactions in the block that were sent by the given sender. +func TransactionsBySenderCount(block *types.Block, sender common.Address) (int64, error) { txCount := int64(0) for _, tx := range block.Transactions() { signer := types.NewCancunSigner(tx.ChainId()) @@ -19,3 +20,20 @@ } } return txCount, nil } + +// TransactionsBySender returns the transactions (possibly none) in the block that were sent by the given sender. +// It returns an error if any of the transactions in the block have an invalid signature. +func TransactionsBySender(block *types.Block, sender common.Address) ([]*types.Transaction, error) { + txs := make([]*types.Transaction, 0) + for _, tx := range block.Transactions() { + signer := types.NewCancunSigner(tx.ChainId()) + txSender, err := types.Sender(signer, tx) + if err != nil { + return nil, err + } + if txSender == sender { + txs = append(txs, tx) + } + } + return txs, nil +}
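The renamed count helper and the new slice-returning variant can be used together on the same block; a short sketch, where block and batcherAddr are illustrative variables from a surrounding test:

// Illustrative: count the batcher's txs in a block, then inspect them.
count, err := transactions.TransactionsBySenderCount(block, batcherAddr)
require.NoError(t, err)

txs, err := transactions.TransactionsBySender(block, batcherAddr)
require.NoError(t, err)
require.Len(t, txs, int(count)) // both helpers agree on the same block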