EigenDA x Optimism fork diff

diff: +1597 -2096
ignored: +71 -2

The original Optimism codebase can be found at github.com/ethereum-optimism/optimism, and our fork at github.com/Layr-Labs/optimism.

Modifications to op-batcher.

diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_config_provider_test.go layr-labs/optimism/op-batcher/batcher/channel_config_provider_test.go
index 95e51a921e5fdb3ef401758853d19b6f75a6647f..fccc26d64921974374f7e1f6462ee26b8841432c 100644
--- ethereum-optimism/optimism/op-batcher/batcher/channel_config_provider_test.go
+++ layr-labs/optimism/op-batcher/batcher/channel_config_provider_test.go
@@ -31,11 +31,12 @@ func TestDynamicEthChannelConfig_ChannelConfig(t *testing.T) {
 	calldataCfg := ChannelConfig{
 		MaxFrameSize:    120_000 - 1,
 		TargetNumFrames: 1,
+		DaType:          DaTypeCalldata,
 	}
 	blobCfg := ChannelConfig{
 		MaxFrameSize:    eth.MaxBlobDataSize - 1,
 		TargetNumFrames: 3, // gets closest to amortized fixed tx costs
-		UseBlobs:        true,
+		DaType:          DaTypeBlob,
 	}
 
 	tests := []struct {
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_manager_test.go layr-labs/optimism/op-batcher/batcher/channel_manager_test.go
index d7c8abcd87e91d0f5522b81ecac532a848d30182..460320e515c6b6a7a67b90859afd1169a811b42d 100644
--- ethereum-optimism/optimism/op-batcher/batcher/channel_manager_test.go
+++ layr-labs/optimism/op-batcher/batcher/channel_manager_test.go
@@ -211,7 +211,7 @@
 	_, err = m.TxData(eth.BlockID{}, false)
 	require.ErrorIs(err, io.EOF)
 
 	// requeue frame
-	m.TxFailed(txdata0.ID())
+	m.TxFailed(txdata0.ID(), false)
 
 	txdata1, err := m.TxData(eth.BlockID{}, false)
 	require.NoError(err)
@@ -290,11 +290,12 @@
 	calldataCfg := ChannelConfig{
 		MaxFrameSize:    120_000 - 1,
 		TargetNumFrames: 1,
+		DaType:          DaTypeCalldata,
 	}
 	blobCfg := ChannelConfig{
 		MaxFrameSize:    eth.MaxBlobDataSize - 1,
 		TargetNumFrames: 3, // gets closest to amortized fixed tx costs
-		UseBlobs:        true,
+		DaType:          DaTypeBlob,
 	}
 	calldataCfg.InitNoneCompressor()
 	blobCfg.InitNoneCompressor()
@@ -348,7 +349,7 @@
 	cfg := newFakeDynamicEthChannelConfig(l, 1000)
 
 	cfg.chooseBlobs = tc.chooseBlobsWhenChannelCreated
 	m := NewChannelManager(l, metrics.NoopMetrics, cfg, defaultTestRollupConfig)
-	require.Equal(t, tc.chooseBlobsWhenChannelCreated, m.defaultCfg.UseBlobs)
+	require.Equal(t, tc.chooseBlobsWhenChannelCreated, m.defaultCfg.DaType == DaTypeBlob)
 
 	// Seed channel manager with a block
 	rng := rand.New(rand.NewSource(99))
@@ -385,8 +386,8 @@
 		}
 	}
 
 	require.Equal(t, tc.numExpectedAssessments, cfg.assessments)
-	require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, data.asBlob)
-	require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, m.defaultCfg.UseBlobs)
+	require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, data.daType == DaTypeBlob)
+	require.Equal(t, tc.chooseBlobsWhenChannelSubmitted, m.defaultCfg.DaType == DaTypeBlob)
 	})
 }
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_test.go layr-labs/optimism/op-batcher/batcher/channel_test.go
index b36ce9311bcea7a1c692a777bc0ba626ce14a99f..3b847e420196c361115f240bd77d7f12bbbf057d 100644
--- ethereum-optimism/optimism/op-batcher/batcher/channel_test.go
+++ layr-labs/optimism/op-batcher/batcher/channel_test.go
@@ -131,7 +131,7 @@
 	require := require.New(t)
 	const n = 6
 	lgr := testlog.Logger(t, log.LevelWarn)
 	ch, err := newChannelWithChannelOut(lgr, metrics.NoopMetrics, ChannelConfig{
-		UseBlobs:        false,
+		DaType:          DaTypeCalldata,
 		TargetNumFrames: n,
 		CompressorConfig: compressor.Config{
 			CompressionAlgo: derive.Zlib,
@@ -172,7 +172,7 @@
 	require := require.New(t)
 	const n = eth.MaxBlobsPerBlobTx
 	lgr := testlog.Logger(t, log.LevelWarn)
 	ch, err := newChannelWithChannelOut(lgr, metrics.NoopMetrics, ChannelConfig{
-		UseBlobs:        true,
+		DaType:          DaTypeBlob,
 		TargetNumFrames: n,
 		CompressorConfig: compressor.Config{
 			CompressionAlgo: derive.Zlib,
@@ -305,13 +305,13 @@
 	require.Len(t, m.currentChannel.pendingTransactions, 1)
 
 	// Trying to mark an unknown pending transaction as failed
 	// shouldn't modify state
-	m.TxFailed(zeroFrameTxID(0))
+	m.TxFailed(zeroFrameTxID(0), false)
 	require.Equal(t, 0, m.currentChannel.PendingFrames())
 	require.Equal(t, expectedTxData, m.currentChannel.pendingTransactions[expectedChannelID.String()])
 
 	// Now we still have a pending transaction
 	// Let's mark it as failed
-	m.TxFailed(expectedChannelID)
+	m.TxFailed(expectedChannelID, false)
 	require.Empty(t, m.currentChannel.pendingTransactions)
 	// There should be a frame in the pending channel now
 	require.Equal(t, 1, m.currentChannel.PendingFrames())
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel.go layr-labs/optimism/op-batcher/batcher/channel.go
index 6b936c112d3465d6bf9d46d917ef1981c23add74..cb5bbd2f8d7e42c1a70396649d57c2d27f15fdbc 100644
--- ethereum-optimism/optimism/op-batcher/batcher/channel.go
+++ layr-labs/optimism/op-batcher/batcher/channel.go
@@ -45,8 +45,9 @@
 	}
 }
 
 // TxFailed records a transaction as failed. It will attempt to resubmit the data
-// in the failed transaction.
-func (c *channel) TxFailed(id string) {
+// in the failed transaction. failoverToEthDA should be set to true when using altDA
+// and altDA is down. This will switch the channel to submit frames to ethDA instead.
+func (c *channel) TxFailed(id string, failoverToEthDA bool) {
 	if data, ok := c.pendingTransactions[id]; ok {
 		c.log.Trace("marked transaction as failed", "id", id)
 		// Rewind to the first frame of the failed tx
@@ -57,7 +58,16 @@
 		delete(c.pendingTransactions, id)
 	} else {
 		c.log.Warn("unknown transaction marked as failed", "id", id)
 	}
-
+	if failoverToEthDA {
+		// We failover to calldata txs because in altda mode the channel and channelManager
+		// are configured to use a calldataConfigManager, as opposed to DynamicEthChannelConfig
+		// which can use both calldata and blobs. Failover should happen extremely rarely,
+		// and is only used while the altDA is down, so we can afford to be inefficient here.
+		// TODO: figure out how to switch to blobs/auto instead. Might need to make
+		// batcherService.initChannelConfig function stateless so that we can reuse it.
+		c.log.Info("Failing over to calldata txs", "id", c.ID())
+		c.cfg.DaType = DaTypeCalldata
+	}
 	c.metr.RecordBatchTxFailed()
 }
 
@@ -125,21 +135,21 @@
 	return c.channelBuilder.ID()
 }
 
 // NextTxData dequeues the next frames from the channel and returns them encoded in a tx data packet.
-// If cfg.UseBlobs is false, it returns txData with a single frame.
-// If cfg.UseBlobs is true, it will read frames from its channel builder
+// If cfg.DaType == DaTypeCalldata, it returns txData with a single frame.
+// Else when cfg.DaType == DaTypeBlob or DaTypeAltDA, it will read frames from its channel builder
 // until it either doesn't have more frames or the target number of frames is reached.
 //
 // NextTxData should only be called after HasTxData returned true.
 func (c *channel) NextTxData() txData {
 	nf := c.cfg.MaxFramesPerTx()
-	txdata := txData{frames: make([]frameData, 0, nf), asBlob: c.cfg.UseBlobs}
+	txdata := txData{frames: make([]frameData, 0, nf), daType: c.cfg.DaType}
 	for i := 0; i < nf && c.channelBuilder.HasPendingFrame(); i++ {
 		frame := c.channelBuilder.NextFrame()
 		txdata.frames = append(txdata.frames, frame)
 	}
 
 	id := txdata.ID().String()
-	c.log.Debug("returning next tx data", "id", id, "num_frames", len(txdata.frames), "as_blob", txdata.asBlob)
+	c.log.Debug("returning next tx data", "id", id, "num_frames", len(txdata.frames), "da_type", txdata.daType)
 	c.pendingTransactions[id] = txdata
 
 	return txdata
@@ -147,7 +157,7 @@
 }
 
 func (c *channel) HasTxData() bool {
 	if c.IsFull() || // If the channel is full, we should start to submit it
-		!c.cfg.UseBlobs { // If using calldata, we only send one frame per tx
+		c.cfg.DaType == DaTypeCalldata { // If using calldata, we only send one frame per tx
 		return c.channelBuilder.HasPendingFrame()
 	}
 	// Collect enough frames if channel is not full yet
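To make the failover concrete: when a DA request fails with failover requested, the channel's pending frames are rewound, its DaType flips to calldata, and the existing HasTxData/NextTxData machinery re-emits the same frames one per tx. An illustrative sketch using the methods changed above (the helper function is hypothetical, not fork code):

// Illustrative only: drain a channel's frames as calldata txs after failover.
func failoverAndDrain(ch *channel, failedTxID string) []txData {
	ch.TxFailed(failedTxID, true) // requeues the failed tx's frames; flips cfg.DaType to DaTypeCalldata
	var out []txData
	for ch.HasTxData() { // with DaTypeCalldata: true while any frame is pending
		out = append(out, ch.NextTxData()) // single-frame txData, daType == DaTypeCalldata
	}
	return out
}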
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_config.go layr-labs/optimism/op-batcher/batcher/channel_config.go
index bf0f5ffb4adbe8f97e59262cccbcf0cf52c354bb..5054bb39895077402a2b029b041884433bce0dfe 100644
--- ethereum-optimism/optimism/op-batcher/batcher/channel_config.go
+++ layr-labs/optimism/op-batcher/batcher/channel_config.go
@@ -46,9 +46,12 @@
 	// BatchType indicates whether the channel uses SingularBatch or SpanBatch.
 	BatchType uint
 
-	// UseBlobs indicates that this channel should be sent as a multi-blob
-	// transaction with one blob per frame.
-	UseBlobs bool
+	// DaType indicates how the frames in this channel should be sent to the L1.
+	DaType DaType
+}
+
+func (cc ChannelConfig) UseBlobs() bool {
+	return cc.DaType == DaTypeBlob
 }
 
 // ChannelConfig returns a copy of the receiver.
@@ -93,7 +96,7 @@
 	)
 }
 
 func (cc *ChannelConfig) MaxFramesPerTx() int {
-	if !cc.UseBlobs {
+	if cc.DaType == DaTypeCalldata {
 		return 1
 	}
 	return cc.TargetNumFrames
 }
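One subtlety of the boolean-to-enum migration: with three DA types, "not blobs" is no longer equivalent to "calldata", which is why MaxFramesPerTx now tests DaTypeCalldata explicitly rather than negating UseBlobs. An illustrative mapping (not fork code):

// Illustrative only: how old boolean checks map onto the new enum.
func describe(cfg ChannelConfig) string {
	switch cfg.DaType {
	case DaTypeBlob: // old: cfg.UseBlobs == true; cfg.UseBlobs() still reads this way
		return "blobs"
	case DaTypeAltDA: // new third case: !cfg.UseBlobs() also matches, but it is NOT calldata
		return "altda"
	default: // DaTypeCalldata; old: cfg.UseBlobs == false
		return "calldata"
	}
}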
diff --git ethereum-optimism/optimism/op-batcher/batcher/channel_manager.go layr-labs/optimism/op-batcher/batcher/channel_manager.go
index 1ea412c4b433734808accf94d437e1ca2949f155..309a4aa77eb2dd671bb33b37ee70d32a1f0065ca 100644
--- ethereum-optimism/optimism/op-batcher/batcher/channel_manager.go
+++ layr-labs/optimism/op-batcher/batcher/channel_manager.go
@@ -92,12 +92,13 @@
 	return s.blocks.Len() - s.blockCursor
 }
 
 // TxFailed records a transaction as failed. It will attempt to resubmit the data
-// in the failed transaction.
-func (s *channelManager) TxFailed(_id txID) {
+// in the failed transaction. failoverToEthDA should be set to true when using altDA
+// and altDA is down. This will switch the channel to submit frames to ethDA instead.
+func (s *channelManager) TxFailed(_id txID, failoverToEthDA bool) {
 	id := _id.String()
 	if channel, ok := s.txChannels[id]; ok {
 		delete(s.txChannels, id)
-		channel.TxFailed(id)
+		channel.TxFailed(id, failoverToEthDA)
 	} else {
 		s.log.Warn("transaction from unknown channel marked as failed", "id", id)
 	}
@@ -207,16 +208,16 @@
 	// Call provider method to reassess optimal DA type
 	newCfg := s.cfgProvider.ChannelConfig(isPectra)
 
 	// No change:
-	if newCfg.UseBlobs == s.defaultCfg.UseBlobs {
+	if newCfg.UseBlobs() == s.defaultCfg.UseBlobs() {
 		s.log.Debug("Recomputing optimal ChannelConfig: no need to switch DA type",
-			"useBlobs", s.defaultCfg.UseBlobs)
+			"useBlobs", s.defaultCfg.UseBlobs())
 		return s.nextTxData(channel)
 	}
 
 	// Change:
 	s.log.Info("Recomputing optimal ChannelConfig: changing DA type and requeing blocks...",
-		"useBlobsBefore", s.defaultCfg.UseBlobs,
-		"useBlobsAfter", newCfg.UseBlobs)
+		"useBlobsBefore", s.defaultCfg.UseBlobs(),
+		"useBlobsAfter", newCfg.UseBlobs())
 
 	// Invalidate the channel so its blocks
 	// get requeued:
@@ -317,7 +318,7 @@
 		"batch_type", cfg.BatchType,
 		"compression_algo", cfg.CompressorConfig.CompressionAlgo,
 		"target_num_frames", cfg.TargetNumFrames,
 		"max_frame_size", cfg.MaxFrameSize,
-		"use_blobs", cfg.UseBlobs,
+		"da_type", cfg.DaType.String(),
 	)
 	s.metr.RecordChannelOpened(pc.ID(), s.pendingBlocks())
diff --git ethereum-optimism/optimism/op-batcher/batcher/driver.go layr-labs/optimism/op-batcher/batcher/driver.go
index fb791d4d2166aacee057e34398108856363b97ab..393ce1a88dc02e931000c7ed2036fc1b6b3c9d1b 100644
--- ethereum-optimism/optimism/op-batcher/batcher/driver.go
+++ layr-labs/optimism/op-batcher/batcher/driver.go
@@ -780,14 +780,6 @@
 }
 
 // publishToAltDAAndL1 posts the txdata to the DA Provider and then sends the commitment to L1.
 func (l *BatchSubmitter) publishToAltDAAndL1(txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) {
-	// sanity checks
-	if nf := len(txdata.frames); nf != 1 {
-		l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf)
-	}
-	if txdata.asBlob {
-		l.Log.Crit("Unexpected blob txdata with AltDA enabled")
-	}
-
 	// when posting txdata to an external DA Provider, we use a goroutine to avoid blocking the main loop
 	// since it may take a while for the request to return.
 	goroutineSpawned := daGroup.TryGo(func() error {
@@ -827,16 +819,17 @@
 // This call will block if the txmgr queue is at the max-pending limit.
 // The method will block if the queue's MaxPendingTransactions is exceeded.
 func (l *BatchSubmitter) sendTransaction(txdata txData, queue *txmgr.Queue[txRef], receiptsCh chan txmgr.TxReceipt[txRef], daGroup *errgroup.Group) error {
 	var err error
-
-	// if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment.
-	if l.Config.UseAltDA {
+	var candidate *txmgr.TxCandidate
+	switch txdata.daType {
+	case DaTypeAltDA:
+		if !l.Config.UseAltDA {
+			l.Log.Crit("Received AltDA type txdata without AltDA being enabled")
+		}
+		// if Alt DA is enabled we post the txdata to the DA Provider and replace it with the commitment.
 		l.publishToAltDAAndL1(txdata, queue, receiptsCh, daGroup)
 		// we return nil to allow publishStateToL1 to keep processing the next txdata
 		return nil
-	}
-
-	var candidate *txmgr.TxCandidate
-	if txdata.asBlob {
+	case DaTypeBlob:
 		if candidate, err = l.blobTxCandidate(txdata); err != nil {
 			// We could potentially fall through and try a calldata tx instead, but this would
 			// likely result in the chain spending more in gas fees than it is tuned for, so best
@@ -844,12 +837,14 @@
 			// to just fail. We do not expect this error to trigger unless there is a serious bug
 			// or configuration issue.
 			return fmt.Errorf("could not create blob tx candidate: %w", err)
 		}
-	} else {
+	case DaTypeCalldata:
 		// sanity check
 		if nf := len(txdata.frames); nf != 1 {
 			l.Log.Crit("Unexpected number of frames in calldata tx", "num_frames", nf)
 		}
 		candidate = l.calldataTxCandidate(txdata.CallData())
+	default:
+		l.Log.Crit("Unknown DA type", "da_type", txdata.daType)
 	}
 
 	l.sendTx(txdata, false, candidate, queue, receiptsCh)
@@ -867,7 +862,7 @@
 	} else {
 		candidate.GasLimit = intrinsicGas
 	}
 
-	queue.Send(txRef{id: txdata.ID(), isCancel: isCancel, isBlob: txdata.asBlob}, *candidate, receiptsCh)
+	queue.Send(txRef{id: txdata.ID(), isCancel: isCancel, isBlob: txdata.daType == DaTypeBlob}, *candidate, receiptsCh)
 }
 
 func (l *BatchSubmitter) blobTxCandidate(data txData) (*txmgr.TxCandidate, error) {
@@ -906,17 +901,18 @@
 func (l *BatchSubmitter) recordFailedDARequest(id txID, err error) {
 	l.channelMgrMutex.Lock()
 	defer l.channelMgrMutex.Unlock()
+	failover := errors.Is(err, altda.ErrAltDADown)
 	if err != nil {
-		l.Log.Warn("DA request failed", logFields(id, err)...)
+		l.Log.Warn("DA request failed", append([]interface{}{"failoverToEthDA", failover}, logFields(id, err)...)...)
 	}
-	l.channelMgr.TxFailed(id)
+	l.channelMgr.TxFailed(id, failover)
 }
 
 func (l *BatchSubmitter) recordFailedTx(id txID, err error) {
 	l.channelMgrMutex.Lock()
 	defer l.channelMgrMutex.Unlock()
 	l.Log.Warn("Transaction failed to send", logFields(id, err)...)
-	l.channelMgr.TxFailed(id, false)
+	l.channelMgr.TxFailed(id, false)
 }
 
 func (l *BatchSubmitter) recordConfirmedTx(id txID, receipt *types.Receipt) {
diff --git ethereum-optimism/optimism/op-batcher/batcher/service.go layr-labs/optimism/op-batcher/batcher/service.go
index f884c57b3eabd4b21cab9cb8775c752f21679211..03031d7a494e6455bc229ae6522e29fe8ecb93e9 100644
--- ethereum-optimism/optimism/op-batcher/batcher/service.go
+++ layr-labs/optimism/op-batcher/batcher/service.go
@@ -218,30 +218,40 @@
 		MaxBlocksPerSpanBatch: cfg.MaxBlocksPerSpanBatch,
 		TargetNumFrames:       cfg.TargetNumFrames,
 		SubSafetyMargin:       cfg.SubSafetyMargin,
 		BatchType:             cfg.BatchType,
+		// DaType: set below
 	}
 
-	switch cfg.DataAvailabilityType {
-	case flags.BlobsType, flags.AutoType:
-		if !cfg.TestUseMaxTxSizeForBlobs {
-			// account for version byte prefix
-			cc.MaxFrameSize = eth.MaxBlobDataSize - 1
+	if bs.UseAltDA {
+		if cfg.DataAvailabilityType == flags.CalldataType {
+			cc.DaType = DaTypeAltDA
+		} else {
+			return fmt.Errorf("altDA is currently only supported with calldata DA Type")
 		}
-		cc.UseBlobs = true
-	case flags.CalldataType: // do nothing
-	default:
-		return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType)
-	}
+		if cc.MaxFrameSize > altda.MaxInputSize {
+			return fmt.Errorf("max frame size %d exceeds altDA max input size %d", cc.MaxFrameSize, altda.MaxInputSize)
+		}
+	} else {
 
-	if bs.UseAltDA && cc.MaxFrameSize > altda.MaxInputSize {
-		return fmt.Errorf("max frame size %d exceeds altDA max input size %d", cc.MaxFrameSize, altda.MaxInputSize)
+		switch cfg.DataAvailabilityType {
+		case flags.BlobsType, flags.AutoType:
+			if !cfg.TestUseMaxTxSizeForBlobs {
+				// account for version byte prefix
+				cc.MaxFrameSize = eth.MaxBlobDataSize - 1
+			}
+			cc.DaType = DaTypeBlob
+		case flags.CalldataType: // do nothing
+			cc.DaType = DaTypeCalldata
+		default:
+			return fmt.Errorf("unknown data availability type: %v", cfg.DataAvailabilityType)
+		}
 	}
 
 	cc.InitCompressorConfig(cfg.ApproxComprRatio, cfg.Compressor, cfg.CompressionAlgo)
 
-	if cc.UseBlobs && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
+	if cc.UseBlobs() && !bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
 		return errors.New("cannot use Blobs before Ecotone")
 	}
-	if !cc.UseBlobs && bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
+	if !cc.UseBlobs() && bs.RollupConfig.IsEcotone(uint64(time.Now().Unix())) {
 		bs.Log.Warn("Ecotone upgrade is active, but batcher is not configured to use Blobs!")
 	}
 
@@ -273,7 +283,7 @@
 	// copy blobs config and use hardcoded calldata fallback config for now
 	calldataCC := cc
 	calldataCC.TargetNumFrames = 1
 	calldataCC.MaxFrameSize = 120_000
-	calldataCC.UseBlobs = false
+	calldataCC.DaType = DaTypeCalldata
 	calldataCC.ReinitCompressorConfig()
 
 	bs.ChannelConfig = NewDynamicEthChannelConfig(bs.Log, 10*time.Second, bs.TxManager, cc, calldataCC)
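A consequence of the new initChannelConfig branch: when altDA is enabled, the batcher now refuses any DA type other than calldata at startup. Flag names below are taken from the op-batcher and op-alt-da CLI packages; the exact invocation is an illustration, not a verified command line:

op-batcher --altda.enabled=true --altda.da-server=http://localhost:3100 --data-availability-type=blobs ...
# fails at startup: "altDA is currently only supported with calldata DA Type"

With --data-availability-type=calldata the channel config is instead given DaTypeAltDA, and frames are posted to the da-server rather than directly to the L1 inbox.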
diff --git ethereum-optimism/optimism/op-batcher/batcher/test_batch_submitter.go layr-labs/optimism/op-batcher/batcher/test_batch_submitter.go
index 93083aa0dc6d7bf6cd46513c5069ea0b91582e67..f497a81209dc29f6aab8fd50bb0545af01dc4d10 100644
--- ethereum-optimism/optimism/op-batcher/batcher/test_batch_submitter.go
+++ layr-labs/optimism/op-batcher/batcher/test_batch_submitter.go
@@ -28,7 +28,7 @@
 	}
 	var candidate *txmgr.TxCandidate
 	var err error
 	cc := l.channelMgr.cfgProvider.ChannelConfig(true)
-	if cc.UseBlobs {
+	if cc.UseBlobs() {
 		candidate = l.calldataTxCandidate([]byte{})
 	} else if candidate, err = l.blobTxCandidate(emptyTxData); err != nil {
 		return err
diff --git ethereum-optimism/optimism/op-batcher/batcher/tx_data.go layr-labs/optimism/op-batcher/batcher/tx_data.go
index 0165f85f079ed61bda4874984b2f8169d83ffa99..79783e63f4b407e7440ba1afd15033f247da2f54 100644
--- ethereum-optimism/optimism/op-batcher/batcher/tx_data.go
+++ layr-labs/optimism/op-batcher/batcher/tx_data.go
@@ -9,6 +9,31 @@
 	"github.com/ethereum-optimism/optimism/op-node/rollup/derive/params"
 	"github.com/ethereum-optimism/optimism/op-service/eth"
 )
 
+// DaType determines how txData is submitted to L1.
+type DaType int
+
+const (
+	// DaTypeCalldata means that the (single) frame in the txData is submitted as calldata.
+	DaTypeCalldata DaType = iota
+	// DaTypeBlob means that the frame(s) in the txData are submitted as ethereum 4844 blobs.
+	DaTypeBlob
+	// DaTypeAltDA means that the frame(s) in the txData are submitted to an altda da-server.
+	DaTypeAltDA
+)
+
+func (d DaType) String() string {
+	switch d {
+	case DaTypeCalldata:
+		return "calldata"
+	case DaTypeBlob:
+		return "blob"
+	case DaTypeAltDA:
+		return "alt_da"
+	default:
+		return fmt.Sprintf("unknown_da_type_%d", d)
+	}
+}
+
 // txData represents the data for a single transaction.
 //
 // Note: The batcher currently sends exactly one frame per transaction. This
@@ -16,7 +41,8 @@
 // might change in the future to allow for multiple frames from possibly
 // different channels.
 type txData struct {
 	frames []frameData
-	asBlob bool // indicates whether this should be sent as blob
+	// daType represents the DA type which the frames data will be submitted to.
+	daType DaType
 }
 
 func singleFrameTxData(frame frameData) txData {
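Because DaTypeCalldata is declared first with iota, it is the zero value of DaType, so a config that never sets DaType defaults to calldata, just as the old UseBlobs: false default did. A small self-contained illustration (types re-declared locally so the snippet runs on its own):

package main

import "fmt"

type DaType int

const (
	DaTypeCalldata DaType = iota
	DaTypeBlob
	DaTypeAltDA
)

func (d DaType) String() string {
	switch d {
	case DaTypeBlob:
		return "blob"
	case DaTypeAltDA:
		return "alt_da"
	default:
		return "calldata"
	}
}

type ChannelConfig struct{ DaType DaType }

func (cc ChannelConfig) UseBlobs() bool { return cc.DaType == DaTypeBlob }

func main() {
	var cfg ChannelConfig       // zero value of the struct
	fmt.Println(cfg.DaType)     // "calldata": DaTypeCalldata is the enum's zero value
	fmt.Println(cfg.UseBlobs()) // false, the same default as the old UseBlobs boolean
}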
diff --git ethereum-optimism/optimism/op-batcher/flags/flags.go layr-labs/optimism/op-batcher/flags/flags.go
index dee068cdb8f45c581d91dc6997802c3f595d74e2..405fa1fdf81f7ac73fb06ab4053df900400a6dc8 100644
--- ethereum-optimism/optimism/op-batcher/flags/flags.go
+++ layr-labs/optimism/op-batcher/flags/flags.go
@@ -82,8 +82,10 @@
 		Usage:   "Maximum number of blocks to add to a span batch. Default is 0 - no maximum.",
 		EnvVars: prefixEnvVars("MAX_BLOCKS_PER_SPAN_BATCH"),
 	}
 	TargetNumFramesFlag = &cli.IntFlag{
-		Name:  "target-num-frames",
-		Usage: "The target number of frames to create per channel. Controls number of blobs per blob tx, if using Blob DA.",
+		Name: "target-num-frames",
+		Usage: "The target number of frames to create per channel. " +
+			"Controls number of blobs per blob tx, if using Blob DA, " +
+			"or number of frames per blob, if using altDA.",
 		Value:   1,
 		EnvVars: prefixEnvVars("TARGET_NUM_FRAMES"),
 	}

Modifications to op-node.

diff --git ethereum-optimism/optimism/op-node/rollup/derive/altda_data_source.go layr-labs/optimism/op-node/rollup/derive/altda_data_source.go
index 2945a2a9e57b264df389e321f8daf8f25aab9f04..315b40be6e851d0752e3f75ca8ee2438e64e208b 100644
--- ethereum-optimism/optimism/op-node/rollup/derive/altda_data_source.go
+++ layr-labs/optimism/op-node/rollup/derive/altda_data_source.go
@@ -40,8 +40,10 @@
 	// for the same origin and noop if the origin was already processed. It is also called if
 	// there is not commitment in the current origin.
 	if err := s.fetcher.AdvanceL1Origin(ctx, s.l1, s.id.ID()); err != nil {
 		if errors.Is(err, altda.ErrReorgRequired) {
+			s.log.Warn("reorg required, resetting altDA L1 origin", "origin", s.id)
 			return nil, NewResetError(errors.New("new expired challenge"))
 		}
+		s.log.Warn("failed to advance altDA L1 origin", "err", err)
 		return nil, NewTemporaryError(fmt.Errorf("failed to advance altDA L1 origin: %w", err))
 	}
 
@@ -58,6 +60,7 @@
 	}
 	// If the tx data type is not altDA, we forward it downstream to let the next
 	// steps validate and potentially parse it as L1 DA inputs.
 	if data[0] != params.DerivationVersion1 {
+		s.log.Info("forwarding downstream non altDA data", "version_byte", data[0])
 		return data, nil
 	}
 
@@ -79,7 +82,7 @@
 		// challenge for a new previously derived commitment expired.
 		return nil, NewResetError(err)
 	} else if errors.Is(err, altda.ErrExpiredChallenge) {
 		// this commitment was challenged and the challenge expired.
-		s.log.Warn("challenge expired, skipping batch", "comm", s.comm)
+		s.log.Warn("challenge expired, skipping batch", "comm", s.comm, "err", err)
 		s.comm = nil
 		// skip the input
 		return s.Next(ctx)
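The routing rule this data source applies can be summarized by the first byte of the batcher-inbox calldata. A hedged, self-contained sketch (the byte values mirror params.DerivationVersion0 and params.DerivationVersion1, i.e. 0x00 and 0x01; the function itself is illustrative, not fork code):

// route describes how a derivation stage dispatches on the version byte.
func route(data []byte) string {
	if len(data) == 0 {
		return "skip: empty calldata carries nothing to derive"
	}
	switch data[0] {
	case 0x00: // params.DerivationVersion0: frames live directly in this tx
		return "parse as L1 calldata frames"
	case 0x01: // params.DerivationVersion1: altDA commitment; fetch input from the DA server
		return "resolve via altDA"
	default: // anything else is forwarded downstream for later stages to validate
		return "forward downstream"
	}
}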

Modifications to op-alt-da, plus the op-e2e and kurtosis-devnet tests that exercise failover.

diff --git ethereum-optimism/optimism/op-alt-da/damgr_test.go layr-labs/optimism/op-alt-da/damgr_test.go
index b487fc85c98de323ec5f804850fdbb33b7387de1..9255134ed2cf16b8e00fe2ad80f140703fed9289 100644
--- ethereum-optimism/optimism/op-alt-da/damgr_test.go
+++ layr-labs/optimism/op-alt-da/damgr_test.go
@@ -53,12 +53,12 @@
 	require.Empty(t, state.expiredCommitments)
 	require.NoError(t, state.ExpireCommitments(bID(8)))
 	require.Empty(t, state.commitments)
 
-	state.Prune(bID(bn1))
-	require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment)
-	state.Prune(bID(7))
-	require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment)
-	state.Prune(bID(8))
-	require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment)
+	lastPrunedCommitment := state.Prune(bID(bn1))
+	require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment)
+	lastPrunedCommitment = state.Prune(bID(7))
+	require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment)
+	lastPrunedCommitment = state.Prune(bID(8))
+	require.Equal(t, l1Ref(bn1), lastPrunedCommitment)
 
 	// Track a commitment, challenge it, & then resolve it
 	c2 := RandomCommitment(rng)
@@ -83,12 +83,12 @@
 	state.ExpireChallenges(bID(30))
 	require.Empty(t, state.challenges)
 
 	// Now finalize everything
-	state.Prune(bID(20))
-	require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment)
-	state.Prune(bID(28))
-	require.Equal(t, l1Ref(bn1), state.lastPrunedCommitment)
-	state.Prune(bID(32))
-	require.Equal(t, l1Ref(bn2), state.lastPrunedCommitment)
+	lastPrunedCommitment = state.Prune(bID(20))
+	require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment)
+	lastPrunedCommitment = state.Prune(bID(28))
+	require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment)
+	lastPrunedCommitment = state.Prune(bID(32))
+	require.Equal(t, l1Ref(bn2), lastPrunedCommitment)
 }
 
 // TestExpireChallenges expires challenges and prunes the state for longer windows
@@ -175,8 +175,8 @@
 	err = state.ExpireCommitments(bID(11))
 	require.ErrorIs(t, err, ErrReorgRequired)
 
 	// pruning finalized block is safe. It should not prune any commitments yet.
-	state.Prune(bID(1))
-	require.Equal(t, eth.L1BlockRef{}, state.lastPrunedCommitment)
+	lastPrunedCommitment := state.Prune(bID(1))
+	require.Equal(t, eth.L1BlockRef{}, lastPrunedCommitment)
 
 	// Perform reorg back to bn2
 	state.ClearCommitments()
diff --git ethereum-optimism/optimism/op-alt-da/damock.go layr-labs/optimism/op-alt-da/damock.go
index ad388d0b26535bee1dabbccb5aeeb9ed38ecae1e..62ece16611649fc2cce40f4530ff468fc71a094a 100644
--- ethereum-optimism/optimism/op-alt-da/damock.go
+++ layr-labs/optimism/op-alt-da/damock.go
@@ -48,6 +48,8 @@ func (c *MockDAClient) DeleteData(key []byte) error {
 	return c.store.Delete(key)
 }
 
+// DAErrFaker is a DA client that can be configured to return errors on GetInput
+// and SetInput calls.
 type DAErrFaker struct {
 	Client *MockDAClient
 
@@ -105,12 +107,20 @@
 	return ErrNotEnabled
 }
 
 // FakeDAServer is a fake DA server for e2e tests.
-// It is a small wrapper around DAServer that allows for setting request latencies,
-// to mimic a DA service with slow responses (eg. eigenDA with 10 min batching interval).
+// It is a small wrapper around DAServer that allows for setting:
+//   - request latencies, to mimic a DA service with slow responses
+//     (eg. eigenDA with 10 min batching interval).
+//   - response status codes, to mimic a DA service that is down.
+//
+// We use this FakeDaServer as opposed to the DAErrFaker client in the op-e2e altda system tests
+// because the batcher service only has a constructor to build from CLI flags (no dependency injection),
+// meaning the da client is built from an rpc url config instead of being injected.
 type FakeDAServer struct {
 	*DAServer
 	putRequestLatency time.Duration
 	getRequestLatency time.Duration
+	// next failoverCount Put requests will return 503 status code for failover testing
+	failoverCount uint64
 }
 
 func NewFakeDAServer(host string, port int, log log.Logger) *FakeDAServer {
@@ -130,6 +140,11 @@
 }
 
 func (s *FakeDAServer) HandlePut(w http.ResponseWriter, r *http.Request) {
 	time.Sleep(s.putRequestLatency)
+	if s.failoverCount > 0 {
+		w.WriteHeader(http.StatusServiceUnavailable)
+		s.failoverCount--
+		return
+	}
 	s.DAServer.HandlePut(w, r)
 }
 
@@ -152,6 +167,11 @@
 }
 
 func (s *FakeDAServer) SetGetRequestLatency(latency time.Duration) {
 	s.getRequestLatency = latency
+}
+
+// SetResponseStatusForNRequests sets the next n Put requests to return 503 status code.
+func (s *FakeDAServer) SetPutFailoverForNRequests(n uint64) {
+	s.failoverCount = n
 }
 
 type MemStore struct {
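Usage in a system test then looks roughly like this (a sketch; the real call site is in op-e2e/system/altda/failover_test.go further below):

// Make the next 2 Put requests fail with 503, as if the DA service were down.
sys.FakeAltDAServer.SetPutFailoverForNRequests(2)
// Drive the chain: the batcher should post 2 channels as EthDA calldata,
// then resume posting altDA commitments once Puts succeed again.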
diff --git ethereum-optimism/optimism/op-alt-da/daclient.go layr-labs/optimism/op-alt-da/daclient.go
index 9f0bdab11fbd98d632edc8cf5e148413a7484c8e..dc690bbbbc88198bac1d328d8b6d95b5298b298a 100644
--- ethereum-optimism/optimism/op-alt-da/daclient.go
+++ layr-labs/optimism/op-alt-da/daclient.go
@@ -16,6 +16,11 @@
 // ErrInvalidInput is returned when the input is not valid for posting to the DA storage.
 var ErrInvalidInput = errors.New("invalid input")
 
+// ErrAltDADown is returned when the alt DA returns a 503 status code.
+// It is used to signify that the alt DA is down and the client should failover to the eth DA.
+// See https://github.com/ethereum-optimism/specs/issues/434
+var ErrAltDADown = errors.New("alt DA is down: failover to eth DA")
+
 // DAClient is an HTTP client to communicate with a DA storage service.
 // It creates commitments and retrieves input data + verifies if needed.
 type DAClient struct {
@@ -131,6 +136,9 @@
 	if err != nil {
 		return nil, err
 	}
 	defer resp.Body.Close()
+	if resp.StatusCode == http.StatusServiceUnavailable {
+		return nil, ErrAltDADown
+	}
 	if resp.StatusCode != http.StatusOK {
 		return nil, fmt.Errorf("failed to store data: %v", resp.StatusCode)
 	}
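Callers distinguish the failover signal from ordinary storage errors with errors.Is. A hedged sketch of the intended contract (the wrapper function is hypothetical; SetInput is the DAClient method changed above):

// post illustrates how a caller separates "DA is down, fail over"
// from retryable errors when storing data via the DAClient.
func post(ctx context.Context, client *altda.DAClient, data []byte) error {
	comm, err := client.SetInput(ctx, data)
	switch {
	case errors.Is(err, altda.ErrAltDADown):
		return err // 503 from the DA server: fail over to EthDA instead of retrying
	case err != nil:
		return err // other errors remain retryable against the DA server
	}
	_ = comm // success: submit the commitment to the L1 batcher inbox
	return nil
}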
diff --git ethereum-optimism/optimism/op-alt-da/damgr.go layr-labs/optimism/op-alt-da/damgr.go
index 15814263c4ffa84c0ed69b48c51ef4de39c37eec..1c52f141ab4ed0929c0e917174315f8c9cc587dd 100644
--- ethereum-optimism/optimism/op-alt-da/damgr.go
+++ layr-labs/optimism/op-alt-da/damgr.go
@@ -117,8 +117,18 @@
 // It is called by the Finalize function, as it has an L1 finalized head to use.
 func (d *DA) updateFinalizedHead(l1Finalized eth.L1BlockRef) {
 	d.l1FinalizedHead = l1Finalized
 	// Prune the state to the finalized head
-	d.state.Prune(l1Finalized.ID())
-	d.finalizedHead = d.state.lastPrunedCommitment
+	lastPrunedCommIncBlock := d.state.Prune(l1Finalized.ID())
+	d.log.Debug("updateFinalizedHead",
+		"currFinalizedHead", d.finalizedHead.Number,
+		"lastPrunedCommIncBlock", lastPrunedCommIncBlock.Number,
+		"l1Finalized", l1Finalized.Number)
+	// If a commitment was pruned, set the finalized head to that commitment's inclusion block
+	// When no commitments are left to be pruned (one example is if we have failed over to ethda)
+	// then updateFinalizedFromL1 becomes the main driver of the finalized head.
+	// Note that updateFinalizedFromL1 is only called when d.state.NoCommitments() is true.
+	if lastPrunedCommIncBlock != (eth.L1BlockRef{}) {
+		d.finalizedHead = lastPrunedCommIncBlock
+	}
 }
 
 // updateFinalizedFromL1 updates the finalized head based on the challenge window.
@@ -133,6 +143,7 @@
 	ref, err := l1.L1BlockRefByNumber(ctx, d.l1FinalizedHead.Number-d.cfg.ChallengeWindow)
 	if err != nil {
 		return err
 	}
+	d.log.Debug("updateFinalizedFromL1", "currFinalizedHead", d.finalizedHead.Number, "newFinalizedHead", ref.Number, "l1FinalizedHead", d.l1FinalizedHead.Number, "challengeWindow", d.cfg.ChallengeWindow)
 	d.finalizedHead = ref
 	return nil
 }
@@ -413,6 +424,7 @@
 			continue
 		}
 		for _, log := range rec.Logs {
 			if log.Address == d.cfg.DAChallengeContractAddress && len(log.Topics) > 0 && log.Topics[0] == ChallengeStatusEventABIHash {
+				d.log.Info("found challenge event", "block", block.Number, "log", log.Index)
 				logs = append(logs, log)
 			}
 		}
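The net effect is that finality now has two drivers, selected by whether Prune found a commitment. A simplified sketch (not the exact fork code) of that selection:

// updateFinality condenses the logic spread across updateFinalizedHead and
// updateFinalizedFromL1 above.
func updateFinality(d *DA, l1Finalized eth.L1BlockRef) {
	if last := d.state.Prune(l1Finalized.ID()); last != (eth.L1BlockRef{}) {
		d.finalizedHead = last // altDA mode: follow the last pruned commitment's inclusion block
		return
	}
	// No commitments remain (e.g. after failing over to ethDA):
	// updateFinalizedFromL1 drives the head at l1FinalizedHead - ChallengeWindow instead.
}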
diff --git ethereum-optimism/optimism/op-alt-da/dastate.go layr-labs/optimism/op-alt-da/dastate.go
index 66a2aee1f31ef24e00c89a6079db5f46d8d01501..5d26841ec51180c0fa420bbe9e817f302b318b26 100644
--- ethereum-optimism/optimism/op-alt-da/dastate.go
+++ layr-labs/optimism/op-alt-da/dastate.go
@@ -52,15 +52,14 @@
 // Challenges and Commitments can be pruned when they are beyond a certain block number (e.g. when they are finalized).
 // In the special case of a L2 reorg, challenges are still tracked but commitments are removed.
 // This will allow the altDA fetcher to find the expired challenge.
 type State struct {
-	commitments          []Commitment          // commitments where the challenge/resolve period has not expired yet
-	expiredCommitments   []Commitment          // commitments where the challenge/resolve period has expired but not finalized
-	challenges           []*Challenge          // challenges ordered by L1 inclusion
-	expiredChallenges    []*Challenge          // challenges ordered by L1 inclusion
-	challengesMap        map[string]*Challenge // challenges by serialized comm + block number for easy lookup
-	lastPrunedCommitment eth.L1BlockRef        // the last commitment to be pruned
-	cfg                  Config
-	log                  log.Logger
-	metrics              Metricer
+	commitments        []Commitment          // commitments where the challenge/resolve period has not expired yet
+	expiredCommitments []Commitment          // commitments where the challenge/resolve period has expired but not finalized
+	challenges         []*Challenge          // challenges ordered by L1 inclusion
+	expiredChallenges  []*Challenge          // challenges ordered by L1 inclusion
+	challengesMap      map[string]*Challenge // challenges by serialized comm + block number for easy lookup
+	cfg                Config
+	log                log.Logger
+	metrics            Metricer
 }
 
 func NewState(log log.Logger, m Metricer, cfg Config) *State {
@@ -207,15 +206,18 @@
 	}
 }
 
 // Prune removes challenges & commitments which have an expiry block number beyond the given block number.
-func (s *State) Prune(origin eth.BlockID) {
+// It returns the last pruned commitment's inclusion block number, or eth.L1BlockRef{} if no commitments were pruned.
+func (s *State) Prune(origin eth.BlockID) eth.L1BlockRef {
 	// Commitments rely on challenges, so we prune commitments first.
-	s.pruneCommitments(origin)
+	lastPrunedCommIncBlock := s.pruneCommitments(origin)
 	s.pruneChallenges(origin)
+	return lastPrunedCommIncBlock
 }
 
 // pruneCommitments removes commitments which have are beyond a given block number.
 // It will remove commitments in order of inclusion until it finds a commitment which is not beyond the given block number.
-func (s *State) pruneCommitments(origin eth.BlockID) {
+func (s *State) pruneCommitments(origin eth.BlockID) eth.L1BlockRef {
+	var lastPrunedCommIncBlock eth.L1BlockRef
 	for len(s.expiredCommitments) > 0 {
 		c := s.expiredCommitments[0]
 		challenge, ok := s.GetChallenge(c.data, c.inclusionBlock.Number)
@@ -236,8 +238,9 @@
 		// Remove the commitment
 		s.expiredCommitments = s.expiredCommitments[1:]
 
 		// Record the latest inclusion block to be returned
-		s.lastPrunedCommitment = c.inclusionBlock
+		lastPrunedCommIncBlock = c.inclusionBlock
 	}
+	return lastPrunedCommIncBlock
 }
 
 // pruneChallenges removes challenges which have are beyond a given block number.
diff --git ethereum-optimism/optimism/op-e2e/actions/altda/altda_test.go layr-labs/optimism/op-e2e/actions/altda/altda_test.go
index e5171d9241f18149d35501d7111b8b0a89dbbd37..ca080e7c2d23b4ac809802a584b2f6283be69e9d 100644
--- ethereum-optimism/optimism/op-e2e/actions/altda/altda_test.go
+++ layr-labs/optimism/op-e2e/actions/altda/altda_test.go
@@ -1,6 +1,7 @@
 package altda
 
 import (
+	"log/slog"
 	"math/big"
 	"math/rand"
 	"testing"
@@ -49,6 +50,12 @@
 }
 
 type AltDAParam func(p *e2eutils.TestParams)
 
+func WithLogLevel(level slog.Level) AltDAParam {
+	return func(p *e2eutils.TestParams) {
+		p.LogLevel = level
+	}
+}
+
 func NewL2AltDA(t helpers.Testing, params ...AltDAParam) *L2AltDA {
 	p := &e2eutils.TestParams{
 		MaxSequencerDrift: 40,
@@ -57,11 +64,12 @@
 		ChannelTimeout: 12,
 		L1BlockTime:    12,
 		UseAltDA:       true,
 		AllocType:      config.AllocTypeAltDA,
+		LogLevel:       log.LevelDebug,
 	}
 	for _, apply := range params {
 		apply(p)
 	}
-	log := testlog.Logger(t, log.LvlDebug)
+	log := testlog.Logger(t, p.LogLevel)
 
 	dp := e2eutils.MakeDeployParams(t, p)
 	sd := e2eutils.Setup(t, dp, helpers.DefaultAlloc)
@@ -75,14 +83,13 @@
 	jwtPath := e2eutils.WriteDefaultJWT(t)
 	engine := helpers.NewL2Engine(t, log, sd.L2Cfg, jwtPath)
 	engCl := engine.EngineClient(t, sd.RollupCfg)
 
-	storage := &altda.DAErrFaker{Client: altda.NewMockDAClient(log)}
-
 	l1F, err := sources.NewL1Client(miner.RPCClient(), log, nil, sources.L1ClientDefaultConfig(sd.RollupCfg, false, sources.RPCKindBasic))
 	require.NoError(t, err)
 
 	altDACfg, err := sd.RollupCfg.GetOPAltDAConfig()
 	require.NoError(t, err)
 
+	storage := &altda.DAErrFaker{Client: altda.NewMockDAClient(log)}
 	daMgr := altda.NewAltDAWithStorage(log, altDACfg, storage, &altda.NoopMetrics{})
 
 	sequencer := helpers.NewL2Sequencer(t, log, l1F, miner.BlobStore(), daMgr, engCl, sd.RollupCfg, 0)
@@ -177,6 +184,34 @@
 	a.lastCommBn = a.miner.L1Chain().CurrentBlock().Number.Uint64()
 }
 
+// ActNewL2TxFinalized sends a new L2 transaction, submits a batch containing it to L1
+// and finalizes the L1 and L2 chains (including advancing enough to clear the altda challenge window).
+//
+// TODO: understand why (notation is l1unsafe/l1safe/l1finalized-l2unsafe/l2safe/l2finalized):
+//   - the first call advances heads by (0/0/17-71/71/1)
+//   - second call advances by 0/0/17-204/204/82,
+//   - but all subsequent calls advance status by exactly 0/0/17-204/204/204.
+//
+// 17 makes sense because challengeWindow=16 and we create 1 extra block before that,
+// and 204 L2blocks = 17 L1blocks * 12 L2blocks/L1block (L1blocktime=12s, L2blocktime=1s)
+func (a *L2AltDA) ActNewL2TxFinalized(t helpers.Testing) {
+	// Include a new l2 batcher transaction, submitting an input commitment to the l1.
+	a.ActNewL2Tx(t)
+	// Create ChallengeWindow empty blocks so the above batcher blocks can finalize (can't be challenged anymore)
+	a.ActL1Blocks(t, a.altDACfg.ChallengeWindow)
+	// Finalize the L1 chain and the L2 chain (by draining all events and running through derivation pipeline)
+	// TODO: understand why we need to drain the pipeline before AND after actL1Finalized
+	a.sequencer.ActL2PipelineFull(t)
+	a.ActL1Finalized(t)
+	a.sequencer.ActL2PipelineFull(t)
+
+	// Uncomment the below code to observe the behavior described in the TODO above
+	// syncStatus := a.sequencer.SyncStatus()
+	// a.log.Info("Sync status after ActNewL2TxFinalized",
+	//	"unsafeL1", syncStatus.HeadL1.Number, "safeL1", syncStatus.SafeL1.Number, "finalizedL1", syncStatus.FinalizedL1.Number,
+	//	"unsafeL2", syncStatus.UnsafeL2.Number, "safeL2", syncStatus.SafeL2.Number, "finalizedL2", syncStatus.FinalizedL2.Number)
+}
+
 func (a *L2AltDA) ActDeleteLastInput(t helpers.Testing) {
 	require.NoError(t, a.storage.Client.DeleteData(a.lastComm))
 }
@@ -363,7 +398,7 @@
 	require.Equal(t, syncStatus.SafeL2, verifSyncStatus.SafeL2)
 }
 
 // DA storage service goes offline while sequencer keeps making blocks. When storage comes back online, it should be able to catch up.
-func TestAltDA_StorageError(gt *testing.T) {
+func TestAltDA_StorageGetError(gt *testing.T) {
 	t := helpers.NewDefaultTesting(gt)
 	harness := NewL2AltDA(t)
 
@@ -528,11 +563,12 @@
 func TestAltDA_Finalization(gt *testing.T) {
 	t := helpers.NewDefaultTesting(gt)
 	a := NewL2AltDA(t)
 
-	// build L1 block #1
+	// Notation everywhere below is l1unsafe/l1safe/l1finalized-l2unsafe/l2safe/l2finalized
+	// build L1 block #1: 0/0/0-0/0/0 -> 1/1/0-0/0/0
 	a.ActL1Blocks(t, 1)
 	a.miner.ActL1SafeNext(t)
 
-	// Fill with l2 blocks up to the L1 head
+	// Fill with l2 blocks up to the L1 head: 1/1/0:0/0/0 -> 1/1/0:1/1/0
 	a.sequencer.ActL1HeadSignal(t)
 	a.sequencer.ActBuildToL1Head(t)
 
@@ -540,7 +576,7 @@
 	a.sequencer.ActL2PipelineFull(t)
 	a.sequencer.ActL1SafeSignal(t)
 	require.Equal(t, uint64(1), a.sequencer.SyncStatus().SafeL1.Number)
 
-	// add L1 block #2
+	// add L1 block #2: 1/1/0:1/1/0 -> 2/2/1:2/1/0
 	a.ActL1Blocks(t, 1)
 	a.miner.ActL1SafeNext(t)
 	a.miner.ActL1FinalizeNext(t)
@@ -552,7 +588,7 @@
 	a.sequencer.ActL2PipelineFull(t)
 	a.sequencer.ActL1FinalizedSignal(t)
 	a.sequencer.ActL1SafeSignal(t)
 
-	// commit all the l2 blocks to L1
+	// commit all the l2 blocks to L1: 2/2/1:2/1/0 -> 3/2/1:2/1/0
 	a.batcher.ActSubmitAll(t)
 	a.miner.ActL1StartBlock(12)(t)
 	a.miner.ActL1IncludeTx(a.dp.Addresses.Batcher)(t)
@@ -561,31 +597,31 @@
 	// verify
 	a.sequencer.ActL2PipelineFull(t)
 
-	// fill with more unsafe L2 blocks
+	// fill with more unsafe L2 blocks: 3/2/1:2/1/0 -> 3/2/1:3/1/0
 	a.sequencer.ActL1HeadSignal(t)
 	a.sequencer.ActBuildToL1Head(t)
 
-	// submit those blocks too, block #4
+	// submit those blocks too, block #4: 3/2/1:3/1/0 -> 4/2/1:3/1/0
 	a.batcher.ActSubmitAll(t)
 	a.miner.ActL1StartBlock(12)(t)
 	a.miner.ActL1IncludeTx(a.dp.Addresses.Batcher)(t)
 	a.miner.ActL1EndBlock(t)
 
-	// add some more L1 blocks #5, #6
+	// add some more L1 blocks #5, #6: 4/2/1:3/1/0 -> 6/2/1:3/1/0
 	a.miner.ActEmptyBlock(t)
 	a.miner.ActEmptyBlock(t)
 
-	// and more unsafe L2 blocks
+	// and more unsafe L2 blocks: 6/2/1:3/1/0 -> 6/2/1:6/1/0
 	a.sequencer.ActL1HeadSignal(t)
 	a.sequencer.ActBuildToL1Head(t)
 
-	// move safe/finalize markers: finalize the L1 chain block with the first batch, but not the second
+	// move safe/finalize markers: 6/2/1:6/1/0 -> 6/4/3:6/1/0
 	a.miner.ActL1SafeNext(t)     // #2 -> #3
 	a.miner.ActL1SafeNext(t)     // #3 -> #4
 	a.miner.ActL1FinalizeNext(t) // #1 -> #2
 	a.miner.ActL1FinalizeNext(t) // #2 -> #3
 
-	// L1 safe and finalized as expected
+	// L1 safe and finalized as expected:
 	a.sequencer.ActL2PipelineFull(t)
 	a.sequencer.ActL1FinalizedSignal(t)
 	a.sequencer.ActL1SafeSignal(t)
@@ -607,3 +643,64 @@
 	// given 12s l1 time and 1s l2 time, l2 should be 12 * 3 = 36 blocks finalized
 	require.Equal(t, uint64(36), a.sequencer.SyncStatus().FinalizedL2.Number)
 }
+
+// This test tests altDA -> ethDA -> altDA finalization behavior, simulating a temp altDA failure.
+func TestAltDA_FinalizationAfterEthDAFailover(gt *testing.T) {
+	t := helpers.NewDefaultTesting(gt)
+	// we only print critical logs to be able to see the statusLogs
+	harness := NewL2AltDA(t, WithLogLevel(log.LevelDebug))
+
+	// We first call this twice because the first 2 times are irregular.
+	// See ActNewL2TxFinalized's TODO comment.
+	harness.ActNewL2TxFinalized(t)
+	harness.ActNewL2TxFinalized(t)
+
+	// ActNewL2TxFinalized advances L1 by (1+ChallengeWindow)L1 blocks, and there are 12 L2 blocks per L1 block.
+	diffL2Blocks := (1 + harness.altDACfg.ChallengeWindow) * 12
+
+	for i := 0; i < 5; i++ {
+		ssBefore := harness.sequencer.SyncStatus()
+		harness.ActNewL2TxFinalized(t)
+		ssAfter := harness.sequencer.SyncStatus()
+		// Finalized head should advance normally in altda mode
+		require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number)
+	}
+
+	// We swap out altda batcher for ethda batcher
+	harness.batcher.ActAltDAFailoverToEthDA(t)
+
+	for i := 0; i < 3; i++ {
+		ssBefore := harness.sequencer.SyncStatus()
+		harness.ActNewL2TxFinalized(t)
+		if i == 0 {
+			// TODO: figure out why we need to act twice for the first time after failover.
+			// I think it's because the L1 driven finalizedHead is set to L1FinalizedHead-ChallengeWindow (see damgr.go updateFinalizedFromL1),
+			// so it trails behind by an extra challenge_window when we switch over to ethDA.
+			harness.ActNewL2TxFinalized(t)
+		}
+		ssAfter := harness.sequencer.SyncStatus()
+		// Even after failover, the finalized head should continue advancing normally
+		require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number)
+	}
+
+	// Revert back to altda batcher (simulating that altda's temporary outage is resolved)
+	harness.batcher.ActAltDAFallbackToAltDA(t)
+
+	for i := 0; i < 3; i++ {
+		ssBefore := harness.sequencer.SyncStatus()
+		harness.ActNewL2TxFinalized(t)
+		ssAfter := harness.sequencer.SyncStatus()
+
+		// Even after fallback to altda, the finalized head should continue advancing normally
+		if i == 0 {
+			// This is the opposite as the altda->ethda direction. In this case, the first time we fallback to altda,
+			// the finalized head will advance by 2*diffL2Blocks: in ethda mode when driven by L1 finalization,
+			// the head is set to L1FinalizedHead-ChallengeWindow. After sending an altda commitment, the finalized head
+			// is now driven by the finalization of the altda commitment.
+			require.Equal(t, ssBefore.FinalizedL2.Number+2*diffL2Blocks, ssAfter.FinalizedL2.Number)
+		} else {
+			require.Equal(t, ssBefore.FinalizedL2.Number+diffL2Blocks, ssAfter.FinalizedL2.Number)
+		}
+
+	}
+}
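The expected per-round finalized-head advance used by the new test falls directly out of the harness parameters; spelled out (values taken from the test setup above):

challengeWindow := uint64(16)                   // altDA ChallengeWindow in this harness
l2PerL1 := uint64(12)                           // L1BlockTime=12s, L2 block time 1s
diffL2Blocks := (1 + challengeWindow) * l2PerL1 // 17 L1 blocks -> 204 L2 blocks per round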
diff --git ethereum-optimism/optimism/op-e2e/system/altda/concurrent_test.go layr-labs/optimism/op-e2e/system/altda/concurrent_test.go
index ef11a879dc70d59a7f381b0002b39f314df5ed2a..19c0a0103bb4db9274c09e806119053e14945f67 100644
--- ethereum-optimism/optimism/op-e2e/system/altda/concurrent_test.go
+++ layr-labs/optimism/op-e2e/system/altda/concurrent_test.go
@@ -73,7 +73,7 @@
 	block, err := geth.WaitForBlock(big.NewInt(int64(startingL1BlockNum)+int64(i)), l1Client)
 	require.NoError(t, err, "Waiting for l1 blocks")
 	// there are possibly other services (proposer/challenger) in the background sending txs
 	// so we only count the batcher txs
-	batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress)
+	batcherTxCount, err := transactions.TransactionsBySenderCount(block, cfg.DeployConfig.BatchSenderAddress)
 	require.NoError(t, err)
 	if batcherTxCount > 1 {
 		return
diff --git ethereum-optimism/optimism/op-e2e/system/altda/failover_test.go layr-labs/optimism/op-e2e/system/altda/failover_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..9e250c8f0b6c616133642774b1486afa2d024e5b
--- /dev/null
+++ layr-labs/optimism/op-e2e/system/altda/failover_test.go
@@ -0,0 +1,80 @@
+package altda
+
+import (
+	"math/big"
+	"testing"
+
+	op_e2e "github.com/ethereum-optimism/optimism/op-e2e"
+	"github.com/ethereum-optimism/optimism/op-node/rollup/derive/params"
+	"github.com/ethereum/go-ethereum/log"
+
+	"github.com/ethereum-optimism/optimism/op-batcher/flags"
+	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
+	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions"
+	"github.com/ethereum-optimism/optimism/op-e2e/system/e2esys"
+	"github.com/stretchr/testify/require"
+)
+
+// TestBatcher_FailoverToEthDA_FallbackToAltDA tests that the batcher will failover to ethDA
+// if the da-server returns 503. It also tests that the batcher successfully returns to normal
+// behavior of posting batches to altda once it becomes available again
+// (i.e. the da-server doesn't return 503 anymore).
+func TestBatcher_FailoverToEthDA_FallbackToAltDA(t *testing.T) {
+	op_e2e.InitParallel(t)
+
+	nChannelsFailover := uint64(2)
+
+	cfg := e2esys.DefaultSystemConfig(t, e2esys.WithLogLevel(log.LevelCrit))
+	cfg.DeployConfig.UseAltDA = true
+	cfg.DeployConfig.DACommitmentType = "GenericCommitment"
+	cfg.DeployConfig.DAChallengeWindow = 16
+	cfg.DeployConfig.DAResolveWindow = 16
+	cfg.DeployConfig.DABondSize = 1000000
+	cfg.DeployConfig.DAResolverRefundPercentage = 0
+	// Default cfg.BatcherMaxChannelDuration is 1, which means at least one channel is sent per L1 block.
+	// Furthermore, by setting cfg.BatcherMaxPendingTransactions = 1,
+	// we make sure the batcher posts a single commitment per L1 block.
+	// This way it's easy to trigger failover and observe the commitment changing on the next L1 block.
+	cfg.BatcherMaxPendingTransactions = 1
+	cfg.BatcherMaxConcurrentDARequest = 1
+	cfg.BatcherBatchType = 0
+	// currently altda commitments can only be sent as calldata
+	cfg.DataAvailabilityType = flags.CalldataType
+
+	sys, err := cfg.Start(t)
+	require.NoError(t, err, "Error starting up system")
+	defer sys.Close()
+	l1Client := sys.NodeClient("l1")
+
+	startBlockL1, err := geth.WaitForBlockWithTxFromSender(cfg.DeployConfig.BatchSenderAddress, l1Client, 10)
+	require.NoError(t, err)
+
+	// Simulate altda server returning 503
+	sys.FakeAltDAServer.SetPutFailoverForNRequests(nChannelsFailover)
+
+	countEthDACommitment := uint64(0)
+
+	// There is some nondeterministic timing behavior that affects whether the batcher has already
+	// posted batches before seeing the above SetPutFailoverForNRequests behavior change.
+	// Most likely, sequence of blocks will be: altDA, ethDA, ethDA, altDA, altDA, altDA.
+	// 2 ethDA are expected (and checked for) because nChannelsFailover=2, so da-server will return 503 for 2 requests only,
+	// and the batcher always tries altda first for a new channel, and failsover to ethDA only if altda returns 503.
+	for blockNumL1 := startBlockL1.NumberU64(); blockNumL1 < startBlockL1.NumberU64()+6; blockNumL1++ {
+		blockL1, err := geth.WaitForBlock(big.NewInt(0).SetUint64(blockNumL1), l1Client)
+		require.NoError(t, err)
+		batcherTxs, err := transactions.TransactionsBySender(blockL1, cfg.DeployConfig.BatchSenderAddress)
+		require.NoError(t, err)
+		require.Equal(t, 1, len(batcherTxs)) // sanity check: ensure BatcherMaxPendingTransactions=1 is working
+		batcherTx := batcherTxs[0]
+		if batcherTx.Data()[0] == byte(params.DerivationVersion0) {
+			countEthDACommitment++
+			t.Log("blockL1", blockNumL1, "batcherTxType", "ethda")
+		} else if batcherTx.Data()[0] == byte(params.DerivationVersion1) {
+			t.Log("blockL1", blockNumL1, "batcherTxType", "altda")
+		} else {
+			t.Fatalf("unexpected batcherTxType: %v", batcherTx.Data()[0])
+		}
+	}
+	require.Equal(t, nChannelsFailover, countEthDACommitment, "Expected %v ethDA commitments, got %v", nChannelsFailover, countEthDACommitment)
+
+}
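To run just this system test locally, something like the following should work (assuming the usual op-e2e prerequisites, e.g. generated devnet allocs, are in place):

cd op-e2e && go test ./system/altda -run TestBatcher_FailoverToEthDA_FallbackToAltDA -v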
diff --git ethereum-optimism/optimism/op-e2e/system/da/multi_test.go layr-labs/optimism/op-e2e/system/da/multi_test.go
index 461270282008b931e8ec4ca4760221a4c0e9e297..e8b7ea6ff2664cc8245909221d21e581ac69a671 100644
--- ethereum-optimism/optimism/op-e2e/system/da/multi_test.go
+++ layr-labs/optimism/op-e2e/system/da/multi_test.go
@@ -52,7 +52,7 @@
 	for i := startBlock; i <= headNum; i++ {
 		block, err := l1Client.BlockByNumber(ctx, big.NewInt(int64(i)))
 		require.NoError(t, err)
 
-		batcherTxCount, err := transactions.TransactionsBySender(block, cfg.DeployConfig.BatchSenderAddress)
+		batcherTxCount, err := transactions.TransactionsBySenderCount(block, cfg.DeployConfig.BatchSenderAddress)
 		require.NoError(t, err)
 		totalBatcherTxsCount += batcherTxCount
diff --git ethereum-optimism/optimism/op-e2e/system/e2esys/setup.go layr-labs/optimism/op-e2e/system/e2esys/setup.go
index 12f9af850788510ba8d301a7a43a4da75f874a65..dc67991bc7df3b215ebe5aa86a07e02432ee4b1e 100644
--- ethereum-optimism/optimism/op-e2e/system/e2esys/setup.go
+++ layr-labs/optimism/op-e2e/system/e2esys/setup.go
@@ -6,6 +6,7 @@
 	"crypto/ecdsa"
 	"crypto/rand"
 	"errors"
 	"fmt"
+	"log/slog"
 	"math/big"
 	"net"
 	"os"
@@ -87,6 +88,7 @@
 )
 
 type SystemConfigOpts struct {
 	AllocType config.AllocType
+	LogLevel  slog.Level
 }
 
 type SystemConfigOpt func(s *SystemConfigOpts)
@@ -97,9 +99,16 @@
 		s.AllocType = allocType
 	}
 }
 
+func WithLogLevel(level slog.Level) SystemConfigOpt {
+	return func(s *SystemConfigOpts) {
+		s.LogLevel = level
+	}
+}
+
 func DefaultSystemConfig(t testing.TB, opts ...SystemConfigOpt) SystemConfig {
 	sco := &SystemConfigOpts{
 		AllocType: config.DefaultAllocType,
+		LogLevel:  slog.LevelInfo,
 	}
 	for _, opt := range opts {
 		opt(sco)
@@ -110,7 +119,7 @@
 	require.NoError(t, err)
 	deployConfig := config.DeployConfig(sco.AllocType)
 	deployConfig.L1GenesisBlockTimestamp = hexutil.Uint64(time.Now().Unix())
 	e2eutils.ApplyDeployConfigForks(deployConfig)
-	require.NoError(t, deployConfig.Check(testlog.Logger(t, log.LevelInfo)),
+	require.NoError(t, deployConfig.Check(testlog.Logger(t, sco.LogLevel).New("role", "config-check")),
 		"Deploy config is invalid, do you need to run make devnet-allocs?")
 	l1Deployments := config.L1Deployments(sco.AllocType)
 	require.NoError(t, l1Deployments.Check(deployConfig))
@@ -172,11 +181,12 @@
 			Sync: sync.Config{SyncMode: sync.CLSync},
 		},
 	},
 	Loggers: map[string]log.Logger{
-		RoleVerif:   testlog.Logger(t, log.LevelInfo).New("role", RoleVerif),
-		RoleSeq:     testlog.Logger(t, log.LevelInfo).New("role", RoleSeq),
-		"batcher":   testlog.Logger(t, log.LevelInfo).New("role", "batcher"),
-		"proposer":  testlog.Logger(t, log.LevelInfo).New("role", "proposer"),
-		"da-server": testlog.Logger(t, log.LevelInfo).New("role", "da-server"),
+		RoleVerif:      testlog.Logger(t, sco.LogLevel).New("role", RoleVerif),
+		RoleSeq:        testlog.Logger(t, sco.LogLevel).New("role", RoleSeq),
+		"batcher":      testlog.Logger(t, sco.LogLevel).New("role", "batcher"),
+		"proposer":     testlog.Logger(t, sco.LogLevel).New("role", "proposer"),
+		"da-server":    testlog.Logger(t, sco.LogLevel).New("role", "da-server"),
+		"config-check": testlog.Logger(t, sco.LogLevel).New("role", "config-check"),
 	},
 	GethOptions: map[string][]geth.GethOption{},
 	P2PTopology: nil, // no P2P connectivity by default
@@ -275,12 +285,10 @@
 	// L1FinalizedDistance is the distance from the L1 head that L1 blocks will be artificially finalized on.
 	L1FinalizedDistance uint64
 
-	Premine        map[common.Address]*big.Int
-	Nodes          map[string]*rollupNode.Config // Per node config. Don't use populate rollup.Config
-	Loggers        map[string]log.Logger
-	GethOptions    map[string][]geth.GethOption
-	ProposerLogger log.Logger
-	BatcherLogger  log.Logger
+	Premine     map[common.Address]*big.Int
+	Nodes       map[string]*rollupNode.Config // Per node config. Don't use populate rollup.Config
+	Loggers     map[string]log.Logger
+	GethOptions map[string][]geth.GethOption
 
 	ExternalL2Shim string
 
@@ -551,7 +559,7 @@
 		sys.TimeTravelClock = clock.NewAdvancingClock(100 * time.Millisecond)
 		c = sys.TimeTravelClock
 	}
 
-	if err := cfg.DeployConfig.Check(testlog.Logger(t, log.LevelInfo)); err != nil {
+	if err := cfg.DeployConfig.Check(cfg.Loggers["config-check"]); err != nil {
 		return nil, err
 	}
diff --git ethereum-optimism/optimism/kurtosis-devnet/tests/eigenda/failover_test.go layr-labs/optimism/kurtosis-devnet/tests/eigenda/failover_test.go
new file mode 100644
index 0000000000000000000000000000000000000000..98a13db31f81f8e85677f99d7c272fc40529d4c2
--- /dev/null
+++ layr-labs/optimism/kurtosis-devnet/tests/eigenda/failover_test.go
@@ -0,0 +1,463 @@
+package eigenda_test
+
+import (
+	"context"
+	"encoding/json"
+	"fmt"
+	"log/slog"
+	"math/big"
+	"net/http"
+	"reflect"
+	"strconv"
+	"strings"
+	"testing"
+	"time"
+
+	"github.com/Layr-Labs/eigenda-proxy/clients/memconfig_client"
+	"github.com/ethereum-optimism/optimism/op-e2e/e2eutils/geth"
+	"github.com/ethereum-optimism/optimism/op-service/dial"
+	"github.com/ethereum-optimism/optimism/op-service/sources"
+	"github.com/ethereum-optimism/optimism/op-service/testlog"
+	"github.com/ethereum/go-ethereum/common"
+	"github.com/ethereum/go-ethereum/ethclient"
+	"github.com/ethereum/go-ethereum/log"
+	"github.com/kurtosis-tech/kurtosis/api/golang/core/lib/enclaves"
+	"github.com/kurtosis-tech/kurtosis/api/golang/engine/lib/kurtosis_context"
+	"github.com/stretchr/testify/require"
+)
+
+// All tests are run in the context of the eigenda-memstore-devnet enclave.
+// We assume that this enclave is already running.
+const enclaveName = "eigenda-memstore-devnet"
+
+// TestFailover tests the failover behavior of the batcher, in response to the proxy returning 503 errors.
+// See https://github.com/Layr-Labs/eigenda-proxy?tab=readme-ov-file#failover-signals for proxy behavior.
+// The proxy's memstore's failover behavior is toggled on and off by this test via a REST api.
+// We then check that the batcher correctly interprets the 503 signals and starts submitting batches to EthDACalldata instead.
+// The test then toggles the failover back off and checks that the batcher starts submitting EigenDA batches again.
+// The batches inbox transactions are queried via geth's GraphQL API.
+//
+// Note: because this test relies on modifying the proxy's memstore config, it should be run in isolation.
+// That is, if we ever implement more kurtosis tests, they would currently need to be run sequentially.
+func TestFailoverToEthDACalldata(t *testing.T) {
+	deadline, ok := t.Deadline()
+	if !ok {
+		deadline = time.Now().Add(10 * time.Minute)
+	}
+	ctxWithDeadline, cancel := context.WithDeadline(context.Background(), deadline)
+	defer cancel()
+
+	harness := newHarness(t)
+	t.Cleanup(func() {
+		// switch proxy back to normal mode, in case test gets cancelled
+		ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
+		defer cancel()
+		err := harness.clients.proxyMemconfigClient.Failback(ctx)
+		if err != nil {
+			t.Logf("Error failing back... you might need to reset proxy to normal mode manually: %v", err)
+		}
+	})
+
+	// Number of blocks to queried for batcher txs, for each of the initial/failover/failback stages
+	// Test will look at batcher txs between blocks:
+	//  - initial altda stage: [testStartL1BlockNum - l1BlocksQueriedForBatcherTxs, testStartL1BlockNum]
+	//  - ethDACalldata stage: [afterFailoverFromBlockNum, afterFailoverFromBlockNum + l1BlocksQueriedForBatcherTxs]
+	//  - altDA stage: [afterFailbackFromBlockNum, afterFailbackFromBlockNum + l1BlocksQueriedForBatcherTxs]
+	//
+	// After Failover/Failback, will wait for 10 L1 blocks to make sure failover/failback has happened.
+	// Assumption is that a cert is being posted every 2 blocks (hardcoded in batcher config)
+	// TODO: read max-channel-duration from batcher's config instead of assuming 2 blocks
+	l1BlocksQueriedForBatcherTxs := uint64(10)
+
+	// assume kurtosis is running and is at least at block numBlocksBetweenStages
+	require.GreaterOrEqual(t, harness.testStartL1BlockNum, l1BlocksQueriedForBatcherTxs, "Test started too early in the chain")
+	fromBlock := harness.testStartL1BlockNum - l1BlocksQueriedForBatcherTxs
+
+	// 1. Check that the original commitments are EigenDA
+	harness.requireBatcherTxsToBeFromLayer(t, fromBlock, fromBlock+l1BlocksQueriedForBatcherTxs, DALayerEigenDA)
+
+	// 2. Failover and check that the commitments are now EthDACalldata
+	t.Logf("Failing over... changing proxy's config to return 503 errors")
+	err := harness.clients.proxyMemconfigClient.Failover(ctxWithDeadline)
+	require.NoError(t, err)
+
+	afterFailoverFromBlockNum, err := harness.clients.gethL1Client.BlockNumber(ctxWithDeadline)
+	require.NoError(t, err)
+	afterFailoverToBlockNum := afterFailoverFromBlockNum + l1BlocksQueriedForBatcherTxs
+	_, err = geth.WaitForBlock(big.NewInt(int64(afterFailoverToBlockNum)), harness.clients.gethL1Client)
+	require.NoError(t, err)
+
+	harness.requireBatcherTxsToBeFromLayer(t, afterFailoverFromBlockNum, afterFailoverToBlockNum, DALayerEthCalldata)
+
+	// We also check that the op-node is still finalizing blocks after the failover
+	syncStatus, err := harness.clients.opNodeClient.SyncStatus(ctxWithDeadline)
+	require.NoError(t, err)
+	afterFailoverFinalizedL2 := syncStatus.FinalizedL2
+	t.Logf("Current finalized L2 block: %d. Waiting for next block to finalize to make sure finalization is still happening.", afterFailoverFinalizedL2.Number)
+	// On average would expect this to take half an epoch, aka 16 L1 blocks, which at 6 sec/block means 1.5 minutes.
+	// This generally takes longer (3-6 minutes), but I'm not quite sure why.
+	_, err = geth.WaitForBlockToBeFinalized(new(big.Int).SetUint64(afterFailoverFinalizedL2.Number+1), harness.clients.opGethClient, 6*time.Minute)
+	require.NoError(t, err, "op-node should still be finalizing blocks after failover")
+
+	// 3. Failback and check that the commitments are EigenDA again
+	t.Logf("Failing back... changing proxy's config to start processing PUT requests normally again")
+	err = harness.clients.proxyMemconfigClient.Failback(ctxWithDeadline)
+	require.NoError(t, err)
+
+	afterFailbackFromBlockNum, err := harness.clients.gethL1Client.BlockNumber(ctxWithDeadline)
+	require.NoError(t, err)
+	afterFailbackToBlockNum := afterFailbackFromBlockNum + l1BlocksQueriedForBatcherTxs
+	_, err = geth.WaitForBlock(big.NewInt(int64(afterFailbackToBlockNum)), harness.clients.gethL1Client)
+	require.NoError(t, err)
+
+	harness.requireBatcherTxsToBeFromLayer(t, afterFailbackFromBlockNum, afterFailbackToBlockNum, DALayerEigenDA)
+
+}
+
+// Test Harness, which contains all the state needed to run the tests.
+// harness also defines some higher-level "require" methods that are used in the tests.
+type harness struct {
+	logger              log.Logger
+	endpoints           *EnclaveServicePublicEndpoints
+	clients             *EnclaveServiceClients
+	batchInboxAddr      common.Address
+	testStartL1BlockNum uint64
+}
+
+func newHarness(t *testing.T) *harness {
+	logger := testlog.Logger(t, slog.LevelInfo)
+
+	// We leave 20 seconds to build the entire testHarness.
+	ctxWithTimeout, cancel := context.WithTimeout(context.Background(), 20*time.Second)
+	defer cancel()
+
+	// Create a Kurtosis context
+	kurtosisCtx, err := kurtosis_context.NewKurtosisContextFromLocalEngine()
+	require.NoError(t, err)
+
+	// Get the eigenda-memstore-devnet enclave (assuming it's already running)
+	enclaveCtx, err := kurtosisCtx.GetEnclaveContext(ctxWithTimeout, enclaveName)
+	require.NoError(t, err, "Error getting enclave context: is enclave %v running?", enclaveName)
+
+	endpoints, err := getPublicEndpointsFromKurtosis(enclaveCtx)
+	require.NoError(t, err)
+	t.Logf("Endpoints: %+v", endpoints)
+
+	clients, err := getClientsFromEndpoints(ctxWithTimeout, logger, endpoints)
+	require.NoError(t, err)
+
+	// Get the batch inbox address from the rollup config
+	rollupConfig, err := clients.opNodeClient.RollupConfig(ctxWithTimeout)
+	require.NoError(t, err)
+
+	// Get the current L1 block number
+	testStartL1BlockNum, err := clients.gethL1Client.BlockNumber(ctxWithTimeout)
+	require.NoError(t, err)
+
+	return &harness{
+		logger:              logger,
+		endpoints:           endpoints,
+		clients:             clients,
+		batchInboxAddr:      rollupConfig.BatchInboxAddress,
+		testStartL1BlockNum: testStartL1BlockNum,
+	}
+}
+
+// requireBatcherTxsToBeFromLayer checks that the batcher transactions since startingFromBlockNum are all from the expectedLayer.
+// It allows for up to 3 initial commitments to be of the wrong type, as the failover/failback might not have taken effect yet.
+// It requires that at least 2 commitments of the expected type are present after the failover/failback.
+func (h *harness) requireBatcherTxsToBeFromLayer(t *testing.T, fromBlockNum, toBlockNum uint64, expectedLayer DALayer) {
+	batcherTxs, err := fetchBatcherTxs(h.endpoints.GethL1Endpoint, h.batchInboxAddr.String(), fromBlockNum, toBlockNum)
+	require.NoError(t, err)
+	t.Logf("Fetched %d batcher transactions since block %d", len(batcherTxs), fromBlockNum)
+
+	// We allow first 3 commitments to be of the wrong DA layer, as the failover/failback might not have taken effect yet.
+	wrongCommitmentsToDiscard := 0
+	for _, batcherTx := range batcherTxs {
+		if batcherTx.daLayer != expectedLayer {
+			wrongCommitmentsToDiscard++
+		}
+		// as soon as we see a commitment from expectedLayer, or 3 from the other layer, we stop discarding.
+		if wrongCommitmentsToDiscard > 2 || batcherTx.daLayer == expectedLayer {
+			break
+		}
+	}
+	batcherTxs = batcherTxs[wrongCommitmentsToDiscard:]
+	t.Logf("Discarded %d commitments. %d left which should all be %v", wrongCommitmentsToDiscard, len(batcherTxs), expectedLayer)
+
+	// After potentially discarding up to 3 commitments, we expect all future commitments (at least 2) to be of the expectedLayer
+	require.GreaterOrEqual(t, len(batcherTxs), 2, "Expected at least 2 %v commitments after failover/failback", expectedLayer)
+	for _, batcherTx := range batcherTxs {
+		require.Equal(t, expectedLayer, batcherTx.daLayer,
+			"Invalid commitment in block %d: expected %v, received commitment %s", batcherTx.block, expectedLayer, batcherTx.commitment)
+	}
+}
+
+// See https://specs.optimism.io/experimental/alt-da.html#example-commitments
+// Batcher only supports failing over to calldata txs right now, so this test doesn't test 4844 failover.
+// Note that 4844 txs are completely different and don't use normal txs with a prefix in the calldata,
+// see https://github.com/ethereum-optimism/optimism/blob/develop/op-node/rollup/derive/blob_data_source.go#L134-L137
+const ethDACalldataCommitmentPrefix = "0x00"
+const eigenDACommitmentPrefix = "0x010100"
+
+type DALayer string
+
+const (
+	DALayerEthCalldata DALayer = "ethda-calldata"
+	DALayerEigenDA     DALayer = "eigenda"
+)
+
+type BatcherTx struct {
+	commitment string
+	daLayer    DALayer // commitment starts with respective prefix
+	block      uint64
+}
+
+// HexUint64 is a custom type that can unmarshal from a hex string
+type HexUint64 uint64
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (h *HexUint64) UnmarshalJSON(data []byte) error {
+	// Remove quotes from the JSON string
+	hexStr := string(data)
+	hexStr = strings.Trim(hexStr, "\"")
+
+	// Check if it's a hex string
+	if !strings.HasPrefix(hexStr, "0x") {
+		return fmt.Errorf("not a hex string: %s", hexStr)
+	}
+
+	// Parse the hex string (without the 0x prefix)
+	val, err := strconv.ParseUint(hexStr[2:], 16, 64)
+	if err != nil {
+		return err
+	}
+
+	*h = HexUint64(val)
+	return nil
+}
+
+// Fetches all the batch-inbox posted commitments from blockNum (inclusive) to current block.
+// We rely on geth's GraphQL API to fetch the batcher transactions.
+// We could possibly have reused op-node's L1Retriever, but the API felt very derivation-pipeline specific,
+// and there doesn't seem to be a way to reuse it easily for constructing a custom derivation-pipeline with a subset of stages
+// like what we need here. Could consider migrating in the future if we need more complex logic.
+func fetchBatcherTxs(gethL1Endpoint string, batchInbox string, fromBlockNum, toBlockNum uint64) ([]BatcherTx, error) {
+	// We use standard HTTP for GraphQL as it's not directly supported by the rpc package
+	// Visit gethL1Endpoint/graphql/ui to see the schema and test queries
+	query := fmt.Sprintf(`
+	{
+		"query": "query txInfo { blocks(from:%v, to:%v) { transactions { to { address } inputData block { number } } } }"
+	}`, fromBlockNum, toBlockNum)
+
+	// Make GraphQL request
+	req, err := http.NewRequest("POST", gethL1Endpoint+"/graphql", strings.NewReader(query))
+	if err != nil {
+		return nil, err
+	}
+
+	req.Header.Set("Content-Type", "application/json")
+	httpClient := &http.Client{}
+	resp, err := httpClient.Do(req)
+	if err != nil {
+		return nil, err
+	}
+	defer resp.Body.Close()
+
+	// Parse the response
+	type GraphQLResponse struct {
+		Data struct {
+			Blocks []struct {
+				Transactions []struct {
+					To struct {
+						Address string `json:"address"`
+					} `json:"to"`
+					InputData string `json:"inputData"`
+					Block     struct {
+						// we use HexUint64 to properly parse the hex strings returned
+						Number HexUint64 `json:"number"`
+					} `json:"block"`
+				} `json:"transactions"`
+			} `json:"blocks"`
+		} `json:"data"`
+	}
+	var graphQLResp GraphQLResponse
+	if err := json.NewDecoder(resp.Body).Decode(&graphQLResp); err != nil {
+		return nil, err
+	}
+	if len(graphQLResp.Data.Blocks) == 0 {
+		// Assume that this is a graphQL query error, that would have returned something like
+		// "errors": [
+		//   {
+		//     "message": "syntax error: unexpected \"\", expecting Ident",
+		//   }
+		// ]
+		// TODO: prob should just switch to a proper graphql client that can handle these properly
+		return nil, fmt.Errorf("no blocks returned in GraphQL response")
+	}
+
+	// Filter transactions to the batcher address
+	var batcherTxs []BatcherTx
+	for _, block := range
graphQLResp.Data.Blocks { + for _, tx := range block.Transactions { + if strings.EqualFold(tx.To.Address, batchInbox) { + var daLayer DALayer + if strings.HasPrefix(tx.InputData, eigenDACommitmentPrefix) { + daLayer = DALayerEigenDA + } else if strings.HasPrefix(tx.InputData, ethDACalldataCommitmentPrefix) { + daLayer = DALayerEthCalldata + } else { + return nil, fmt.Errorf("unknown commitment prefix: %s", tx.InputData) + } + batcherTxs = append(batcherTxs, BatcherTx{ + commitment: tx.InputData, + daLayer: daLayer, + block: uint64(tx.Block.Number), + }) + } + } + } + + return batcherTxs, nil +} + +// Localhost endpoints for the different services in the enclave +// that we need to interact with. We store the public localhost endpoints instead +// of the private enclave endpoints because we need to interact with the services +// using external shell commands like `cast rpc ...` and `cast geth ...`. +// The public endpoints are the ones that are exposed to the host machine. +type EnclaveServicePublicEndpoints struct { + OpNodeEndpoint string `kurtosis:"op-cl-1-op-node-op-geth-op-kurtosis,http"` + OpGethEndpoint string `kurtosis:"op-el-1-op-geth-op-node-op-kurtosis,rpc"` + GethL1Endpoint string `kurtosis:"el-1-geth-teku,rpc"` + EigendaProxyEndpoint string `kurtosis:"da-server-op-kurtosis,http"` + // Adding new endpoints is as simple as adding a new field with a kurtosis tag + // NewServiceEndpoint string `kurtosis:"new-service-name,port-name"` +} + +// Constructor for EnclaveServiceEndpoints struct, which assumes a running kurtosis enclave +// and queries the needed services for their public (localhost) ports, and constructs +// the struct with the endpoints. +// +// This function uses reflection to parse the `kurtosis` tags in the struct fields to get the service name and port name. +// See the comments in the EnclaveServicePublicEndpoints struct for more details on adding a new endpoint. +func getPublicEndpointsFromKurtosis(enclaveCtx *enclaves.EnclaveContext) (*EnclaveServicePublicEndpoints, error) { + endpoints := &EnclaveServicePublicEndpoints{} + + // Get the type of the struct to iterate over fields + t := reflect.TypeOf(endpoints).Elem() + v := reflect.ValueOf(endpoints).Elem() + + // Iterate over all fields in the struct + for i := 0; i < t.NumField(); i++ { + field := t.Field(i) + + // Get the kurtosis tag + tag := field.Tag.Get("kurtosis") + if tag == "" { + return nil, fmt.Errorf("field %s doesn't have a kurtosis tag", field.Name) + } + + // Parse the tag to get service name and port name + parts := strings.Split(tag, ",") + if len(parts) != 2 { + return nil, fmt.Errorf("invalid kurtosis tag format for field %s: %s", field.Name, tag) + } + + serviceName := parts[0] + portName := parts[1] + + // Get the service context + serviceCtx, err := enclaveCtx.GetServiceContext(serviceName) + if err != nil { + return nil, fmt.Errorf("GetServiceContext for %s: %w", serviceName, err) + } + + // Get the port + port, ok := serviceCtx.GetPublicPorts()[portName] + if !ok { + return nil, fmt.Errorf("service %s doesn't expose %s port", serviceName, portName) + } + + // Set the endpoint URL in the struct field + endpoint := fmt.Sprintf("http://localhost:%d", port.GetNumber()) + v.Field(i).SetString(endpoint) + } + + return endpoints, nil +} + +type EnclaveServiceClients struct { + // opNode and opGeth are the L2 clients for the rollup. + opNodeClient *sources.RollupClient + // opGeth is the client for the L2 execution layer client. 
+ opGethClient *ethclient.Client + // gethL1 is the client for the L1 chain execution layer client. + gethL1Client *ethclient.Client + // proxyMemconfigClient is the client for the eigenda-proxy's memstore config API. + // It allows us to toggle the proxy's failover behavior. + proxyMemconfigClient *ProxyMemconfigClient +} + +func getClientsFromEndpoints(ctx context.Context, logger log.Logger, endpoints *EnclaveServicePublicEndpoints) (*EnclaveServiceClients, error) { + opNodeClient, err := dial.DialRollupClientWithTimeout(ctx, 10*time.Second, logger, endpoints.OpNodeEndpoint) + if err != nil { + return nil, fmt.Errorf("dial.DialRollupClientWithTimeout: %w", err) + } + + opGethClient, err := dial.DialEthClientWithTimeout(ctx, 10*time.Second, logger, endpoints.OpGethEndpoint) + if err != nil { + return nil, fmt.Errorf("dial.DialEthClientWithTimeout: %w", err) + } + + // TODO: prob also change to use dial.DialEthClient? + gethL1Client, err := ethclient.Dial(endpoints.GethL1Endpoint) + if err != nil { + return nil, fmt.Errorf("ethclient.Dial: %w", err) + } + + proxyMemconfigClient := &ProxyMemconfigClient{ + Client: memconfig_client.New(&memconfig_client.Config{URL: endpoints.EigendaProxyEndpoint}), + } + + return &EnclaveServiceClients{ + opNodeClient: opNodeClient, + opGethClient: opGethClient, + gethL1Client: gethL1Client, + proxyMemconfigClient: proxyMemconfigClient, + }, nil +} + +// ProxyMemconfigClient is a wrapper around the memconfig client that adds a Failover method +// TODO: we should upstream this to eigenda-proxy repo +type ProxyMemconfigClient struct { + *memconfig_client.Client +} + +// Update the proxy's memstore config to start returning 503 errors +// Note: we have to GetConfig, update it and then UpdateConfig because the client doesn't implement a "patch" method, +// even though the API does support it. +func (c *ProxyMemconfigClient) Failover(ctx context.Context) error { + memConfig, err := c.GetConfig(ctx) + if err != nil { + return fmt.Errorf("GetConfig: %w", err) + } + memConfig.PutReturnsFailoverError = true + _, err = c.UpdateConfig(ctx, memConfig) + if err != nil { + return fmt.Errorf("UpdateConfig: %w", err) + } + return nil +} +func (c *ProxyMemconfigClient) Failback(ctx context.Context) error { + memConfig, err := c.GetConfig(ctx) + if err != nil { + return fmt.Errorf("GetConfig: %w", err) + } + memConfig.PutReturnsFailoverError = false + _, err = c.UpdateConfig(ctx, memConfig) + if err != nil { + return fmt.Errorf("UpdateConfig: %w", err) + } + return nil +}
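For context on the batcher side of the flow this test exercises: the proxy signals failover by answering PUT requests with a 503, and the batcher reacts by falling back to submitting the raw channel frames as Eth calldata (the DaType plumbing shown in the op-batcher diffs earlier in this document). The following is a minimal, self-contained sketch of that decision; the put callback, the status-code mapping, and all names here are illustrative assumptions, not the actual op-batcher code.

package main

import (
	"fmt"
	"net/http"
)

// put stands in for a hypothetical client call that PUTs channel frames to
// eigenda-proxy and reports the HTTP status; the real batcher goes through
// its DA client and requeues the frames on failure instead.
type put func(frames []byte) (int, error)

// submit tries EigenDA first and falls back to Eth calldata when the proxy
// answers with 503, its failover signal.
func submit(doPut put, frames []byte) (string, error) {
	status, err := doPut(frames)
	if err != nil {
		return "", err
	}
	if status == http.StatusServiceUnavailable {
		// Failover signal: post the raw frames to the batch inbox as
		// calldata (version byte 0x00) instead of an EigenDA cert.
		return "ethda-calldata", nil
	}
	// Proxy stored the blob on EigenDA; post the returned cert
	// (0x010100-prefixed commitment) to the batch inbox.
	return "eigenda", nil
}

func main() {
	failingOver := func([]byte) (int, error) { return http.StatusServiceUnavailable, nil }
	layer, _ := submit(failingOver, []byte("frames"))
	fmt.Println(layer) // ethda-calldata
}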

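The two commitment prefixes the test matches on follow the alt-da spec linked in the file: the first byte of batch inbox calldata is 0x00 for plain channel frames, while 0x01 introduces an alt-da commitment; in 0x010100 the second 0x01 is the generic commitment type and the trailing 0x00 is the DA-layer byte the proxy uses for EigenDA. A small standalone sketch of the same classification the test performs:

package main

import (
	"fmt"
	"strings"
)

// Same prefixes as the test above.
// 0x00     -> raw channel frames posted as Eth calldata
// 0x010100 -> alt-da (0x01) generic commitment (0x01) with DA-layer byte 0x00
const (
	calldataPrefix = "0x00"
	eigenDAPrefix  = "0x010100"
)

func classify(inputData string) string {
	switch {
	case strings.HasPrefix(inputData, eigenDAPrefix):
		return "eigenda"
	case strings.HasPrefix(inputData, calldataPrefix):
		return "ethda-calldata"
	default:
		// e.g. a 4844 blob tx, whose batch data is not in calldata at all
		return "unknown"
	}
}

func main() {
	fmt.Println(classify("0x010100ab")) // eigenda
	fmt.Println(classify("0x00ff"))     // ethda-calldata
	fmt.Println(classify("0x01ab"))     // unknown
}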
Replaced Optimism's CircleCI config with GitHub Actions workflows relevant to testing our changes.

diff --git ethereum-optimism/optimism/.circleci/config.yml layr-labs/optimism/.circleci/config.yml deleted file mode 100644 index d1c6aaa121d619818c25eadbdb99daea869787bb..0000000000000000000000000000000000000000 --- ethereum-optimism/optimism/.circleci/config.yml +++ /dev/null @@ -1,1866 +0,0 @@ -version: 2.1 - -parameters: - default_docker_image: - type: string - default: cimg/base:2024.01 - base_image: - type: string - default: default - # The dispatch parameters are used to manually dispatch pipelines that normally only run post-merge on develop - # from the CircleCI UI. Example configuration: - # when: - # or: - # - equal: [ "develop", <<pipeline.git.branch>> ] - # - equal: [ true, <<pipeline.parameters.main_dispatch>> ] - # Add a new `*_dispatch` parameter for any pipeline you want manual dispatch for. - main_dispatch: - type: boolean - default: true # default to running main in case the manual run cancelled an automatic run - fault_proofs_dispatch: - type: boolean - default: false - reproducibility_dispatch: - type: boolean - default: false - diff_asterisc_bytecode_dispatch: - type: boolean - default: false - kontrol_dispatch: - type: boolean - default: false - cannon_full_test_dispatch: - type: boolean - default: false - sdk_dispatch: - type: boolean - default: false - docker_publish_dispatch: - type: boolean - default: false - publish_contract_artifacts_dispatch: - type: boolean - default: false - stale_check_dispatch: - type: boolean - default: false - contracts_coverage_dispatch: - type: boolean - default: false - -orbs: - go: circleci/go@1.8.0 - gcp-cli: circleci/gcp-cli@3.0.1 - slack: circleci/slack@4.10.1 - shellcheck: circleci/shellcheck@3.2.0 - codecov: codecov/codecov@5.0.3 - utils: ethereum-optimism/circleci-utils@1.0.8 - -commands: - gcp-oidc-authenticate: - description: "Authenticate with GCP using a CircleCI OIDC token." 
- parameters: - project_id: - type: env_var_name - default: GCP_PROJECT_ID - workload_identity_pool_id: - type: env_var_name - default: GCP_WIP_ID - workload_identity_pool_provider_id: - type: env_var_name - default: GCP_WIP_PROVIDER_ID - service_account_email: - type: env_var_name - default: GCP_SERVICE_ACCOUNT_EMAIL - gcp_cred_config_file_path: - type: string - default: /home/circleci/gcp_cred_config.json - oidc_token_file_path: - type: string - default: /home/circleci/oidc_token.json - steps: - - run: - name: "Create OIDC credential configuration" - command: | - # Store OIDC token in temp file - echo $CIRCLE_OIDC_TOKEN > << parameters.oidc_token_file_path >> - # Create a credential configuration for the generated OIDC ID Token - gcloud iam workload-identity-pools create-cred-config \ - "projects/${<< parameters.project_id >>}/locations/global/workloadIdentityPools/${<< parameters.workload_identity_pool_id >>}/providers/${<< parameters.workload_identity_pool_provider_id >>}"\ - --output-file="<< parameters.gcp_cred_config_file_path >>" \ - --service-account="${<< parameters.service_account_email >>}" \ - --credential-source-file=<< parameters.oidc_token_file_path >> - - run: - name: "Authenticate with GCP using OIDC" - command: | - # Configure gcloud to leverage the generated credential configuration - gcloud auth login --brief --cred-file "<< parameters.gcp_cred_config_file_path >>" - # Configure ADC - echo "export GOOGLE_APPLICATION_CREDENTIALS='<< parameters.gcp_cred_config_file_path >>'" | tee -a "$BASH_ENV" - - check-changed: - description: "Conditionally halts a step if certain modules change" - parameters: - patterns: - type: string - description: "Comma-separated list of dependencies" - no_go_deps: - type: string - default: "" - description: "If set, does not trigger on `go.mod` / `go.sum` changes." - steps: - - run: - name: "Check for changes" - environment: - CHECK_CHANGED_NO_GO_DEPS: "<<parameters.no_go_deps>>" - command: | - cd ops/check-changed - pip3 install -r requirements.txt - python3 main.py "<<parameters.patterns>>" - - install-contracts-dependencies: - description: "Install the dependencies for the smart contracts" - steps: - - run: - name: Install dependencies - command: | - # Manually craft the submodule update command in order to take advantage - # of the -j parameter, which speeds it up a lot. 
- git submodule update --init --recursive --force -j 8 - working_directory: packages/contracts-bedrock - - notify-failures-on-develop: - description: "Notify Slack" - parameters: - channel: - type: string - default: C03N11M0BBN - mentions: - type: string - default: "" - steps: - - slack/notify: - channel: << parameters.channel >> - event: fail - template: basic_fail_1 - branch_pattern: develop - mentions: "<< parameters.mentions >>" - - run-contracts-check: - parameters: - command: - description: Just command that runs the check - type: string - steps: - - run: - name: <<parameters.command>> - command: | - git reset --hard - just <<parameters.command>> - git diff --exit-code - working_directory: packages/contracts-bedrock - when: always - environment: - FOUNDRY_PROFILE: ci - -jobs: - cannon-go-lint-and-test: - machine: true - resource_class: ethereum-optimism/latitude-1 - parameters: - skip_slow_tests: - type: boolean - default: false - notify: - description: Whether to notify on failure - type: boolean - default: false - mips_word_size: - type: integer - default: 32 - steps: - - utils/checkout-with-mise - - check-changed: - patterns: cannon,packages/contracts-bedrock/src/cannon,op-preimage,go.mod - - attach_workspace: - at: "." - - run: - name: prep Cannon results dir - command: | - mkdir -p ./tmp/test-results - mkdir -p ./tmp/testlogs - - run: - name: build Cannon example binaries - command: make elf # only compile ELF binaries with Go, we do not have MIPS GCC for creating the debug-dumps. - working_directory: cannon/testdata/example - - run: - name: Cannon Go lint - command: | - make lint - working_directory: cannon - - when: - condition: - equal: [32, <<parameters.mips_word_size>>] - steps: - - run: - name: Cannon Go 32-bit tests - command: | - export SKIP_SLOW_TESTS=<<parameters.skip_slow_tests>> - TIMEOUT="10m" - if [ "$SKIP_SLOW_TESTS" == "false" ]; then - TIMEOUT="30m" - fi - gotestsum --format=testname --junitfile=../tmp/test-results/cannon-32.xml --jsonfile=../tmp/testlogs/log-32.json \ - -- -timeout=$TIMEOUT -parallel=$(nproc) -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage-32.out ./... - working_directory: cannon - - codecov/upload: - disable_search: true - files: ./cannon/coverage-32.out - flags: cannon-go-tests-32 - - when: - condition: - equal: [64, <<parameters.mips_word_size>>] - steps: - - run: - name: Cannon Go 64-bit tests - command: | - export SKIP_SLOW_TESTS=<<parameters.skip_slow_tests>> - TIMEOUT="10m" - if [ "$SKIP_SLOW_TESTS" == "false" ]; then - TIMEOUT="30m" - fi - gotestsum --format=testname --junitfile=../tmp/test-results/cannon-64.xml --jsonfile=../tmp/testlogs/log-64.json \ - -- --tags=cannon64 -timeout=$TIMEOUT -parallel=$(nproc) -coverpkg=github.com/ethereum-optimism/optimism/cannon/... -coverprofile=coverage-64.out ./... 
- working_directory: cannon - - codecov/upload: - disable_search: true - files: ./cannon/coverage-64.out - flags: cannon-go-tests-64 - - store_test_results: - path: ./tmp/test-results - - store_artifacts: - path: ./tmp/testlogs - when: always - - when: - condition: <<parameters.notify>> - steps: - - notify-failures-on-develop: - mentions: "@proofs-team" - - cannon-build-test-vectors: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - utils/checkout-with-mise - - check-changed: - patterns: cannon/mipsevm/tests/open_mips_tests/test - - run: - name: Install dependencies - command: | - sudo apt-get update - sudo apt-get install -y binutils-mips-linux-gnu - pip install capstone pyelftools - - run: - name: Build MIPS test vectors - command: | - python3 maketests.py && git diff --exit-code - working_directory: cannon/mipsevm/tests/open_mips_tests - - diff-asterisc-bytecode: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - run: - name: Check `RISCV.sol` bytecode - working_directory: packages/contracts-bedrock - command: | - # Clone asterisc @ the pinned version to fetch remote `RISCV.sol` - ASTERISC_REV="v$(yq '.tools.asterisc' ../../mise.toml)" - REMOTE_ASTERISC_PATH="./src/vendor/asterisc/RISCV_Remote.sol" - git clone https://github.com/ethereum-optimism/asterisc \ - -b $ASTERISC_REV && \ - cp ./asterisc/rvsol/src/RISCV.sol $REMOTE_ASTERISC_PATH - - # Replace import paths - sed -i -e 's/@optimism\///' $REMOTE_ASTERISC_PATH - # Replace legacy interface paths - sed -i -e 's/src\/cannon\/interfaces\//interfaces\/cannon\//g' $REMOTE_ASTERISC_PATH - sed -i -e 's/src\/dispute\/interfaces\//interfaces\/dispute\//g' $REMOTE_ASTERISC_PATH - # Replace contract name - sed -i -e 's/contract RISCV/contract RISCV_Remote/' $REMOTE_ASTERISC_PATH - - # Install deps - forge install - - # Diff bytecode, with both contracts compiled in the local environment. - REMOTE_ASTERISC_CODE="$(forge inspect RISCV_Remote bytecode | tr -d '\n')" - LOCAL_ASTERISC_CODE="$(forge inspect RISCV bytecode | tr -d '\n')" - if [ "$REMOTE_ASTERISC_CODE" != "$LOCAL_ASTERISC_CODE" ]; then - echo "Asterisc bytecode mismatch. Local version does not match remote. Diff:" - diff <(echo "$REMOTE_ASTERISC_CODE") <(echo "$LOCAL_ASTERISC_CODE") - else - echo "Asterisc version up to date." - fi - - notify-failures-on-develop: - mentions: "@clabby @proofs-team" - - contracts-bedrock-build: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - parameters: - build_args: - description: Forge build arguments - type: string - default: "" - profile: - description: Profile to use for building - type: string - default: ci - steps: - - utils/checkout-with-mise - - install-contracts-dependencies - - run: - name: Print forge version - command: forge --version - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Build contracts - command: forge build <<parameters.build_args>> - environment: - FOUNDRY_PROFILE: <<parameters.profile>> - working_directory: packages/contracts-bedrock - - persist_to_workspace: - root: "." 
- paths: - - "packages/contracts-bedrock/cache" - - "packages/contracts-bedrock/artifacts" - - "packages/contracts-bedrock/forge-artifacts" - - "packages/contracts-bedrock/deploy-config/devnetL1.json" - - "packages/contracts-bedrock/deployments/devnetL1" - - notify-failures-on-develop - - check-kontrol-build: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." } - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Run Kontrol build - command: just kontrol-summary-full - working_directory: packages/contracts-bedrock - - run: - name: Build Kontrol summary files - command: forge build ./test/kontrol/proofs - working_directory: packages/contracts-bedrock - - notify-failures-on-develop - - docker-build: - environment: - DOCKER_BUILDKIT: 1 - parameters: - docker_tags: - description: Docker image tags, comma-separated - type: string - docker_name: - description: "Docker buildx bake target" - type: string - default: "" - registry: - description: Docker registry - type: string - default: "us-docker.pkg.dev" - repo: - description: Docker repo - type: string - default: "oplabs-tools-artifacts/images" - save_image_tag: - description: Save docker image with given tag - type: string - default: "" - platforms: - description: Platforms to build for, comma-separated - type: string - default: "linux/amd64" - publish: - description: Publish the docker image (multi-platform, all tags) - type: boolean - default: false - release: - description: Run the release script - type: boolean - default: false - resource_class: - description: Docker resoruce class - type: string - default: medium - machine: - image: <<pipeline.parameters.base_image>> - resource_class: "<<parameters.resource_class>>" - docker_layer_caching: true # we rely on this for faster builds, and actively warm it up for builds with common stages - steps: - - utils/checkout-with-mise - - attach_workspace: - at: /tmp/docker_images - - run: - command: mkdir -p /tmp/docker_images - - when: - condition: - or: - - "<<parameters.publish>>" - - "<<parameters.release>>" - steps: - - gcp-cli/install - - when: - condition: - or: - - "<<parameters.publish>>" - - "<<parameters.release>>" - steps: - - gcp-oidc-authenticate - - run: - name: Build - command: | - # Check to see if DOCKER_HUB_READ_ONLY_TOKEN is set (i.e. we are in repo) before attempting to use secrets. - # Building should work without this read only login, but may get rate limited. 
- if [[ -v DOCKER_HUB_READ_ONLY_TOKEN ]]; then - echo "$DOCKER_HUB_READ_ONLY_TOKEN" | docker login -u "$DOCKER_HUB_READ_ONLY_USER" --password-stdin - fi - - export REGISTRY="<<parameters.registry>>" - export REPOSITORY="<<parameters.repo>>" - export IMAGE_TAGS="$(echo -ne "<<parameters.docker_tags>>" | sed "s/[^a-zA-Z0-9\n,]/-/g")" - export GIT_COMMIT="$(git rev-parse HEAD)" - export GIT_DATE="$(git show -s --format='%ct')" - export PLATFORMS="<<parameters.platforms>>" - - echo "Checking git tags pointing at $GIT_COMMIT:" - tags_at_commit=$(git tag --points-at $GIT_COMMIT) - echo "Tags at commit:\n$tags_at_commit" - - filtered_tags=$(echo "$tags_at_commit" | grep "^<<parameters.docker_name>>/" || true) - echo "Filtered tags: $filtered_tags" - - if [ -z "$filtered_tags" ]; then - export GIT_VERSION="untagged" - else - sorted_tags=$(echo "$filtered_tags" | sed "s/<<parameters.docker_name>>\///" | sort -V) - echo "Sorted tags: $sorted_tags" - - # prefer full release tag over "-rc" release candidate tag if both exist - full_release_tag=$(echo "$sorted_tags" | grep -v -- "-rc" || true) - if [ -z "$full_release_tag" ]; then - export GIT_VERSION=$(echo "$sorted_tags" | tail -n 1) - else - export GIT_VERSION=$(echo "$full_release_tag" | tail -n 1) - fi - fi - - echo "Setting GIT_VERSION=$GIT_VERSION" - - # Create, start (bootstrap) and use a *named* docker builder - # This allows us to cross-build multi-platform, - # and naming allows us to use the DLC (docker-layer-cache) - docker buildx create --driver=docker-container --name=buildx-build --bootstrap --use - - DOCKER_OUTPUT_DESTINATION="" - if [ "<<parameters.publish>>" == "true" ]; then - gcloud auth configure-docker <<parameters.registry>> - echo "Building for platforms $PLATFORMS and then publishing to registry" - DOCKER_OUTPUT_DESTINATION="--push" - if [ "<<parameters.save_image_tag>>" != "" ]; then - echo "ERROR: cannot save image to docker when publishing to registry" - exit 1 - fi - else - if [ "<<parameters.save_image_tag>>" == "" ]; then - echo "Running $PLATFORMS build without destination (cache warm-up)" - DOCKER_OUTPUT_DESTINATION="" - elif [[ $PLATFORMS == *,* ]]; then - echo "ERROR: cannot perform multi-arch (platforms: $PLATFORMS) build while also loading the result into regular docker" - exit 1 - else - echo "Running single-platform $PLATFORMS build and loading into docker" - DOCKER_OUTPUT_DESTINATION="--load" - fi - fi - - # Let them cook! 
- docker buildx bake \ - --progress plain \ - --builder=buildx-build \ - -f docker-bake.hcl \ - $DOCKER_OUTPUT_DESTINATION \ - <<parameters.docker_name>> - - no_output_timeout: 45m - - when: - condition: "<<parameters.publish>>" - steps: - - notify-failures-on-develop - - when: - condition: "<<parameters.save_image_tag>>" - steps: - - run: - name: Save - command: | - IMAGE_NAME="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>:<<parameters.save_image_tag>>" - docker save -o /tmp/docker_images/<<parameters.docker_name>>.tar $IMAGE_NAME - - persist_to_workspace: - root: /tmp/docker_images - paths: # only write the one file, to avoid concurrent workspace-file additions - - "<<parameters.docker_name>>.tar" - - when: - condition: "<<parameters.release>>" - steps: - - run: - name: Tag - command: | - ./ops/scripts/ci-docker-tag-op-stack-release.sh <<parameters.registry>>/<<parameters.repo>> $CIRCLE_TAG $CIRCLE_SHA1 - - when: - condition: - or: - - and: - - "<<parameters.publish>>" - - "<<parameters.release>>" - - and: - - "<<parameters.publish>>" - - equal: [develop, << pipeline.git.branch >>] - steps: - - gcp-oidc-authenticate: - service_account_email: GCP_SERVICE_ATTESTOR_ACCOUNT_EMAIL - - run: - name: Sign - command: | - VER=$(yq '.tools.binary_signer' mise.toml) - wget -O - "https://github.com/ethereum-optimism/binary_signer/archive/refs/tags/v${VER}.tar.gz" | tar xz - cd "binary_signer-${VER}/signer" - - IMAGE_PATH="<<parameters.registry>>/<<parameters.repo>>/<<parameters.docker_name>>:<<pipeline.git.revision>>" - echo $IMAGE_PATH - pip3 install -r requirements.txt - - python3 ./sign_image.py --command="sign"\ - --attestor-project-name="$ATTESTOR_PROJECT_NAME"\ - --attestor-name="$ATTESTOR_NAME"\ - --image-path="$IMAGE_PATH"\ - --signer-logging-level="INFO"\ - --attestor-key-id="//cloudkms.googleapis.com/v1/projects/$ATTESTOR_PROJECT_NAME/locations/global/keyRings/$ATTESTOR_NAME-key-ring/cryptoKeys/$ATTESTOR_NAME-key/cryptoKeyVersions/1" - - # Verify newly published images (built on AMD machine) will run on ARM - check-cross-platform: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: arm.medium - parameters: - registry: - description: Docker registry - type: string - default: "us-docker.pkg.dev" - repo: - description: Docker repo - type: string - default: "oplabs-tools-artifacts/images" - op_component: - description: "Name of op-stack component (e.g. op-node)" - type: string - default: "" - docker_tag: - description: "Tag of docker image" - type: string - default: "<<pipeline.git.revision>>" - steps: - - setup_remote_docker - - run: - name: "Verify Image Platform" - command: | - image_name="<<parameters.registry>>/<<parameters.repo>>/<<parameters.op_component>>:<<parameters.docker_tag>>" - echo "Retrieving Docker image manifest: $image_name" - MANIFEST=$(docker manifest inspect $image_name) - - echo "Verifying 'linux/arm64' is supported..." 
- SUPPORTED_PLATFORM=$(echo "$MANIFEST" | jq -r '.manifests[] | select(.platform.architecture == "arm64" and .platform.os == "linux")') - echo $SUPPORT_PLATFORM - if [ -z "$SUPPORTED_PLATFORM" ]; then - echo "Platform 'linux/arm64' not supported by this image" - exit 1 - fi - - run: - name: "Pull and run docker image" - command: | - image_name="<<parameters.registry>>/<<parameters.repo>>/<<parameters.op_component>>:<<parameters.docker_tag>>" - docker pull $image_name || exit 1 - docker run $image_name <<parameters.op_component>> --version || exit 1 - - contracts-bedrock-frozen-code: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." } - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock - - run: - name: Check if target branch is develop - command: | - # Get PR number from CIRCLE_PULL_REQUEST - PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | rev | cut -d/ -f1 | rev) - - # Use GitHub API to get target branch - TARGET_BRANCH=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${PR_NUMBER}" | jq -r .base.ref) - - # If the target branch is not develop, do not run this check - if [ "$TARGET_BRANCH" != "develop" ]; then - echo "Target branch is not develop, skipping frozen files check" - circleci-agent step halt - fi - - run: - name: Check if PR has exempt label - command: | - # Get PR number from CIRCLE_PULL_REQUEST - PR_NUMBER=$(echo $CIRCLE_PULL_REQUEST | rev | cut -d/ -f1 | rev) - - # Use GitHub API to get labels - LABELS=$(curl -s "https://api.github.com/repos/${CIRCLE_PROJECT_USERNAME}/${CIRCLE_PROJECT_REPONAME}/pulls/${PR_NUMBER}" | jq -r .labels) - - # If the PR has the "M-exempt-frozen-files" label, do not run this check - if echo $LABELS | jq -e 'any(.[]; .name == "M-exempt-frozen-files")' > /dev/null; then - echo "Skipping frozen files check, PR has exempt label" - circleci-agent step halt - fi - - run: - name: Check frozen files - command: just check-frozen-code - working_directory: packages/contracts-bedrock - - contracts-bedrock-tests: - circleci_ip_ranges: true - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - parameters: - test_list: - description: List of test files to run - type: string - test_command: - description: Test command to execute (test or coverage) - type: string - default: test - test_flags: - description: Additional flags to pass to the test command - type: string - default: "" - test_timeout: - description: Timeout for running tests - type: string - default: 15m - test_profile: - description: Profile to use for testing - type: string - default: ci - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." } - - install-contracts-dependencies - - run: - name: Check if test list is empty - command: | - TEST_FILES=$(<<parameters.test_list>>) - if [ -z "$TEST_FILES" ]; then - echo "No test files to run. Exiting early." 
- circleci-agent step halt - fi - working_directory: packages/contracts-bedrock - - check-changed: - patterns: contracts-bedrock,op-node - - run: - name: Print dependencies - command: just dep-status - working_directory: packages/contracts-bedrock - - run: - name: Print forge version - command: forge --version - working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Build go-ffi - command: just build-go-ffi - working_directory: packages/contracts-bedrock - - run: - name: Run tests - command: | - TEST_FILES=$(<<parameters.test_list>>) - TEST_FILES=$(echo "$TEST_FILES" | circleci tests split --split-by=timings) - TEST_FILES=$(echo "$TEST_FILES" | sed 's|^test/||') - MATCH_PATH="./test/{$(echo "$TEST_FILES" | paste -sd "," -)}" - forge <<parameters.test_command>> <<parameters.test_flags>> --match-path "$MATCH_PATH" - environment: - FOUNDRY_PROFILE: <<parameters.test_profile>> - working_directory: packages/contracts-bedrock - no_output_timeout: <<parameters.test_timeout>> - - run: - name: Print failed test traces - command: just test-rerun - environment: - FOUNDRY_PROFILE: ci - working_directory: packages/contracts-bedrock - when: on_fail - - run: - name: Lint forge test names - command: just lint-forge-tests-check-no-build - working_directory: packages/contracts-bedrock - - save_cache: - name: Save Go build cache - key: golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} - paths: - - "/root/.cache/go-build" - - notify-failures-on-develop - - contracts-bedrock-coverage: - circleci_ip_ranges: true - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - parameters: - test_flags: - description: Additional flags to pass to the test command - type: string - default: "" - test_timeout: - description: Timeout for running tests - type: string - default: 15m - test_profile: - description: Profile to use for testing - type: string - default: ci - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." 
} - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock,op-node - - run: - name: Print dependencies - command: just dep-status - working_directory: packages/contracts-bedrock - - run: - name: Print forge version - command: forge --version - working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Install lcov - command: | - sudo apt-get update - sudo apt-get install -y lcov - - run: - name: Write pinned block number for cache key - command: | - just print-pinned-block-number > ./pinnedBlockNumber.txt - cat pinnedBlockNumber.txt - working_directory: packages/contracts-bedrock - - restore_cache: - name: Restore forked state - key: forked-state-contracts-bedrock-tests-upgrade-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - - run: - name: Run coverage tests - command: just coverage-lcov-all <<parameters.test_flags>> - environment: - FOUNDRY_PROFILE: <<parameters.test_profile>> - ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io - working_directory: packages/contracts-bedrock - no_output_timeout: <<parameters.test_timeout>> - - run: - name: Print failed test traces - command: just test-rerun - environment: - FOUNDRY_PROFILE: <<parameters.test_profile>> - ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io - working_directory: packages/contracts-bedrock - when: on_fail - - codecov/upload: - disable_search: true - files: ./packages/contracts-bedrock/lcov-all.info - flags: contracts-bedrock-tests - - notify-failures-on-develop - - contracts-bedrock-tests-upgrade: - circleci_ip_ranges: true - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." 
} - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock,op-node - - run: - name: Print dependencies - command: just dep-status - working_directory: packages/contracts-bedrock - - run: - name: Print forge version - command: forge --version - working_directory: packages/contracts-bedrock - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Write pinned block number for cache key - command: | - just print-pinned-block-number > ./pinnedBlockNumber.txt - cat pinnedBlockNumber.txt - working_directory: packages/contracts-bedrock - - restore_cache: - name: Restore forked state - key: forked-state-contracts-bedrock-tests-upgrade-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - - run: - name: Run tests - command: just test-upgrade - environment: - FOUNDRY_FUZZ_SEED: 42424242 - FOUNDRY_FUZZ_RUNS: 1 - FOUNDRY_PROFILE: ci - ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io - working_directory: packages/contracts-bedrock - no_output_timeout: 15m - - run: - name: Print failed test traces - command: just test-upgrade-rerun - environment: - FOUNDRY_FUZZ_SEED: 42424242 - FOUNDRY_FUZZ_RUNS: 1 - FOUNDRY_PROFILE: ci - ETH_RPC_URL: https://ci-mainnet-l1-archive.optimism.io - working_directory: packages/contracts-bedrock - when: on_fail - - save_cache: - name: Save Go build cache - key: golang-build-cache-contracts-bedrock-tests-{{ checksum "go.sum" }} - paths: - - "/root/.cache/go-build" - - save_cache: - name: Save forked state - key: forked-state-contracts-bedrock-tests-upgrade-{{ checksum "packages/contracts-bedrock/pinnedBlockNumber.txt" }} - when: always - paths: - - "/root/.foundry/cache" - - notify-failures-on-develop - - contracts-bedrock-checks: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - attach_workspace: { at: "." 
} - - install-contracts-dependencies - - check-changed: - patterns: contracts-bedrock,op-node - - run: - name: print forge version - command: forge --version - - run-contracts-check: - command: check-kontrol-summaries-unchanged - - run-contracts-check: - command: semgrep-test-validity-check - - run-contracts-check: - command: semgrep - - run-contracts-check: - command: semver-lock-no-build - - run-contracts-check: - command: semver-diff-check-no-build - - run-contracts-check: - command: validate-deploy-configs - - run-contracts-check: - command: lint - - run-contracts-check: - command: snapshots-check-no-build - - run-contracts-check: - command: interfaces-check-no-build - - run-contracts-check: - command: size-check - - run-contracts-check: - command: unused-imports-check-no-build - - run-contracts-check: - command: validate-spacers-no-build - - todo-issues: - parameters: - check_closed: - type: boolean - default: true - machine: - image: <<pipeline.parameters.base_image>> - steps: - - utils/checkout-with-mise - - run: - name: Install ripgrep - command: sudo apt-get install -y ripgrep - - run: - name: Check TODO issues - command: ./ops/scripts/todo-checker.sh --verbose <<#parameters.check_closed>> --check-closed <</parameters.check_closed>> - - notify-failures-on-develop - - fuzz-golang: - parameters: - package_name: - description: Go package name - type: string - on_changes: - description: changed pattern to fire fuzzer on - type: string - uses_artifacts: - description: should load in foundry artifacts - type: boolean - default: false - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - utils/checkout-with-mise - - check-changed: - patterns: "<<parameters.package_name>>" - - attach_workspace: - at: "." - if: ${{ uses_artifacts }} - - run: - name: Fuzz - command: | - make fuzz - working_directory: "<<parameters.package_name>>" - - run: - name: Copy fuzz artifacts - command: | - mkdir -p fuzzdata - find ./<<parameters.package_name>> -type d -name "fuzz" -exec sh -c 'cp -r "{}"/* fuzzdata/ 2>/dev/null || true' \; - when: always - - store_artifacts: - path: ./fuzzdata - when: always - - go-lint: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - run: - name: run Go linter - command: | - make lint-go - working_directory: . - - go-tests: - parameters: - notify: - description: Whether to notify on failure - type: boolean - default: false - mentions: - description: Slack user or group to mention when notifying of failures - type: string - default: "" - resource_class: - description: Machine resource class - type: string - default: ethereum-optimism/latitude-1-go-e2e - no_output_timeout: - description: Timeout for when CircleCI kills the job if there's no output - type: string - default: 60m - test_timeout: - description: Timeout for running tests - type: string - default: 10m - environment_overrides: - description: Environment overrides - type: string - default: "" - packages: - description: List of packages to test - type: string - machine: true - resource_class: <<parameters.resource_class>> - steps: - - utils/checkout-with-mise - - attach_workspace: - at: "." 
- - run: - name: build op-program-client - command: make op-program-client - working_directory: op-program - - run: - name: build op-program-host - command: make op-program-host - working_directory: op-program - - run: - name: build cannon - command: make cannon - - run: - name: run tests - no_output_timeout: <<parameters.no_output_timeout>> - command: | - mkdir -p ./tmp/test-results && mkdir -p ./tmp/testlogs - cd op-e2e && make pre-test && cd .. - - packages=( - <<parameters.packages>> - ) - formatted_packages="" - for package in "${packages[@]}"; do - formatted_packages="$formatted_packages ./$package/..." - done - - export ENABLE_KURTOSIS=true - export OP_E2E_CANNON_ENABLED="false" - export OP_E2E_SKIP_SLOW_TEST=true - export OP_E2E_USE_HTTP=true - export ENABLE_ANVIL=true - export SEPOLIA_RPC_URL="https://ci-sepolia-l1-archive.optimism.io" - export MAINNET_RPC_URL="https://ci-mainnet-l1-archive.optimism.io" - export PARALLEL=$(nproc) - export OP_TESTLOG_FILE_LOGGER_OUTDIR=$(realpath ./tmp/testlogs) - - <<parameters.environment_overrides>> - - gotestsum --format=testname \ - --junitfile=./tmp/test-results/results.xml \ - --jsonfile=./tmp/testlogs/log.json \ - --rerun-fails=3 \ - --rerun-fails-max-failures=50 \ - --packages="$formatted_packages" \ - -- -parallel=$PARALLEL -coverprofile=coverage.out -timeout=<<parameters.test_timeout>> - - codecov/upload: - disable_search: true - files: ./coverage.out - - store_test_results: - path: ./tmp/test-results - - run: - name: Compress test logs - command: tar -czf testlogs.tar.gz -C ./tmp testlogs - when: always - - store_artifacts: - path: testlogs.tar.gz - when: always - - when: - condition: "<<parameters.notify>>" - steps: - - notify-failures-on-develop: - mentions: "<<parameters.mentions>>" - - sanitize-op-program: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - utils/checkout-with-mise - - run: - name: Install tools - command: | - sudo apt-get update - sudo apt-get install -y binutils-mips-linux-gnu - - run: - name: Build cannon - command: make cannon - - run: - name: Build op-program - command: make op-program - - run: - name: Sanitize op-program client - command: make sanitize-program GUEST_PROGRAM=../op-program/bin/op-program-client.elf - working_directory: cannon - - - cannon-prestate-quick: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - restore_cache: - name: Restore cannon prestate cache - key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} - - run: - name: Build prestates - command: make cannon-prestates - - save_cache: - key: cannon-prestate-{{ checksum "./cannon/bin/cannon" }}-{{ checksum "op-program/bin/op-program-client.elf" }} - name: Save Cannon prestate to cache - paths: - - "op-program/bin/prestate*.bin.gz" - - "op-program/bin/meta*.json" - - "op-program/bin/prestate-proof*.json" - - persist_to_workspace: - root: . - paths: - - "op-program/bin/prestate*" - - "op-program/bin/meta*" - - "cannon/bin" - - cannon-prestate: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - utils/checkout-with-mise - - setup_remote_docker - - run: - name: Build prestates - command: make reproducible-prestate - - persist_to_workspace: - root: . 
- paths: - - "op-program/bin/prestate*" - - "op-program/bin/meta*" - - publish-cannon-prestates: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - attach_workspace: - at: "." - - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - - run: - name: Upload cannon prestates - command: | - # Use the actual hash for tags (hash can be found by reading releases.json) - PRESTATE_HASH=$(jq -r .pre ./op-program/bin/prestate-proof.json) - PRESTATE_MT64_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-mt64.json) - PRESTATE_INTEROP_HASH=$(jq -r .pre ./op-program/bin/prestate-proof-interop.json) - - BRANCH_NAME=$(echo "<< pipeline.git.branch >>" | tr '/' '-') - echo "Publishing ${PRESTATE_HASH}, ${PRESTATE_MT64_HASH}, ${PRESTATE_INTEROP_HASH} as ${BRANCH_NAME}" - if [[ "" != "<< pipeline.git.branch >>" ]] - then - # Upload the git commit info for each prestate since this won't be recorded in releases.json - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate=${PRESTATE_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_MT64_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-mt64.bin.gz.txt" - (echo "Commit=<< pipeline.git.revision >>" && echo "Prestate: ${PRESTATE_INTEROP_HASH}") | gsutil cp - "gs://oplabs-network-data/proofs/op-program/cannon/${BRANCH_NAME}-interop.bin.gz.txt" - - - # Use the branch name for branches to provide a consistent URL - PRESTATE_HASH="${BRANCH_NAME}" - PRESTATE_MT64_HASH="${BRANCH_NAME}-mt64" - PRESTATE_INTEROP_HASH="${BRANCH_NAME}-interop" - fi - gsutil cp ./op-program/bin/prestate.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_HASH}.bin.gz" - - gsutil cp ./op-program/bin/prestate-mt64.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_MT64_HASH}.bin.gz" - - gsutil cp ./op-program/bin/prestate-interop.bin.gz \ - "gs://oplabs-network-data/proofs/op-program/cannon/${PRESTATE_INTEROP_HASH}.bin.gz" - - notify-failures-on-develop: - mentions: "@proofs-team" - - preimage-reproducibility: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - utils/checkout-with-mise - - setup_remote_docker - - run: make -C op-program verify-reproducibility - - notify-failures-on-develop: - mentions: "@proofs-team" - - cannon-stf-verify: - docker: - - image: <<pipeline.parameters.default_docker_image>> - steps: - - utils/checkout-with-mise - - setup_remote_docker - - run: - name: Build cannon - command: make cannon - - run: - name: Verify the Cannon STF - command: make -C ./cannon cannon-stf-verify - - notify-failures-on-develop: - mentions: "@proofs-team" - - semgrep-scan: - parameters: - diff_branch: - type: string - default: develop - scan_command: - type: string - default: semgrep ci --timeout=100 - environment: - TEMPORARY_BASELINE_REF: << parameters.diff_branch >> - SEMGREP_REPO_URL: << pipeline.project.git_url >> - SEMGREP_BRANCH: << pipeline.git.branch >> - SEMGREP_COMMIT: << pipeline.git.revision >> - docker: - - image: returntocorp/semgrep - resource_class: xlarge - steps: - - checkout # no need to use mise here since the docker image contains the only dependency - - unless: - condition: - equal: ["develop", << pipeline.git.branch >>] - steps: - - run: - # Scan changed files in PRs, block on new issues only 
(existing issues ignored) - # Do a full scan when scanning develop, otherwise do an incremental scan. - name: "Conditionally set BASELINE env var" - command: | - echo 'export SEMGREP_BASELINE_REF=${TEMPORARY_BASELINE_REF}' >> $BASH_ENV - - run: - name: "Set environment variables" # for PR comments and in-app hyperlinks to findings - command: | - echo 'export SEMGREP_PR_ID=${CIRCLE_PULL_REQUEST##*/}' >> $BASH_ENV - echo 'export SEMGREP_JOB_URL=$CIRCLE_BUILD_URL' >> $BASH_ENV - echo 'export SEMGREP_REPO_NAME=$CIRCLE_PROJECT_USERNAME/$CIRCLE_PROJECT_REPONAME' >> $BASH_ENV - - run: - name: "Semgrep scan" - # --timeout (in seconds) limits the time per rule and file. - # SEMGREP_TIMEOUT is the same, but docs have conflicting defaults (5s in CLI flag, 1800 in some places) - # https://semgrep.dev/docs/troubleshooting/semgrep-app#if-the-job-is-aborted-due-to-taking-too-long - command: << parameters.scan_command >> - # If semgrep hangs, stop the scan after 20m, to prevent a useless 5h job - no_output_timeout: 20m - - notify-failures-on-develop - - bedrock-go-tests: # just a helper, that depends on all the actual test jobs - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: medium - steps: - - run: echo Done - - fpp-verify: - circleci_ip_ranges: true - docker: - - image: cimg/go:1.21 - steps: - - utils/checkout-with-mise - - run: - name: verify-sepolia - command: | - make verify-sepolia - working_directory: op-program - - notify-failures-on-develop: - mentions: "@proofs-team" - - op-program-compat: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - run: - name: compat-sepolia - command: | - make verify-compat - working_directory: op-program - - check-generated-mocks-op-node: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - check-changed: - patterns: op-node - - run: - name: check-generated-mocks - command: make generate-mocks-op-node && git diff --exit-code - - check-generated-mocks-op-service: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - utils/checkout-with-mise - - check-changed: - patterns: op-service - - run: - name: check-generated-mocks - command: make generate-mocks-op-service && git diff --exit-code - - kontrol-tests: - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: xlarge - steps: - - utils/checkout-with-mise - - install-contracts-dependencies - - check-changed: - no_go_deps: "true" - patterns: contracts-bedrock/test/kontrol,contracts-bedrock/src/L1/OptimismPortal\.sol,contracts-bedrock/src/L1/OptimismPortal2\.sol,contracts-bedrock/src/L1/L1CrossDomainMessenger\.sol,contracts-bedrock/src/L1/L1ERC721Bridge\.sol,contracts-bedrock/src/L1/L1StandardBridge\.sol,contracts-bedrock/src/L1/ResourceMetering\.sol,contracts-bedrock/src/universal/StandardBridge\.sol,contracts-bedrock/src/universal/ERC721Bridge\.sol,contracts-bedrock/src/universal/CrossDomainMessenger\.sol - - setup_remote_docker: - docker_layer_caching: true - - run: - name: Run Kontrol Tests - command: | - curl -X POST \ - -H "Accept: application/vnd.github+json" \ - -H "Authorization: Bearer $RV_COMPUTE_TOKEN" \ - https://api.github.com/repos/runtimeverification/optimism-ci/actions/workflows/optimism-ci.yaml/dispatches \ - -d '{ - "ref": "master", - "inputs": { - "branch_name": "<<pipeline.git.branch>>", - "extra_args": "script", - "statuses_sha": "<< pipeline.git.revision >>", - "org": "ethereum-optimism", - "repository": 
"optimism" - } - }' - working_directory: ./packages/contracts-bedrock - - notify-failures-on-develop - - publish-contract-artifacts: - machine: true - resource_class: ethereum-optimism/latitude-1 - steps: - - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - project_id: GCP_TOOLS_ARTIFACTS_PROJECT_ID - service_account_email: GCP_CONTRACTS_PUBLISHER_SERVICE_ACCOUNT_EMAIL - - utils/checkout-with-mise - - install-contracts-dependencies - - run: - name: Pull artifacts - command: bash scripts/ops/pull-artifacts.sh - working_directory: packages/contracts-bedrock - - run: - name: Build contracts - environment: - FOUNDRY_PROFILE: ci - command: just forge-build - working_directory: packages/contracts-bedrock - - run: - name: Publish artifacts - command: bash scripts/ops/publish-artifacts.sh - working_directory: packages/contracts-bedrock - - go-release: - parameters: - module: - description: Go Module Name - type: string - filename: - description: Goreleaser config file - default: .goreleaser.yaml - type: string - docker: - - image: <<pipeline.parameters.default_docker_image>> - resource_class: large - steps: - - setup_remote_docker - - gcp-cli/install - - gcp-oidc-authenticate: - gcp_cred_config_file_path: /tmp/gcp_cred_config.json - oidc_token_file_path: /tmp/oidc_token.json - - utils/checkout-with-mise - - run: - name: Configure Docker - command: | - gcloud auth configure-docker us-docker.pkg.dev - - run: - name: Run goreleaser - command: | - goreleaser release --clean -f ./<<parameters.module>>/<<parameters.filename>> - - stale-check: - docker: - - image: cimg/python:3.11 - steps: - - run: - name: Run Stale Check Script - command: | - git clone --branch main --depth 1 https://github.com/ethereum-optimism/circleci-utils.git /tmp/circleci-utils - cd /tmp/circleci-utils/stale-check - pip3 install -r requirements.txt - python3 stale-check.py --repo "ethereum-optimism/${CIRCLE_PROJECT_REPONAME}" --github-token "${STALE_GITHUB_TOKEN}" - -workflows: - main: - when: - and: - - or: - # Trigger on new commits - - equal: [webhook, << pipeline.trigger_source >>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.main_dispatch >>] - - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - - contracts-bedrock-build: - name: contracts-bedrock-build - # Build with just core + script contracts. - build_args: --deny-warnings --skip test - - check-kontrol-build: - requires: - - contracts-bedrock-build - - contracts-bedrock-tests: - # Test everything except PreimageOracle.t.sol since it's slow. - name: contracts-bedrock-tests - test_list: find test -name "*.t.sol" -not -name "PreimageOracle.t.sol" - - contracts-bedrock-tests: - # PreimageOracle test is slow, run it separately to unblock CI. - name: contracts-bedrock-tests-preimage-oracle - test_list: find test -name "PreimageOracle.t.sol" - - contracts-bedrock-tests: - # Heavily fuzz any fuzz tests within added or modified test files. - name: contracts-bedrock-tests-heavy-fuzz-modified - test_list: git diff origin/develop...HEAD --name-only --diff-filter=AM -- './test/**/*.t.sol' | sed 's|packages/contracts-bedrock/||' - test_timeout: 1h - test_profile: ciheavy - - contracts-bedrock-coverage: - # Generate coverage reports. 
- name: contracts-bedrock-coverage - test_timeout: 1h - test_profile: cicoverage - # need this requires to ensure that all FFI JSONs exist - requires: - - contracts-bedrock-build - - contracts-bedrock-tests-upgrade: - name: contracts-bedrock-tests-upgrade - - contracts-bedrock-checks: - requires: - - contracts-bedrock-build - - contracts-bedrock-frozen-code: - requires: - - contracts-bedrock-build - - diff-asterisc-bytecode - - semgrep-scan: - name: semgrep-scan-local - scan_command: semgrep scan --timeout=100 --config .semgrep/rules/ --error . - - semgrep-scan: - name: semgrep-test - scan_command: semgrep scan --test --config .semgrep/rules/ .semgrep/tests/ - - go-lint - - fuzz-golang: - name: fuzz-golang-<<matrix.package_name>> - on_changes: <<matrix.package_name>> - matrix: - parameters: - package_name: - - op-challenger - - op-node - - op-service - - op-chain-ops - - fuzz-golang: - name: cannon-fuzz - package_name: cannon - on_changes: cannon,packages/contracts-bedrock/src/cannon - uses_artifacts: true - requires: ["contracts-bedrock-build"] - - fuzz-golang: - name: op-e2e-fuzz - package_name: op-e2e - on_changes: op-e2e,packages/contracts-bedrock/src - uses_artifacts: true - requires: ["contracts-bedrock-build"] - - go-tests: - environment_overrides: | - export PARALLEL=24 - packages: | - op-alt-da - op-batcher - op-chain-ops - op-node - op-proposer - op-challenger - op-dispute-mon - op-conductor - op-program - op-service - op-supervisor - op-deployer - op-e2e/system - op-e2e/e2eutils - op-e2e/opgeth - op-e2e/interop - op-e2e/actions - op-e2e/faultproofs - packages/contracts-bedrock/scripts/checks - op-dripper - requires: - - contracts-bedrock-build - - cannon-prestate-quick - - op-program-compat - - bedrock-go-tests: - requires: - - go-lint - - cannon-build-test-vectors - - cannon-go-lint-and-test-32-bit - - cannon-go-lint-and-test-64-bit - - check-generated-mocks-op-node - - check-generated-mocks-op-service - - op-program-compat - # Not needed for the devnet but we want to make sure they build successfully - - cannon-docker-build - - op-dispute-mon-docker-build - - op-program-docker-build - - op-supervisor-docker-build - - proofs-tools-docker-build - - go-tests - - sanitize-op-program - - docker-build: - name: <<matrix.docker_name>>-docker-build - docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> - save_image_tag: <<pipeline.git.revision>> - matrix: - parameters: - docker_name: - - op-node - - op-batcher - - op-program - - op-proposer - - op-challenger - - proofs-tools - - op-dispute-mon - - op-conductor - - da-server - - op-supervisor - - cannon - - op-dripper - - cannon-prestate-quick - - sanitize-op-program: - requires: - - cannon-prestate-quick - - check-generated-mocks-op-node - - check-generated-mocks-op-service - - cannon-go-lint-and-test: - name: cannon-go-lint-and-test-<<matrix.mips_word_size>>-bit - requires: - - contracts-bedrock-build - skip_slow_tests: true - notify: true - matrix: - parameters: - mips_word_size: [32, 64] - - cannon-build-test-vectors - - todo-issues: - name: todo-issues-check - check_closed: false - - shellcheck/check: - name: shell-check - # We don't need the `exclude` key as the orb detects the `.shellcheckrc` - dir: . 
- ignore-dirs: ./packages/contracts-bedrock/lib - - go-release-deployer: - jobs: - - go-release: - filters: - tags: - only: /^op-deployer.*/ - branches: - ignore: /.*/ - module: op-deployer - context: - - oplabs-gcr-release - - release: - when: - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - # Wait for approval on the release - - hold: - type: approval - filters: - tags: - only: /^(da-server|proofs-tools|cannon|ufm-[a-z0-9\-]*|op-[a-z0-9\-]*)\/v.*/ - branches: - ignore: /.*/ - # Standard (medium) cross-platform docker images go here - - docker-build: - matrix: - parameters: - docker_name: - - op-node - - op-batcher - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - da-server - - op-ufm - - op-supervisor - - op-deployer - - cannon - - op-dripper - name: <<matrix.docker_name>>-docker-release - docker_tags: <<pipeline.git.revision>> - platforms: "linux/amd64,linux/arm64" - publish: true - release: true - filters: - tags: - only: /^<<matrix.docker_name>>\/v.*/ - branches: - ignore: /.*/ - context: - - oplabs-gcr-release - requires: - - hold - # Checks for cross-platform images go here - - check-cross-platform: - matrix: - parameters: - op_component: - - op-node - - op-batcher - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - da-server - - op-ufm - - op-supervisor - - op-deployer - - cannon - - op-dripper - name: <<matrix.op_component>>-cross-platform - requires: - - op-node-docker-release - - op-batcher-docker-release - - op-proposer-docker-release - - op-challenger-docker-release - - op-dispute-mon-docker-release - - op-conductor-docker-release - - da-server-docker-release - - op-ufm-docker-release - - op-supervisor-docker-release - - cannon-docker-release - - op-dripper-docker-release - # Standard (xlarge) AMD-only docker images go here - - docker-build: - matrix: - parameters: - docker_name: - - proofs-tools - name: <<matrix.docker_name>>-docker-release - resource_class: xlarge - docker_tags: <<pipeline.git.revision>> - publish: true - release: true - filters: - tags: - only: /^<<matrix.docker_name>>\/v.*/ - branches: - ignore: /.*/ - context: - - oplabs-gcr-release - requires: - - hold - - cannon-prestate: - filters: - tags: - only: /^op-program\/v.*/ - branches: - ignore: /.*/ - - publish-cannon-prestates: - context: - - slack - - oplabs-network-optimism-io-bucket - requires: - - hold - - cannon-prestate - filters: - tags: - only: /^op-program\/v.*/ - branches: - ignore: /.*/ - - scheduled-todo-issues: - when: - equal: [build_four_hours, <<pipeline.schedule.name>>] - jobs: - - todo-issues: - name: todo-issue-checks - context: - - slack - - scheduled-fpp: - when: - equal: [build_hourly, <<pipeline.schedule.name>>] - jobs: - - fpp-verify: - context: - - slack - - oplabs-fpp-nodes - - develop-publish-contract-artifacts: - when: - or: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: - [true, <<pipeline.parameters.publish_contract_artifacts_dispatch>>] - jobs: - - publish-contract-artifacts - - develop-fault-proofs: - when: - and: - - or: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: [true, <<pipeline.parameters.fault_proofs_dispatch>>] - - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - - cannon-prestate - - cannon-stf-verify: - context: - - slack - - contracts-bedrock-build: - build_args: --deny-warnings --skip test - context: - - slack - - go-tests: - name: op-e2e-cannon-tests - notify: true - mentions: "@proofs-team" - no_output_timeout: 60m - test_timeout: 59m - 
resource_class: ethereum-optimism/latitude-fps-1 - environment_overrides: | - export OP_E2E_CANNON_ENABLED="true" - export PARALLEL=24 - packages: | - op-e2e/faultproofs - context: - - slack - requires: - - contracts-bedrock-build - - cannon-prestate - - publish-cannon-prestates: - context: - - slack - - oplabs-network-optimism-io-bucket - requires: - - cannon-prestate - - op-e2e-cannon-tests - filters: - branches: - only: - - develop - - develop-kontrol-tests: - when: - and: - - or: - - equal: ["develop", <<pipeline.git.branch>>] - - equal: [true, <<pipeline.parameters.kontrol_dispatch>>] - - not: - equal: [scheduled_pipeline, << pipeline.trigger_source >>] - jobs: - - kontrol-tests: - context: - - slack - - runtimeverification - - scheduled-cannon-full-tests: - when: - or: - - equal: [build_four_hours, <<pipeline.schedule.name>>] - - equal: [true, << pipeline.parameters.cannon_full_test_dispatch >>] - jobs: - - contracts-bedrock-build: - build_args: --deny-warnings --skip test - - cannon-go-lint-and-test: - name: cannon-go-lint-and-test-<<matrix.mips_word_size>>-bit - requires: - - contracts-bedrock-build - skip_slow_tests: false - notify: true - context: - - slack - matrix: - parameters: - mips_word_size: [32, 64] - - scheduled-docker-publish: - when: - or: - - equal: [build_daily, <<pipeline.schedule.name>>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.docker_publish_dispatch >>] - jobs: - - docker-build: - matrix: - parameters: - docker_name: - - op-node - - op-batcher - - op-program - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - op-supervisor - - cannon - - op-dripper - name: <<matrix.docker_name>>-docker-publish - docker_tags: <<pipeline.git.revision>>,<<pipeline.git.branch>> - platforms: "linux/amd64,linux/arm64" - publish: true - context: - - oplabs-gcr - - slack - - check-cross-platform: - matrix: - parameters: - op_component: - - op-node - - op-batcher - - op-program - - op-proposer - - op-challenger - - op-dispute-mon - - op-conductor - - op-supervisor - - cannon - - op-dripper - name: <<matrix.op_component>>-cross-platform - requires: - - <<matrix.op_component>>-docker-publish - - scheduled-preimage-reproducibility: - when: - or: - - equal: [build_daily, <<pipeline.schedule.name>>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.reproducibility_dispatch >>] - jobs: - - preimage-reproducibility: - context: slack - - scheduled-stale-check: - when: - or: - - equal: [build_daily, <<pipeline.schedule.name>>] - # Trigger on manual triggers if explicitly requested - - equal: [true, << pipeline.parameters.stale_check_dispatch >>] - jobs: - - stale-check: - context: github-token-stale-check
diff --git ethereum-optimism/optimism/.github/workflows/kurtosis-devnet.yml layr-labs/optimism/.github/workflows/kurtosis-devnet.yml new file mode 100644 index 0000000000000000000000000000000000000000..5257a6fabd3c19a803f5bdc946d9cf9401844245 --- /dev/null +++ layr-labs/optimism/.github/workflows/kurtosis-devnet.yml @@ -0,0 +1,54 @@ +name: Kurtosis Devnet + +on: + push: + branches: [eigenda-develop] + pull_request: + # Workflow dispatch allows you to trigger this workflow manually from the Actions tab. + # We use this to trigger the workflow on release branches. + workflow_dispatch: + +env: + MISE_VERSION: 2024.12.14 + +jobs: + # We turn off this devnet while holesky is borked... see https://eigenda.statuspage.io/ + # Holesky forked after pectra upgrade, and eigenda has been down for 2 days. + # + # This is an optimism devnet which talks to the eigenda holesky testnet via an eigenda-proxy. + # TODO: we should connect this to an eigenda kurtosis devnet instead of using our holesky testnet. + # run_op_eigenda_holesky_devnet: + # runs-on: ubuntu-latest + # steps: + # - uses: actions/checkout@v4 + # - uses: jdx/mise-action@v2 + # with: + # version: ${{ env.MISE_VERSION }} + # experimental: true + # # Needed by the just eigenda-holesky-devnet command below + # # These secrets get injected into the eigenda-holesky.yaml kurtosis config file + # - name: Create EigenDA secrets file + # run: | + # cat > kurtosis-devnet/eigenda-secrets.json << EOF + # { + # "secrets": { + # "eigenda.signer-private-key-hex": "${{ secrets.EIGENDA_V1_HOLESKY_TESTNET_SIGNER_KEY }}", + # "eigenda.eth_rpc": "https://ethereum-holesky-rpc.publicnode.com" + # } + # } + # EOF + # - run: just eigenda-holesky-devnet-start + # working-directory: kurtosis-devnet + + run_op_eigenda_memstore_devnet: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: jdx/mise-action@v2 + with: + version: ${{ env.MISE_VERSION }} + experimental: true + - run: just eigenda-memstore-devnet-start + working-directory: kurtosis-devnet + - run: just eigenda-memstore-devnet-test + working-directory: kurtosis-devnet
diff --git ethereum-optimism/optimism/.github/workflows/pages.yml layr-labs/optimism/.github/workflows/pages.yml new file mode 100644 index 0000000000000000000000000000000000000000..5bd173a3aca1c373180531d9994340a94d2b454a --- /dev/null +++ layr-labs/optimism/.github/workflows/pages.yml @@ -0,0 +1,48 @@ +name: Build & publish forkdiff github-pages +permissions: + contents: read + pages: write + id-token: write +on: + workflow_dispatch: + push: + branches: + - eigenda + +jobs: + build: + concurrency: ci-${{ github.ref }} + runs-on: ubuntu-latest + steps: + - name: Checkout + uses: actions/checkout@v3 + with: + fetch-depth: 1000 # make sure to fetch the old commit we diff against + + - name: Build forkdiff + uses: "docker://protolambda/forkdiff:0.1.0" + with: + args: -repo=/github/workspace -fork=/github/workspace/fork.yaml -out=/github/workspace/index.html + + - name: Setup Pages + uses: actions/configure-pages@v5 + + - name: Build with Jekyll + uses: actions/jekyll-build-pages@v1 + with: + source: ./ + destination: ./_site + + - name: Upload artifact + uses: actions/upload-pages-artifact@v3 + + deploy: + environment: + name: github-pages + url: ${{ steps.deployment.outputs.page_url }} + runs-on: ubuntu-latest + needs: build + steps: + - name: Deploy to GitHub Pages + id: deployment + uses: actions/deploy-pages@v4
diff --git ethereum-optimism/optimism/.github/workflows/test-golang.yml layr-labs/optimism/.github/workflows/test-golang.yml new file mode 100644 index 0000000000000000000000000000000000000000..f2a46e49528defaf5d6a78ec8c74dcefa1c2a0b4 --- /dev/null +++ layr-labs/optimism/.github/workflows/test-golang.yml @@ -0,0 +1,102 @@ +name: Go + +on: + push: + branches: [eigenda-develop] + pull_request: + # Workflow dispatch allows you to trigger this workflow manually from the Actions tab. + # We use this to trigger the workflow on release branches. + workflow_dispatch: + +jobs: + go-lint: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + + - name: Set up Go + uses: actions/setup-go@v5 + with: + go-version: "1.22" + + - name: Install and run golangci-lint + uses: golangci/golangci-lint-action@v6 + with: + version: v1.61.0 + args: -E goimports,sqlclosecheck,bodyclose,asciicheck,misspell,errorlint --timeout 5m -e "errors.As" -e "errors.Is" ./... + + build-and-cache-contracts: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4 + - uses: jdx/mise-action@v2 + with: + version: 2024.12.14 # [default: latest] mise version to install + install: true # [default: true] run `mise install` + cache: true # [default: true] cache mise using GitHub's cache + experimental: true # [default: false] enable experimental features + - uses: actions/cache@v3 + id: cache-artifacts + with: + path: packages/contracts-bedrock/forge-artifacts + # If any of the contracts file changes, the cache key will change, forcing a rebuild of the forge artifacts + key: ${{ runner.os }}-forge-${{ hashFiles('packages/contracts-bedrock/src/**/*.sol') }} + - name: Build contracts if cache miss + if: steps.cache-artifacts.outputs.cache-hit != 'true' + run: make build-contracts + + go-tests: + needs: [build-and-cache-contracts] + runs-on: ubuntu-latest + strategy: + matrix: + packages: + - op-batcher + - op-node + - op-alt-da + - op-e2e/system/altda + - op-e2e/actions/altda + steps: + - uses: actions/checkout@v4 + + - uses: jdx/mise-action@v2 + with: + version: 2024.12.14 # [default: latest] mise version to install + install: true # [default: true] run `mise install` + cache: true # [default: true] cache mise using GitHub's cache + experimental: true # [default: false] enable experimental features + + - name: Restore cached forge artifacts cached + uses: actions/cache@v3 + id: cache-restore + with: + path: packages/contracts-bedrock/forge-artifacts + key: ${{ runner.os }}-forge-${{ hashFiles('packages/contracts-bedrock/src/**/*.sol') }} + + # Cache has been stored in the build-and-cache-contracts job, so if this fails there's a problem + - name: Check cache restore + if: steps.cache-restore.outputs.cache-hit != 'true' + run: | + echo "Cache restore failed" + exit 1 + + # We use mise to install golang instead of the setup-go action, + # so we need to do the cache setup ourselves + - name: Go Module Cache + uses: actions/cache@v3 + id: go-cache + with: + path: ~/go/pkg/mod + key: ${{ runner.os }}-go-${{ hashFiles('**/go.sum') }} + restore-keys: | + ${{ runner.os }}-go- + + # Add explicit download on cache miss + # go test runs `go mod download` implicitly, but this separation is nice to see how long downloading vs running tests takes + - name: Download Go modules + if: steps.go-cache.outputs.cache-hit != 'true' + run: go mod download + + - name: Run tests + run: | + go test -timeout=10m ./${{ matrix.packages }}/...

Added kurtosis devnet yaml files that spin up an op chain using altda together with an eigenda-proxy, either connected to the EigenDA holesky testnet or running in memstore mode to simulate interactions with an EigenDA network.
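As a quick smoke test of such a devnet, one can poll the op-node's `optimism_syncStatus` RPC (the same call the `eigenda-memstore-devnet-sync-status` justfile recipe below makes via `cast`) and check that the heads advance. A minimal sketch in Go; the endpoint URL is a placeholder for whatever `kurtosis port print` reports:

```go
package main

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/rpc"
)

func main() {
	// Placeholder endpoint: use the URL printed by
	// `kurtosis port print eigenda-memstore-devnet op-cl-1-op-node-op-geth-op-kurtosis http`.
	client, err := rpc.Dial("http://127.0.0.1:54321")
	if err != nil {
		panic(err)
	}
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
	defer cancel()

	// optimism_syncStatus returns the op-node's view of the L1/L2 heads.
	var status json.RawMessage
	if err := client.CallContext(ctx, &status, "optimism_syncStatus"); err != nil {
		panic(err)
	}
	fmt.Println(string(status))
}
```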

diff --git ethereum-optimism/optimism/kurtosis-devnet/README.md layr-labs/optimism/kurtosis-devnet/README.md index d0efc34b2ab8f0f2c1563bb1066ecff80d549fb2..d3b7e74c25218298e26933f0f0655e237bd6a23a 100644 --- ethereum-optimism/optimism/kurtosis-devnet/README.md +++ layr-labs/optimism/kurtosis-devnet/README.md @@ -19,6 +19,7 @@ To see available devnets, consult the `justfile` to see what `.*-devnet` targets exist, currently - `simple-devnet` - `interop-devnet` - `user-devnet` +- `eigenda-holesky-devnet`   You can read over the referenced `yaml` files located in this directory to see the network definition which would be deployed. Mini and Simple are example network definitions, and User expects a provided network definition.
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-holesky.yaml layr-labs/optimism/kurtosis-devnet/eigenda-holesky.yaml new file mode 100644 index 0000000000000000000000000000000000000000..183c1fc4ab529839c5e3255ea3ab1733b3de244a --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-holesky.yaml @@ -0,0 +1,112 @@ +# This devnet uses an eigenda-proxy to interact with the eigenda holesky testnet network. +# As a requirement, you must first create and populate the eigenda-secrets.json file +# 1. cp eigenda-secrets.example.json eigenda-secrets.json +# 2. Populate the file with the required values +# TODO: Connect this with an eigenda v1 kurtosis devnet instead of using our holesky testnet. +# See https://github.com/Layr-Labs/avs-devnet/blob/main/examples/eigenda.yaml +{{- $context := or . (dict)}} +--- +optimism_package: + altda_deploy_config: + use_altda: true + # We use the generic commitment which means that the dachallenge contract won't get deployed. + # We align with l2beat's analysis of the da_challenge contract not being economically viable, + # so even if a rollup failsover to keccak commitments, not using the da_challenge contract is fine + # (has same security as using it). + # See https://l2beat.com/scaling/projects/redstone#da-layer-risk-analysis and + # https://discord.com/channels/1244729134312198194/1260612364865245224/1290294353688002562 for + # an economic analysis of the da challenge contract. + da_commitment_type: GenericCommitment + da_challenge_window: 16 + da_resolve_window: 16 + da_bond_size: 0 + da_resolver_refund_percentage: 0 + chains: + - participants: + - el_type: op-geth + el_image: "" + el_log_level: "" + el_extra_env_vars: {} + el_extra_labels: {} + el_extra_params: [] + cl_type: op-node + cl_image: {{ localDockerImage "op-node" }} + cl_log_level: "debug" + cl_extra_env_vars: {} + cl_extra_labels: {} + cl_extra_params: [] + count: 2 + network_params: + network: "kurtosis" + network_id: "2151908" + seconds_per_slot: 2 + name: "op-kurtosis" + fjord_time_offset: 0 + granite_time_offset: 0 + holocene_time_offset: 0 + fund_dev_accounts: true + batcher_params: + image: {{ localDockerImage "op-batcher" }} + extra_params: [] + proposer_params: + image: {{ localDockerImage "op-proposer" }} + extra_params: [] + game_type: 1 + proposal_interval: 10m + challenger_params: + image: {{ localDockerImage "op-challenger" }} + cannon_prestate_path: "" + cannon_prestates_url: "http://fileserver/proofs/op-program/cannon" + extra_params: [] + mev_params: + rollup_boost_image: "" + builder_host: "" + builder_port: "" + da_server_params: + image: ghcr.io/layr-labs/eigenda-proxy:v1.6.3 + cmd: + - --addr + - 0.0.0.0 + - --port + - "3100" + - --eigenda.disperser-rpc + - disperser-holesky.eigenda.xyz:443 + - --eigenda.svc-manager-addr + - "0xD4A7E1Bd8015057293f0D0A557088c286942e84b" + # The two params below are loaded from the eigenda-secrets.json file + - --eigenda.signer-private-key-hex + - {{ dig "secrets" "eigenda.signer-private-key-hex" "" $context }} + - --eigenda.eth-rpc + - {{ dig "secrets" "eigenda.eth_rpc" "" $context }} + additional_services: + - da_server + op_contract_deployer_params: + image: {{ localDockerImage "op-deployer" }} + l1_artifacts_locator: {{ localContractArtifacts "l1" }} + l2_artifacts_locator: {{ localContractArtifacts "l2" }} + global_deploy_overrides: + faultGameAbsolutePrestate: {{ localPrestate.Hashes.prestate }} + global_log_level: "info" + global_node_selectors: {} + global_tolerations: [] + persistent: false +ethereum_package: + 
participants: + - el_type: geth + cl_type: teku + network_params: + preset: minimal + genesis_delay: 5 + additional_preloaded_contracts: | + { + "0x4e59b44847b379578588920cA78FbF26c0B4956C": { + "balance": "0ETH", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "storage": {}, + "nonce": "1" + } + } + port_publisher: + el: + enabled: true + public_port_start: 32000
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-memstore.yaml layr-labs/optimism/kurtosis-devnet/eigenda-memstore.yaml new file mode 100644 index 0000000000000000000000000000000000000000..2ce7e4b4b294f314878b3d694f92500fd334bd43 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-memstore.yaml @@ -0,0 +1,103 @@ +# This devnet uses an eigenda-proxy in memstore mode, which holds blobs in memory and generates random certs to simulate an EigenDA network. +# Unlike the holesky devnet, no eigenda-secrets.json file is needed: +# the proxy never talks to a real disperser, so the devnet can be +# started directly with `just eigenda-memstore-devnet-start`. +# TODO: Connect this with an eigenda v1 kurtosis devnet instead of using memstore mode. +# See https://github.com/Layr-Labs/avs-devnet/blob/main/examples/eigenda.yaml +{{- $context := or . (dict)}} +--- +optimism_package: + altda_deploy_config: + use_altda: true + # We use the generic commitment which means that the da_challenge contract won't get deployed. + # We align with l2beat's analysis of the da_challenge contract not being economically viable, + # so even if a rollup fails over to keccak commitments, not using the da_challenge contract is fine + # (has same security as using it). + # See https://l2beat.com/scaling/projects/redstone#da-layer-risk-analysis and + # https://discord.com/channels/1244729134312198194/1260612364865245224/1290294353688002562 for + # an economic analysis of the da challenge contract. + da_commitment_type: GenericCommitment + da_challenge_window: 16 + da_resolve_window: 16 + da_bond_size: 0 + da_resolver_refund_percentage: 0 + chains: + - participants: + - el_type: op-geth + # latest tag is currently broken until the next stable release, see https://github.com/ethereum-optimism/op-geth/pull/515 + # Also see discussion in https://discord.com/channels/1244729134312198194/1260624141497798706/1342556343495692320 + el_image: "us-docker.pkg.dev/oplabs-tools-artifacts/images/op-geth:optimism" + el_log_level: "" + el_extra_env_vars: {} + el_extra_labels: {} + el_extra_params: [] + cl_type: op-node + cl_image: {{ localDockerImage "op-node" }} + cl_log_level: "debug" + cl_extra_env_vars: {} + cl_extra_labels: {} + cl_extra_params: [] + count: 1 + network_params: + network: "kurtosis" + network_id: "2151908" + seconds_per_slot: 2 + name: "op-kurtosis" + fjord_time_offset: 0 + granite_time_offset: 0 + holocene_time_offset: 0 + fund_dev_accounts: true + batcher_params: + image: {{ localDockerImage "op-batcher" }} + extra_params: + - --altda.max-concurrent-da-requests=1 + - --max-channel-duration=2 + - --target-num-frames=1 + - --max-l1-tx-size-bytes=1000 + - --batch-type=1 + proposer_params: + image: {{ localDockerImage "op-proposer" }} + extra_params: [] + game_type: 1 + proposal_interval: 10m + challenger_params: + # TODO: reenable once we start testing secure integrations + enabled: false + image: {{ localDockerImage "op-challenger" }} + cannon_prestate_path: "" + cannon_prestates_url: "http://fileserver/proofs/op-program/cannon" + extra_params: [] + da_server_params: + image: ghcr.io/layr-labs/eigenda-proxy:v1.6.5 + cmd: + - --addr + - 0.0.0.0 + - --port + - "3100" + - --memstore.enabled + - --memstore.expiration + - "30m" + additional_services: + - da_server + global_log_level: "info" + global_node_selectors: {} + global_tolerations: [] + persistent: false +ethereum_package: + participants: + - el_type: geth + el_extra_params: + - --graphql # needed to query for batcher-inbox txs to test failover working correctly + cl_type: teku + network_params: + 
preset: minimal + genesis_delay: 5 + additional_preloaded_contracts: | + { + "0x4e59b44847b379578588920cA78FbF26c0B4956C": { + "balance": "0ETH", + "code": "0x7fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffe03601600081602082378035828234f58015156039578182fd5b8082525050506014600cf3", + "storage": {}, + "nonce": "1" + } + }
diff --git ethereum-optimism/optimism/kurtosis-devnet/eigenda-secrets.example.json layr-labs/optimism/kurtosis-devnet/eigenda-secrets.example.json new file mode 100644 index 0000000000000000000000000000000000000000..8dc8e1fce4507ba9b96ee6b8a1ee3a9fc154d0f8 --- /dev/null +++ layr-labs/optimism/kurtosis-devnet/eigenda-secrets.example.json @@ -0,0 +1,6 @@ +{ + "secrets": { + "eigenda.signer-private-key-hex": "", + "eigenda.eth_rpc": "" + } +}
diff --git ethereum-optimism/optimism/kurtosis-devnet/justfile layr-labs/optimism/kurtosis-devnet/justfile index 314e04bc3896cb646ae199e2f7670bafb14f980a..4c6754e7d0ea72f7983679fb03c51d47d66010d8 100644 --- ethereum-optimism/optimism/kurtosis-devnet/justfile +++ layr-labs/optimism/kurtosis-devnet/justfile @@ -73,6 +73,96 @@ ./tests/ "$ARGS"   # Devnet recipes   +# EigenDA devnet that uses eigenda-proxy connected to eigenda holesky testnet network +[group('eigenda')] +eigenda-holesky-devnet-start: (devnet "eigenda-holesky.yaml" "eigenda-secrets.json" "eigenda-holesky") +[group('eigenda')] +eigenda-holesky-devnet-clean: + kurtosis enclave rm eigenda-holesky-devnet --force +# EigenDA devnet that uses the eigenda-proxy in memstore mode (simulates an eigenda network but generates random certs) +[group('eigenda')] +eigenda-memstore-devnet-start: (devnet "eigenda-memstore.yaml") +[group('eigenda')] +eigenda-memstore-devnet-clean: + kurtosis enclave rm eigenda-memstore-devnet --force +# Cause proxy to start returning 503 errors to batcher, as a signal +# to failover to ethDA. Use `eigenda-memstore-devnet-failback` to revert. +[group('eigenda')] +eigenda-memstore-devnet-failover: + #!/usr/bin/env bash + PROXY_ENDPOINT=$(kurtosis port print eigenda-memstore-devnet da-server-op-kurtosis http) + curl -X PATCH $PROXY_ENDPOINT/memstore/config -d '{"PutReturnsFailoverError": true}' +[group('eigenda')] +eigenda-memstore-devnet-failback: + #!/usr/bin/env bash + PROXY_ENDPOINT=$(kurtosis port print eigenda-memstore-devnet da-server-op-kurtosis http) + curl -X PATCH $PROXY_ENDPOINT/memstore/config -d '{"PutReturnsFailoverError": false}' +[group('eigenda')] +eigenda-memstore-devnet-grafana: + #!/usr/bin/env bash + GRAFANA_URL=$(kurtosis port print eigenda-memstore-devnet grafana http) + open $GRAFANA_URL +[group('eigenda')] +eigenda-memstore-devnet-sync-status: + #!/usr/bin/env bash + OPNODE_ENDPOINT=$(kurtosis port print eigenda-memstore-devnet op-cl-1-op-node-op-geth-op-kurtosis http) + cast rpc optimism_syncStatus --rpc-url $OPNODE_ENDPOINT | jq +[group('eigenda')] +eigenda-memstore-devnet-configs: + #!/usr/bin/env bash + echo "OP-NODE ROLLUP CONFIG:" + OPNODE_ENDPOINT=$(kurtosis port print eigenda-memstore-devnet op-cl-1-op-node-op-geth-op-kurtosis http) + cast rpc optimism_rollupConfig --rpc-url $OPNODE_ENDPOINT | jq + echo "TEKU L1-CL SPEC:" + TEKU_ENDPOINT=$(kurtosis port print eigenda-memstore-devnet cl-1-teku-geth http) + curl $TEKU_ENDPOINT/eth/v1/config/spec | jq + echo "PROXY MEMSTORE CONFIG:" + PROXY_ENDPOINT=$(kurtosis port print eigenda-memstore-devnet da-server-op-kurtosis http) + curl $PROXY_ENDPOINT/memstore/config | jq +# We unfortunately have to restart the batcher in this ugly way right now just to change even a single flag. +# This is b/c op's kurtosis setup right now is not idempotent so if we change a param in eigenda-memstore.yaml +# and rerun `just eigenda-memstore-devnet`, the entire devnet gets respun up which takes a long time. +# Track progress for fixing this in https://github.com/ethereum-optimism/optimism/issues/14390. +# Kurtosis also doesn't have a simple way to update a running service's config, like `kubectl edit` for k8s. +# See https://github.com/kurtosis-tech/kurtosis/issues/2628 for this issue. +# Restart batcher with new flags or image. 
+[group('eigenda')] +eigenda-memstore-devnet-restart-batcher: + #!/usr/bin/env bash + # IMAGE=op-batcher:eigenda-memstore-devnet + IMAGE=us-docker.pkg.dev/oplabs-tools-artifacts/images/op-batcher:v1.10.0 + kurtosis service add eigenda-memstore-devnet op-batcher-op-kurtosis \ + $IMAGE \ + --ports "http=8548,metrics=9001" \ + -- op-batcher \ + --l2-eth-rpc=http://op-el-1-op-geth-op-node-op-kurtosis:8545 \ + --rollup-rpc=http://op-cl-1-op-node-op-geth-op-kurtosis:8547 \ + --poll-interval=1s \ + --sub-safety-margin=6 \ + --num-confirmations=1 \ + --safe-abort-nonce-too-low-count=3 \ + --resubmission-timeout=30s \ + --rpc.addr=0.0.0.0 \ + --rpc.port=8548 \ + --rpc.enable-admin \ + --metrics.enabled \ + --metrics.addr=0.0.0.0 \ + --metrics.port=9001 \ + --l1-eth-rpc=http://el-1-geth-teku:8545 \ + --private-key=0xb3d2d558e3491a3709b7c451100a0366b5872520c7aa020c17a0e7fa35b6a8df \ + --data-availability-type=calldata \ + --altda.enabled=True \ + --altda.da-server=http://da-server-op-kurtosis:3100 \ + --altda.da-service \ + --altda.max-concurrent-da-requests=1 \ + --max-channel-duration=2 \ + --target-num-frames=1 \ + --max-l1-tx-size-bytes=1000 \ + --batch-type=1 +[group('eigenda')] +eigenda-memstore-devnet-test: + go test ./tests/eigenda/... + # Simple devnet simple-devnet: (devnet "simple.yaml")
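The `eigenda-memstore-devnet-failover` and `eigenda-memstore-devnet-failback` recipes above just PATCH the proxy's in-memory memstore config. For programmatic tests, the same toggle can be done from Go; a minimal sketch, reusing the `/memstore/config` endpoint and `PutReturnsFailoverError` field shown in the justfile (the endpoint URL is a placeholder for whatever `kurtosis port print` reports):

```go
package main

import (
	"bytes"
	"fmt"
	"net/http"
)

// setFailover flips the memstore-mode proxy between normal operation and
// returning failover errors on PUTs, mirroring the curl commands above.
func setFailover(proxyEndpoint string, failover bool) error {
	body := fmt.Sprintf(`{"PutReturnsFailoverError": %t}`, failover)
	req, err := http.NewRequest(http.MethodPatch,
		proxyEndpoint+"/memstore/config", bytes.NewBufferString(body))
	if err != nil {
		return err
	}
	req.Header.Set("Content-Type", "application/json")
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	return nil
}

func main() {
	// Placeholder endpoint: use the URL printed by
	// `kurtosis port print eigenda-memstore-devnet da-server-op-kurtosis http`.
	if err := setFailover("http://127.0.0.1:3100", true); err != nil {
		panic(err)
	}
}
```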
diff --git ethereum-optimism/optimism/README.md layr-labs/optimism/README.md index d4ad5efe04f3c77cfb5cdc836240694601ff669d..3319e14ec66688638b40d526c7d74febd4466bfc 100644 --- ethereum-optimism/optimism/README.md +++ layr-labs/optimism/README.md @@ -1,135 +1,102 @@ -<div align="center"> - <br /> - <br /> - <a href="https://optimism.io"><img alt="Optimism" src="https://raw.githubusercontent.com/ethereum-optimism/brand-kit/main/assets/svg/OPTIMISM-R.svg" width=600></a> - <br /> - <h3><a href="https://optimism.io">Optimism</a> is Ethereum, scaled.</h3> - <br /> -</div> +![](./assets/EigenDA_TextLogo_White.svg)   -<!-- START doctoc generated TOC please keep comment here to allow auto update --> -<!-- DON'T EDIT THIS SECTION, INSTEAD RE-RUN doctoc TO UPDATE --> -**Table of Contents** +# EigenDA powered Optimism-Fork   -- [What is Optimism?](#what-is-optimism) -- [Documentation](#documentation) -- [Specification](#specification) -- [Community](#community) -- [Contributing](#contributing) -- [Security Policy and Vulnerability Reporting](#security-policy-and-vulnerability-reporting) -- [Directory Structure](#directory-structure) -- [Development and Release Process](#development-and-release-process) - - [Overview](#overview) - - [Production Releases](#production-releases) - - [Development branch](#development-branch) -- [License](#license) +[![golang](https://github.com/Layr-Labs/optimism/actions/workflows/test-golang.yml/badge.svg)](https://github.com/Layr-Labs/optimism/actions/workflows/test-golang.yml) +[![kurtosis](https://github.com/Layr-Labs/optimism/actions/workflows/kurtosis-devnet.yml/badge.svg)](https://github.com/Layr-Labs/optimism/actions/workflows/kurtosis-devnet.yml)   -<!-- END doctoc generated TOC please keep comment here to allow auto update --> +[ForkDiff](https://layr-labs.github.io/optimism)   -## What is Optimism? +This repo contains our fork of [optimism](https://github.com/ethereum-optimism/optimism) to support EigenDA.   -[Optimism](https://www.optimism.io/) is a project dedicated to scaling Ethereum's technology and expanding its ability to coordinate people from across the world to build effective decentralized economies and governance systems. The [Optimism Collective](https://www.optimism.io/vision) builds open-source software that powers scalable blockchains and aims to address key governance and economic challenges in the wider Ethereum ecosystem. Optimism operates on the principle of **impact=profit**, the idea that individuals who positively impact the Collective should be proportionally rewarded with profit. **Change the incentives and you change the world.** +- [EigenDA powered Optimism-Fork](#eigenda-powered-optimism-fork) + - [EigenDA Proxy](#eigenda-proxy) + - [Fork Features](#fork-features) + - [1. High Throughput (large parallel blobs)](#1-high-throughput-large-parallel-blobs) + - [2. Failover (for Liveness)](#2-failover-for-liveness) + - [3. Security (for Safety)](#3-security-for-safety) + - [Testing](#testing) + - [CI](#ci) + - [Unit Tests](#unit-tests) + - [op-e2e Tests](#op-e2e-tests) + - [Kurtosis Devnet Tests](#kurtosis-devnet-tests) + - [Releases and Branching Strategy](#releases-and-branching-strategy)   -In this repository you'll find numerous core components of the OP Stack, the decentralized software stack maintained by the Optimism Collective that powers Optimism and forms the backbone of blockchains like [OP Mainnet](https://explorer.optimism.io/) and [Base](https://base.org).
The OP Stack is designed to be aggressively open-source — you are welcome to explore, modify, and extend this code. +## EigenDA Proxy   -## Documentation +OP's altda spec has both op-batcher and op-nodes interface with AltDA layers via a [da-server](https://specs.optimism.io/experimental/alt-da.html#da-server). EigenDA's implementation of the da-server is called the [EigenDA Proxy](https://github.com/Layr-Labs/eigenda-proxy). The proxy hides EigenDA's async grpc API behind a simple POST/GET sync (blocking) REST API.   -- If you want to build on top of OP Mainnet, refer to the [Optimism Documentation](https://docs.optimism.io) -- If you want to build your own OP Stack based blockchain, refer to the [OP Stack Guide](https://docs.optimism.io/stack/getting-started) and make sure to understand this repository's [Development and Release Process](#development-and-release-process) +## Fork Features   -## Specification +There are 3 important features for any rollup: +1. Performance +2. Liveness +3. Safety   -Detailed specifications for the OP Stack can be found within the [OP Stack Specs](https://github.com/ethereum-optimism/specs) repository. +The upstream code in optimism's repo currently does not support these features for altda rollups. The goal of our fork is to provide these for downstream altda rollups. We will try to upstream as many changes as possible, but the op-team has stopped being receptive to our PRs since the pectra upgrade.   -## Community +We describe below the current feature-set of the upstream altda code. See release notes for the latest features.   -General discussion happens most frequently on the [Optimism discord](https://discord.gg/optimism). -Governance discussion can also be found on the [Optimism Governance Forum](https://gov.optimism.io/). +### 1. High Throughput (large parallel blobs)   -## Contributing +Because POSTs to the EigenDA Proxy are blocking (see [EigenDA Proxy](#eigenda-proxy) section), the throughput which a rollup can achieve is limited by the number and size of parallel blobs it can submit. The upstream code supports [parallel blobs submissions](https://github.com/ethereum-optimism/optimism/pull/11698) pre-holocene, but the [Holocene strict ordering rules](https://specs.optimism.io/protocol/holocene/derivation.html) have broken that implementation.   -The OP Stack is a collaborative project. By collaborating on free, open software and shared standards, the Optimism Collective aims to prevent siloed software development and rapidly accelerate the development of the Ethereum ecosystem. Come contribute, build the future, and redefine power, together. +We will implement a new parallel blobs submission mechanism which is compatible with the Holocene strict ordering rules, and also enable submitting large blobs (EigenDA allows blobs up to 16MiB currently).   -[CONTRIBUTING.md](./CONTRIBUTING.md) contains a detailed explanation of the contributing process for this repository. Make sure to use the [Developer Quick Start](./CONTRIBUTING.md#development-quick-start) to properly set up your development environment. +### 2. Failover (for Liveness)   -[Good First Issues](https://github.com/ethereum-optimism/optimism/issues?q=is:open+is:issue+label:D-good-first-issue) are a great place to look for tasks to tackle if you're not sure where to start, and see [CONTRIBUTING.md](./CONTRIBUTING.md) for info on larger projects. +The upstream altda code does not support failover. If the EigenDA network goes down, the rollup will be stuck.   
-## Security Policy and Vulnerability Reporting +We will implement a failover mechanism to allow the rollup to continue processing transactions even if the EigenDA network is down.   -Please refer to the canonical [Security Policy](https://github.com/ethereum-optimism/.github/blob/master/SECURITY.md) document for detailed information about how to report vulnerabilities in this codebase. -Bounty hunters are encouraged to check out the [Optimism Immunefi bug bounty program](https://immunefi.com/bounty/optimism/). -The Optimism Immunefi program offers up to $2,000,042 for in-scope critical vulnerabilities. +### 3. Security (for Safety)   -## Directory Structure +The upstream derivation pipeline and challenger code do not currently support the EigenDA security model.   -<pre> -├── <a href="./docs">docs</a>: A collection of documents including audits and post-mortems -├── <a href="./kurtosis-devnet">kurtosis-devnet</a>: OP-Stack Kurtosis devnet -├── <a href="./op-batcher">op-batcher</a>: L2-Batch Submitter, submits bundles of batches to L1 -├── <a href="./op-chain-ops">op-chain-ops</a>: State surgery utilities -├── <a href="./op-challenger">op-challenger</a>: Dispute game challenge agent -├── <a href="./op-e2e">op-e2e</a>: End-to-End testing of all bedrock components in Go -├── <a href="./op-node">op-node</a>: rollup consensus-layer client -├── <a href="./op-preimage">op-preimage</a>: Go bindings for Preimage Oracle -├── <a href="./op-program">op-program</a>: Fault proof program -├── <a href="./op-proposer">op-proposer</a>: L2-Output Submitter, submits proposals to L1 -├── <a href="./op-service">op-service</a>: Common codebase utilities -├── <a href="./op-ufm">op-ufm</a>: Simulations for monitoring end-to-end transaction latency -├── <a href="./op-wheel">op-wheel</a>: Database utilities -├── <a href="./ops">ops</a>: Various operational packages -├── <a href="./packages">packages</a> -│ ├── <a href="./packages/contracts-bedrock">contracts-bedrock</a>: OP Stack smart contracts -├── <a href="./semgrep">semgrep</a>: Semgrep rules and tests -</pre> +Because making altda fraud proofs secure is very involved, we have opted to first secure zk integrations like [op-succinct](https://github.com/succinctlabs/op-succinct) and [risc0-kailua](https://github.com/risc0/kailua) by using [op-rs](https://op-rs.github.io/kona/)'s stack. See our [Hokulea](https://github.com/Layr-Labs/hokulea) repo for the latest on this.   -## Development and Release Process +## Testing   -### Overview +### CI   -Please read this section carefully if you're planning to fork or make frequent PRs into this repository. +OP uses circleci for CI, but we migrated to github actions for this fork. The unit and op-e2e tests are purely golang and so run as part of the [test-golang.yml](./.github/workflows/test-golang.yml) github workflow, whereas the kurtosis tests are run as part of the [kurtosis-devnet.yml](./.github/workflows/kurtosis-devnet.yml) workflow.   -### Production Releases +### Unit Tests   -Production releases are always tags, versioned as `<component-name>/v<semver>`. -For example, an `op-node` release might be versioned as `op-node/v1.1.2`, and smart contract releases might be versioned as `op-contracts/v1.0.0`. -Release candidates are versioned in the format `op-node/v1.1.2-rc.1`. -We always start with `rc.1` rather than `rc`. +For each feature we add simple unit tests where applicable.   -For contract releases, refer to the GitHub release notes for a given release which will list the specific contracts being released. 
Not all contracts are considered production ready within a release and many are under active development. +### op-e2e Tests   -Tags of the form `v<semver>`, such as `v1.1.4`, indicate releases of all Go code only, and **DO NOT** include smart contracts. -This naming scheme is required by Golang. -In the above list, this means these `v<semver>` releases contain all `op-*` components and exclude all `contracts-*` components. +We also add integration tests using op-e2e's framework. These tests are very useful as they are run purely in golang in a single process with very fast block times, but they are limited in that the proxy is not spun up and the batcher available there is only a fake.   -`op-geth` embeds upstream geth’s version inside its own version as follows: `vMAJOR.GETH_MAJOR GETH_MINOR GETH_PATCH.PATCH`. -Basically, geth’s version is our minor version. -For example if geth is at `v1.12.0`, the corresponding op-geth version would be `v1.101200.0`. -Note that we pad out to three characters for the geth minor version and two characters for the geth patch version. -Since we cannot left-pad with zeroes, the geth major version is not padded. +### Kurtosis Devnet Tests   -See the [Node Software Releases](https://docs.optimism.io/builders/node-operators/releases) page of the documentation for more information about releases for the latest node components. +For full e2e tests we leverage optimism's [kurtosis devnet](./kurtosis-devnet/README.md). See the config files used to spin up a devnet with eigenda-proxy in [memstore](./kurtosis-devnet/eigenda-memstore.yaml) mode and [holesky](./kurtosis-devnet/eigenda-holesky.yaml) mode, as well as the available eigenda group commands in the [justfile](./kurtosis-devnet/justfile): +```sh +$ just --list +Available recipes: + ...   -The full set of components that have releases are: + [eigenda] + eigenda-holesky-devnet-clean + eigenda-holesky-devnet-start # EigenDA devnet that uses eigenda-proxy connected to eigenda holesky testnet network + eigenda-memstore-devnet-clean + eigenda-memstore-devnet-configs + eigenda-memstore-devnet-failback + eigenda-memstore-devnet-failover # to failover to ethDA. Use `eigenda-memstore-devnet-failback` to revert. + eigenda-memstore-devnet-grafana + eigenda-memstore-devnet-restart-batcher # Restart batcher with new flags or image. + eigenda-memstore-devnet-start # EigenDA devnet that uses the eigenda-proxy in memstore mode (simulates an eigenda network but generates random certs) + eigenda-memstore-devnet-sync-status + eigenda-memstore-devnet-test +```   -- `op-batcher` -- `op-contracts` -- `op-challenger` -- `op-node` -- `op-proposer` +## Releases and Branching Strategy   -All other components and packages should be considered development components only and do not have releases. - -### Development branch - -The primary development branch is [`develop`](https://github.com/ethereum-optimism/optimism/tree/develop/). -`develop` contains the most up-to-date software that remains backwards compatible with the latest experimental [network deployments](https://docs.optimism.io/chain/networks). -If you're making a backwards compatible change, please direct your pull request towards `develop`. +Our main development branch, `eigenda-develop`, contains our new feature work and fixes, as well as upstream merges. We maintain this branch to be able to track the entire history of our fork changes. 
It might also be useful for some teams who want to use our fork directly as their upstream, so that they can just merge/rebase our latest changes (which will incorporate the OP changes as well).   -**Changes to contracts within `packages/contracts-bedrock/src` are usually NOT considered backwards compatible.** -Some exceptions to this rule exist for cases in which we absolutely must deploy some new contract after a tag has already been fully deployed. -If you're changing or adding a contract and you're unsure about which branch to make a PR into, default to using a feature branch. -Feature branches are typically used when there are conflicts between 2 projects touching the same code, to avoid conflicts from merging both into `develop`. +For teams that want/need more flexibility in how they manage their own fork, we also create release-specific branches which contain cleaned up history of commits on top of a specific upstream release. For example, the second eigenda-fork release in the picture below would be named `op-batcher/v1.11.2-eigenda.2`, and will consist of a cleaned-up history of commits (one per feature/service pair) on top of the upstream [op-batcher/v1.11.2](https://github.com/ethereum-optimism/optimism/releases/tag/op-batcher%2Fv1.11.2) release. We will strive to make our releases on top of op [production releases](https://github.com/ethereum-optimism/optimism?tab=readme-ov-file#production-releases), unless an urgent fix is needed.   -## License +![](./assets/fork-branching-and-releases.png)   -All other files within this repository are licensed under the [MIT License](https://github.com/ethereum-optimism/optimism/blob/master/LICENSE) unless stated otherwise. +Fork developers should consult the [Fork Release Runbook](./docs/handbook/fork-release-runbook.md) for more details on how to make a new release.
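For readers unfamiliar with the da-server interface the README refers to, here is a minimal sketch of the POST/GET round trip the proxy exposes, following the generic-commitment routes of the OP alt-da spec (`POST /put` returning a commitment, `GET /get/0x<hex-commitment>`). This is illustrative only, not the fork's client code; op-batcher and op-node go through the op-alt-da client instead:

```go
package main

import (
	"bytes"
	"encoding/hex"
	"fmt"
	"io"
	"net/http"
)

func main() {
	// da-server endpoint of the proxy, e.g. as exposed by the kurtosis devnet.
	proxy := "http://127.0.0.1:3100"

	// POST /put submits a payload; the response body is the altda commitment.
	resp, err := http.Post(proxy+"/put", "application/octet-stream",
		bytes.NewReader([]byte("some channel frames")))
	if err != nil {
		panic(err)
	}
	commitment, err := io.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		panic(err)
	}

	// GET /get/0x<hex-commitment> fetches the payload back.
	resp, err = http.Get(proxy + "/get/0x" + hex.EncodeToString(commitment))
	if err != nil {
		panic(err)
	}
	payload, err := io.ReadAll(resp.Body)
	resp.Body.Close()
	if err != nil {
		panic(err)
	}
	fmt.Printf("roundtripped %d bytes\n", len(payload))
}
```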
diff --git ethereum-optimism/optimism/assets/EigenDA_TextLogo_White.svg layr-labs/optimism/assets/EigenDA_TextLogo_White.svg new file mode 100644 index 0000000000000000000000000000000000000000..9037e313f9cf60be953f733db175a930c981d73b --- /dev/null +++ layr-labs/optimism/assets/EigenDA_TextLogo_White.svg @@ -0,0 +1,18 @@ +<svg width="1101" height="482" viewBox="0 0 1101 482" fill="none" xmlns="http://www.w3.org/2000/svg"> +<g clip-path="url(#clip0_344_225)"> +<path fill-rule="evenodd" clip-rule="evenodd" d="M142.241 10.5986V57.4135H188.725V10.5986H142.241Z" fill="white"/> +<path fill-rule="evenodd" clip-rule="evenodd" d="M326.301 291.488V197.858H233.349V104.228H186.865V197.858H93.9131V10.5986H0.945488V197.858H93.9131V385.117H186.865H233.349V478.747H279.833V385.117H324.74H326.301H372.785V291.488H326.301Z" fill="white"/> +<path d="M434.29 234.71L433.266 226.719C447.058 221.612 449.47 214.582 450.494 200.521C451.519 189.976 451.519 175.916 451.519 164.74V79.7801C451.519 68.6045 451.519 54.5285 450.494 43.9991C449.47 29.9231 447.042 22.9245 433.266 17.8017L434.29 9.81006C447.751 10.7558 457.036 10.7558 465.642 10.7558H529.055C543.178 10.7558 553.881 10.7558 567.311 9.81006C569.722 26.0928 571.094 37.9147 572.15 55.49L562.14 57.082C557.664 45.2601 553.849 38.561 548.679 33.4224C543.509 28.2996 535.58 25.746 529.023 24.7845C520.401 23.492 512.819 23.1925 499.39 23.1925C485.96 23.1925 481.121 25.746 480.096 38.5295C479.072 48.4284 479.072 59.935 479.072 71.426V112.314C490.799 112.314 514.932 111.668 525.934 110.722C541.444 109.13 546.283 104.023 551.453 89.3165L559.744 90.9085C558.72 103.377 558.72 110.391 558.72 118.367C558.72 126.343 558.72 133.704 559.744 146.172L551.453 147.764C546.283 133.073 541.444 128.282 525.934 126.359C514.9 125.066 490.436 124.766 479.072 124.766V167.893C479.072 179.069 479.765 196.344 480.096 202.728C480.79 218.396 486.653 221.249 502.164 221.249C517.674 221.249 529.402 220.934 538.686 219.657C550.744 218.065 556.261 213.573 561.431 206.874C565.577 200.789 569.013 194.09 573.489 182.268L582.805 183.86C581.087 203.028 578.329 218.696 574.514 235.294C549.357 234.001 518.005 233.702 473.854 233.702C429.703 233.702 447.673 233.702 434.211 234.663L434.274 234.71H434.29Z" fill="white"/> +<path d="M623.473 134.713C628.312 122.576 629.336 116.491 629.336 113.307C629.336 110.123 628.312 110.423 626.578 110.423C623.126 110.423 616.237 115.53 600.396 132.79L593.839 127.352C613.495 100.839 627.618 88.7019 638.983 88.7019C650.348 88.7019 652.775 94.7862 652.775 104.37C652.775 113.954 647.274 128.644 642.104 141.759L619.705 200.537C615.922 210.121 615.229 212.675 615.229 213.967C615.229 215.26 616.616 217.167 618.681 217.167C622.464 217.167 629.352 213.668 646.249 191.3L653.138 195.777C634.869 228.674 618.996 237.627 606.938 237.627C594.879 237.627 592.121 231.243 592.121 222.936C592.121 214.629 594.532 208.56 602.792 187.785L623.473 134.76V134.729V134.713ZM650.017 0.872803C660.357 0.872803 668.286 8.21816 668.286 17.8018C668.286 27.3855 660.357 34.7308 650.017 34.7308C639.676 34.7308 631.748 27.3855 631.748 17.8018C631.748 8.21816 639.676 0.872803 650.017 0.872803Z" fill="white"/> +<path d="M781.366 31.9882C789.642 40.6261 792.731 51.4707 792.731 63.2927C792.731 94.5971 767.905 116.964 737.578 116.964C707.251 116.964 710.703 112.819 700.693 105.142C690.022 112.819 687.595 120.164 687.595 127.509C687.595 134.855 695.523 142.846 711.034 143.477L752.726 144.769C786.174 145.715 802.346 164.252 802.346 188.211C802.346 212.17 771.656 242.198 723.754 242.198C675.851 242.198 665.149 
226.23 665.149 204.494C665.149 182.757 673.077 177.65 692.717 165.198C677.901 160.406 667.891 149.861 667.891 134.839C667.891 119.817 674.086 109.587 691.693 97.1507C683.764 87.2518 679.966 77.0219 679.966 63.2927C679.966 33.2492 705.816 9.62109 736.143 9.62109C766.471 9.62109 761.3 16.0049 780.956 16.0049C800.612 16.0049 801.637 15.6897 813.695 14.7124L816.454 20.1505L813.695 31.9724H781.287H781.35L781.366 31.9882ZM746.562 169.028C731.052 169.028 718.647 169.028 704.508 168.082C692.449 178.612 687.61 189.157 687.61 199.087C687.61 209.018 700.362 224.339 729.302 224.339C758.242 224.339 783.431 208.671 783.431 193.681C783.431 178.69 772.397 169.075 746.547 169.075L746.578 169.044L746.562 169.028ZM706.589 61.3539C706.589 89.4585 718.316 104.181 737.609 104.181C756.903 104.181 766.581 93.0051 766.581 64.8847C766.581 36.7642 754.522 22.4045 735.56 22.4045C716.598 22.4045 706.589 34.5417 706.589 61.3854V61.3539Z" fill="white"/> +<path d="M883.003 90.9877C916.089 90.9877 941.262 113.654 941.262 144.344C941.262 175.034 941.262 149.136 940.568 151.689H834.407C833.714 155.535 833.714 159.035 833.714 163.827C833.714 196.723 855.088 219.09 882.672 219.09C910.257 219.09 918.17 212.06 932.293 193.224L940.584 198.016C930.244 221.975 909.894 241.788 879.236 241.788C848.578 241.788 807.548 207.93 807.548 168.949C807.548 129.969 840.302 91.0035 883.019 91.0035V90.9719L883.003 90.9877ZM910.919 138.922C910.588 117.201 894.715 105.379 875.421 105.379C856.128 105.379 840.949 120.07 836.803 138.922H910.919Z" fill="white"/> +<path d="M1084.31 92.4532C1084.31 102.352 1084.31 113.528 1085 123.442C1086.03 136.525 1088.12 141.664 1100.84 146.456L1099.82 153.801C1087.76 152.855 1080.16 152.855 1072.25 152.855C1064.34 152.855 1056.74 152.855 1044.68 153.801L1043.66 146.456C1056.42 141.664 1058.14 136.557 1059.17 123.442C1060.19 113.543 1060.19 102.368 1060.19 92.4532V58.5952C1060.19 40.7204 1048.46 28.5517 1030.19 28.5517C1011.93 28.5517 991.576 43.5734 991.576 64.0017V92.4532C991.576 102.352 991.576 113.528 992.269 123.442C993.294 136.525 995.39 141.664 1008.11 146.456L1007.09 153.801C995.028 152.855 987.43 152.855 979.502 152.855C971.573 152.855 963.991 152.855 951.933 153.801L950.908 146.456C963.66 141.664 965.394 136.557 966.419 123.442C967.443 113.543 967.443 102.368 967.443 92.4532V84.4616C967.443 74.5627 967.443 64.3327 966.419 54.4181C965.394 41.3352 961.942 38.4506 949.521 36.1966V29.4975C965.363 22.4674 973.985 17.3603 986.043 7.13037L993.278 9.68391C992.254 19.9138 991.56 28.5517 991.213 36.5276L991.544 36.8428C1000.86 20.5601 1017.06 7.77664 1037.74 7.77664C1058.42 7.77664 1068.77 15.122 1077.06 27.5902C1081.53 34.9355 1084.29 42.9272 1084.29 55.6949V92.4374H1084.32L1084.31 92.4532Z" fill="white"/> +<path d="M675.82 481.111C646.88 481.111 627.587 478.558 612.77 478.558C597.953 478.558 595.194 478.558 581.765 479.519L580.724 471.528C594.501 466.405 596.913 459.375 597.953 445.299C598.993 434.753 598.993 420.677 598.993 409.486V324.415C598.993 313.224 598.993 299.148 597.953 288.603C596.913 274.527 594.501 267.497 580.724 262.374L581.765 254.382C595.194 255.344 604.51 255.344 613.117 255.344C634.128 255.344 649.985 253.736 673.408 253.736C743.363 253.736 805.719 292.118 805.719 367.92C805.719 443.722 752.994 481.143 675.82 481.143V481.111ZM673.393 466.074C737.136 466.074 774.005 430.576 774.005 368.204C774.005 305.831 729.555 268.096 670.634 268.096C611.714 268.096 628.595 270.334 627.571 283.133C626.53 293.048 626.53 304.555 626.53 316.077V412.67C626.53 423.862 626.877 441.137 627.571 447.537C629.289 
463.851 638.258 466.089 673.393 466.089V466.074Z" fill="white"/> +<path d="M895.85 249.559H902.391L966.135 409.47C970.265 419.7 976.475 434.738 981.646 445.283C988.187 459.359 994.051 466.074 1005.42 471.512L1004.38 479.503C990.945 478.542 984.735 478.542 976.129 478.542C967.522 478.542 954.77 478.542 941.325 479.503L940.285 471.512C949.584 467.681 953.383 464.797 953.383 458.728C953.383 452.66 948.213 438.899 936.848 409.47L934.09 403.071H850.012L847.601 409.47C835.542 438.899 833.477 451.052 833.477 458.397C833.477 465.742 836.583 467.997 846.576 471.512L845.536 479.503C832.09 478.542 825.202 478.542 816.596 478.542C807.989 478.542 803.166 478.542 791.092 479.503L790.051 471.512C801.416 466.074 807.28 459.359 813.821 445.283C818.992 434.738 825.186 419.7 829.332 409.47L895.834 249.559H895.85ZM893.438 298.801L855.876 389.31H928.92L893.422 298.801H893.438Z" fill="white"/> +</g> +<defs> +<clipPath id="clip0_344_225"> +<rect width="1100.07" height="480.443" fill="white" transform="translate(0.803467 0.77832)"/> +</clipPath> +</defs> +</svg>
diff --git ethereum-optimism/optimism/assets/fork-branching-and-releases.png layr-labs/optimism/assets/fork-branching-and-releases.png new file mode 100644 index 0000000000000000000000000000000000000000..ba05f2a7b7ab5b8d00762fa780840eba4ac786ef Binary files /dev/null and layr-labs/optimism/assets/fork-branching-and-releases.png differ
diff --git ethereum-optimism/optimism/docs/handbook/fork-release-runbook.md layr-labs/optimism/docs/handbook/fork-release-runbook.md new file mode 100644 index 0000000000000000000000000000000000000000..5037fd4e2ce72107d93c9a7d82b30ce9a532f5cb --- /dev/null +++ layr-labs/optimism/docs/handbook/fork-release-runbook.md @@ -0,0 +1,30 @@ +# Fork Release Runbook + +This document describes the process for releasing a new version of the EigenDA powered Optimism-Fork. This adds details to the explanation in the main [README](../../README.md#releases-and-branching-strategy). + +![](../../assets/fork-branching-and-releases.png) + +## Example Release For op-batcher/v1.11.2-eigenda.2 + +First we make the cleaned-up release branch/tag: + +```bash +git checkout op-node/v1.11.1-eigenda.1 +git rebase op-batcher/v1.11.2 +# cherry pick all the new commits +# Can also do it manually: `git cherry-pick <fixA> <featC> <fixB>` +git cherry-pick op-node/v1.11.1-eigenda.1^..eigenda-develop +# cleanup history +git rebase -i op-batcher/v1.11.2 +# tag the release +git tag op-batcher/v1.11.2-eigenda.2 +git push --tags +``` + +Then we update the eigenda-develop branch: + +```bash +git checkout eigenda-develop +git merge op-batcher/v1.11.2 +git push # or can create a PR instead - but make sure to merge with merge commit +```
diff --git ethereum-optimism/optimism/fork.yaml layr-labs/optimism/fork.yaml new file mode 100644 index 0000000000000000000000000000000000000000..46e6042ff9aa06e89ee61978b6bd464c2824c352 --- /dev/null +++ layr-labs/optimism/fork.yaml @@ -0,0 +1,92 @@ +title: "layr-labs/optimism" # Define the HTML page title +logo: "logo.png" +footer: | # define the footer with markdown + EigenDA's [OP-Fork](https://github.com/layr-labs/optimism) fork overview - created with [Forkdiff](https://github.com/protolambda/forkdiff) +base: + name: ethereum-optimism/optimism + url: https://github.com/ethereum-optimism/optimism + # ref: refs/tags/op-node/v1.11.1 + hash: 443e931f242e1595896d6598b02068dc822e1232 +fork: + name: layr-labs/optimism + url: https://github.com/layr-labs/optimism + ref: refs/heads/eigenda-develop +def: + title: "EigenDA x Optimism fork diff" + description: | # description in markdown + The original optimism codebase can be found at [`github.com/ethereum-optimism/optimism`](https://github.com/ethereum-optimism/optimism). + And our fork at [`github.com/Layr-Labs/optimism`](https://github.com/Layr-Labs/optimism). + + sub: + - title: "OP Batcher" + description: | + Modifications to op-batcher. + sub: + # Adding this test section is the easy way to remove the tests from the main batcher section, so we can focus on the meaningful code changes. + - title: "Tests" + globs: + - "op-batcher/**/*_test.go" + - title: "Batcher" + globs: + - "op-batcher/**" + + - title: "OP Node" + description: | + Modifications to op-node. + sub: + # Adding this test section is the easy way to remove the tests from the main batcher section, so we can focus on the meaningful code changes. + - title: "Tests" + globs: + - "op-node/**/*_test.go" + - title: "Node" + globs: + - "op-node/**" + + - title: "OP AltDA Client" + description: | + Modifications to op-alt-da client. + sub: + # Adding this test section is the easy way to remove the tests from the main batcher section, so we can focus on the meaningful code changes. + - title: "Tests" + globs: + - "op-alt-da/**/*_test.go" + - "op-alt-da/**/*mock.go" + - title: "AltDA Client" + globs: + - "op-alt-da/**" + + - title: "Testing" + sub: + - title: "OP E2E Tests" + globs: + - "op-e2e/**" + - title: "Kurtosis Tests" + globs: + - "kurtosis-devnet/tests/**" + + - title: "CI/CD" + description: | + Replaced op's circleci with github actions relevant to testing our changes. + sub: + - title: "Github workflows" + globs: + - ".github/**" + - ".circleci/**" + + - title: "Kurtosis Devnet" + description: | + Added kurtosis devnet yaml file to spin up an op chain that uses altda and spins up an eigenda-proxy in memstore mode to simulate interactions with an EigenDA network. + sub: + - title: "Kurtosis Devnet" + globs: + - "kurtosis-devnet/**" +# files can be ignored globally, these will be listed in a separate grayed-out section, +# and do not count towards the total line count. +ignore: + - "**/go.mod" + - "**/go.sum" + - "**/.gitignore" + # Not very useful to see utils added for testing, so prefer to remove to reduce noise + # and focus on actually meaningful test changes/additions. + - "**/e2eutils/**" + - "op-e2e/actions/helpers/**"
diff --git ethereum-optimism/optimism/.gitignore layr-labs/optimism/.gitignore index f58f7f9c1f6368ff0d92150534ba92b94adaac4c..9f9977fc4a5a52ac613bc9cb19f339b992fb69d2 100644 --- ethereum-optimism/optimism/.gitignore +++ layr-labs/optimism/.gitignore @@ -17,6 +17,9 @@ dist artifacts cache   +# forkdiff output +index.html + !op-chain-ops/foundry/testdata/srcmaps/cache !op-chain-ops/foundry/testdata/srcmaps/artifacts
diff --git ethereum-optimism/optimism/go.mod layr-labs/optimism/go.mod index 4f4e978b17810f7829310ba86c8646d7b11e7e0b..339ba5ef72161ab362c94112d2277565928d4f2f 100644 --- ethereum-optimism/optimism/go.mod +++ layr-labs/optimism/go.mod @@ -6,6 +6,7 @@ toolchain go1.22.7   require ( github.com/BurntSushi/toml v1.4.0 + github.com/Layr-Labs/eigenda-proxy/clients v1.0.1 github.com/andybalholm/brotli v1.1.0 github.com/bmatcuk/doublestar/v4 v4.8.1 github.com/btcsuite/btcd v0.24.2
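The new dependency is the client half of eigenda-proxy, used to post data to EigenDA through the proxy and read it back during derivation. A rough sketch of that round trip, assuming the module exposes a `standard_client` package with `SetData`/`GetData` methods (treat the package path, config fields, method names, and URL as assumptions here, not a confirmed API):

```go
package main

import (
	"context"
	"fmt"

	standard_client "github.com/Layr-Labs/eigenda-proxy/clients/standard_client"
)

func main() {
	// Assumed: a proxy reachable locally, e.g. the memstore-mode instance
	// the kurtosis devnet spins up.
	client := standard_client.New(&standard_client.Config{
		URL: "http://localhost:3100",
	})
	ctx := context.Background()

	// Disperse a payload; the returned commitment is what the batcher
	// posts to L1 in place of the full batch data.
	commitment, err := client.SetData(ctx, []byte("example batch payload"))
	if err != nil {
		panic(err)
	}

	// Fetch the payload back by commitment, as op-node does in derivation.
	data, err := client.GetData(ctx, commitment)
	if err != nil {
		panic(err)
	}
	fmt.Printf("round-tripped %d bytes\n", len(data))
}
```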
diff --git ethereum-optimism/optimism/go.sum layr-labs/optimism/go.sum index 87025f4a24efe138be809c1af656028d00855312..e5c20fde89a76e21fd243d8e4c466669cd64cec9 100644 --- ethereum-optimism/optimism/go.sum +++ layr-labs/optimism/go.sum @@ -16,6 +16,8 @@ github.com/DataDog/datadog-go v2.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/datadog-go v3.2.0+incompatible/go.mod h1:LButxg5PwREeZtORoXG3tL4fMGNddJ+vMq1mwgfaqoQ= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e h1:ZIWapoIRN1VqT8GR8jAwb1Ie9GyehWjVcGh32Y2MznE= github.com/DataDog/zstd v1.5.6-0.20230824185856-869dae002e5e/go.mod h1:g4AWEaM3yOg3HYfnJ3YIawPnVdXJh9QME85blwSAmyw= +github.com/Layr-Labs/eigenda-proxy/clients v1.0.1 h1:62NFB1fUauwQPGvTiOXhz1HKaL0fRhGy34tI9EpKz6I= +github.com/Layr-Labs/eigenda-proxy/clients v1.0.1/go.mod h1:JbDNvSritUGHErvzwB5Tb1IrVk7kea9DSBLKEOkBebE= github.com/Masterminds/semver/v3 v3.1.1 h1:hLg3sBzpNErnxhQtUy/mmLR2I9foDujNK030IGemrRc= github.com/Masterminds/semver/v3 v3.1.1/go.mod h1:VPu/7SZ7ePZ3QOrcuXROw5FAcLl4a0cBrbBpGY/8hQs= github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
diff --git ethereum-optimism/optimism/kurtosis-devnet/.gitignore layr-labs/optimism/kurtosis-devnet/.gitignore index 7eb455c4b8a40f7023ed7753afd1f25afc9b0da1..22bb51ab5f89b1e455ce588aab2b14e6835ed770 100644 --- ethereum-optimism/optimism/kurtosis-devnet/.gitignore +++ layr-labs/optimism/kurtosis-devnet/.gitignore @@ -1 +1,2 @@ *-user.json +eigenda-secrets.json
diff --git ethereum-optimism/optimism/op-e2e/actions/helpers/l2_batcher.go layr-labs/optimism/op-e2e/actions/helpers/l2_batcher.go index f270bddb717a03e1e837ad281ab3b27a8d3c1d0e..2aa40aa3424b3282aab54f793c65249b233cdd39 100644 --- ethereum-optimism/optimism/op-e2e/actions/helpers/l2_batcher.go +++ layr-labs/optimism/op-e2e/actions/helpers/l2_batcher.go @@ -309,6 +309,20 @@ return data.Bytes() }   +func (s *L2Batcher) ActAltDAFailoverToEthDA(t Testing) { + if !s.l2BatcherCfg.UseAltDA { + t.Fatalf("cannot failover to ethda when already using ethda") + } + s.l2BatcherCfg.UseAltDA = false +} + +func (s *L2Batcher) ActAltDAFallbackToAltDA(t Testing) { + if s.l2BatcherCfg.UseAltDA { + t.Fatalf("cannot fallback to altDA when already using altDA") + } + s.l2BatcherCfg.UseAltDA = true +} + // ActL2BatchSubmit constructs a batch tx from previous buffered L2 blocks, and submits it to L1 func (s *L2Batcher) ActL2BatchSubmit(t Testing, txOpts ...func(tx *types.DynamicFeeTx)) { s.ActL2BatchSubmitRaw(t, s.ReadNextOutputFrame(t), txOpts...)
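These two toggles let action tests flip the batcher between AltDA and EthDA mid-test. A hypothetical test fragment, where `batcher` is an `*L2Batcher` built from the surrounding helpers and `ActL2BatchBuffer`/`ActL2BatchSubmit` are the existing actions in this file:

```go
// Submit one channel via AltDA, then fail over and submit the next
// directly to EthDA, then fall back again.
batcher.ActL2BatchBuffer(t)
batcher.ActL2BatchSubmit(t) // posts an AltDA commitment tx

batcher.ActAltDAFailoverToEthDA(t) // UseAltDA: true -> false
batcher.ActL2BatchBuffer(t)
batcher.ActL2BatchSubmit(t) // posts batch data directly on L1

batcher.ActAltDAFallbackToAltDA(t) // UseAltDA: false -> true
```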
diff --git ethereum-optimism/optimism/op-e2e/e2eutils/geth/wait.go layr-labs/optimism/op-e2e/e2eutils/geth/wait.go index 8356058afda753028fa52584a58b7000e029a636..ce3b1d377e298f0d662f4651d32c3dc6e54308a6 100644 --- ethereum-optimism/optimism/op-e2e/e2eutils/geth/wait.go +++ layr-labs/optimism/op-e2e/e2eutils/geth/wait.go @@ -8,6 +8,7 @@ "math/big" "strings" "time"   + "github.com/ethereum-optimism/optimism/op-e2e/e2eutils/transactions" "github.com/ethereum-optimism/optimism/op-node/rollup" "github.com/ethereum-optimism/optimism/op-node/rollup/derive" "github.com/ethereum/go-ethereum" @@ -84,6 +85,33 @@ return nil, fmt.Errorf("receipt for transaction %s not found. tip block number is %d: %w", hash.Hex(), tip.NumberU64(), errTimeout) case <-ticker.C: } } +} + +// WaitForBlockWithTxFromSender waits for a block with a transaction from a specific sender address. +// It starts from the current block and checks up to the next nBlocks blocks. +// As soon as it finds a block that contains a tx from sender, it returns that block. +// If no such block is found in the next nBlocks blocks, it returns an error. +func WaitForBlockWithTxFromSender(sender common.Address, client *ethclient.Client, nBlocks uint64) (*types.Block, error) { + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + blockNum, err := client.BlockNumber(ctx) + if err != nil { + return nil, err + } + for end := blockNum + nBlocks; blockNum < end; blockNum++ { + blockL1, err := WaitForBlock(big.NewInt(0).SetUint64(blockNum), client) + if err != nil { + return nil, err + } + batcherTxCount, err := transactions.TransactionsBySenderCount(blockL1, sender) + if err != nil { + return nil, err + } + if batcherTxCount > 0 { + return blockL1, nil + } + } + return nil, fmt.Errorf("no block with tx from sender %s found in the next %d blocks", sender.Hex(), nBlocks) }   type waitForBlockOptions struct {
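A sketch of how an e2e test might use the helper, with `l1Client`, `batcherAddr`, and `t` standing in for the test's existing setup:

```go
// Wait up to 10 L1 blocks for the batcher to land a transaction.
block, err := geth.WaitForBlockWithTxFromSender(batcherAddr, l1Client, 10)
require.NoError(t, err)

// The block is returned as soon as one batcher tx is found, so the test
// can go on to inspect the tx payloads (see TransactionsBySender below).
require.NotNil(t, block)
```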
diff --git ethereum-optimism/optimism/op-e2e/e2eutils/setup.go layr-labs/optimism/op-e2e/e2eutils/setup.go index 92fc08403c1deb5114d757185fa6d8b4802b51b5..60da69ed84d44bfce0db201c9012dcd4bb619fdb 100644 --- ethereum-optimism/optimism/op-e2e/e2eutils/setup.go +++ layr-labs/optimism/op-e2e/e2eutils/setup.go @@ -1,6 +1,7 @@ package e2eutils   import ( + "log/slog" "math/big" "os" "path" @@ -50,6 +51,7 @@ ChannelTimeout uint64 L1BlockTime uint64 UseAltDA bool AllocType config.AllocType + LogLevel slog.Level }   func MakeDeployParams(t require.TestingT, tp *TestParams) *DeployParams { @@ -66,7 +68,7 @@ deployConfig.L1BlockTime = tp.L1BlockTime deployConfig.UseAltDA = tp.UseAltDA ApplyDeployConfigForks(deployConfig)   - logger := log.NewLogger(log.DiscardHandler()) + logger := log.NewLogger(log.NewTerminalHandlerWithLevel(os.Stdout, tp.LogLevel, true)) require.NoError(t, deployConfig.Check(logger)) require.Equal(t, addresses.Batcher, deployConfig.BatchSenderAddress) require.Equal(t, addresses.Proposer, deployConfig.L2OutputOracleProposer)
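With the new field, individual tests can opt into verbose deploy-config logging; a fragment (other `TestParams` fields, e.g. `AllocType`, elided, and the usual e2e test imports assumed):

```go
tp := &e2eutils.TestParams{
	MaxSequencerDrift:   40,
	SequencerWindowSize: 120,
	ChannelTimeout:      120,
	L1BlockTime:         12,
	UseAltDA:            true,
	LogLevel:            slog.LevelDebug, // surfaces deployConfig.Check logs
}
dp := e2eutils.MakeDeployParams(t, tp)
_ = dp
```

Note that the zero value of `slog.Level` is `LevelInfo`, so tests that never set `LogLevel` now emit the deploy-config check logs at info level to stdout instead of discarding them as before.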
diff --git ethereum-optimism/optimism/op-e2e/e2eutils/transactions/count.go layr-labs/optimism/op-e2e/e2eutils/transactions/count.go index 0f4d41fe04786da83030e0f3465f48f7c4fd812c..a7815b4aa4d847445179a5cd523f454b48872951 100644 --- ethereum-optimism/optimism/op-e2e/e2eutils/transactions/count.go +++ layr-labs/optimism/op-e2e/e2eutils/transactions/count.go @@ -5,7 +5,8 @@ "github.com/ethereum/go-ethereum/common" "github.com/ethereum/go-ethereum/core/types" )   -func TransactionsBySender(block *types.Block, sender common.Address) (int64, error) { +// TransactionsBySenderCount returns the number of transactions in the block that were sent by the given sender. +func TransactionsBySenderCount(block *types.Block, sender common.Address) (int64, error) { txCount := int64(0) for _, tx := range block.Transactions() { signer := types.NewCancunSigner(tx.ChainId()) @@ -19,3 +20,20 @@ } } return txCount, nil } + +// TransactionsBySender returns the transactions (possibly none) in the block that were sent by the given sender. +// It returns an error if any of the transactions in the block have an invalid signature. +func TransactionsBySender(block *types.Block, sender common.Address) ([]*types.Transaction, error) { + txs := make([]*types.Transaction, 0) + for _, tx := range block.Transactions() { + signer := types.NewCancunSigner(tx.ChainId()) + txSender, err := types.Sender(signer, tx) + if err != nil { + return nil, err + } + if txSender == sender { + txs = append(txs, tx) + } + } + return txs, nil +}
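The renamed count helper and the new slice-returning variant compose naturally with `WaitForBlockWithTxFromSender` above; a fragment showing both, assuming a `*testing.T` and the `block`/`batcherAddr` from the previous sketch (the `0x01` AltDA-commitment version byte is illustrative and should be treated as an assumption):

```go
count, err := transactions.TransactionsBySenderCount(block, batcherAddr)
require.NoError(t, err)

txs, err := transactions.TransactionsBySender(block, batcherAddr)
require.NoError(t, err)
require.Equal(t, count, int64(len(txs)))

// With the full txs in hand, tests can inspect payloads, e.g. the leading
// version byte of batcher tx data (assumed: 0x01 marks an AltDA commitment).
for _, tx := range txs {
	isCommitment := len(tx.Data()) > 0 && tx.Data()[0] == 1
	t.Logf("batcher tx %s is altda commitment: %v", tx.Hash(), isCommitment)
}
```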