
Commit 33231f5

Merge branch 'develop' into omerfirmak/defer-txpool-reorgs
2 parents: d011a4b + e3bfb5f

File tree: 8 files changed (+51 −34 lines)


go.mod

+1 −1

@@ -50,7 +50,7 @@ require (
     github.com/prometheus/tsdb v0.7.1
     github.com/rjeczalik/notify v0.9.1
     github.com/rs/cors v1.7.0
-    github.com/scroll-tech/da-codec v0.1.1-0.20240716101216-c55ed9455cf4
+    github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923
     github.com/scroll-tech/zktrie v0.8.4
     github.com/shirou/gopsutil v3.21.11+incompatible
     github.com/status-im/keycard-go v0.0.0-20190316090335-8537d3370df4

go.sum

+2 −2

@@ -392,8 +392,8 @@ github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncj
 github.com/rs/cors v1.7.0 h1:+88SsELBHx5r+hZ8TCkggzSstaWNbDvThkVK8H6f9ik=
 github.com/rs/cors v1.7.0/go.mod h1:gFx+x8UowdsKA9AchylcLynDq+nNFfI8FkUZdN/jGCU=
 github.com/russross/blackfriday/v2 v2.0.1/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM=
-github.com/scroll-tech/da-codec v0.1.1-0.20240716101216-c55ed9455cf4 h1:40Lby3huKNFZ2EXzxqVpADB+caepDRrNRoUgTsCKN88=
-github.com/scroll-tech/da-codec v0.1.1-0.20240716101216-c55ed9455cf4/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
+github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923 h1:A1ItzpnFDCHMh4g6cpeBZf7/fPf2lfwHbhjr/FSpk2w=
+github.com/scroll-tech/da-codec v0.1.1-0.20240718144756-1875fd490923/go.mod h1:D6XEESeNVJkQJlv3eK+FyR+ufPkgVQbJzERylQi53Bs=
 github.com/scroll-tech/zktrie v0.8.4 h1:UagmnZ4Z3ITCk+aUq9NQZJNAwnWl4gSxsLb2Nl7IgRE=
 github.com/scroll-tech/zktrie v0.8.4/go.mod h1:XvNo7vAk8yxNyTjBDj5WIiFzYW4bx/gJ78+NK6Zn6Uk=
 github.com/segmentio/kafka-go v0.1.0/go.mod h1:X6itGqS9L4jDletMsxZ7Dz+JFWxM6JHfPOCvTvk+EJo=

miner/scroll_worker.go

+4 −3

@@ -544,12 +544,13 @@ func (w *worker) handlePipelineResult(res *pipeline.Result) error {
     w.currentPipeline.Release()
     w.currentPipeline = nil
 
+    if res.FinalBlock != nil {
+        w.updateSnapshot(res.FinalBlock)
+    }
+
     // Rows being nil without an OverflowingTx means that block didn't go thru CCC,
     // which means that we are not the sequencer. Do not attempt to commit.
     if res.Rows == nil && res.OverflowingTx == nil {
-        if res.FinalBlock != nil {
-            w.updateSnapshot(res.FinalBlock)
-        }
         return nil
     }
 
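The change above hoists the snapshot update out of the non-sequencer early-return branch, so any result carrying a final block refreshes the snapshot before the sequencer check. A minimal sketch of that control flow, using hypothetical stand-in types rather than the worker's real ones:

package main

import "fmt"

type result struct {
	finalBlock    *string
	rows          *int
	overflowingTx *string
}

func handle(res *result) error {
	// Snapshot refresh now happens for every result that carries a final
	// block, before the non-sequencer early return below.
	if res.finalBlock != nil {
		fmt.Println("update snapshot for", *res.finalBlock)
	}
	// Rows being nil without an overflowing tx: the block didn't go through
	// CCC, so this node is not the sequencer and must not commit.
	if res.rows == nil && res.overflowingTx == nil {
		return nil
	}
	fmt.Println("commit block")
	return nil
}

func main() {
	b := "block #1"
	_ = handle(&result{finalBlock: &b}) // follower: snapshot updated, nothing committed
}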
params/version.go

+1 −1

@@ -24,7 +24,7 @@ import (
 const (
     VersionMajor = 5          // Major version component of the current release
     VersionMinor = 5          // Minor version component of the current release
-    VersionPatch = 14         // Patch version component of the current release
+    VersionPatch = 17         // Patch version component of the current release
     VersionMeta  = "mainnet"  // Version metadata to append to the version string
 )
 

rollup/circuitcapacitychecker/libzkp/Cargo.toml

+1 −1

@@ -33,7 +33,7 @@ log = "0.4"
 once_cell = "1.19"
 serde = "1.0"
 serde_derive = "1.0"
-serde_json = "1.0.66"
+serde_json = { version = "1.0.66", features = ["unbounded_depth"] }
 
 [profile.test]
 opt-level = 3

rollup/circuitcapacitychecker/libzkp/src/lib.rs

+22 −10

@@ -12,6 +12,8 @@ pub mod checker {
     use std::panic;
     use std::ptr::null;
     use std::ffi::CStr;
+    use serde::Deserialize as Deserializea;
+    use serde_json::Deserializer;
 
     #[derive(Debug, Clone, Deserialize, Serialize)]
     pub struct CommonResult {
@@ -48,9 +50,19 @@ pub mod checker {
     #[no_mangle]
     pub unsafe extern "C" fn parse_json_to_rust_trace(trace_json_ptr: *const c_char) -> *mut BlockTrace {
         let trace_json_cstr = unsafe { CStr::from_ptr(trace_json_ptr) };
-        let trace = serde_json::from_slice::<BlockTrace>(trace_json_cstr.to_bytes());
+        let trace_json_bytes = trace_json_cstr.to_bytes();
+        let mut deserializer = Deserializer::from_slice(trace_json_bytes);
+        deserializer.disable_recursion_limit();
+        let trace = BlockTrace::deserialize(&mut deserializer);
         match trace {
-            Err(_) => return null_mut(),
+            Err(e) => {
+                log::warn!(
+                    "failed to parse trace in parse_json_to_rust_trace, error: {:?}, trace_json_cstr: {:?}",
+                    e,
+                    trace_json_cstr,
+                );
+                return null_mut();
+            }
             Ok(t) => return Box::into_raw(Box::new(t))
         }
     }
@@ -226,10 +238,10 @@ pub mod checker {
             ))?
             .get_tx_num() as u64)
         })
-        .map_or_else(
-            |e| bail!("circuit capacity checker (id: {id}) error in get_tx_num: {e:?}"),
-            |result| result,
-        )
+            .map_or_else(
+                |e| bail!("circuit capacity checker (id: {id}) error in get_tx_num: {e:?}"),
+                |result| result,
+            )
     }
 
     /// # Safety
@@ -260,10 +272,10 @@ pub mod checker {
             .set_light_mode(light_mode);
             Ok(())
         })
-        .map_or_else(
-            |e| bail!("circuit capacity checker (id: {id}) error in set_light_mode: {e:?}"),
-            |result| result,
-        )
+            .map_or_else(
+                |e| bail!("circuit capacity checker (id: {id}) error in set_light_mode: {e:?}"),
+                |result| result,
+            )
     }
 }
 
rollup/pipeline/pipeline.go

+6 −2

@@ -362,7 +362,7 @@ func (p *Pipeline) encodeStage(traces <-chan *BlockCandidate) <-chan *BlockCandi
             trace.RustTrace = circuitcapacitychecker.MakeRustTrace(trace.LastTrace, buffer)
             if trace.RustTrace == nil {
                 log.Error("making rust trace", "txHash", trace.LastTrace.Transactions[0].TxHash)
-                return
+                // ignore the error here, CCC stage will catch it and treat it as a CCC error
             }
         }
         encodeTimer.UpdateSince(encodeStart)
@@ -431,7 +431,11 @@ func (p *Pipeline) cccStage(candidates <-chan *BlockCandidate, deadline time.Tim
         var accRows *types.RowConsumption
         var err error
         if candidate != nil && p.ccc != nil {
-            accRows, err = p.ccc.ApplyTransactionRustTrace(candidate.RustTrace)
+            if candidate.RustTrace != nil {
+                accRows, err = p.ccc.ApplyTransactionRustTrace(candidate.RustTrace)
+            } else {
+                err = errors.New("no rust trace")
+            }
             lastTxn := candidate.Txs[candidate.Txs.Len()-1]
             cccTimer.UpdateSince(cccStart)
             if err != nil {
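Taken together, the two hunks defer the failure: the encode stage no longer aborts when MakeRustTrace returns nil, and the CCC stage turns the missing trace into an ordinary error that flows through its existing error handling. A minimal sketch of that pattern, with hypothetical stand-in types rather than the real pipeline structures:

package main

import (
	"errors"
	"fmt"
)

// candidate stands in for pipeline.BlockCandidate; only the field the sketch
// needs is modeled.
type candidate struct {
	rustTrace *string // nil when trace encoding failed upstream
}

// cccStage mirrors the branch added above: a missing trace becomes a regular
// error instead of silently dropping the candidate in the encode stage.
func cccStage(c *candidate) (rows int, err error) {
	if c.rustTrace != nil {
		rows = len(*c.rustTrace) // stand-in for ApplyTransactionRustTrace
	} else {
		err = errors.New("no rust trace")
	}
	return rows, err
}

func main() {
	_, err := cccStage(&candidate{})
	fmt.Println(err) // "no rust trace", handled like any other CCC failure
}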

rollup/rollup_sync_service/rollup_sync_service.go

+14 −14

@@ -241,10 +241,15 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
         log.Warn("got nil when reading last finalized batch index. This should happen only once.")
     }
 
+    parentBatchMeta := &rawdb.FinalizedBatchMeta{}
+    if startBatchIndex > 0 {
+        parentBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, startBatchIndex-1)
+    }
+
     var highestFinalizedBlockNumber uint64
     batchWriter := s.db.NewBatch()
     for index := startBatchIndex; index <= batchIndex; index++ {
-        parentBatchMeta, chunks, err := s.getLocalInfoForBatch(index)
+        chunks, err := s.getLocalChunksForBatch(index)
         if err != nil {
             return fmt.Errorf("failed to get local node info, batch index: %v, err: %w", index, err)
         }
@@ -256,6 +261,7 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
 
         rawdb.WriteFinalizedBatchMeta(batchWriter, index, finalizedBatchMeta)
         highestFinalizedBlockNumber = endBlock
+        parentBatchMeta = finalizedBatchMeta
 
         if index%100 == 0 {
             log.Info("finalized batch progress", "batch index", index, "finalized l2 block height", endBlock)
@@ -283,17 +289,17 @@ func (s *RollupSyncService) parseAndUpdateRollupEventLogs(logs []types.Log, endB
     return nil
 }
 
-func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.FinalizedBatchMeta, []*encoding.Chunk, error) {
+func (s *RollupSyncService) getLocalChunksForBatch(batchIndex uint64) ([]*encoding.Chunk, error) {
     chunkBlockRanges := rawdb.ReadBatchChunkRanges(s.db, batchIndex)
     if len(chunkBlockRanges) == 0 {
-        return nil, nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges")
+        return nil, fmt.Errorf("failed to get batch chunk ranges, empty chunk block ranges")
     }
 
     endBlockNumber := chunkBlockRanges[len(chunkBlockRanges)-1].EndBlockNumber
     for i := 0; i < defaultMaxRetries; i++ {
         if s.ctx.Err() != nil {
             log.Info("Context canceled", "reason", s.ctx.Err())
-            return nil, nil, s.ctx.Err()
+            return nil, s.ctx.Err()
         }
 
         localSyncedBlockHeight := s.bc.CurrentBlock().Number().Uint64()
@@ -308,7 +314,7 @@ func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.Fina
 
     localSyncedBlockHeight := s.bc.CurrentBlock().Number().Uint64()
     if localSyncedBlockHeight < endBlockNumber {
-        return nil, nil, fmt.Errorf("local node is not synced up to the required block height: %v, local synced block height: %v", endBlockNumber, localSyncedBlockHeight)
+        return nil, fmt.Errorf("local node is not synced up to the required block height: %v, local synced block height: %v", endBlockNumber, localSyncedBlockHeight)
     }
 
     chunks := make([]*encoding.Chunk, len(chunkBlockRanges))
@@ -317,12 +323,12 @@ func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.Fina
         for j := cr.StartBlockNumber; j <= cr.EndBlockNumber; j++ {
             block := s.bc.GetBlockByNumber(j)
             if block == nil {
-                return nil, nil, fmt.Errorf("failed to get block by number: %v", i)
+                return nil, fmt.Errorf("failed to get block by number: %v", i)
             }
             txData := encoding.TxsToTxsData(block.Transactions())
             state, err := s.bc.StateAt(block.Root())
             if err != nil {
-                return nil, nil, fmt.Errorf("failed to get block state, block: %v, err: %w", block.Hash().Hex(), err)
+                return nil, fmt.Errorf("failed to get block state, block: %v, err: %w", block.Hash().Hex(), err)
             }
             withdrawRoot := withdrawtrie.ReadWTRSlot(rcfg.L2MessageQueueAddress, state)
             chunks[i].Blocks[j-cr.StartBlockNumber] = &encoding.Block{
@@ -333,13 +339,7 @@ func (s *RollupSyncService) getLocalInfoForBatch(batchIndex uint64) (*rawdb.Fina
         }
     }
 
-    // get metadata of parent batch: default to genesis batch metadata.
-    parentBatchMeta := &rawdb.FinalizedBatchMeta{}
-    if batchIndex > 0 {
-        parentBatchMeta = rawdb.ReadFinalizedBatchMeta(s.db, batchIndex-1)
-    }
-
-    return parentBatchMeta, chunks, nil
+    return chunks, nil
 }
 
 func (s *RollupSyncService) getChunkRanges(batchIndex uint64, vLog *types.Log) ([]*rawdb.ChunkBlockRange, error) {
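The refactor above reads the parent batch metadata from the database once, before the loop, and then carries each newly finalized meta forward in memory. Since writes inside the loop go through a batched writer, re-reading batch index-1 on every iteration (the old getLocalInfoForBatch behaviour) could observe data that has not been flushed yet. A minimal sketch of the loop-carried pattern, using hypothetical stand-in types (batchMeta, finalize) rather than the real rawdb API:

package main

import "fmt"

// batchMeta stands in for rawdb.FinalizedBatchMeta.
type batchMeta struct{ totalL1MessagePopped uint64 }

// db stands in for the node's key-value store; pending below mimics a write
// batch whose contents are not readable until committed.
var db = map[uint64]batchMeta{}

func finalize(parent batchMeta, l1Msgs uint64) batchMeta {
	return batchMeta{totalL1MessagePopped: parent.totalL1MessagePopped + l1Msgs}
}

func processRange(startBatchIndex, batchIndex uint64) {
	// Read the parent metadata once, defaulting to the genesis batch.
	parent := batchMeta{}
	if startBatchIndex > 0 {
		parent = db[startBatchIndex-1]
	}
	pending := map[uint64]batchMeta{}
	for index := startBatchIndex; index <= batchIndex; index++ {
		meta := finalize(parent, 2) // pretend each batch pops 2 L1 messages
		pending[index] = meta       // queued in the write batch
		parent = meta               // carried forward in memory, not re-read from db
	}
	for k, v := range pending { // commit the write batch
		db[k] = v
	}
}

func main() {
	processRange(0, 3)
	fmt.Println(db[3].totalL1MessagePopped) // 8
}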
