Skip to content
Merged
Show file tree
Hide file tree
Changes from 10 commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
ad48778
feat: rpc calls optimisation
sbackend123 Mar 12, 2026
1da63cc
Merge branch 'master' into feat/rpc-calls-optimisation
sbackend123 Mar 12, 2026
68ecefb
fix: linter issues
sbackend123 Mar 12, 2026
04f6520
Merge branch 'master' into feat/rpc-calls-optimisation
sbackend123 Mar 13, 2026
1ff4f75
fix: enable cache metrics
sbackend123 Mar 16, 2026
095f4af
fix: review issues
sbackend123 Mar 17, 2026
e3b1aa5
fix: dead code
sbackend123 Mar 18, 2026
f8e3049
fix: update cache implementation
sbackend123 Mar 25, 2026
e6945ef
fix: wrapped backend test
sbackend123 Mar 25, 2026
a9fd888
fix: make linter happy
sbackend123 Mar 30, 2026
bed886c
chore: calculate blocks and sync with chain once in N seconds (#5422)
sbackend123 Apr 6, 2026
acabf46
Merge branch 'master' into feat/rpc-calls-optimisation
sbackend123 Apr 6, 2026
a1e3f83
fix: block sync interval flag
sbackend123 Apr 6, 2026
bf4a030
fix: review issues
sbackend123 Apr 8, 2026
54ee6af
fix: return dep
sbackend123 Apr 8, 2026
ba08feb
fix: remove extra metric test
sbackend123 Apr 8, 2026
2a11aa6
fix: simplify cache
sbackend123 Apr 12, 2026
e4befcf
fix: remove comments for linter
sbackend123 Apr 12, 2026
2e40f56
fix: metric naming
sbackend123 Apr 12, 2026
c7bd723
fix: remove expiresAt because there is no meaningful usage
sbackend123 Apr 14, 2026
76a6e1d
fix: prevent dividing to zero
sbackend123 Apr 14, 2026
ba744e9
fix: clean up
sbackend123 Apr 14, 2026
a858f42
fix: add env vars
sbackend123 Apr 15, 2026
433f3ff
fix: add new flag to all manifests
sbackend123 Apr 15, 2026
083da1f
Merge branch 'master' into feat/rpc-calls-optimisation
sbackend123 Apr 15, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
3 changes: 0 additions & 3 deletions cmd/bee/cmd/db_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -214,9 +214,6 @@ func TestDBNuke_FLAKY(t *testing.T) {
Logger: log.Noop,
ReserveCapacity: storer.DefaultReserveCapacity,
}, path.Join(dataDir, "localstore"))
if err != nil {
t.Fatal(err)
}
defer db.Close()

info, err = db.DebugInfo(ctx)
Expand Down
1 change: 1 addition & 0 deletions cmd/bee/cmd/deploy.go
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,7 @@ func (c *command) initDeployCmd() error {
blocktime,
true,
c.config.GetUint64(optionNameMinimumGasTipCap),
0,
)
if err != nil {
return err
Expand Down
2 changes: 2 additions & 0 deletions go.mod
Original file line number Diff line number Diff line change
Expand Up @@ -60,6 +60,8 @@ require (
resenje.org/web v0.4.3
)

require github.com/kylelemons/godebug v1.1.0 // indirect
Comment thread
sbackend123 marked this conversation as resolved.
Outdated

require (
github.com/BurntSushi/toml v1.3.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
Expand Down
2 changes: 1 addition & 1 deletion pkg/api/chunk_stream_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -37,7 +37,7 @@ func TestChunkUploadStream(t *testing.T) {
)

t.Run("upload and verify", func(t *testing.T) {
chsToGet := []swarm.Chunk{}
chsToGet := make([]swarm.Chunk, 0, 5)
for range 5 {
ch := testingc.GenerateTestRandomChunk()

Expand Down
4 changes: 2 additions & 2 deletions pkg/bmt/proof_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ func TestProofCorrectness(t *testing.T) {
verifySegments := func(t *testing.T, exp []string, found [][]byte) {
t.Helper()

var expSegments [][]byte
expSegments := make([][]byte, 0, len(exp))
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

These are fine but feel like they belong in a separate PR.

Copy link
Copy Markdown
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Yes, but the linter was failing so I decided to fix everything. Next time I will create a separate PR.

for _, v := range exp {
decoded, err := hex.DecodeString(v)
if err != nil {
Expand Down Expand Up @@ -154,7 +154,7 @@ func TestProofCorrectness(t *testing.T) {
"745bae095b6ff5416b4a351a167f731db6d6f5924f30cd88d48e74261795d27b",
}

var segments [][]byte
segments := make([][]byte, 0, len(segmentStrings))
for _, v := range segmentStrings {
decoded, err := hex.DecodeString(v)
if err != nil {
Expand Down
7 changes: 6 additions & 1 deletion pkg/crypto/crypto_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -22,17 +22,20 @@ func TestGenerateSecp256k1Key(t *testing.T) {
if err != nil {
t.Fatal(err)
}
//nolint:staticcheck // SA5011 false positive: t.Fatal terminates test
if k1 == nil {
t.Fatal("nil key")
}
k2, err := crypto.GenerateSecp256k1Key()
if err != nil {
t.Fatal(err)
}
//nolint:staticcheck // SA5011 false positive: t.Fatal terminates test
if k2 == nil {
t.Fatal("nil key")
}

//nolint:staticcheck // SA5011 false positive: t.Fatal terminates test
if bytes.Equal(k1.D.Bytes(), k2.D.Bytes()) {
t.Fatal("two generated keys are equal")
}
Expand All @@ -45,17 +48,19 @@ func TestGenerateSecp256k1EDG(t *testing.T) {
if err != nil {
t.Fatal(err)
}
//nolint:staticcheck // SA5011 false positive: t.Fatal terminates test
if k1 == nil {
t.Fatal("nil key")
}
k2, err := crypto.EDGSecp256_K1.Generate()
if err != nil {
t.Fatal(err)
}
//nolint:staticcheck // SA5011 false positive: t.Fatal terminates test
if k2 == nil {
t.Fatal("nil key")
}

//nolint:staticcheck // SA5011 false positive: t.Fatal terminates test
if bytes.Equal(k1.D.Bytes(), k2.D.Bytes()) {
t.Fatal("two generated keys are equal")
}
Expand Down
4 changes: 2 additions & 2 deletions pkg/file/buffer_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -114,10 +114,10 @@ func TestCopyBuffer(t *testing.T) {
swarm.ChunkSize*17 + 3,
}

testCases := []struct {
testCases := make([]struct {
readBufferSize int
dataSize int
}{}
}, 0, len(dataSizes)*len(readBufferSizes))

for i := range readBufferSizes {
for j := range dataSizes {
Expand Down
2 changes: 1 addition & 1 deletion pkg/file/pipeline/bmt/bmt_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -52,7 +52,7 @@ func TestBmtWriter(t *testing.T) {
mockChainWriter := mock.NewChainWriter()
writer := bmt.NewBmtWriter(mockChainWriter)

var data []byte
data := make([]byte, 0, len(tc.data)+8)

if !tc.noSpan {
data = make([]byte, 8)
Expand Down
3 changes: 2 additions & 1 deletion pkg/node/chain.go
Original file line number Diff line number Diff line change
Expand Up @@ -51,6 +51,7 @@ func InitChain(
pollingInterval time.Duration,
chainEnabled bool,
minimumGasTipCap uint64,
blockCacheTTLPercent uint64,
) (transaction.Backend, common.Address, int64, transaction.Monitor, transaction.Service, error) {
backend := backendnoop.New(chainID)

Expand All @@ -72,7 +73,7 @@ func InitChain(

logger.Info("connected to blockchain backend", "version", versionString)

backend = wrapped.NewBackend(ethclient.NewClient(rpcClient), minimumGasTipCap)
backend = wrapped.NewBackend(ethclient.NewClient(rpcClient), minimumGasTipCap, pollingInterval, blockCacheTTLPercent)
}

backendChainID, err := backend.ChainID(ctx)
Expand Down
2 changes: 2 additions & 0 deletions pkg/node/node.go
Original file line number Diff line number Diff line change
Expand Up @@ -135,6 +135,7 @@ type Options struct {
BlockchainRpcEndpoint string
BlockProfile bool
BlockTime time.Duration
BlockCacheTTLPercent uint64
BootnodeMode bool
Bootnodes []string
CacheCapacity uint64
Expand Down Expand Up @@ -409,6 +410,7 @@ func NewBee(
o.BlockTime,
chainEnabled,
o.MinimumGasTipCap,
o.BlockCacheTTLPercent,
)
if err != nil {
return nil, fmt.Errorf("init chain: %w", err)
Expand Down
2 changes: 1 addition & 1 deletion pkg/postage/mock/service.go
Original file line number Diff line number Diff line change
Expand Up @@ -70,7 +70,7 @@ func (m *mockPostage) StampIssuers() []*postage.StampIssuer {
m.issuerLock.Lock()
defer m.issuerLock.Unlock()

issuers := make([]*postage.StampIssuer, 0)
issuers := make([]*postage.StampIssuer, 0, len(m.issuersMap))
for _, v := range m.issuersMap {
issuers = append(issuers, v)
}
Expand Down
32 changes: 17 additions & 15 deletions pkg/shed/index_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -345,7 +345,7 @@ func TestIndex(t *testing.T) {
}

t.Run("not found", func(t *testing.T) {
items := make([]Item, len(want))
items := make([]Item, len(want), len(want)+1)
for i, w := range want {
items[i] = Item{
Address: w.Address,
Expand Down Expand Up @@ -373,28 +373,29 @@ func TestIndex_Iterate(t *testing.T) {
t.Fatal(err)
}

items := []Item{
{
items := make([]Item, 0, 6)
items = append(items,
Item{
Address: []byte("iterate-hash-01"),
Data: []byte("data80"),
},
{
Item{
Address: []byte("iterate-hash-03"),
Data: []byte("data22"),
},
{
Item{
Address: []byte("iterate-hash-05"),
Data: []byte("data41"),
},
{
Item{
Address: []byte("iterate-hash-02"),
Data: []byte("data84"),
},
{
Item{
Address: []byte("iterate-hash-06"),
Data: []byte("data1"),
},
}
)
batch := new(leveldb.Batch)
for _, i := range items {
err = index.PutInBatch(batch, i)
Expand Down Expand Up @@ -555,28 +556,29 @@ func TestIndex_IterateReverse(t *testing.T) {
t.Fatal(err)
}

items := []Item{
{
items := make([]Item, 0, 6)
items = append(items,
Item{
Address: []byte("iterate-hash-01"),
Data: []byte("data80"),
},
{
Item{
Address: []byte("iterate-hash-03"),
Data: []byte("data22"),
},
{
Item{
Address: []byte("iterate-hash-05"),
Data: []byte("data41"),
},
{
Item{
Address: []byte("iterate-hash-02"),
Data: []byte("data84"),
},
{
Item{
Address: []byte("iterate-hash-06"),
Data: []byte("data1"),
},
}
)
batch := new(leveldb.Batch)
for _, i := range items {
err = index.PutInBatch(batch, i)
Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/migration/steps_chain_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -19,7 +19,7 @@ func TestNewStepsChain(t *testing.T) {
store := inmemstore.New()
populateStore(t, store, populateItemsCount)

stepsFn := make([]migration.StepFn, 0)
stepsFn := make([]migration.StepFn, 0, 10)

// Create 10 step functions where each would remove single element, having value [0-10)
for i := range 10 {
Expand Down
2 changes: 1 addition & 1 deletion pkg/storage/storagetest/storage.go
Original file line number Diff line number Diff line change
Expand Up @@ -95,7 +95,7 @@ func (o *obj1) ID() string { return o.Id }
func (obj1) Namespace() string { return "obj1" }

func (o *obj1) Marshal() ([]byte, error) {
buf := make([]byte, 40)
buf := make([]byte, 40, 40+len(o.Buf))
copy(buf[:32], o.Id)
binary.LittleEndian.PutUint64(buf[32:], o.SomeInt)
buf = append(buf, o.Buf[:]...)
Expand Down
4 changes: 2 additions & 2 deletions pkg/storer/internal/reserve/reserve_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -537,7 +537,7 @@ func TestEvict(t *testing.T) {
ts := internal.NewInmemStorage()

chunksPerBatch := 50
var chunks []swarm.Chunk
chunks := make([]swarm.Chunk, 0, 3*chunksPerBatch)
batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
evictBatch := batches[1]

Expand Down Expand Up @@ -690,7 +690,7 @@ func TestEvictMaxCount(t *testing.T) {
t.Fatal(err)
}

var chunks []swarm.Chunk
chunks := make([]swarm.Chunk, 0, 20)

batch := postagetesting.MustNewBatch()

Expand Down
2 changes: 1 addition & 1 deletion pkg/storer/mock/mockstorer.go
Original file line number Diff line number Diff line change
Expand Up @@ -132,7 +132,7 @@ func (m *mockStorer) ListSessions(offset, limit int) ([]storer.SessionInfo, erro
m.mu.Lock()
defer m.mu.Unlock()

sessions := []storer.SessionInfo{}
sessions := make([]storer.SessionInfo, 0, len(m.activeSessions))
for _, v := range m.activeSessions {
sessions = append(sessions, *v)
}
Expand Down
3 changes: 2 additions & 1 deletion pkg/storer/reserve_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -220,8 +220,8 @@ func TestEvictBatch(t *testing.T) {
}
ctx := context.Background()

var chunks []swarm.Chunk
var chunksPerPO uint64 = 10
chunks := make([]swarm.Chunk, 0, chunksPerPO*3)
batches := []*postage.Batch{postagetesting.MustNewBatch(), postagetesting.MustNewBatch(), postagetesting.MustNewBatch()}
evictBatch := batches[1]

Expand Down Expand Up @@ -543,6 +543,7 @@ func TestSubscribeBin(t *testing.T) {
chunksPerPO uint64 = 50
putter = storer.ReservePutter()
)
chunks = make([]swarm.Chunk, 0, chunksPerPO*2)
Copy link
Copy Markdown
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

it is a duplicate; there is already an allocation on line 543

Copy link
Copy Markdown
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

that's also why the linter is complaining


for j := range 2 {
for range chunksPerPO {
Expand Down
2 changes: 1 addition & 1 deletion pkg/storer/sample_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -26,7 +26,7 @@ func TestReserveSampler(t *testing.T) {
const maxPO = 10

randChunks := func(baseAddr swarm.Address, timeVar uint64) []swarm.Chunk {
var chs []swarm.Chunk
chs := make([]swarm.Chunk, 0, chunkCountPerPO*maxPO)
for po := range maxPO {
for range chunkCountPerPO {
ch := chunk.GenerateValidRandomChunkAt(t, baseAddr, po).WithBatch(3, 2, false)
Expand Down
25 changes: 11 additions & 14 deletions pkg/transaction/transaction.go
Original file line number Diff line number Diff line change
Expand Up @@ -154,9 +154,9 @@ func (t *transactionService) waitForAllPendingTx() error {
return err
}

pendingTxs = t.filterPendingTransactions(t.ctx, pendingTxs)
pending := t.filterPendingTransactions(t.ctx, pendingTxs)

for _, txHash := range pendingTxs {
for txHash := range pending {
t.waitForPendingTx(txHash)
}

Expand Down Expand Up @@ -339,19 +339,15 @@ func (t *transactionService) nextNonce(ctx context.Context) (uint64, error) {
return 0, err
}

pendingTxs = t.filterPendingTransactions(t.ctx, pendingTxs)
pending := t.filterPendingTransactions(t.ctx, pendingTxs)

// PendingNonceAt returns the nonce we should use, but we will
// compare this to our pending tx list, therefore the -1.
maxNonce := onchainNonce - 1
for _, txHash := range pendingTxs {
trx, _, err := t.backend.TransactionByHash(ctx, txHash)
if err != nil {
t.logger.Error(err, "pending transaction not found", "tx", txHash)
return 0, err
for _, trx := range pending {
Comment thread
sbackend123 marked this conversation as resolved.
Outdated
if trx != nil {
maxNonce = max(maxNonce, trx.Nonce())
}

maxNonce = max(maxNonce, trx.Nonce())
}

return maxNonce + 1, nil
Expand Down Expand Up @@ -404,11 +400,12 @@ func (t *transactionService) PendingTransactions() ([]common.Hash, error) {

// filterPendingTransactions will filter supplied transaction hashes removing those that are not pending anymore.
// Removed transactions will be also removed from store.
func (t *transactionService) filterPendingTransactions(ctx context.Context, txHashes []common.Hash) []common.Hash {
result := make([]common.Hash, 0, len(txHashes))
// Returns the pending transactions keyed by hash.
func (t *transactionService) filterPendingTransactions(ctx context.Context, txHashes []common.Hash) map[common.Hash]*types.Transaction {
result := make(map[common.Hash]*types.Transaction, len(txHashes))

for _, txHash := range txHashes {
_, isPending, err := t.backend.TransactionByHash(ctx, txHash)
trx, isPending, err := t.backend.TransactionByHash(ctx, txHash)
// When an error occurs, consider the transaction as pending (so this transaction won't be filtered out),
// unless it was not found
if err != nil {
Expand All @@ -422,7 +419,7 @@ func (t *transactionService) filterPendingTransactions(ctx context.Context, txHa
}

if isPending {
result = append(result, txHash)
result[txHash] = trx
} else {
err := t.store.Delete(pendingTransactionKey(txHash))
if err != nil {
Expand Down
Loading
Loading