Commit d237ce6: use updated get_ptc iterator
Merge commit, 2 parents: a2bb3e4 + 0edc9d5

25 files changed: +711 / -139 lines

.github/workflows/ci.yml

Lines changed: 2 additions & 42 deletions
@@ -259,51 +259,11 @@ jobs:
       - name: Check exception tracking
         if: ${{ !cancelled() }} && github.event_name == 'pull_request'
         run: |
-          problematic_files=()
-          while read -r file; do
-            if ! grep -qE '^{\.push raises: \[\](, gcsafe)?\.}$' "$file"; then
-              problematic_files+=("$file")
-            fi
-          done < <(git diff --name-only --diff-filter=AM --ignore-submodules HEAD^ HEAD | grep -E '\.nim$' || true)
-
-          if (( ${#problematic_files[@]} )); then
-            echo "The following files do not have '{.push raises: [], gcsafe.}' (gcsafe optional):"
-            for file in "${problematic_files[@]}"; do
-              echo "- $file"
-            done
-            echo "See https://status-im.github.io/nim-style-guide/errors.exceptions.html"
-            exit 2
-          fi
-
+          scripts/check_exception_headers.sh
       - name: Check submodules
         if: ${{ !cancelled() }} && github.event_name == 'pull_request'
         run: |
-          while read -r file; do
-            commit="$(git -C "$file" rev-parse HEAD)"
-            commit_date=$(TZ=UTC0 git -C "$file" show -s --format='%cd' --date=iso-local HEAD)
-            if ! branch="$(git config -f .gitmodules --get "submodule.$file.branch")"; then
-              echo "Submodule '$file': '.gitmodules' lacks 'branch' entry"
-              exit 2
-            fi
-            # Without the `--depth=1` fetch, may run into 'error processing shallow info: 4'
-            if ! error="$(git -C "$file" fetch -q --depth=1 origin "+refs/heads/${branch}:refs/remotes/origin/${branch}")"; then
-              echo "Submodule '$file': Failed to fetch '$branch': $error (1)"
-              exit 2
-            fi
-            branch_commit_date=$(TZ=UTC0 git -C "$file" show -s --format='%cd' --date=iso-local "refs/remotes/origin/${branch}")
-            if [[ "${commit_date}" > "${branch_commit_date}" ]]; then
-              echo "Submodule '$file': '$commit' ($commit_date) is more recent than latest '$branch' ($branch_commit_date) (branch config: '.gitmodules')"
-              exit 2
-            fi
-            if ! error="$(git -C "$file" fetch -q --shallow-since="$commit_date" origin "+refs/heads/${branch}:refs/remotes/origin/${branch}")"; then
-              echo "Submodule '$file': Failed to fetch '$branch': $error (2)"
-              exit 2
-            fi
-            if ! git -C "$file" merge-base --is-ancestor "$commit" "refs/remotes/origin/$branch"; then
-              echo "Submodule '$file': '$commit' is not on '$branch' as of $commit_date (branch config: '.gitmodules')"
-              exit 2
-            fi
-          done < <(git diff --name-only --diff-filter=AM HEAD^ HEAD | grep -f <(git config --file .gitmodules --get-regexp path | awk '{ print $2 }') || true)
+          scripts/check_submodule_branches.sh
 
       # https://github.com/EnricoMi/publish-unit-test-result-action
       event_file:

AllTests-mainnet.md

Lines changed: 14 additions & 0 deletions
@@ -172,6 +172,18 @@ AllTests-mainnet
 ```
 ## ColumnQuarantine data structure test suite [Preset: mainnet]
 ```diff
++ ColumnQuarantine: update(empty:grow) [node->node] test OK
++ ColumnQuarantine: update(empty:grow) [node->supernode] test OK
++ ColumnQuarantine: update(empty:shrink) [node->node] test OK
++ ColumnQuarantine: update(empty:shrink) [supernode->node] test OK
++ ColumnQuarantine: update(memory+disk:grow) [node->node] test OK
++ ColumnQuarantine: update(memory+disk:grow) [node->supernode] test OK
++ ColumnQuarantine: update(memory+disk:shrink) [node->node] test OK
++ ColumnQuarantine: update(memory+disk:shrink) [supernode->node] test OK
++ ColumnQuarantine: update(memory:grow) [node->node] test OK
++ ColumnQuarantine: update(memory:grow) [node->supernode] test OK
++ ColumnQuarantine: update(memory:shrink) [node->node] test OK
++ ColumnQuarantine: update(memory:shrink) [supernode->node] test OK
 + Empty in-memory scenario test [node] OK
 + Empty in-memory scenario test [supernode] OK
 + Mixed entries scenario test [node]
@@ -1163,9 +1175,11 @@ AllTests-mainnet
 ```
 ## subnet tracker
 ```diff
++ should register and prune PTC duties OK
 + should register stability subnets on attester duties OK
 + should register sync committee duties OK
 + should subscribe to all subnets when flag is enabled OK
++ should track PTC duties in slot bitmaps OK
 ```
 ## test_fixture_ssz_generic_types.nim
 ```diff

CHANGELOG.md

Lines changed: 24 additions & 0 deletions
@@ -1,3 +1,27 @@
+2025-11-28 v25.11.1
+===================
+
+The Nimbus consensus client `v25.11.1` is a `high-urgency` release for mainnet due to the impending Fusaka fork and a `medium-urgency` release on other networks due to stability fixes. To access blobs in Fusaka and later forks with this release, use the `--light-supernode` option. The undocumented `--debug-peerdas-supernode` option will be removed in an upcoming release.
+
+### Improvements
+
+- Introduce light supernodes to provide blobs with less resource usage:
+  https://github.com/status-im/nimbus-eth2/pull/7752
+
+- Allow the consensus light client to specify finalized block hashes to sync:
+  https://github.com/status-im/nimbus-eth2/pull/7735
+
+- Allow updating column custody during any slot:
+  https://github.com/status-im/nimbus-eth2/pull/7724
+
+### Fixes
+
+- Fix blob and column quarantine-related hang:
+  https://github.com/status-im/nimbus-eth2/pull/7743
+
+- Fix blocking of command-line-supplied invalid blocks:
+  https://github.com/status-im/nimbus-eth2/pull/7714
+
 2025-11-03 v25.11.0
 ===================
 

Makefile

Lines changed: 1 addition & 0 deletions
@@ -720,6 +720,7 @@ test_libnimbus_lc: libnimbus_lc.a
 		--std=c17 -flto \
 		-pedantic -pedantic-errors \
 		-Wall -Wextra -Werror -Wno-maybe-uninitialized \
+		-Wno-stringop-overflow \
 		-Wno-unsafe-buffer-usage -Wno-unknown-warning-option \
 		-o build/test_libnimbus_lc \
 		beacon_chain/libnimbus_lc/test_libnimbus_lc.c \

beacon_chain/conf.nim

Lines changed: 5 additions & 0 deletions
@@ -251,6 +251,11 @@ type
       desc: "Subscribe to all column subnets, thereby becoming a PeerDAS supernode"
       name: "peerdas-supernode" .}: bool
 
+    lightSupernode* {.
+      defaultValue: false,
+      desc: "Subscribe to the first half of column subnets"
+      name: "light-supernode" .}: bool
+
     slashingDbKind* {.
       hidden
       defaultValue: SlashingDbKind.v2
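
The `conf.nim` hunk only declares the new flag. As a rough illustration of what "first half of column subnets" could translate to, here is a minimal, self-contained Nim sketch; the helper name, the hard-coded `NUMBER_OF_COLUMNS = 128` preset value, and the fallback branch are assumptions for this example, not the repo's actual wiring of `--light-supernode`.

```nim
# Hypothetical illustration only: `lightSupernodeColumns` and the fallback
# branch are not part of nimbus-eth2; NUMBER_OF_COLUMNS = 128 is assumed.
const NUMBER_OF_COLUMNS = 128

type ColumnIndex = uint64

func lightSupernodeColumns(): seq[ColumnIndex] =
  ## "First half of column subnets": columns 0 ..< NUMBER_OF_COLUMNS div 2.
  for i in 0 ..< NUMBER_OF_COLUMNS div 2:
    result.add ColumnIndex(i)

when isMainModule:
  let lightSupernode = true        # stands in for `config.lightSupernode`
  let custodyColumns =
    if lightSupernode:
      lightSupernodeColumns()
    else:
      newSeq[ColumnIndex]()        # stands in for the regular custody computation
  echo "custody columns: ", custodyColumns.len
```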

beacon_chain/consensus_object_pools/blob_quarantine.nim

Lines changed: 73 additions & 12 deletions
@@ -82,6 +82,9 @@ type
   ColumnQuarantine* =
     SidecarQuarantine[fulu.DataColumnSidecar, OnDataColumnSidecarCallback]
 
+  ColumnQuarantineNode* =
+    DoublyLinkedNode[RootTableRecord[fulu.DataColumnSidecar]]
+
 func indexLog*[T: SomeSidecarRef](sidecars: openArray[ref T]): string =
   "[" & sidecars.mapIt($uint64(it[].index)).join(",") & "]"
 
@@ -886,12 +889,13 @@ proc init*(
     onDataColumnSidecarCallback: OnDataColumnSidecarCallback
 ): ColumnQuarantine =
   doAssert(len(custodyColumns) <= NUMBER_OF_COLUMNS)
+
+  let custodyMap = ColumnMap.init(custodyColumns)
   var indexMap = newSeqUninit[int](NUMBER_OF_COLUMNS)
   if len(custodyColumns) < NUMBER_OF_COLUMNS:
     for i in 0 ..< len(indexMap):
       indexMap[i] = -1
-  for index, item in custodyColumns.pairs():
-    doAssert(item < uint64(NUMBER_OF_COLUMNS))
+  for index, item in custodyMap.pairs():
     indexMap[int(item)] = index
 
   let size = maxSidecars(NUMBER_OF_COLUMNS)
@@ -911,27 +915,84 @@ proc init*(
     memSidecarsCount: 0,
     diskSidecarsCount: 0,
     indexMap: indexMap,
-    custodyColumns: @custodyColumns,
-    custodyMap: ColumnMap.init(custodyColumns),
+    custodyColumns: toSeq(custodyMap.items),
+    custodyMap: custodyMap,
     list: initDoublyLinkedList[RootTableRecord[fulu.DataColumnSidecar]](),
     db: database,
     onSidecarCallback: onDataColumnSidecarCallback
   )
 
-func updateColumnQuarantine*(
-    quarantine: ref ColumnQuarantine,
+proc update*(
+    quarantine: var ColumnQuarantine,
     cfg: RuntimeConfig,
-    custodyColumns: openArray[ColumnIndex]) =
+    custodyColumns: openArray[ColumnIndex]
+) =
   doAssert(len(custodyColumns) <= NUMBER_OF_COLUMNS)
+  let
+    custodyMap = ColumnMap.init(custodyColumns)
+    maxSidecarsPerBlockCount = len(custodyMap)
+
   var indexMap = newSeqUninit[int](NUMBER_OF_COLUMNS)
   if len(custodyColumns) < NUMBER_OF_COLUMNS:
     for i in 0 ..< len(indexMap):
       indexMap[i] = -1
-  for index, item in custodyColumns.pairs():
-    doAssert(item < uint64(NUMBER_OF_COLUMNS))
+  for index, item in custodyMap.pairs():
     indexMap[int(item)] = index
 
-  quarantine.maxSidecarsPerBlockCount = len(custodyColumns)
+  var
+    memSidecarsCount = 0
+    diskSidecarsCount = 0
+    nodesToRemove: seq[ColumnQuarantineNode]
+
+  for node in quarantine.list.nodes():
+    var
+      sidecars =
+        newSeq[SidecarHolder[fulu.DataColumnSidecar]](len(custodyMap))
+      count = 0
+      unloaded = 0
+
+    for cindex in quarantine.custodyMap.items():
+      let
+        index = quarantine.getIndex(cindex)
+        sidecar = node[].value.sidecars[index]
+
+      if not(isEmpty(sidecar)):
+        if cindex in custodyMap:
+          let dindex = indexMap[int(cindex)]
+          if dindex >= 0:
+            sidecars[dindex] = sidecar
+            inc(count)
+            if not(sidecar.isLoaded()):
+              inc(unloaded)
+
+    node.value.sidecars.reset()
+
+    if count > 0:
+      node[].value.sidecars = sidecars
+      node[].value.count = count
+      node[].value.unloaded = unloaded
+      # We only account for sidecars that remain useful in the new
+      # configuration. Some sidecars may be left on disk that the new
+      # configuration cannot use and that we cannot easily delete; those
+      # are removed as soon as the sidecars with the same `block_root`
+      # are popped out of the quarantine.
+      diskSidecarsCount.inc(unloaded)
+      memSidecarsCount.inc(count - unloaded)
+    else:
+      # If no useful columns remain, mark this node for deletion.
+      nodesToRemove.add(node)
+
+  for node in nodesToRemove:
+    quarantine.removeNode(node, 0)
+
+  quarantine.diskSidecarsCount = diskSidecarsCount
+  quarantine.memSidecarsCount = memSidecarsCount
+  blob_quarantine_memory_slots_occupied.set(
+    int64(quarantine.memSidecarsCount))
+  blob_quarantine_database_slots_occupied.set(
+    int64(quarantine.diskSidecarsCount))
+
+  quarantine.maxSidecarsPerBlockCount = maxSidecarsPerBlockCount
   quarantine.indexMap = indexMap
-  quarantine.custodyColumns = @custodyColumns
-  quarantine.custodyMap = ColumnMap.init(custodyColumns)
+  quarantine.custodyColumns = toSeq(custodyMap.items)
+  quarantine.custodyMap = custodyMap
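
The new `update` proc re-maps sidecars already held in the quarantine onto a changed custody set instead of requiring a fresh quarantine. A hedged usage sketch follows, matching the signature added above; `quarantine` (a `var ColumnQuarantine`) and `cfg` (a `RuntimeConfig`) are assumed to come from the surrounding node setup, and the custody values are made up for illustration, so this is not runnable in isolation.

```nim
# Hedged usage sketch: `quarantine` and `cfg` come from the node's setup
# code; the custody columns below are made-up example values.
let newCustody = @[ColumnIndex(0), ColumnIndex(7), ColumnIndex(42)]

# Re-index existing entries against the new custody set: sidecars that are
# still useful are kept and re-counted as memory/disk slots, list nodes with
# no useful columns left are dropped, and `custodyColumns`, `custodyMap`,
# `indexMap` and `maxSidecarsPerBlockCount` are replaced.
quarantine.update(cfg, newCustody)
```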

beacon_chain/consensus_object_pools/blockchain_dag.nim

Lines changed: 2 additions & 3 deletions
@@ -2283,9 +2283,8 @@ proc loadExecutionBlockHash*(dag: ChainDAGRef, bid: BlockId): Opt[Eth2Digest] =
     return Opt.none(Eth2Digest)
 
   withBlck(blockData):
-    debugGloasComment " "
-    when consensusFork == ConsensusFork.Gloas:
-      Opt.some ZERO_HASH
+    when consensusFork >= ConsensusFork.Gloas:
+      Opt.some forkyBlck.message.body.signed_execution_payload_bid.message.block_hash
     elif consensusFork >= ConsensusFork.Bellatrix:
      Opt.some forkyBlck.message.body.execution_payload.block_hash
     else:
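
The hunk above swaps a Gloas placeholder (`ZERO_HASH`) for the block hash carried in the signed execution payload bid, selected at compile time per fork. The sketch below is a self-contained illustration of that `when consensusFork >= ...` dispatch pattern; the enum and the returned strings are simplified stand-ins, not the repo's real `ConsensusFork`/`withBlck` machinery.

```nim
# Self-contained illustration of compile-time fork dispatch; simplified
# stand-ins only, assuming the usual fork ordering.
type ConsensusFork = enum
  Phase0, Altair, Bellatrix, Capella, Deneb, Electra, Fulu, Gloas

proc blockHashSource(fork: static ConsensusFork): string =
  when fork >= ConsensusFork.Gloas:
    "signed_execution_payload_bid.message.block_hash"
  elif fork >= ConsensusFork.Bellatrix:
    "execution_payload.block_hash"
  else:
    "no execution block hash (pre-merge fork)"

when isMainModule:
  echo blockHashSource(ConsensusFork.Gloas)     # post-Gloas source
  echo blockHashSource(ConsensusFork.Capella)   # Bellatrix..Fulu source
  echo blockHashSource(ConsensusFork.Altair)    # pre-merge
```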

beacon_chain/consensus_object_pools/payload_attestation_pool.nim

Lines changed: 6 additions & 7 deletions
@@ -16,7 +16,7 @@ import
   "."/[spec_cache, blockchain_dag],
   ../beacon_clock
 
-from ../spec/beaconstate import get_ptc_list
+from ../spec/beaconstate import get_ptc
 
 logScope: topics = "payattpool"
 
@@ -90,8 +90,7 @@ proc aggregateMessages(
     signatures: seq[CookedSig]
     ptc_index = 0
 
-  let ptc_list = get_ptc_list(forkyState.data, slot, cache)
-  for ptc_validator_index in ptc_list:
+  for ptc_validator_index in get_ptc(forkyState.data, slot, cache):
     entry.messages.withValue(ptc_validator_index, message):
       let cookedSig = message[].signature.load().valueOr:
         continue
@@ -138,13 +137,14 @@ proc getPayloadAttestationsForBlock*(
     return @[]
 
   let attestation_slot = target_slot - 1
-  var
-    payload_attestations: seq[PayloadAttestation]
-    totalCandidates = 0
 
   if attestation_slot notin pool.attestations:
     return @[]
 
+  var
+    payload_attestations: seq[PayloadAttestation]
+    totalCandidates = 0
+
   pool.attestations.withValue(attestation_slot, slotEntries):
     for beacon_block_root, entry in slotEntries[]:
       totalCandidates += 1
@@ -153,7 +153,6 @@ proc getPayloadAttestationsForBlock*(
         attestation_slot, beacon_block_root, cache)
       if aggregated.isSome():
        payload_attestations.add(aggregated.get())
-
       if payload_attestations.len >= MAX_PAYLOAD_ATTESTATIONS.int:
         break
 
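
The commit's headline change is visible here: `aggregateMessages` now walks the `get_ptc` iterator directly instead of materializing a `get_ptc_list` sequence first. The sketch below is a generic, self-contained illustration of that trade-off; the `selectMembers`/`selectMemberList` names and the toy selection rule are made up and are not the real PTC computation from `beaconstate.nim`.

```nim
# Generic illustration of iterating directly vs. materializing a list first;
# names and selection rule are made up, not the repo's API.
iterator selectMembers(committeeSize, total: int): int =
  ## Yields committee members one at a time, no intermediate seq.
  for i in 0 ..< committeeSize:
    yield (i * 31) mod total

proc selectMemberList(committeeSize, total: int): seq[int] =
  ## Same members, but allocated into a seq on every call.
  for v in selectMembers(committeeSize, total):
    result.add v

when isMainModule:
  var acc = 0
  # List version: allocates a seq per call before the loop runs.
  for idx in selectMemberList(512, 1_000_000):
    acc += idx
  # Iterator version: visits the same members without the allocation.
  for idx in selectMembers(512, 1_000_000):
    acc += idx
  echo acc
```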

beacon_chain/gossip_processing/block_processor.nim

Lines changed: 11 additions & 10 deletions
@@ -725,18 +725,19 @@ proc addBlock*(
     return self[].storeBackfillBlock(blck, sidecarsOpt)
 
   let queueTick = Moment.now()
+
+  # If the lock is acquired already, the current block will be put on hold
+  # meaning that we'll form an unbounded queue of blocks to be processed
+  # waiting for the lock - this is similar to using an `AsyncQueue` but
+  # without the copying and transition to/from `Forked`.
+  # The lock is important to ensure that we don't process blocks out-of-order
+  # which both would upset the `storeBlock` logic and cause unnecessary
+  # quarantine traffic.
+  self.pendingStores += 1
+  await self.storeLock.acquire()
+
   let res =
     try:
-      # If the lock is acquired already, the current block will be put on hold
-      # meaning that we'll form an unbounded queue of blocks to be processed
-      # waiting for the lock - this is similar to using an `AsyncQueue` but
-      # without the copying and transition to/from `Forked`.
-      # The lock is important to ensure that we don't process blocks out-of-order
-      # which both would upset the `storeBlock` logic and cause unnecessary
-      # quarantine traffic.
-      self.pendingStores += 1
-      await self.storeLock.acquire()
-
       # Since block processing is async, we want to make sure it doesn't get
       # (re)added there while we're busy - the start of processing also removes
       # the block from the various quarantines.
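
This hunk moves the `pendingStores` bump and `storeLock.acquire()` out of the `try` body, so the cleanup paired with that `try` only runs once the lock is actually held. A minimal sketch of the acquire-before-try shape follows, using chronos' `AsyncLock`; the "store" body is a stand-in for the real `storeBlock` pipeline, and the proc name is hypothetical.

```nim
# Minimal sketch of the acquire-before-try pattern, assuming chronos'
# AsyncLock; everything else here is illustrative only.
import chronos

proc storeOne(lock: AsyncLock) {.async.} =
  # Acquire before entering `try`, so the `finally` below only ever releases
  # a lock this coroutine actually holds. Waiters queue up on the acquire,
  # which keeps stores strictly ordered, as the comment in the diff notes.
  await lock.acquire()
  try:
    await sleepAsync(10.milliseconds)   # stand-in for the real store work
  finally:
    lock.release()

when isMainModule:
  let lock = newAsyncLock()
  waitFor allFutures(storeOne(lock), storeOne(lock))
```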
