diff --git a/AllTests-mainnet.md b/AllTests-mainnet.md index 00456b20a4..08d211b64f 100644 --- a/AllTests-mainnet.md +++ b/AllTests-mainnet.md @@ -856,6 +856,15 @@ AllTests-mainnet ```diff + pre-1.1.0 OK ``` +## Payload attestation pool [Preset: mainnet] +```diff ++ Can add and retrieve payload attestations [Preset: mainnet] OK ++ Can get payload attestations for block production [Preset: mainnet] OK ++ Different payload presence values [Preset: mainnet] OK ++ Duplicate validator in PTC - multiple signatures [Preset: mainnet] OK ++ Multiple validators in PTC can attest [Preset: mainnet] OK ++ Payload attestations get pruned [Preset: mainnet] OK +``` ## PeerPool testing suite ```diff + Access peers by key test OK diff --git a/beacon_chain/beacon_node.nim b/beacon_chain/beacon_node.nim index 185781793c..a01da98015 100644 --- a/beacon_chain/beacon_node.nim +++ b/beacon_chain/beacon_node.nim @@ -23,7 +23,8 @@ import ./el/el_manager, ./consensus_object_pools/[ blockchain_dag, blob_quarantine, block_quarantine, consensus_manager, - attestation_pool, sync_committee_msg_pool, validator_change_pool, + attestation_pool, execution_payload_pool, payload_attestation_pool, + sync_committee_msg_pool, validator_change_pool, blockchain_list], ./spec/datatypes/[base, altair], ./spec/eth2_apis/dynamic_fee_recipients, @@ -87,6 +88,8 @@ type syncCommitteeMsgPool*: ref SyncCommitteeMsgPool lightClientPool*: ref LightClientPool validatorChangePool*: ref ValidatorChangePool + executionPayloadBidPool*: ref ExecutionPayloadBidPool + payloadAttestationPool*: ref PayloadAttestationPool elManager*: ELManager restServer*: RestServerRef keymanagerHost*: ref KeymanagerHost diff --git a/beacon_chain/consensus_object_pools/payload_attestation_pool.nim b/beacon_chain/consensus_object_pools/payload_attestation_pool.nim new file mode 100644 index 0000000000..f525a4fe8a --- /dev/null +++ b/beacon_chain/consensus_object_pools/payload_attestation_pool.nim @@ -0,0 +1,168 @@ +# beacon_chain +# Copyright 
(c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [], gcsafe.} + +import + # Status libraries + metrics, + chronicles, + # Internal + ../spec/[eth2_merkleization, forks, validator], + "."/[spec_cache, blockchain_dag], + ../beacon_clock + +from ../spec/beaconstate import get_ptc + +logScope: topics = "payattpool" + +declareGauge payload_attestation_pool_block_packing_time, + "Time it took to create list of payload attestations for block" + +type + PayloadAttestationEntry* = object + data*: PayloadAttestationData + messages*: Table[ValidatorIndex, PayloadAttestationMessage] + aggregated*: Opt[PayloadAttestation] + + PayloadAttestationPool* = object + dag*: ChainDAGRef + attestations*: Table[Slot, Table[Eth2Digest, PayloadAttestationEntry]] + +func init*(T: type PayloadAttestationPool, dag: ChainDAGRef): T = + T(dag: dag) + +func pruneOldEntries(pool: var PayloadAttestationPool, wallTime: BeaconTime) = + let current_slot = wallTime.slotOrZero(pool.dag.timeParams) + + # keep only recent slots - since payload attestations + # are only valid for 1 slot + var slotsToRemove: seq[Slot] + for slot in pool.attestations.keys: + if slot + 2 < current_slot: + slotsToRemove.add(slot) + + for slot in slotsToRemove: + pool.attestations.del(slot) + +func addPayloadAttestation*( + pool: var PayloadAttestationPool, message: PayloadAttestationMessage, + wallTime: BeaconTime): bool = + template beacon_block_root: untyped = message.data.beacon_block_root + let + slot = message.data.slot + validator_index = message.validator_index + + pool.pruneOldEntries(wallTime) + + # create an entry for this block and slot + let + entry 
= addr pool.attestations.mgetOrPut(slot).mgetOrPut( + beacon_block_root, PayloadAttestationEntry(data: message.data)) + + # Check for duplicate + let vidx = ValidatorIndex(validator_index) + if vidx in entry[].messages: + return false + + entry[].messages[vidx] = message + + entry[].aggregated = Opt.none(PayloadAttestation) + + true + +func aggregateMessages( + pool: PayloadAttestationPool, slot: Slot, + entry: var PayloadAttestationEntry, cache: var StateCache +): Opt[PayloadAttestation] = + + if entry.messages.len == 0: + return Opt.none(PayloadAttestation) + + withState(pool.dag.headState): + when consensusFork >= ConsensusFork.Gloas: + var + aggregation_bits: BitArray[int(PTC_SIZE)] + signatures: seq[CookedSig] + ptc_index = 0 + + for ptc_validator_index in get_ptc(forkyState.data, slot, cache): + entry.messages.withValue(ptc_validator_index, message): + let cookedSig = message[].signature.load().valueOr: + continue + aggregation_bits[ptc_index] = true + signatures.add(cookedSig) + ptc_index += 1 + + if signatures.len == 0: + return Opt.none(PayloadAttestation) + + var aggregated_signature = AggregateSignature.init(signatures[0]) + for i in 1..= MAX_PAYLOAD_ATTESTATIONS.int: + break + + let packingDur = Moment.now() - startPackingTick + + debug "Packed payload attestations for block", + target_slot = target_slot, attestation_slot = attestation_slot, + packingDur = packingDur, totalCandidates = totalCandidates, + payload_attestations = payload_attestations.len() + + payload_attestation_pool_block_packing_time.set(packingDur.toFloatSeconds()) + + payload_attestations diff --git a/beacon_chain/gossip_processing/batch_validation.nim b/beacon_chain/gossip_processing/batch_validation.nim index 477b2b5dc9..ab9fbfc352 100644 --- a/beacon_chain/gossip_processing/batch_validation.nim +++ b/beacon_chain/gossip_processing/batch_validation.nim @@ -578,3 +578,28 @@ proc scheduleBlsToExecutionChangeCheck*( pubkey, sig) ok((fut, sig)) + +proc schedulePayloadAttestationCheck*( 
+ batchCrypto: ref BatchCrypto, fork: Fork, + genesis_validators_root: Eth2Digest, + msg: PayloadAttestationMessage, + pubkey: CookedPubKey, + signature: ValidatorSig + ): Result[tuple[fut: FutureBatchResult, sig: CookedSig], cstring] = + ## Schedule crypto verification of a payload attestation + ## + ## The buffer is processed: + ## - when eager processing is enabled and the batch is full + ## - otherwise after 10ms (BatchAttAccumTime) + ## + ## This returns an error if crypto sanity checks failed + ## and a future with the deferred payload attestation check otherwise. + ## + let + sig = signature.load().valueOr: + return err("payload attestation: cannot load signature") + fut = batchCrypto.verifySoon("batch_validation.schedulePayloadAttestationCheck"): + payload_attestation_signature_set( + fork, genesis_validators_root, msg, pubkey, sig) + + ok((fut, sig)) diff --git a/beacon_chain/gossip_processing/eth2_processor.nim b/beacon_chain/gossip_processing/eth2_processor.nim index c1864b8c25..9c1ac4aa5c 100644 --- a/beacon_chain/gossip_processing/eth2_processor.nim +++ b/beacon_chain/gossip_processing/eth2_processor.nim @@ -18,7 +18,8 @@ import ../consensus_object_pools/[ attestation_pool, blob_quarantine, block_clearance, block_quarantine, blockchain_dag, envelope_quarantine, execution_payload_pool, - light_client_pool, sync_committee_msg_pool, validator_change_pool], + payload_attestation_pool, light_client_pool, + sync_committee_msg_pool, validator_change_pool], ../validators/validator_pool, ../beacon_clock, "."/[gossip_validation, block_processor, batch_validation], @@ -146,6 +147,7 @@ type syncCommitteeMsgPool: ref SyncCommitteeMsgPool lightClientPool: ref LightClientPool executionPayloadBidPool*: ref ExecutionPayloadBidPool + payloadAttestationPool*: ref PayloadAttestationPool doppelgangerDetection*: DoppelgangerProtection @@ -196,6 +198,7 @@ proc new*(T: type Eth2Processor, syncCommitteeMsgPool: ref SyncCommitteeMsgPool, lightClientPool: ref LightClientPool, 
executionPayloadBidPool: ref ExecutionPayloadBidPool, + payloadAttestationPool: ref PayloadAttestationPool, quarantine: ref Quarantine, blobQuarantine: ref BlobQuarantine, dataColumnQuarantine: ref ColumnQuarantine, @@ -216,6 +219,7 @@ proc new*(T: type Eth2Processor, syncCommitteeMsgPool: syncCommitteeMsgPool, lightClientPool: lightClientPool, executionPayloadBidPool: executionPayloadBidPool, + payloadAttestationPool: payloadAttestationPool, quarantine: quarantine, blobQuarantine: blobQuarantine, dataColumnQuarantine: dataColumnQuarantine, @@ -916,3 +920,24 @@ proc processExecutionPayloadBid*( debug "Dropping execution payload bid", reason = $v.error beacon_execution_payload_bids_dropped.inc(1, [$v.error[0]]) err(v.error()) + +proc processPayloadAttestationMessage*( + self: ref Eth2Processor, src: MsgSource, + payload_attestation_message: PayloadAttestationMessage, + checkSignature, checkValidator: bool +): Future[ValidationRes] {.async: (raises: [CancelledError]).} = + let + wallTime = self.getCurrentBeaconTime() + v = await validatePayloadAttestationMessage( + self.dag, self.payloadAttestationPool, self.batchCrypto, + payload_attestation_message, wallTime, checkSignature) + + if v.isErr(): + debug "Dropping payload attestation", reason = $v.error + return err(v.error()) + + discard self.payloadAttestationPool[].addPayloadAttestation( + payload_attestation_message, wallTime) + + trace "Payload attestation validated" + return ok() diff --git a/beacon_chain/gossip_processing/gossip_validation.nim b/beacon_chain/gossip_processing/gossip_validation.nim index b6450201c4..d7ad6f718e 100644 --- a/beacon_chain/gossip_processing/gossip_validation.nim +++ b/beacon_chain/gossip_processing/gossip_validation.nim @@ -19,8 +19,8 @@ import ../consensus_object_pools/[ attestation_pool, blockchain_dag, blob_quarantine, block_clearance, block_quarantine, envelope_quarantine, execution_payload_pool, - light_client_pool, spec_cache, sync_committee_msg_pool, - validator_change_pool], 
+ light_client_pool, payload_attestation_pool, spec_cache, + sync_committee_msg_pool, validator_change_pool], ".."/[beacon_clock], ./batch_validation @@ -2183,7 +2183,7 @@ proc validateExecutionPayloadBid*( signed_execution_payload_bid: SignedExecutionPayloadBid, wallTime: BeaconTime): Result[void, ValidationError] = template bid: untyped = signed_execution_payload_bid.message - + withState(dag.headState): when consensusFork >= ConsensusFork.Gloas: # [REJECT] bid.builder_index is a valid, active, and non-slashed builder index @@ -2214,7 +2214,7 @@ proc validateExecutionPayloadBid*( if existingBid.isSome(): return errIgnore( "ExecutionPayloadBid: already seen bid from this builder for this slot") - + # [IGNORE] This bid is the highest value bid seen for the corresponding # slot and the given parent block hash let highestBid = executionPayloadBidPool[].getHighestBidForSlotAndParent( @@ -2225,7 +2225,7 @@ proc validateExecutionPayloadBid*( # [IGNORE] bid.value is less or equal than the builder's excess balance # i.e.
MIN_ACTIVATION_BALANCE + bid.value <= state.balances[bid.builder_index] - if forkyState.data.balances.item(bid.builder_index) < + if forkyState.data.balances.item(bid.builder_index) < MIN_ACTIVATION_BALANCE.Gwei + bid.value: return errIgnore( "ExecutionPayloadBid: insufficient builder balance") @@ -2238,7 +2238,7 @@ proc validateExecutionPayloadBid*( try: let parentExecHash = dag.loadExecutionBlockHash(parentBlck).valueOr: return errIgnore("Bid: parent has no execution payload") - + # Verify the bid references the correct execution payload if parentExecHash != bid.parent_block_hash: return dag.checkedReject( @@ -2251,7 +2251,7 @@ proc validateExecutionPayloadBid*( if dag.getBlockRef(bid.parent_block_root).isNone(): return errIgnore( "ExecutionPayloadBid: parent block root not found in fork choice") - + # [IGNORE] bid.slot is the current slot or the next slot let currentSlot = wallTime.slotOrZero(dag.timeParams) if bid.slot != currentSlot and bid.slot != currentSlot + 1: @@ -2278,3 +2278,91 @@ proc validateExecutionPayloadBid*( "ExecutionPayloadBid: only valid for Gloas fork or later") ok() + +# https://github.com/ethereum/consensus-specs/blob/v1.6.1/specs/gloas/p2p-interface.md#payload_attestation_message +proc validatePayloadAttestationMessage*( + dag: ChainDAGRef, + payloadAttestationPool: ref PayloadAttestationPool, + batchCrypto: ref BatchCrypto, + payload_attestation_message: PayloadAttestationMessage, + wallTime: BeaconTime, + checkSignature: bool = true +): Future[Result[ + void, ValidationError]] {.async: (raises: [CancelledError]).} = + template data: untyped = payload_attestation_message.data + + # [IGNORE] The message's slot is for the current slot (with a + # `MAXIMUM_GOSSIP_CLOCK_DISPARITY` allowance), i.e `data.slot == current_slot`. 
+ block: + let v = dag.timeParams.check_slot_exact(data.slot, wallTime) + if v.isErr(): + return err(v.error()) + + # [IGNORE] The `payload_attestation_message` is the first valid message + # received from the validator with index `payload_attestation_message.validator_index`. + let entry = payloadAttestationPool[].attestations + .getOrDefault(data.slot) + .getOrDefault(data.beacon_block_root) + + if ValidatorIndex(payload_attestation_message.validator_index) in + entry.messages: + return errIgnore("PayloadAttestationMessage: duplicate message from validator") + + # [IGNORE] The message's block `data.beacon_block_root` has been seen (via + # gossip or non-gossip sources) + let blck = dag.getBlockRef(data.beacon_block_root).valueOr: + return errIgnore("PayloadAttestationMessage: block not found") + + # [REJECT] The message's block `data.beacon_block_root` passes validation. + # Should have been validated by getBlockRef above + + # [REJECT] The message's validator index is within the payload committee in + # `get_ptc(state, data.slot)`. The `state` is the head state corresponding to + # processing the block up to the current slot as determined by fork choice + withState(dag.headState): + when consensusFork >= ConsensusFork.Gloas: + var cache: StateCache + let vidx = ValidatorIndex(payload_attestation_message.validator_index) + + var present = false + for idx in get_ptc(forkyState.data, data.slot, cache): + if idx == vidx: + present = true + break + + if not present: + return dag.checkedReject( + "PayloadAttestationMessage: validator not in ptc") + else: + return dag.checkedReject( + "PayloadAttestationMessage: only valid for Gloas fork") + + # [REJECT] `payload_attestation_message.signature` is valid with respect + # to the validator's public key.
+ if checkSignature: + let + validator_index = ValidatorIndex(payload_attestation_message.validator_index) + senderPubKey = dag.validatorKey(validator_index).valueOr: + return dag.checkedReject( + "PayloadAttestationMessage: invalid validator index") + fork = dag.forkAtEpoch(data.slot.epoch) + + let deferredCrypto = batchCrypto.schedulePayloadAttestationCheck( + fork, dag.genesis_validators_root, payload_attestation_message, + senderPubKey, payload_attestation_message.signature) + if deferredCrypto.isErr(): + return dag.checkedReject(deferredCrypto.error) + + let (cryptoFut, sig) = deferredCrypto.get() + let x = await cryptoFut + case x + of BatchResult.Invalid: + return dag.checkedReject( + "PayloadAttestationMessage: invalid signature") + of BatchResult.Timeout: + return errIgnore( + "PayloadAttestationMessage: timeout checking signature") + of BatchResult.Valid: + discard + + ok() diff --git a/beacon_chain/networking/eth2_network.nim b/beacon_chain/networking/eth2_network.nim index 531ce04848..f73f2fa889 100644 --- a/beacon_chain/networking/eth2_network.nim +++ b/beacon_chain/networking/eth2_network.nim @@ -2870,3 +2870,12 @@ proc broadcastLightClientOptimisticUpdate*( let topic = getLightClientOptimisticUpdateTopic( node.forkDigestAtEpoch(msg.contextEpoch)) node.broadcast(topic, msg) + +proc broadcastPayloadAttestationMessage*( + node: Eth2Node, msg: PayloadAttestationMessage): + Future[SendResult] {.async: (raises: [CancelledError], raw: true).} = + let + contextEpoch = msg.data.slot.epoch + topic = getPayloadAttestationMessageTopic( + node.forkDigestAtEpoch(contextEpoch)) + node.broadcast(topic, msg) diff --git a/beacon_chain/nimbus_beacon_node.nim b/beacon_chain/nimbus_beacon_node.nim index f716be8505..18bb572e0d 100644 --- a/beacon_chain/nimbus_beacon_node.nim +++ b/beacon_chain/nimbus_beacon_node.nim @@ -17,7 +17,8 @@ import eth/enr/enr, eth/p2p/discoveryv5/random2, ./consensus_object_pools/[
+ blob_quarantine, blockchain_list, execution_payload_pool, + payload_attestation_pool], ./consensus_object_pools/vanity_logs/vanity_logs, ./networking/[topic_params, network_metadata_downloads], ./rpc/[rest_api, state_ttl_cache], @@ -414,6 +415,7 @@ proc initFullNode( onProposerSlashingAdded, onPhase0AttesterSlashingAdded, onElectraAttesterSlashingAdded)) executionPayloadBidPool = newClone(ExecutionPayloadBidPool.init(dag)) + payloadAttestationPool = newClone(PayloadAttestationPool.init(dag)) blobQuarantine = newClone(BlobQuarantine.init( dag.cfg, dag.db.getQuarantineDB(), 10, onBlobSidecarAdded)) supernode = node.config.peerdasSupernode or node.config.debugPeerdasSupernode @@ -541,8 +543,9 @@ proc initFullNode( config.doppelgangerDetection, blockProcessor, node.validatorMonitor, dag, attestationPool, validatorChangePool, node.attachedValidators, syncCommitteeMsgPool, - lightClientPool, executionPayloadBidPool, quarantine, blobQuarantine, dataColumnQuarantine, - rng, getBeaconTime, taskpool) + lightClientPool, executionPayloadBidPool, payloadAttestationPool, + quarantine, blobQuarantine, dataColumnQuarantine, rng, + getBeaconTime, taskpool) syncManagerFlags = if node.config.longRangeSync != LongRangeSyncMode.Lenient: {SyncManagerFlag.NoGenesisSync} @@ -657,6 +660,8 @@ proc initFullNode( node.lightClientPool = lightClientPool node.validatorChangePool = validatorChangePool node.processor = processor + node.executionPayloadBidPool = executionPayloadBidPool + node.payloadAttestationPool = payloadAttestationPool node.batchVerifier = batchVerifier node.blockProcessor = blockProcessor node.consensusManager = consensusManager @@ -2378,6 +2383,20 @@ proc installMessageValidators(node: BeaconNode) = node.processor[].processExecutionPayloadBid( MsgSource.gossip, signedBid))) + # payload_attestation_message + # https://github.com/ethereum/consensus-specs/blob/v1.6.1/specs/gloas/p2p-interface.md#payload_attestation_message + when consensusFork >= ConsensusFork.Gloas: + 
node.network.addAsyncValidator( + getPayloadAttestationMessageTopic(digest), proc ( + payloadAttestationMessage: PayloadAttestationMessage, + src: PeerId + ): Future[ValidationResult] {. + async: (raises: [CancelledError]).} = + return toValidationResult( + await node.processor.processPayloadAttestationMessage( + MsgSource.gossip, payloadAttestationMessage, + checkSignature = true, checkValidator = false))) + # beacon_attestation_{subnet_id} # https://github.com/ethereum/consensus-specs/blob/v1.4.0-beta.5/specs/phase0/p2p-interface.md#beacon_attestation_subnet_id # https://github.com/ethereum/consensus-specs/blob/v1.6.0-beta.0/specs/gloas/p2p-interface.md#beacon_attestation_subnet_id diff --git a/beacon_chain/spec/beaconstate.nim b/beacon_chain/spec/beaconstate.nim index b35451f615..cf14a5ab89 100644 --- a/beacon_chain/spec/beaconstate.nim +++ b/beacon_chain/spec/beaconstate.nim @@ -2762,6 +2762,8 @@ func can_advance_slots*( state: ForkedHashedBeaconState, block_root: Eth2Digest, target_slot: Slot): bool = withState(state): forkyState.can_advance_slots(block_root, target_slot) +# {.closure.} prevents stack overflow from inline expansion. 
+# See: https://github.com/nim-lang/Nim/issues/25287 # https://github.com/ethereum/consensus-specs/blob/v1.6.0-alpha.6/specs/gloas/beacon-chain.md#new-get_ptc iterator get_ptc*(state: gloas.BeaconState, slot: Slot, cache: var StateCache): ValidatorIndex {.closure.} = diff --git a/beacon_chain/spec/datatypes/gloas.nim b/beacon_chain/spec/datatypes/gloas.nim index 2b1b91a31a..3699c8742b 100644 --- a/beacon_chain/spec/datatypes/gloas.nim +++ b/beacon_chain/spec/datatypes/gloas.nim @@ -638,6 +638,21 @@ func shortLog*(v: ExecutionPayloadEnvelope): auto = state_root: shortLog(v.state_root) ) +func shortLog*(v: PayloadAttestationData): auto = + ( + beacon_block_root: shortLog(v.beacon_block_root), + slot: v.slot, + payload_present: v.payload_present, + blob_data_available: v.blob_data_available + ) + +func shortLog*(v: PayloadAttestationMessage): auto = + ( + validator_index: v.validator_index, + data: shortLog(v.data), + signature: shortLog(v.signature) + ) + template asSigned*( x: SigVerifiedSignedBeaconBlock | TrustedSignedBeaconBlock): SignedBeaconBlock = diff --git a/beacon_chain/spec/signatures_batch.nim b/beacon_chain/spec/signatures_batch.nim index 65b4dc07ae..9ac739d235 100644 --- a/beacon_chain/spec/signatures_batch.nim +++ b/beacon_chain/spec/signatures_batch.nim @@ -179,6 +179,15 @@ func attestation_signature_set*( SignatureSet.init(pubkey, signing_root, signature) +func payload_attestation_signature_set*( + fork: Fork, genesis_validators_root: Eth2Digest, + payload_attestation_message: PayloadAttestationMessage, + pubkey: CookedPubKey, signature: CookedSig): SignatureSet = + let signing_root = compute_payload_attestation_message_signing_root( + fork, genesis_validators_root, payload_attestation_message) + + SignatureSet.init(pubkey, signing_root, signature) + # See also: verify_voluntary_exit_signature func voluntary_exit_signature_set*( fork: Fork, genesis_validators_root: Eth2Digest, diff --git a/beacon_chain/validators/message_router.nim 
b/beacon_chain/validators/message_router.nim index 8f244f4d82..2201f9a94f 100644 --- a/beacon_chain/validators/message_router.nim +++ b/beacon_chain/validators/message_router.nim @@ -599,3 +599,34 @@ proc routeBlsToExecutionChange*( error = res.error() return ok() + +proc routePayloadAttestationMessage*( + router: ref MessageRouter, + message: PayloadAttestationMessage, + checkSignature, checkValidator: bool = true): + Future[SendResult] {.async: (raises: [CancelledError]).} = + block: + let res = await router.processor.processPayloadAttestationMessage( + MsgSource.api, message, checkSignature = checkSignature, + checkValidator = checkValidator) + + if not res.isGoodForSending: + warn "Payload attestation failed validation", + message = shortLog(message), error = res.error() + return err(res.error()[1]) + + let + sendTime = router[].processor.getCurrentBeaconTime() + slot = message.data.slot + delay = sendTime - + slot.payload_attestation_deadline(router[].dag.timeParams) + res = await router[].network.broadcastPayloadAttestationMessage(message) + + if res.isOk(): + info "Payload attestation sent", + message = shortLog(message), delay + else: + notice "Payload attestation not sent", + message = shortLog(message), error = res.error() + + return ok() diff --git a/tests/all_tests.nim b/tests/all_tests.nim index 8d3c4faed9..9ea12f643b 100644 --- a/tests/all_tests.nim +++ b/tests/all_tests.nim @@ -43,6 +43,7 @@ import # Unit test ./test_light_client_processor, ./test_light_client, ./test_network_metadata, + ./test_payload_attestation_pool, ./test_peer_pool, ./test_peerdas_helpers, ./test_remote_keystore, diff --git a/tests/test_payload_attestation_pool.nim b/tests/test_payload_attestation_pool.nim new file mode 100644 index 0000000000..a16c7b2b1b --- /dev/null +++ b/tests/test_payload_attestation_pool.nim @@ -0,0 +1,291 @@ +# beacon_chain +# Copyright (c) 2025 Status Research & Development GmbH +# Licensed and distributed under either of +# * MIT license (license terms 
in the root directory or at https://opensource.org/licenses/MIT). +# * Apache v2 license (license terms in the root directory or at https://www.apache.org/licenses/LICENSE-2.0). +# at your option. This file may not be copied, modified, or distributed except according to those terms. + +{.push raises: [], gcsafe.} +{.used.} + +import + # Status libraries + unittest2, + chronicles, + # Internal + ../beacon_chain/consensus_object_pools/[ + blockchain_dag, payload_attestation_pool, spec_cache], + ../beacon_chain/spec/[ + forks, helpers, signatures, state_transition], + ../beacon_chain/beacon_clock, + # Test utilities + ./testutil, ./testdbutil, ./testblockutil, ./consensus_spec/fixtures_utils + +from ../beacon_chain/spec/beaconstate import get_ptc + +proc makePayloadAttestationMessage( + state: gloas.HashedBeaconState, + beacon_block_root: Eth2Digest, + validator_index: ValidatorIndex, + privkey: ValidatorPrivKey, + cache: var StateCache, + payload_present: bool = true, + blob_data_available: bool = true + ): PayloadAttestationMessage = + + let + slot = state.data.slot + fork = Fork( + previous_version: state.data.fork.current_version, + current_version: state.data.fork.current_version, + epoch: state.data.slot.epoch) + genesis_validators_root = state.data.genesis_validators_root + + data = PayloadAttestationData( + beacon_block_root: beacon_block_root, + slot: slot, + payload_present: payload_present, + blob_data_available: blob_data_available) + + domain = get_domain( + fork, DOMAIN_PTC_ATTESTER, slot.epoch(), genesis_validators_root) + signing_root = compute_signing_root(data, domain) + signature = blsSign(privkey, signing_root.data) + + PayloadAttestationMessage( + validator_index: validator_index.uint64, + data: data, + signature: signature.toValidatorSig()) + +suite "Payload attestation pool" & preset(): + setup: + # Genesis state that results in 512 members in a committee + const TOTAL_COMMITTEES = 1 + var + cfg = genesisTestRuntimeConfig(ConsensusFork.Gloas) + 
validatorMonitor = newClone(ValidatorMonitor.init(cfg)) + dag = init( + ChainDAGRef, cfg, + cfg.makeTestDB( + TOTAL_COMMITTEES * PTC_SIZE), + validatorMonitor, {}) + pool = newClone(PayloadAttestationPool.init(dag)) + state = newClone(dag.headState) + cache = StateCache() + info = ForkedEpochInfo() + check: + process_slots( + dag.cfg, + state[], + getStateField(state[], slot) + 1, + cache, + info, + {}).isOk() + + test "Can add and retrieve payload attestations" & preset(): + let + slot = getStateField(state[], slot) + beacon_block_root = + withState(state[]): hash_tree_root(forkyState.data.latest_block_header) + wallTime = slot.start_beacon_time(dag.cfg.timeParams) + + withState(state[]): + when consensusFork >= ConsensusFork.Gloas: + var ptc_member: ValidatorIndex + var found = false + for validator_index in get_ptc(forkyState.data, slot, cache): + ptc_member = validator_index + found = true + break + + check found + + let + privkey = MockPrivKeys[ptc_member] + message = makePayloadAttestationMessage( + forkyState, beacon_block_root, ptc_member, privkey, cache) + + check pool[].addPayloadAttestation(message, wallTime) + + # Should not be able to add the same attestation twice + check not pool[].addPayloadAttestation(message, wallTime) + + let aggregated = pool[].getAggregatedPayloadAttestation( + slot, beacon_block_root, cache) + + check aggregated.isSome() + check aggregated.get().data == message.data + check aggregated.get().aggregation_bits.countOnes() > 0 + + test "Multiple validators in PTC can attest" & preset(): + let + slot = getStateField(state[], slot) + beacon_block_root = + withState(state[]): hash_tree_root(forkyState.data.latest_block_header) + wallTime = slot.start_beacon_time(dag.cfg.timeParams) + + withState(state[]): + when consensusFork >= ConsensusFork.Gloas: + var messages: seq[PayloadAttestationMessage] + var ptc_members: seq[ValidatorIndex] + + for validator_index in get_ptc(forkyState.data, slot, cache): + if ptc_members.len >= 3: + break 
+ ptc_members.add(validator_index) + + check ptc_members.len >= 3 + + for ptc_member in ptc_members: + let + privkey = MockPrivKeys[ptc_member] + message = makePayloadAttestationMessage( + forkyState, beacon_block_root, ptc_member, privkey, cache) + messages.add(message) + check pool[].addPayloadAttestation(message, wallTime) + + let aggregated = pool[].getAggregatedPayloadAttestation( + slot, beacon_block_root, cache) + check aggregated.isSome() + check aggregated.get().aggregation_bits.countOnes() >= ptc_members.len + + test "Duplicate validator in PTC - multiple signatures" & preset(): + let + slot = getStateField(state[], slot) + beacon_block_root = + withState(state[]): hash_tree_root(forkyState.data.latest_block_header) + wallTime = slot.start_beacon_time(dag.cfg.timeParams) + + withState(state[]): + when consensusFork >= ConsensusFork.Gloas: + var + validator_positions: Table[ValidatorIndex, seq[int]] + ptc_index = 0 + + for validator_index in get_ptc(forkyState.data, slot, cache): + if validator_index notin validator_positions: + validator_positions[validator_index] = @[] + validator_positions[validator_index].add(ptc_index) + ptc_index += 1 + + var + multi_position_validator: ValidatorIndex + positions: seq[int] + found = false + + for validator_index, position_list in validator_positions: + if position_list.len > 1: + multi_position_validator = validator_index + positions = position_list + found = true + break + + if found: + let + privkey = MockPrivKeys[multi_position_validator] + message = makePayloadAttestationMessage( + forkyState, beacon_block_root, + multi_position_validator, privkey, cache) + + check pool[].addPayloadAttestation(message, wallTime) + + let aggregated = + pool[].getAggregatedPayloadAttestation( + slot, beacon_block_root, cache) + check aggregated.isSome() + + # Check that all positions are set in aggregation bits + for pos in positions: + check aggregated.get().aggregation_bits[pos] + + test "Can get payload attestations for block 
production" & preset(): + let + slot = getStateField(state[], slot) + beacon_block_root = + withState(state[]): hash_tree_root(forkyState.data.latest_block_header) + wallTime = slot.start_beacon_time(dag.cfg.timeParams) + target_slot = slot + 1 + + withState(state[]): + when consensusFork >= ConsensusFork.Gloas: + var added_count = 0 + for validator_index in get_ptc(forkyState.data, slot, cache): + if added_count >= 2: + break + let + privkey = MockPrivKeys[validator_index] + message = makePayloadAttestationMessage( + forkyState, beacon_block_root, validator_index, privkey, cache) + check pool[].addPayloadAttestation(message, wallTime) + added_count += 1 + + let attestations = + pool[].getPayloadAttestationsForBlock(target_slot, cache) + check attestations.len > 0 + check attestations[0].data.slot == slot + + test "Payload attestations get pruned" & preset(): + let + slot = getStateField(state[], slot) + beacon_block_root = + withState(state[]): hash_tree_root(forkyState.data.latest_block_header) + wallTime = slot.start_beacon_time(dag.cfg.timeParams) + future_time = (slot + 5).start_beacon_time(dag.cfg.timeParams) + + withState(state[]): + when consensusFork >= ConsensusFork.Gloas: + var ptc_member: ValidatorIndex + for validator_index in get_ptc(forkyState.data, slot, cache): + ptc_member = validator_index + break + + let + privkey = MockPrivKeys[ptc_member] + message = makePayloadAttestationMessage( + forkyState, beacon_block_root, ptc_member, privkey, cache) + + # Add attestation + check pool[].addPayloadAttestation(message, wallTime) + + # Add another attestation at a future time (should trigger pruning) + check pool[].addPayloadAttestation(message, future_time) + + # Old attestation should no longer be retrievable + let attestations = + pool[].getPayloadAttestationsForBlock(slot + 6, cache) + check attestations.len == 0 + + test "Different payload presence values" & preset(): + let + slot = getStateField(state[], slot) + beacon_block_root = + 
withState(state[]): hash_tree_root(forkyState.data.latest_block_header) + wallTime = slot.start_beacon_time(dag.cfg.timeParams) + + withState(state[]): + when consensusFork >= ConsensusFork.Gloas: + var ptc_members: seq[ValidatorIndex] + for validator_index in get_ptc(forkyState.data, slot, cache): + if ptc_members.len >= 2: + break + ptc_members.add(validator_index) + + check ptc_members.len >= 2 + + let + message1 = makePayloadAttestationMessage( + forkyState, beacon_block_root, ptc_members[0], + MockPrivKeys[ptc_members[0]], cache, + payload_present = true, blob_data_available = true) + message2 = makePayloadAttestationMessage( + forkyState, beacon_block_root, ptc_members[1], + MockPrivKeys[ptc_members[1]], cache, + payload_present = false, blob_data_available = false) + + check pool[].addPayloadAttestation(message1, wallTime) + check pool[].addPayloadAttestation(message2, wallTime) + + let + agg1 = pool[].getAggregatedPayloadAttestation( + slot, beacon_block_root, cache) + check agg1.isSome()