hotshot_testing/byzantine/
byzantine_behaviour.rs

1use std::{
2    collections::{BTreeMap, HashMap, HashSet},
3    iter::once,
4    sync::Arc,
5};
6
7use anyhow::Context;
8use async_lock::RwLock;
9use async_trait::async_trait;
10use hotshot::{
11    tasks::EventTransformerState,
12    types::{SignatureKey, SystemContextHandle},
13};
14use hotshot_task_impls::{
15    events::HotShotEvent,
16    network::{
17        NetworkEventTaskState,
18        test::{ModifierClosure, NetworkEventTaskStateModifier},
19    },
20};
21use hotshot_types::{
22    consensus::OuterConsensus,
23    data::{EpochNumber, QuorumProposalWrapper, ViewNumber},
24    epoch_membership::EpochMembershipCoordinator,
25    message::{
26        GeneralConsensusMessage, Message, MessageKind, Proposal, SequencingMessage, UpgradeLock,
27        convert_proposal,
28    },
29    simple_vote::{
30        HasEpoch, QuorumVote2, ViewSyncPreCommitData, ViewSyncPreCommitData2,
31        ViewSyncPreCommitVote, ViewSyncPreCommitVote2,
32    },
33    traits::{
34        election::Membership,
35        network::ConnectedNetwork,
36        node_implementation::{NodeImplementation, NodeType},
37    },
38    vote::HasViewNumber,
39};
40
#[derive(Debug)]
/// An `EventTransformerState` that multiplies `QuorumProposalSend` events, incrementing the view number of the proposal
pub struct BadProposalViewDos {
    /// The number of times to duplicate a `QuorumProposalSend` event
    pub multiplier: u64,
    /// The view number increment each time it's duplicated
    pub increment: u64,
}
49
50#[async_trait]
51impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
52    for BadProposalViewDos
53{
54    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
55        vec![event.clone()]
56    }
57
58    async fn send_handler(
59        &mut self,
60        event: &HotShotEvent<TYPES>,
61        _public_key: &TYPES::SignatureKey,
62        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
63        _upgrade_lock: &UpgradeLock<TYPES>,
64        consensus: OuterConsensus<TYPES>,
65        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
66        _network: Arc<I::Network>,
67    ) -> Vec<HotShotEvent<TYPES>> {
68        match event {
69            HotShotEvent::QuorumProposalSend(proposal, signature) => {
70                let mut result = Vec::new();
71
72                for n in 1..self.multiplier {
73                    let mut modified_proposal = proposal.clone();
74
75                    modified_proposal.data.proposal.view_number += n * self.increment;
76
77                    result.push(HotShotEvent::QuorumProposalSend(
78                        modified_proposal,
79                        signature.clone(),
80                    ));
81                }
82
83                consensus.write().await.reset_actions();
84                result
85            },
86            _ => vec![event.clone()],
87        }
88    }
89}
90
#[derive(Debug)]
/// An `EventHandlerState` that doubles the `QuorumVoteSend` and `QuorumProposalSend` events
/// (each such event is emitted twice; carries no configuration of its own)
pub struct DoubleProposeVote;
94
95#[async_trait]
96impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
97    for DoubleProposeVote
98{
99    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
100        vec![event.clone()]
101    }
102
103    async fn send_handler(
104        &mut self,
105        event: &HotShotEvent<TYPES>,
106        _public_key: &TYPES::SignatureKey,
107        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
108        _upgrade_lock: &UpgradeLock<TYPES>,
109        _consensus: OuterConsensus<TYPES>,
110        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
111        _network: Arc<I::Network>,
112    ) -> Vec<HotShotEvent<TYPES>> {
113        match event {
114            HotShotEvent::QuorumProposalSend(..) | HotShotEvent::QuorumVoteSend(_) => {
115                vec![event.clone(), event.clone()]
116            },
117            _ => vec![event.clone()],
118        }
119    }
120}
121
#[derive(Debug)]
/// An `EventHandlerState` that modifies justify_qc on `QuorumProposalSend` to that of a previous view to mock dishonest leader
pub struct DishonestLeader<TYPES: NodeType> {
    /// Proposals validated in previous views, used as the source of stale QCs
    pub validated_proposals: Vec<QuorumProposalWrapper<TYPES>>,
    /// How many times current node has been elected leader and sent proposal
    pub total_proposals_from_node: u64,
    /// Which proposal counts (values of `total_proposals_from_node`) to be dishonest at
    pub dishonest_at_proposal_numbers: HashSet<u64>,
    /// How far back to look for a QC (clamped to the oldest cached proposal)
    pub view_look_back: usize,
    /// Shared state of all view numbers we send bad proposal at
    /// (used to coordinate attacks with other byzantine replicas)
    pub dishonest_proposal_view_numbers: Arc<RwLock<HashSet<ViewNumber>>>,
}
136
137/// Add method that will handle `QuorumProposalSend` events
138/// If we have previous proposals stored and the total_proposals_from_node matches a value specified in dishonest_at_proposal_numbers
139/// Then send out the event with the modified proposal that has an older QC
140impl<TYPES: NodeType> DishonestLeader<TYPES> {
141    /// When a leader is sending a proposal this method will mock a dishonest leader
142    /// We accomplish this by looking back a number of specified views and using that cached proposals QC
143    async fn handle_proposal_send_event(
144        &self,
145        event: &HotShotEvent<TYPES>,
146        proposal: &Proposal<TYPES, QuorumProposalWrapper<TYPES>>,
147        sender: &TYPES::SignatureKey,
148    ) -> HotShotEvent<TYPES> {
149        let length = self.validated_proposals.len();
150        if !self
151            .dishonest_at_proposal_numbers
152            .contains(&self.total_proposals_from_node)
153            || length == 0
154        {
155            return event.clone();
156        }
157
158        // Grab proposal from specified view look back
159        let proposal_from_look_back = if length - 1 < self.view_look_back {
160            // If look back is too far just take the first proposal
161            self.validated_proposals[0].clone()
162        } else {
163            let index = (self.validated_proposals.len() - 1) - self.view_look_back;
164            self.validated_proposals[index].clone()
165        };
166
167        // Create a dishonest proposal by using the old proposals qc
168        let mut dishonest_proposal = proposal.clone();
169        dishonest_proposal.data.proposal.justify_qc = proposal_from_look_back.proposal.justify_qc;
170
171        // Save the view we sent the dishonest proposal on (used for coordination attacks with other byzantine replicas)
172        let mut dishonest_proposal_sent = self.dishonest_proposal_view_numbers.write().await;
173        dishonest_proposal_sent.insert(proposal.data.view_number());
174
175        HotShotEvent::QuorumProposalSend(dishonest_proposal, sender.clone())
176    }
177}
178
179#[async_trait]
180impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
181    EventTransformerState<TYPES, I> for DishonestLeader<TYPES>
182{
183    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
184        vec![event.clone()]
185    }
186
187    async fn send_handler(
188        &mut self,
189        event: &HotShotEvent<TYPES>,
190        _public_key: &TYPES::SignatureKey,
191        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
192        _upgrade_lock: &UpgradeLock<TYPES>,
193        _consensus: OuterConsensus<TYPES>,
194        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
195        _network: Arc<I::Network>,
196    ) -> Vec<HotShotEvent<TYPES>> {
197        match event {
198            HotShotEvent::QuorumProposalSend(proposal, sender) => {
199                self.total_proposals_from_node += 1;
200                return vec![
201                    self.handle_proposal_send_event(event, proposal, sender)
202                        .await,
203                ];
204            },
205            HotShotEvent::QuorumProposalValidated(proposal, _) => {
206                self.validated_proposals.push(proposal.data.clone());
207            },
208            _ => {},
209        }
210        vec![event.clone()]
211    }
212}
213
#[derive(Debug)]
/// An `EventHandlerState` that modifies view number on the certificate of `DacSend` event to that of a future view
pub struct DishonestDa {
    /// How many times current node has been elected leader and sent Da Cert
    pub total_da_certs_sent_from_node: u64,
    /// Which DA-cert send counts (values of `total_da_certs_sent_from_node`) to be dishonest at
    pub dishonest_at_da_cert_sent_numbers: HashSet<u64>,
    /// When leader how many extra `DacSend` events to emit, each with an incremented view number
    pub total_views_add_to_cert: u64,
}
224
225#[async_trait]
226impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
227    EventTransformerState<TYPES, I> for DishonestDa
228{
229    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
230        vec![event.clone()]
231    }
232
233    async fn send_handler(
234        &mut self,
235        event: &HotShotEvent<TYPES>,
236        _public_key: &TYPES::SignatureKey,
237        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
238        _upgrade_lock: &UpgradeLock<TYPES>,
239        _consensus: OuterConsensus<TYPES>,
240        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
241        _network: Arc<I::Network>,
242    ) -> Vec<HotShotEvent<TYPES>> {
243        if let HotShotEvent::DacSend(cert, sender) = event {
244            self.total_da_certs_sent_from_node += 1;
245            if self
246                .dishonest_at_da_cert_sent_numbers
247                .contains(&self.total_da_certs_sent_from_node)
248            {
249                let mut result = vec![HotShotEvent::DacSend(cert.clone(), sender.clone())];
250                for i in 1..=self.total_views_add_to_cert {
251                    let mut bad_cert = cert.clone();
252                    bad_cert.view_number = cert.view_number + i;
253                    result.push(HotShotEvent::DacSend(bad_cert, sender.clone()));
254                }
255                return result;
256            }
257        }
258        vec![event.clone()]
259    }
260}
261
/// View delay configuration
#[derive(Debug)]
pub struct ViewDelay<TYPES: NodeType> {
    /// How many views the node will be delayed
    pub number_of_views_to_delay: u64,
    /// Buffer of received events keyed by view number; events are held here
    /// until the node is `number_of_views_to_delay` views past them
    pub events_for_view: HashMap<ViewNumber, Vec<HotShotEvent<TYPES>>>,
    /// Specify which view number to stop delaying
    pub stop_view_delay_at_view_number: u64,
}
272
273#[async_trait]
274impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
275    EventTransformerState<TYPES, I> for ViewDelay<TYPES>
276{
277    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
278        let correct_event = vec![event.clone()];
279        if let Some(view_number) = event.view_number() {
280            if *view_number >= self.stop_view_delay_at_view_number {
281                return correct_event;
282            }
283
284            // add current view or push event to the map if view number has been added
285            let events_for_current_view = self.events_for_view.entry(view_number).or_default();
286            events_for_current_view.push(event.clone());
287
288            // ensure we are actually able to lookback enough views
289            let view_diff = (*view_number).saturating_sub(self.number_of_views_to_delay);
290            if view_diff > 0 {
291                return match self.events_for_view.remove(&ViewNumber::new(view_diff)) {
292                    Some(lookback_events) => lookback_events.clone(),
293                    // we have already return all received events for this view
294                    None => vec![],
295                };
296            }
297        }
298
299        correct_event
300    }
301
302    async fn send_handler(
303        &mut self,
304        event: &HotShotEvent<TYPES>,
305        _public_key: &TYPES::SignatureKey,
306        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
307        _upgrade_lock: &UpgradeLock<TYPES>,
308        _consensus: OuterConsensus<TYPES>,
309        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
310        _network: Arc<I::Network>,
311    ) -> Vec<HotShotEvent<TYPES>> {
312        vec![event.clone()]
313    }
314}
315
/// An `EventHandlerState` that modifies view number on the vote of `QuorumVoteSend` event to that of a future view and correctly signs the vote
pub struct DishonestVoting<TYPES: NodeType> {
    /// Number added to the original vote's view number
    pub view_increment: u64,
    /// A function passed to `NetworkEventTaskStateModifier` to modify `NetworkEventTaskState` behaviour.
    /// (Not `Debug`, hence the manual `Debug` impl below.)
    pub modifier: Arc<ModifierClosure<TYPES>>,
}
323
324#[async_trait]
325impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
326    EventTransformerState<TYPES, I> for DishonestVoting<TYPES>
327{
328    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
329        vec![event.clone()]
330    }
331
332    async fn send_handler(
333        &mut self,
334        event: &HotShotEvent<TYPES>,
335        public_key: &TYPES::SignatureKey,
336        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
337        upgrade_lock: &UpgradeLock<TYPES>,
338        _consensus: OuterConsensus<TYPES>,
339        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
340        _network: Arc<I::Network>,
341    ) -> Vec<HotShotEvent<TYPES>> {
342        if let HotShotEvent::QuorumVoteSend(vote) = event {
343            let new_view = vote.view_number + self.view_increment;
344            let spoofed_vote = QuorumVote2::<TYPES>::create_signed_vote(
345                vote.data.clone(),
346                new_view,
347                public_key,
348                private_key,
349                upgrade_lock,
350            )
351            .context("Failed to sign vote")
352            .unwrap();
353            tracing::debug!("Sending Quorum Vote for view: {new_view:?}");
354            return vec![HotShotEvent::QuorumVoteSend(spoofed_vote)];
355        }
356        vec![event.clone()]
357    }
358
359    fn add_network_event_task(
360        &self,
361        handle: &mut SystemContextHandle<TYPES, I>,
362        network: Arc<<I as NodeImplementation<TYPES>>::Network>,
363    ) {
364        let network_state: NetworkEventTaskState<_, _, _> = NetworkEventTaskState {
365            network,
366            view: ViewNumber::genesis(),
367            epoch: None,
368            membership_coordinator: handle.membership_coordinator.clone(),
369            storage: handle.storage(),
370            storage_metrics: handle.storage_metrics(),
371            consensus: OuterConsensus::new(handle.consensus()),
372            upgrade_lock: handle.hotshot.upgrade_lock.clone(),
373            transmit_tasks: BTreeMap::new(),
374            epoch_height: handle.epoch_height,
375            id: handle.hotshot.id,
376        };
377        let modified_network_state = NetworkEventTaskStateModifier {
378            network_event_task_state: network_state,
379            modifier: Arc::clone(&self.modifier),
380        };
381        handle.add_task(modified_network_state);
382    }
383}
384
/// Manual `Debug` impl: `modifier` is a closure and has no `Debug`, so only
/// `view_increment` is shown and the rest is elided via `finish_non_exhaustive`.
impl<TYPES: NodeType> std::fmt::Debug for DishonestVoting<TYPES> {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("DishonestVoting")
            .field("view_increment", &self.view_increment)
            .finish_non_exhaustive()
    }
}
392
#[derive(Debug)]
/// An `EventHandlerState` that will send a vote for a bad proposal
pub struct DishonestVoter<TYPES: NodeType> {
    /// Collect all votes the node sends (the most recent one is reused as the
    /// payload when voting for a dishonest proposal)
    pub votes_sent: Vec<QuorumVote2<TYPES>>,
    /// Shared state with views numbers that leaders were dishonest at
    pub dishonest_proposal_view_numbers: Arc<RwLock<HashSet<ViewNumber>>>,
}
401
402#[async_trait]
403impl<TYPES: NodeType, I: NodeImplementation<TYPES> + std::fmt::Debug>
404    EventTransformerState<TYPES, I> for DishonestVoter<TYPES>
405{
406    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
407        vec![event.clone()]
408    }
409
410    async fn send_handler(
411        &mut self,
412        event: &HotShotEvent<TYPES>,
413        public_key: &TYPES::SignatureKey,
414        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
415        upgrade_lock: &UpgradeLock<TYPES>,
416        _consensus: OuterConsensus<TYPES>,
417        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
418        _network: Arc<I::Network>,
419    ) -> Vec<HotShotEvent<TYPES>> {
420        match event {
421            HotShotEvent::QuorumProposalRecv(proposal, _sender) => {
422                // Check if view is a dishonest proposal, if true send a vote
423                let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await;
424                if dishonest_proposals.contains(&proposal.data.view_number()) {
425                    // Create a vote using data from most recent vote and the current event number
426                    // We wont update internal consensus state for this Byzantine replica but we are at least
427                    // Going to send a vote to the next honest leader
428                    let vote = QuorumVote2::<TYPES>::create_signed_vote(
429                        self.votes_sent.last().unwrap().data.clone(),
430                        event.view_number().unwrap(),
431                        public_key,
432                        private_key,
433                        upgrade_lock,
434                    )
435                    .context("Failed to sign vote")
436                    .unwrap();
437                    return vec![HotShotEvent::QuorumVoteSend(vote)];
438                }
439            },
440            HotShotEvent::TimeoutVoteSend(vote) => {
441                // Check if this view was a dishonest proposal view, if true dont send timeout
442                let dishonest_proposals = self.dishonest_proposal_view_numbers.read().await;
443                if dishonest_proposals.contains(&vote.view_number) {
444                    // We craft the vote upon `QuorumProposalRecv` and send out a vote.
445                    // So, dont send the timeout to the next leader from this byzantine replica
446                    return vec![];
447                }
448            },
449            HotShotEvent::QuorumVoteSend(vote) => {
450                self.votes_sent.push(vote.clone());
451            },
452            _ => {},
453        }
454        vec![event.clone()]
455    }
456}
457
/// Implements a byzantine behaviour which aims at splitting the honest nodes during view sync protocol
/// so that the honest nodes cannot view sync on their own.
///
/// Requirement: The scenario requires at least 4 dishonest nodes so total number of nodes need to be
/// at least 13.
///
/// Scenario:
/// 1. The first dishonest leader sends a proposal to only f + 1 honest nodes and f dishonest nodes
/// 2. The second dishonest leader sends a proposal to only f + 1 honest nodes.
/// 3. All dishonest nodes do not send timeout votes.
/// 4. The first dishonest relay sends a correctly formed precommit certificate to f + 1 honest nodes
///    and f dishonest nodes.
/// 5. The first dishonest relay sends a correctly formed commit certificate to only one honest node.
/// 6. The second dishonest relay behaves in the same way as the first dishonest relay.
#[derive(Debug)]
pub struct DishonestViewSyncRelay {
    /// Views at which proposals and view-sync certificates are sent only to chosen subsets of nodes
    pub dishonest_proposal_view_numbers: Vec<u64>,
    /// Views at which quorum/timeout/view-sync-precommit votes are suppressed
    pub dishonest_vote_view_numbers: Vec<u64>,
    /// Node ids of the first group of f honest nodes
    /// (not targeted by any dishonest send in this implementation)
    pub first_f_honest_nodes: Vec<u64>,
    /// Node ids of the second group of f honest nodes (targets of dishonest sends)
    pub second_f_honest_nodes: Vec<u64>,
    /// Node id of the single extra honest node that completes the f + 1 honest set,
    /// and the sole recipient of the dishonest commit certificate
    pub one_honest_node: u64,
    /// Node ids of the f dishonest nodes
    pub f_dishonest_nodes: Vec<u64>,
}
481
#[async_trait]
impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
    for DishonestViewSyncRelay
{
    /// Intercepts outgoing events to implement the view-sync splitting attack described
    /// on the struct: at the configured views, proposals and view-sync certificates are
    /// direct-messaged to hand-picked node subsets instead of being emitted normally,
    /// and this node's own votes are suppressed entirely.
    async fn send_handler(
        &mut self,
        event: &HotShotEvent<TYPES>,
        _public_key: &TYPES::SignatureKey,
        _private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
        upgrade_lock: &UpgradeLock<TYPES>,
        _consensus: OuterConsensus<TYPES>,
        membership_coordinator: EpochMembershipCoordinator<TYPES>,
        network: Arc<I::Network>,
    ) -> Vec<HotShotEvent<TYPES>> {
        match event {
            HotShotEvent::QuorumProposalSend(proposal, sender) => {
                let view_number = proposal.data.view_number();
                // Honest views are passed through for normal delivery.
                if !self.dishonest_proposal_view_numbers.contains(&view_number) {
                    return vec![event.clone()];
                }
                // Wrap the proposal in the wire format matching the active protocol version.
                let message_kind = if upgrade_lock.epochs_enabled(view_number) {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::Proposal2(convert_proposal(proposal.clone())),
                    ))
                } else {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::Proposal(convert_proposal(proposal.clone())),
                    ))
                };
                let message = Message {
                    sender: sender.clone(),
                    kind: message_kind,
                };
                let serialized_message = match upgrade_lock.serialize(&message) {
                    Ok(serialized) => serialized,
                    Err(e) => {
                        panic!("Failed to serialize message: {e}");
                    },
                };
                let second_f_honest_it = self.second_f_honest_nodes.iter();
                let f_dishonest_it = self.f_dishonest_nodes.iter();
                let one_honest_it = once(&self.one_honest_node);
                let chained_it: Box<dyn Iterator<Item = &u64> + Send> =
                    if &*view_number == self.dishonest_proposal_view_numbers.first().unwrap() {
                        // The first dishonest proposal is sent to f + 1 honest nodes and f dishonest nodes
                        Box::new(second_f_honest_it.chain(one_honest_it.chain(f_dishonest_it)))
                    } else {
                        // All other dishonest proposals are sent to f + 1 honest nodes
                        Box::new(second_f_honest_it.chain(one_honest_it))
                    };
                for node_id in chained_it {
                    // Resolve a node id to its public key via `leader(node_id_as_view, epoch)`.
                    // NOTE(review): this assumes the test membership elects leaders so that
                    // the leader of view `node_id` is node `node_id` — confirm against the
                    // membership used by these tests.
                    let dummy_view = ViewNumber::new(*node_id);
                    let Ok(node) = membership_coordinator
                        .membership()
                        .read()
                        .await
                        .leader(dummy_view, proposal.data.epoch())
                    else {
                        panic!(
                            "Failed to find leader for view {} and epoch {:?}",
                            dummy_view,
                            proposal.data.epoch()
                        );
                    };
                    let transmit_result = network
                        .direct_message(
                            view_number.u64().into(),
                            serialized_message.clone(),
                            node.clone(),
                        )
                        .await;
                    match transmit_result {
                        Ok(()) => tracing::info!(
                            "Sent proposal for view {} to node {}",
                            proposal.data.view_number(),
                            node_id
                        ),
                        Err(e) => panic!("Failed to send message task: {e:?}"),
                    }
                }
                // Return nothing: the proposal was already delivered directly above.
                vec![]
            },
            // Dishonest nodes withhold their quorum votes at the configured views.
            HotShotEvent::QuorumVoteSend(vote) => {
                if !self.dishonest_vote_view_numbers.contains(&vote.view_number) {
                    return vec![event.clone()];
                }
                vec![]
            },
            // Dishonest nodes withhold their timeout votes at the configured views.
            HotShotEvent::TimeoutVoteSend(vote) => {
                if !self.dishonest_vote_view_numbers.contains(&vote.view_number) {
                    return vec![event.clone()];
                }
                vec![]
            },
            // Dishonest nodes withhold their view-sync pre-commit votes at the configured views.
            HotShotEvent::ViewSyncPreCommitVoteSend(vote) => {
                if !self.dishonest_vote_view_numbers.contains(&vote.view_number) {
                    return vec![event.clone()];
                }
                vec![]
            },
            HotShotEvent::ViewSyncPreCommitCertificateSend(certificate, sender) => {
                let view_number = certificate.data.round;
                if !self.dishonest_proposal_view_numbers.contains(&view_number) {
                    return vec![event.clone()];
                }
                // Wrap the certificate in the wire format matching the active protocol version.
                let message_kind = if upgrade_lock.epochs_enabled(view_number) {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncPreCommitCertificate2(certificate.clone()),
                    ))
                } else {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncPreCommitCertificate(
                            certificate.clone().to_vsc(),
                        ),
                    ))
                };
                let message = Message {
                    sender: sender.clone(),
                    kind: message_kind,
                };
                let serialized_message = match upgrade_lock.serialize(&message) {
                    Ok(serialized) => serialized,
                    Err(e) => {
                        panic!("Failed to serialize message: {e}");
                    },
                };
                let second_f_honest_it = self.second_f_honest_nodes.iter();
                let f_dishonest_it = self.f_dishonest_nodes.iter();
                let one_honest_it = once(&self.one_honest_node);
                // The pre-commit certificate is sent to f + 1 honest nodes and f dishonest nodes
                let chained_it: Box<dyn Iterator<Item = &u64> + Send> =
                    Box::new(second_f_honest_it.chain(one_honest_it.chain(f_dishonest_it)));
                for node_id in chained_it {
                    // See the note on the proposal arm: node id → key via leader lookup.
                    let dummy_view = ViewNumber::new(*node_id);
                    let Ok(node) = membership_coordinator
                        .membership()
                        .read()
                        .await
                        .leader(dummy_view, certificate.epoch())
                    else {
                        panic!(
                            "Failed to find leader for view {} and epoch {:?}",
                            dummy_view,
                            certificate.epoch()
                        );
                    };
                    let transmit_result = network
                        .direct_message(
                            view_number.u64().into(),
                            serialized_message.clone(),
                            node.clone(),
                        )
                        .await;
                    match transmit_result {
                        Ok(()) => tracing::info!(
                            "Sent ViewSyncPreCommitCertificate for view {} to node {}",
                            view_number,
                            node_id
                        ),
                        Err(e) => panic!("Failed to send message task: {e:?}"),
                    }
                }
                vec![]
            },
            HotShotEvent::ViewSyncCommitCertificateSend(certificate, sender) => {
                let view_number = certificate.data.round;
                if !self.dishonest_proposal_view_numbers.contains(&view_number) {
                    return vec![event.clone()];
                }
                // Wrap the certificate in the wire format matching the active protocol version.
                let message_kind = if upgrade_lock.epochs_enabled(view_number) {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncCommitCertificate2(certificate.clone()),
                    ))
                } else {
                    MessageKind::<TYPES>::from_consensus_message(SequencingMessage::General(
                        GeneralConsensusMessage::ViewSyncCommitCertificate(
                            certificate.clone().to_vsc(),
                        ),
                    ))
                };
                let message = Message {
                    sender: sender.clone(),
                    kind: message_kind,
                };
                let serialized_message = match upgrade_lock.serialize(&message) {
                    Ok(serialized) => serialized,
                    Err(e) => {
                        panic!("Failed to serialize message: {e}");
                    },
                };
                let one_honest_it = once(&self.one_honest_node);
                // The commit certificate is sent to 1 honest node
                let chained_it: Box<dyn Iterator<Item = &u64> + Send> = Box::new(one_honest_it);
                for node_id in chained_it {
                    // See the note on the proposal arm: node id → key via leader lookup.
                    let dummy_view = ViewNumber::new(*node_id);
                    let Ok(node) = membership_coordinator
                        .membership()
                        .read()
                        .await
                        .leader(dummy_view, certificate.epoch())
                    else {
                        panic!(
                            "Failed to find leader for view {} and epoch {:?}",
                            dummy_view,
                            certificate.epoch()
                        );
                    };
                    let transmit_result = network
                        .direct_message(
                            view_number.u64().into(),
                            serialized_message.clone(),
                            node.clone(),
                        )
                        .await;
                    match transmit_result {
                        Ok(()) => tracing::info!(
                            "Sent ViewSyncCommitCertificate for view {} to node {}",
                            view_number,
                            node_id
                        ),
                        Err(e) => panic!("Failed to send message task: {e:?}"),
                    }
                }
                vec![]
            },
            _ => vec![event.clone()],
        }
    }

    /// Pass received events through untouched.
    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
        vec![event.clone()]
    }
}
715
#[derive(Debug)]
/// An `EventTransformerState` that, from a configured view onward, suppresses outgoing
/// proposals and votes — except view-sync pre-commit votes, which are re-signed with a
/// modified epoch and sent out
pub struct DishonestViewSyncWrongEpoch {
    /// The first view at which this node starts behaving dishonestly
    pub first_dishonest_view_number: u64,
    /// Function applied to the epoch of outgoing view-sync pre-commit votes
    /// (only when epochs are enabled for the vote's round)
    pub epoch_modifier: fn(EpochNumber) -> EpochNumber,
}
721
722#[async_trait]
723impl<TYPES: NodeType, I: NodeImplementation<TYPES>> EventTransformerState<TYPES, I>
724    for DishonestViewSyncWrongEpoch
725{
726    async fn send_handler(
727        &mut self,
728        event: &HotShotEvent<TYPES>,
729        public_key: &TYPES::SignatureKey,
730        private_key: &<TYPES::SignatureKey as SignatureKey>::PrivateKey,
731        upgrade_lock: &UpgradeLock<TYPES>,
732        _consensus: OuterConsensus<TYPES>,
733        _membership_coordinator: EpochMembershipCoordinator<TYPES>,
734        _network: Arc<I::Network>,
735    ) -> Vec<HotShotEvent<TYPES>> {
736        match event {
737            HotShotEvent::QuorumProposalSend(proposal, _) => {
738                if self.first_dishonest_view_number > proposal.data.view_number().u64() {
739                    return vec![event.clone()];
740                }
741                vec![]
742            },
743            HotShotEvent::QuorumVoteSend(vote) => {
744                if self.first_dishonest_view_number > vote.view_number().u64() {
745                    return vec![event.clone()];
746                }
747                vec![]
748            },
749            HotShotEvent::TimeoutVoteSend(vote) => {
750                if self.first_dishonest_view_number > vote.view_number().u64() {
751                    return vec![event.clone()];
752                }
753                vec![]
754            },
755            HotShotEvent::ViewSyncPreCommitVoteSend(vote) => {
756                if self.first_dishonest_view_number > vote.view_number().u64() {
757                    return vec![event.clone()];
758                }
759                let view_number = vote.data.round;
760                let vote = if upgrade_lock.epochs_enabled(view_number) {
761                    ViewSyncPreCommitVote2::<TYPES>::create_signed_vote(
762                        ViewSyncPreCommitData2 {
763                            relay: 0,
764                            round: view_number,
765                            epoch: vote.data.epoch.map(self.epoch_modifier),
766                        },
767                        view_number,
768                        public_key,
769                        private_key,
770                        upgrade_lock,
771                    )
772                    .context("Failed to sign pre commit vote!")
773                    .unwrap()
774                } else {
775                    let vote = ViewSyncPreCommitVote::<TYPES>::create_signed_vote(
776                        ViewSyncPreCommitData {
777                            relay: 0,
778                            round: view_number,
779                        },
780                        view_number,
781                        public_key,
782                        private_key,
783                        upgrade_lock,
784                    )
785                    .context("Failed to sign pre commit vote!")
786                    .unwrap();
787                    vote.to_vote2()
788                };
789                vec![HotShotEvent::ViewSyncPreCommitVoteSend(vote)]
790            },
791            _ => vec![event.clone()],
792        }
793    }
794
795    async fn recv_handler(&mut self, event: &HotShotEvent<TYPES>) -> Vec<HotShotEvent<TYPES>> {
796        vec![event.clone()]
797    }
798}