Skip to content

Commit 48bb745

Browse files
committed
Add unit tests for ChunkedMessageQueue padding
Add five new tests:

- `test_chunked_message_queue_ping_padding`: verifies chunk alignment for various message sizes
- `test_chunked_message_queue_small_remainder_overflow`: verifies the two-Ping edge case when remainder < `MIN_ENCRYPTED_PING_SIZE`
- `test_chunked_message_queue_chunk_alignment`: verifies alignment after encrypting multiple real messages
- `test_chunked_message_queue_buffer_compaction`: verifies `maybe_compact` correctly drains sent bytes
- `test_chunked_message_queue_pending_msg_bytes_tracking`: verifies that `pending_msg_bytes` tracks real message bytes and is unaffected by padding

Also extract a `get_test_encryptor` helper to reduce boilerplate across the new tests.

Co-Authored-By: HAL 9000
Signed-off-by: Elias Rohrer <dev@tnull.de>
1 parent c788ee2 commit 48bb745

1 file changed

Lines changed: 159 additions & 0 deletions

File tree

lightning/src/ln/peer_handler.rs

Lines changed: 159 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4993,6 +4993,165 @@ mod tests {
49934993
);
49944994
}
49954995

4996+
/// Helper: completes a noise handshake and returns the outbound encryptor ready for encryption.
4997+
fn get_test_encryptor() -> PeerChannelEncryptor {
4998+
use crate::ln::peer_channel_encryptor::PeerChannelEncryptor;
4999+
use bitcoin::secp256k1::{Secp256k1, SecretKey};
5000+
5001+
let secp_ctx = Secp256k1::new();
5002+
// Inbound peer identity (the "responder").
5003+
let inbound_secret = SecretKey::from_slice(&[42; 32]).unwrap();
5004+
let inbound_pubkey =
5005+
bitcoin::secp256k1::PublicKey::from_secret_key(&secp_ctx, &inbound_secret);
5006+
let inbound_signer = crate::util::test_utils::TestNodeSigner::new(inbound_secret);
5007+
5008+
// Outbound peer identity (the "initiator").
5009+
let outbound_secret = SecretKey::from_slice(&[43; 32]).unwrap();
5010+
let outbound_signer = crate::util::test_utils::TestNodeSigner::new(outbound_secret);
5011+
5012+
let outbound_ephemeral = SecretKey::from_slice(&[44; 32]).unwrap();
5013+
let inbound_ephemeral = SecretKey::from_slice(&[45; 32]).unwrap();
5014+
5015+
let mut outbound = PeerChannelEncryptor::new_outbound(inbound_pubkey, outbound_ephemeral);
5016+
let mut inbound = PeerChannelEncryptor::new_inbound(&&inbound_signer);
5017+
5018+
let act_one = outbound.get_act_one(&secp_ctx);
5019+
let act_two = inbound
5020+
.process_act_one_with_keys(&act_one, &&inbound_signer, inbound_ephemeral, &secp_ctx)
5021+
.unwrap();
5022+
let (act_three, _) = outbound.process_act_two(&act_two, &&outbound_signer).unwrap();
5023+
let _ = inbound.process_act_three(&act_three).unwrap();
5024+
5025+
outbound
5026+
}
5027+
5028+
#[test]
fn test_chunked_message_queue_ping_padding() {
	// Tests that Ping padding correctly fills the remainder of a chunk.
	let mut encryptor = get_test_encryptor();

	// A spread of payload sizes, from tiny up to the maximum message length.
	let payload_sizes = [40, 100, 500, 1000, 5000, 30000, 65535];
	for size in payload_sizes {
		let mut queue = ChunkedMessageQueue::new();

		// Simulate `size` bytes of already-encrypted message data in the buffer.
		queue.buffer.extend_from_slice(&vec![0u8; size]);
		queue.pending_msg_bytes += size;

		queue.pad_and_finalize_chunk(&mut encryptor);

		let pending = queue.pending_bytes();
		assert_eq!(
			pending % CHUNK_SIZE,
			0,
			"Buffer not chunk-aligned after padding for msg_size={}",
			size
		);
		assert!(
			pending >= CHUNK_SIZE,
			"Buffer should be at least one chunk for msg_size={}",
			size
		);
	}
}
5055+
5056+
#[test]
fn test_chunked_message_queue_small_remainder_overflow() {
	// Tests the edge case where remainder < MIN_ENCRYPTED_PING_SIZE, requiring two Pings.
	let mut encryptor = get_test_encryptor();

	// Every remainder too small to hold even a minimal encrypted Ping must spill
	// padding into a second chunk.
	for leftover in 1..MIN_ENCRYPTED_PING_SIZE {
		let mut queue = ChunkedMessageQueue::new();

		// Fill the buffer so that exactly `leftover` bytes remain in the current chunk.
		let prefill = CHUNK_SIZE - leftover;
		queue.buffer.resize(prefill, 0);
		queue.pending_msg_bytes = prefill;

		queue.pad_and_finalize_chunk(&mut encryptor);

		assert_eq!(
			queue.pending_bytes() % CHUNK_SIZE,
			0,
			"Buffer not chunk-aligned for remainder={}",
			leftover
		);
		// The padding cannot fit in the leftover space, so the queue must grow to
		// exactly two chunks.
		assert_eq!(
			queue.pending_bytes(),
			2 * CHUNK_SIZE,
			"Expected 2 chunks for small remainder={}",
			leftover
		);
	}
}
5085+
5086+
#[test]
fn test_chunked_message_queue_chunk_alignment() {
	// Tests that after multiple messages the buffer stays correctly aligned after padding.
	let mut encryptor = get_test_encryptor();
	let mut queue = ChunkedMessageQueue::new();

	// Queue several genuinely-encrypted Ping messages of increasing size.
	for ponglen in [0u16, 64, 256, 1024] {
		let msg: wire::Message<()> =
			wire::Message::Ping(msgs::Ping { ponglen, byteslen: 64 });
		queue.encrypt_and_push_message(&mut encryptor, msg);
	}

	let unpadded = queue.pending_bytes();
	assert!(unpadded > 0);

	queue.pad_and_finalize_chunk(&mut encryptor);

	// Padding may only grow the buffer, and must leave it chunk-aligned.
	assert_eq!(queue.pending_bytes() % CHUNK_SIZE, 0);
	assert!(queue.pending_bytes() >= unpadded);
}
5107+
5108+
#[test]
fn test_chunked_message_queue_buffer_compaction() {
	// Tests that maybe_compact drains sent bytes appropriately.
	let mut queue = ChunkedMessageQueue::new();

	// Stage two chunks worth of (fake) message data.
	queue.buffer.resize(2 * CHUNK_SIZE, 0xAB);
	queue.pending_msg_bytes = 2 * CHUNK_SIZE;
	assert_eq!(queue.pending_bytes(), 2 * CHUNK_SIZE);

	// Pretend the first chunk has already gone out over the wire.
	queue.send_offset = CHUNK_SIZE;
	queue.pending_msg_bytes = CHUNK_SIZE;
	queue.maybe_compact();

	// Compaction should drop the sent prefix: the offset resets to zero and only
	// the unsent chunk remains buffered.
	assert_eq!(queue.send_offset, 0);
	assert_eq!(queue.buffer.len(), CHUNK_SIZE);
	assert_eq!(queue.pending_bytes(), CHUNK_SIZE);
}
5128+
5129+
#[test]
fn test_chunked_message_queue_pending_msg_bytes_tracking() {
	// Tests that pending_msg_bytes correctly tracks message bytes vs padding bytes.
	let mut encryptor = get_test_encryptor();
	let mut queue = ChunkedMessageQueue::new();

	// Queue a single small encrypted message.
	let msg: wire::Message<()> =
		wire::Message::Ping(msgs::Ping { ponglen: 0, byteslen: 64 });
	queue.encrypt_and_push_message(&mut encryptor, msg);

	let real_bytes = queue.pending_msg_bytes;
	assert!(real_bytes > 0);
	// Before padding, every buffered byte is a real message byte.
	assert_eq!(real_bytes, queue.pending_bytes());

	queue.pad_and_finalize_chunk(&mut encryptor);

	// Padding grows the wire buffer but must not be counted as message data.
	assert_eq!(queue.pending_msg_bytes, real_bytes);
	assert!(queue.pending_bytes() > real_bytes);
	assert_eq!(queue.pending_bytes() % CHUNK_SIZE, 0);

	// total_buffered_bytes should use pending_msg_bytes, not pending_bytes.
	assert_eq!(queue.total_buffered_bytes(), real_bytes);
}
5154+
49965155
#[test]
49975156
fn test_filter_addresses() {
49985157
// Tests the filter_addresses function.

0 commit comments

Comments
 (0)