Skip to content

Commit a7716ef

Browse files
committed
refactor(workspace): clean up
1 parent ada1804 commit a7716ef

File tree

5 files changed

+100
-81
lines changed

5 files changed

+100
-81
lines changed

src/common.rs

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -1,11 +1,11 @@
11
use thiserror::Error;
22

3+
use crate::primitives::ChunksError;
4+
35
#[derive(Error, Debug)]
46
pub enum RLNCError {
5-
#[error("Data is empty")]
6-
EmptyData,
7-
#[error("Chunk count must be greater than 0")]
8-
ZeroChunkCount,
7+
#[error(transparent)]
8+
ChunksError(#[from] ChunksError),
99
#[error("Required packet count must be greater than 0")]
1010
ZeroPacketCount,
1111
#[error("Chunk size mismatch: got {0}, expected {1}")]

src/decode.rs

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -3,7 +3,7 @@
33
use crate::{
44
common::RLNCError,
55
matrix::Matrix,
6-
primitives::{field::Field, packet::RLNCPacket},
6+
primitives::{ChunksError, field::Field, packet::RLNCPacket},
77
};
88

99
/// RLNC Decoder.
@@ -23,7 +23,7 @@ impl<F: Field> Decoder<F> {
2323
/// Creates a new decoder for the given chunk size and chunk count (generation size).
2424
pub fn new(chunk_size: usize, chunk_count: usize) -> Result<Self, RLNCError> {
2525
if chunk_size == 0 {
26-
return Err(RLNCError::ZeroChunkCount);
26+
return Err(ChunksError::ZeroChunkSize.into());
2727
}
2828

2929
if chunk_count == 0 {

src/encode.rs

Lines changed: 14 additions & 65 deletions
Original file line numberDiff line numberDiff line change
@@ -2,19 +2,16 @@
22
use rand::Rng;
33

44
use crate::{
5-
common::{BOUNDARY_MARKER, RLNCError},
6-
primitives::{
7-
Chunk,
8-
field::{Field, Scalar},
9-
packet::RLNCPacket,
10-
},
5+
common::RLNCError,
6+
primitives::{Chunks, field::Field, packet::RLNCPacket},
117
};
128

13-
/// RLNC encoder.
9+
/// RLNC encoder that's generic over the [`Field`] type. An encoder should be instantiated
10+
/// per piece of data the caller wants to encode, then used to generate the encoded chunks.
1411
#[derive(Debug)]
1512
pub struct Encoder<F: Field> {
1613
// The chunks of data to be encoded.
17-
chunks: Vec<Chunk<F>>,
14+
chunks: Chunks<F>,
1815
// The number of chunks to split the data into (also known as the generation size).
1916
chunk_count: usize,
2017
// The size of each chunk in bytes.
@@ -23,43 +20,18 @@ pub struct Encoder<F: Field> {
2320

2421
impl<F: Field> Encoder<F> {
2522
/// Creates a new encoder for the given data and chunk count.
26-
///
27-
/// # Arguments
28-
///
29-
/// - `data` - The data to be encoded.
30-
/// - `chunk_count` - The number of chunks to split the data into (also known as the generation
31-
/// size).
3223
pub fn new(data: impl AsRef<[u8]>, chunk_count: usize) -> Result<Self, RLNCError> {
33-
if data.as_ref().is_empty() {
34-
return Err(RLNCError::EmptyData);
35-
}
36-
37-
if chunk_count == 0 {
38-
return Err(RLNCError::ZeroChunkCount);
39-
}
40-
41-
let mut data = Vec::from(data.as_ref());
42-
data.push(BOUNDARY_MARKER);
43-
44-
// Calculate chunk size to accommodate original data + boundary marker
45-
let chunk_size = data.len().div_ceil(chunk_count);
46-
47-
// Round up chunk size to nearest multiple of `F::SAFE_CAPACITY` for scalar packing
48-
let chunk_size = chunk_size.div_ceil(F::SAFE_CAPACITY) * F::SAFE_CAPACITY;
49-
let padded_len = chunk_size * chunk_count;
50-
51-
// Pad the rest with zeros if needed
52-
data.resize(padded_len, 0);
53-
54-
let chunks = data.chunks_exact(chunk_size).map(Chunk::from_bytes).collect();
24+
let chunks = Self::prepare(data, chunk_count)?;
25+
let chunk_count = chunks.len();
26+
let chunk_size = chunks.chunk_size();
5527

5628
Ok(Self { chunks, chunk_count, chunk_size })
5729
}
5830

5931
/// Creates a new encoder from a vector of chunks.
60-
pub fn from_chunks(chunks: Vec<Chunk<F>>) -> Self {
32+
pub fn from_chunks(chunks: Chunks<F>) -> Self {
6133
let chunk_count = chunks.len();
62-
let chunk_size = chunks[0].size();
34+
let chunk_size = chunks.chunk_size();
6335

6436
Self { chunks, chunk_count, chunk_size }
6537
}
@@ -88,7 +60,7 @@ impl<F: Field> Encoder<F> {
8860
fn encode_inner(&self, coding_vector: &[F]) -> Vec<F> {
8961
let mut result = vec![F::ZERO; self.chunk_size.div_ceil(F::SAFE_CAPACITY)];
9062

91-
for (chunk, &coefficient) in self.chunks.iter().zip(coding_vector) {
63+
for (chunk, &coefficient) in self.chunks.inner().iter().zip(coding_vector) {
9264
if coefficient.is_zero_vartime() {
9365
continue;
9466
}
@@ -113,32 +85,8 @@ impl<F: Field> Encoder<F> {
11385

11486
/// Prepares the data for encoding by splitting it into equally sized chunks and padding with
11587
/// zeros. Also converts the data into symbols in the chosen finite field.
116-
pub fn prepare(
117-
data: impl AsRef<[u8]>,
118-
chunk_count: usize,
119-
) -> Result<Vec<Chunk<Scalar>>, RLNCError> {
120-
if data.as_ref().is_empty() {
121-
return Err(RLNCError::EmptyData);
122-
}
123-
124-
if chunk_count == 0 {
125-
return Err(RLNCError::ZeroChunkCount);
126-
}
127-
128-
let mut data = Vec::from(data.as_ref());
129-
data.push(BOUNDARY_MARKER);
130-
131-
// Calculate chunk size to accommodate original data + boundary marker
132-
let chunk_size = data.len().div_ceil(chunk_count);
133-
134-
// Round up chunk size to nearest multiple of `F::SAFE_CAPACITY` for scalar packing
135-
let chunk_size = chunk_size.div_ceil(F::SAFE_CAPACITY) * F::SAFE_CAPACITY;
136-
let padded_len = chunk_size * chunk_count;
137-
138-
// Pad the rest with zeros if needed
139-
data.resize(padded_len, 0);
140-
141-
Ok(data.chunks_exact(chunk_size).map(Chunk::from_bytes).collect())
88+
pub fn prepare(data: impl AsRef<[u8]>, chunk_count: usize) -> Result<Chunks<F>, RLNCError> {
89+
Ok(Chunks::new(data.as_ref(), chunk_count)?)
14290
}
14391

14492
/// Encodes the data with the given coding vector using linear combinations.
@@ -189,6 +137,7 @@ impl<F: Field> Encoder<F> {
189137
// Map each (chunk, coefficient) pair to its contribution and then reduce all
190138
// contributions into the final result.
191139
self.chunks
140+
.inner()
192141
.par_iter()
193142
.zip(coding_vector)
194143
.filter_map(|(chunk, &coefficient)| {

src/lib.rs

Lines changed: 3 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -69,11 +69,12 @@ mod tests {
6969

7070
let chunks = Encoder::<Scalar>::prepare(&data, chunk_count).unwrap();
7171
let start = Instant::now();
72-
let committer = PedersenCommitter::new(seed, chunks[0].symbols().len());
72+
let committer = PedersenCommitter::new(seed, chunks.inner()[0].symbols().len());
7373
println!("Committer creation time: {:?}", start.elapsed());
7474

7575
let start = Instant::now();
76-
let commitments = chunks.iter().map(|c| committer.commit(c.symbols())).collect::<Vec<_>>();
76+
let commitments =
77+
chunks.inner().iter().map(|c| committer.commit(c.symbols())).collect::<Vec<_>>();
7778
println!("Commitment time: {:?}", start.elapsed());
7879

7980
let encoder = Encoder::from_chunks(chunks);

src/primitives/mod.rs

Lines changed: 77 additions & 8 deletions
Original file line numberDiff line numberDiff line change
@@ -3,10 +3,87 @@ pub mod field;
33
pub mod packet;
44
use field::Field;
55

6+
use crate::common::BOUNDARY_MARKER;
7+
8+
/// A collection of equally sized, prepared chunks of data. Each chunk of data holds the symbols.
9+
/// This type represents correctly sized and padded chunks of data that are ready to be encoded.
10+
#[derive(Debug)]
11+
pub struct Chunks<F: Field> {
12+
inner: Vec<Chunk<F>>,
13+
chunk_size: usize,
14+
}
15+
16+
/// Errors that can occur when creating a new collection of chunks.
17+
#[derive(Debug, thiserror::Error)]
18+
pub enum ChunksError {
19+
/// The data is empty.
20+
#[error("data is empty")]
21+
EmptyData,
22+
/// The chunk count is zero.
23+
#[error("chunk count is zero")]
24+
ZeroChunkCount,
25+
/// The chunk size is zero.
26+
#[error("chunk size is zero")]
27+
ZeroChunkSize,
28+
}
29+
30+
impl<F: Field> Chunks<F> {
31+
/// Creates a new collection of chunks from a slice of bytes. The data is split into
32+
/// `chunk_count` equally sized chunks, and then converted into symbols (scalars) of the
33+
/// field `F`. See also [`Chunk`] for more details.
34+
pub fn new(data: &[u8], chunk_count: usize) -> Result<Self, ChunksError> {
35+
if data.is_empty() {
36+
return Err(ChunksError::EmptyData);
37+
}
38+
39+
if chunk_count == 0 {
40+
return Err(ChunksError::ZeroChunkCount);
41+
}
42+
43+
let mut data = Vec::from(data.as_ref());
44+
data.push(BOUNDARY_MARKER);
45+
46+
// Calculate chunk size to accommodate original data + boundary marker
47+
let chunk_size = data.len().div_ceil(chunk_count);
48+
49+
// Round up chunk size to nearest multiple of `F::SAFE_CAPACITY` for scalar packing
50+
let chunk_size = chunk_size.div_ceil(F::SAFE_CAPACITY) * F::SAFE_CAPACITY;
51+
let padded_len = chunk_size * chunk_count;
52+
53+
// Pad the rest with zeros if needed
54+
data.resize(padded_len, 0);
55+
56+
let chunks = data.chunks_exact(chunk_size).map(Chunk::from_bytes).collect();
57+
58+
Ok(Self { inner: chunks, chunk_size })
59+
}
60+
61+
/// Returns the size of the chunks in bytes.
62+
pub fn chunk_size(&self) -> usize {
63+
self.chunk_size
64+
}
65+
66+
/// Returns the inner chunks.
67+
pub fn inner(&self) -> &[Chunk<F>] {
68+
&self.inner
69+
}
70+
71+
/// Returns the number of chunks in the collection.
72+
pub fn len(&self) -> usize {
73+
self.inner.len()
74+
}
75+
76+
/// Returns true if the collection is empty.
77+
pub fn is_empty(&self) -> bool {
78+
self.inner.is_empty()
79+
}
80+
}
81+
682
/// A chunk of data.
783
#[derive(Debug, Clone)]
884
pub struct Chunk<F: Field> {
985
symbols: Vec<F>,
86+
#[allow(unused)]
1087
size: usize,
1188
}
1289

@@ -22,12 +99,4 @@ impl<F: Field> Chunk<F> {
2299
pub(crate) fn symbols(&self) -> &[F] {
23100
&self.symbols
24101
}
25-
26-
/// Returns the size of the chunk in bytes.
27-
pub(crate) fn size(&self) -> usize {
28-
self.size
29-
}
30102
}
31-
32-
// TODO: Add a generic implementation for Chunk<S>. In its current form, we have to convert bytes to
33-
// Symbols, but since ff::Field doesn't implement TryFrom<&[u8]>, we can't make it generic.

0 commit comments

Comments
 (0)