feat: support no_std (#143)

* feat: support no_std

The `metal` feature supports `no_std` when the crate is built with `default-features = false, features = ["metal"]`.
Floating-point calculations are done via the `micromath` crate.

All previously available functionality remains available under the default `std` feature.

The `python` and `wasm` features were also tweaked so that the tests compile.
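For orientation, a minimal sketch of the gating pattern now used throughout the crate (not part of this diff; the crate and item names are illustrative): `#![no_std]` is always enabled, and either `std` or `core`/`alloc` supply the same items depending on whether the `std` feature is set.

```rust
// lib.rs of a hypothetical minimal crate, mirroring the pattern applied here.
#![no_std]

#[cfg(feature = "std")]
#[macro_use]
extern crate std;
#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;

// Per-module imports then select the matching source for each item:
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;

pub fn double_all(data: &[u8]) -> Vec<u8> {
    // `Vec` (and macros such as `vec!`) resolve identically under both configurations.
    data.iter().map(|x| x.wrapping_mul(2)).collect()
}
```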

* feat: get rid of floats (#2)

* feat: remove conversion to f64, fix features

* chore: uncomment symbols_required checker, fmt

* revert: add cdylib target for python support

* fix: generalize crate type

---------

Co-authored-by: varovainen <99664267+varovainen@users.noreply.github.com>
Slesarew 2023-02-03 04:07:41 +02:00 committed by GitHub
parent e7d51816ac
commit 5a720829fa
22 changed files with 292 additions and 82 deletions

@ -11,7 +11,7 @@ rust-version = "1.60"
authors = ["Christopher Berner <christopherberner@gmail.com>"]
[lib]
crate-type = ["cdylib", "rlib"]
crate-type = ["lib"]
[dependencies]
serde = {version = "1.0.102", features=["std", "derive"], optional = true}
@ -43,15 +43,22 @@ harness = false
name = "decode_benchmark"
harness = false
[[example]]
name = "main"
required-features = ["std"]
[profile.release]
debug = true
lto = false
[features]
benchmarking = []
python = ["pyo3"]
serde_support = ["serde"]
wasm = ["wasm-bindgen", "js-sys"]
default = ["std"]
benchmarking = ["std"]
python = ["pyo3", "std"]
serde_support = ["serde", "std"]
std = []
wasm = ["wasm-bindgen", "js-sys", "std"]
[package.metadata.maturin]
requires-python = ">= 3.7"

@ -1,7 +1,13 @@
#[cfg(not(any(feature = "python", feature = "wasm")))]
use rand::seq::SliceRandom;
#[cfg(not(any(feature = "python", feature = "wasm")))]
use rand::Rng;
#[cfg(not(any(feature = "python", feature = "wasm")))]
use raptorq::{Decoder, Encoder, EncodingPacket};
#[cfg(not(any(feature = "python", feature = "wasm")))]
fn main() {
// Generate some random data to send
let mut data: Vec<u8> = vec![0; 10_000];
@ -43,3 +49,8 @@ fn main() {
// Check that even though some of the data was lost we are able to reconstruct the original message
assert_eq!(result.unwrap(), data);
}
#[cfg(any(feature = "python", feature = "wasm"))]
fn main() {
panic!("This is not indented to compile for `python` and `wasm` features.");
}

@ -1,5 +1,11 @@
use std::mem::size_of;
use std::ops::Range;
#[cfg(feature = "std")]
use std::{mem::size_of, ops::Range, u32, vec::Vec};
#[cfg(not(feature = "std"))]
use core::{mem::size_of, ops::Range, u32};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
// Map<u16, Vec<u32>>
@ -50,9 +56,9 @@ impl ImmutableListMapBuilder {
pub fn build(self) -> ImmutableListMap {
let mut entries = self.entries;
entries.sort_unstable_by_key(|x| x.0);
assert!(entries.len() < std::u32::MAX as usize);
assert!(entries.len() < u32::MAX as usize);
assert!(!entries.is_empty());
let mut offsets = vec![std::u32::MAX; self.num_keys];
let mut offsets = vec![u32::MAX; self.num_keys];
let mut last_key = entries[0].0;
offsets[last_key as usize] = 0;
let mut values = vec![];
@ -64,7 +70,7 @@ impl ImmutableListMapBuilder {
values.push(*value);
}
for i in (0..offsets.len()).rev() {
if offsets[i] == std::u32::MAX {
if offsets[i] == u32::MAX {
if i == offsets.len() - 1 {
offsets[i] = entries.len() as u32;
} else {

@ -1,10 +1,19 @@
#[cfg(feature = "std")]
use std::{cmp::min, vec::Vec};
#[cfg(not(feature = "std"))]
use core::cmp::min;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::rng::rand;
use crate::systematic_constants::{
MAX_SOURCE_SYMBOLS_PER_BLOCK, SYSTEMATIC_INDICES_AND_PARAMETERS,
};
use crate::util::int_div_ceil;
#[cfg(feature = "serde_support")]
use serde::{Deserialize, Serialize};
use std::cmp::min;
// As defined in section 3.2
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
@ -119,9 +128,15 @@ impl ObjectTransmissionInformation {
assert!(transfer_length <= 942574504275);
assert_eq!(symbol_size % alignment as u16, 0);
// See section 4.4.1.2. "These parameters MUST be set so that ceil(ceil(F/T)/Z) <= K'_max."
let symbols_required =
((transfer_length as f64 / symbol_size as f64).ceil() / source_blocks as f64).ceil();
assert!((symbols_required as u32) <= MAX_SOURCE_SYMBOLS_PER_BLOCK);
if (symbol_size != 0) && (source_blocks != 0) {
let symbols_required = int_div_ceil(
int_div_ceil(transfer_length, symbol_size as u64) as u64,
source_blocks as u64,
);
assert!((symbols_required) <= MAX_SOURCE_SYMBOLS_PER_BLOCK);
}
ObjectTransmissionInformation {
transfer_length,
symbol_size,
@ -192,25 +207,26 @@ impl ObjectTransmissionInformation {
let symbol_size = max_packet_size - (max_packet_size % alignment);
let sub_symbol_size = 8;
let kt = (transfer_length as f64 / symbol_size as f64).ceil();
let n_max = (symbol_size as f64 / (sub_symbol_size * alignment) as f64).floor() as u32;
let kt = int_div_ceil(transfer_length, symbol_size as u64);
let n_max = symbol_size as u32 / (sub_symbol_size * alignment) as u32;
let kl = |n: u32| -> u32 {
for &(kprime, _, _, _, _) in SYSTEMATIC_INDICES_AND_PARAMETERS.iter().rev() {
let x = (symbol_size as f64 / (alignment as u32 * n) as f64).ceil();
if kprime <= (decoder_memory_requirement as f64 / (alignment as f64 * x)) as u32 {
let x = int_div_ceil(symbol_size as u64, alignment as u64 * n as u64);
if kprime <= (decoder_memory_requirement / (alignment as u64 * x as u64)) as u32 {
return kprime;
}
}
unreachable!();
};
let num_source_blocks = (kt / kl(n_max) as f64).ceil() as u32;
let num_source_blocks = int_div_ceil(kt as u64, kl(n_max) as u64);
let mut n = 1;
for i in 1..=n_max {
n = i;
if (kt / num_source_blocks as f64).ceil() as u32 <= kl(n) {
if int_div_ceil(kt as u64, num_source_blocks as u64) <= kl(n) {
break;
}
}
@ -243,8 +259,10 @@ where
TJ: Into<u32>,
{
let (i, j) = (i.into(), j.into());
let il = (i as f64 / j as f64).ceil() as u32;
let is = (i as f64 / j as f64).floor() as u32;
let il = int_div_ceil(i as u64, j as u64);
let is = i / j;
let jl = i - is * j;
let js = j - jl;
(il, is, jl, js)
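For reference, a self-contained sketch of the integer-only `partition` computation after this change (not part of the diff; `int_div_ceil` is copied locally and the values are illustrative): partitioning 10 symbols into 3 blocks yields one block of 4 and two blocks of 3.

```rust
fn int_div_ceil(num: u64, denom: u64) -> u32 {
    if num % denom == 0 {
        (num / denom) as u32
    } else {
        (num / denom + 1) as u32
    }
}

// (il, is, jl, js): jl blocks of length il and js blocks of length is.
fn partition(i: u32, j: u32) -> (u32, u32, u32, u32) {
    let il = int_div_ceil(i as u64, j as u64);
    let is = i / j;
    let jl = i - is * j;
    let js = j - jl;
    (il, is, jl, js)
}

fn main() {
    // 10 symbols across 3 blocks: 1 block of 4 symbols and 2 blocks of 3 symbols.
    assert_eq!(partition(10, 3), (4, 3, 1, 2));
}
```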

@ -1,3 +1,9 @@
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::base::intermediate_tuple;
use crate::matrix::BinaryMatrix;
use crate::octet::Octet;
@ -168,6 +174,7 @@ pub fn generate_constraint_matrix<T: BinaryMatrix>(
(matrix, generate_hdpc_rows(Kprime, S, H))
}
#[cfg(feature = "std")]
#[cfg(test)]
mod tests {
use crate::constraint_matrix::generate_hdpc_rows;
@ -179,6 +186,7 @@ mod tests {
extended_source_block_symbols, num_hdpc_symbols, num_ldpc_symbols,
};
use rand::Rng;
use std::vec::Vec;
#[allow(non_snake_case)]
fn reference_generate_hdpc_rows(Kprime: usize, S: usize, H: usize) -> DenseOctetMatrix {

@ -1,3 +1,12 @@
#[cfg(feature = "std")]
use std::{collections::HashSet as Set, iter, vec::Vec};
#[cfg(not(feature = "std"))]
use core::iter;
#[cfg(not(feature = "std"))]
use alloc::{collections::BTreeSet as Set, vec::Vec};
use crate::base::intermediate_tuple;
use crate::base::partition;
use crate::base::EncodingPacket;
@ -15,9 +24,9 @@ use crate::systematic_constants::num_ldpc_symbols;
use crate::systematic_constants::{
calculate_p1, extended_source_block_symbols, num_lt_symbols, num_pi_symbols, systematic_index,
};
use crate::util::int_div_ceil;
#[cfg(feature = "serde_support")]
use serde::{Deserialize, Serialize};
use std::{collections::HashSet, iter};
#[derive(Clone, Debug, PartialEq, Eq)]
#[cfg_attr(feature = "serde_support", derive(Serialize, Deserialize))]
@ -29,7 +38,8 @@ pub struct Decoder {
impl Decoder {
pub fn new(config: ObjectTransmissionInformation) -> Decoder {
let kt = (config.transfer_length() as f64 / config.symbol_size() as f64).ceil() as u32;
let kt = int_div_ceil(config.transfer_length(), config.symbol_size() as u64);
let (kl, ks, zl, zs) = partition(kt, config.source_blocks());
let mut decoders = vec![];
@ -56,7 +66,10 @@ impl Decoder {
}
}
#[cfg(any(test, feature = "benchmarking"))]
#[cfg(all(
any(test, feature = "benchmarking"),
not(any(feature = "python", feature = "wasm"))
))]
pub fn set_sparse_threshold(&mut self, value: u32) {
for block_decoder in self.block_decoders.iter_mut() {
block_decoder.set_sparse_threshold(value);
@ -84,6 +97,7 @@ impl Decoder {
Some(result)
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
pub fn add_new_packet(&mut self, packet: EncodingPacket) {
let block_number = packet.payload_id.source_block_number() as usize;
if self.blocks[block_number].is_none() {
@ -92,6 +106,7 @@ impl Decoder {
}
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
pub fn get_result(&self) -> Option<Vec<u8>> {
for block in self.blocks.iter() {
if block.is_none() {
@ -119,7 +134,7 @@ pub struct SourceBlockDecoder {
source_symbols: Vec<Option<Symbol>>,
repair_packets: Vec<EncodingPacket>,
received_source_symbols: u32,
received_esi: HashSet<u32>,
received_esi: Set<u32>,
decoded: bool,
sparse_threshold: u32,
}
@ -129,6 +144,7 @@ impl SourceBlockDecoder {
since = "1.3.0",
note = "Use the new2() function instead. In version 2.0, that function will replace this one"
)]
#[cfg(feature = "std")]
pub fn new(source_block_id: u8, symbol_size: u16, block_length: u64) -> SourceBlockDecoder {
let config = ObjectTransmissionInformation::new(0, symbol_size, 0, 1, 1);
SourceBlockDecoder::new2(source_block_id, &config, block_length)
@ -140,8 +156,9 @@ impl SourceBlockDecoder {
config: &ObjectTransmissionInformation,
block_length: u64,
) -> SourceBlockDecoder {
let source_symbols = (block_length as f64 / config.symbol_size() as f64).ceil() as u32;
let mut received_esi = HashSet::new();
let source_symbols = int_div_ceil(block_length, config.symbol_size() as u64);
let mut received_esi = Set::new();
for i in source_symbols..extended_source_block_symbols(source_symbols) {
received_esi.insert(i);
}
@ -329,30 +346,41 @@ impl SourceBlockDecoder {
}
}
#[cfg(feature = "std")]
#[cfg(test)]
mod codec_tests {
#[cfg(not(any(feature = "python", feature = "wasm")))]
use crate::Decoder;
use crate::SourceBlockEncoder;
use crate::{Decoder, SourceBlockEncodingPlan};
use crate::SourceBlockEncodingPlan;
#[cfg(not(any(feature = "python", feature = "wasm")))]
use crate::{Encoder, EncoderBuilder};
use crate::{ObjectTransmissionInformation, SourceBlockDecoder};
#[cfg(not(any(feature = "python", feature = "wasm")))]
use rand::seq::SliceRandom;
use rand::Rng;
use std::sync::Arc;
use std::{
iter,
sync::atomic::{AtomicU32, Ordering},
sync::{
atomic::{AtomicU32, Ordering},
Arc,
},
vec::Vec,
};
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn random_erasure_dense() {
random_erasure(99_999);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn random_erasure_sparse() {
random_erasure(0);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
fn random_erasure(sparse_threshold: u32) {
let elements: usize = rand::thread_rng().gen_range(1..1_000_000);
let mut data: Vec<u8> = vec![0; elements];
@ -385,6 +413,7 @@ mod codec_tests {
assert_eq!(result.unwrap(), data);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn sub_block_erasure() {
let elements: usize = 10_000;

@ -1,3 +1,9 @@
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::base::intermediate_tuple;
use crate::base::partition;
use crate::base::EncodingPacket;
@ -15,6 +21,7 @@ use crate::systematic_constants::num_ldpc_symbols;
use crate::systematic_constants::num_lt_symbols;
use crate::systematic_constants::num_pi_symbols;
use crate::systematic_constants::{calculate_p1, systematic_index};
use crate::util::int_div_ceil;
use crate::ObjectTransmissionInformation;
#[cfg(feature = "serde_support")]
use serde::{Deserialize, Serialize};
@ -61,7 +68,8 @@ pub fn calculate_block_offsets(
data: &[u8],
config: &ObjectTransmissionInformation,
) -> Vec<(usize, usize)> {
let kt = (config.transfer_length() as f64 / config.symbol_size() as f64).ceil() as u32;
let kt = int_div_ceil(config.transfer_length(), config.symbol_size() as u64);
let (kl, ks, zl, zs) = partition(kt, config.source_blocks());
let mut data_index = 0;
@ -429,9 +437,11 @@ fn enc(
result
}
#[cfg(feature = "std")]
#[cfg(test)]
mod tests {
use rand::Rng;
use std::vec::Vec;
use crate::base::intermediate_tuple;
use crate::encoder::enc;
@ -442,7 +452,9 @@ mod tests {
use crate::systematic_constants::{
calculate_p1, num_ldpc_symbols, systematic_index, MAX_SOURCE_SYMBOLS_PER_BLOCK,
};
#[cfg(not(any(feature = "python", feature = "wasm")))]
use crate::{Encoder, EncoderBuilder, EncodingPacket, ObjectTransmissionInformation};
#[cfg(not(any(feature = "python", feature = "wasm")))]
use std::collections::HashSet;
const SYMBOL_SIZE: usize = 4;
@ -543,6 +555,7 @@ mod tests {
}
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn test_builder() {
let data = vec![0, 1, 2, 3];
@ -552,6 +565,7 @@ mod tests {
assert_eq!(builder.build(&data), encoder);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn padding_constraint_exact() {
let packet_size: u16 = 1024;
@ -560,6 +574,7 @@ mod tests {
padding_constraint(packet_size, padding_size, data_size);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn padding_constraint_42_bytes() {
let packet_size: u16 = 1024;
@ -568,6 +583,7 @@ mod tests {
padding_constraint(packet_size, padding_size, data_size);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
fn padding_constraint(packet_size: u16, padding_size: usize, data_size: usize) {
let data = gen_test_data(data_size);
let encoder = Encoder::with_defaults(&data, packet_size);
@ -588,6 +604,7 @@ mod tests {
assert_eq!(data[..], padded_data[..data_size]);
}
#[cfg(not(any(feature = "python", feature = "wasm")))]
#[test]
fn unique_blocks() {
let data = gen_test_data(120);

@ -1,6 +1,11 @@
use crate::arraymap::U16ArrayMap;
#[cfg(feature = "std")]
use std::cmp::{max, min};
#[cfg(not(feature = "std"))]
use core::cmp::{max, min};
use crate::arraymap::U16ArrayMap;
const NO_CONNECTED_COMPONENT: u16 = 0;
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]

@ -1,3 +1,9 @@
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::matrix::DenseBinaryMatrix;
use crate::octet::Octet;
use crate::sparse_vec::SparseBinaryVec;

@ -1,4 +1,16 @@
#![allow(clippy::needless_return, clippy::unreadable_literal)]
#![no_std]
#[cfg(not(feature = "std"))]
#[macro_use]
extern crate alloc;
#[cfg(not(feature = "std"))]
extern crate core;
#[cfg(feature = "std")]
#[macro_use]
extern crate std;
mod arraymap;
mod base;
@ -29,13 +41,11 @@ pub use crate::base::partition;
pub use crate::base::EncodingPacket;
pub use crate::base::ObjectTransmissionInformation;
pub use crate::base::PayloadId;
#[cfg(not(feature = "python"))]
#[cfg(not(feature = "wasm"))]
#[cfg(not(any(feature = "python", feature = "wasm")))]
pub use crate::decoder::Decoder;
pub use crate::decoder::SourceBlockDecoder;
pub use crate::encoder::calculate_block_offsets;
#[cfg(not(feature = "python"))]
#[cfg(not(feature = "wasm"))]
#[cfg(not(any(feature = "python", feature = "wasm")))]
pub use crate::encoder::Encoder;
pub use crate::encoder::EncoderBuilder;
pub use crate::encoder::SourceBlockEncoder;

@ -1,9 +1,17 @@
#[cfg(feature = "std")]
use std::{mem::size_of, vec::Vec};
#[cfg(not(feature = "std"))]
use core::mem::size_of;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::gf2::add_assign_binary;
use crate::iterators::OctetIter;
use crate::octet::Octet;
use crate::octets::BinaryOctetVec;
use crate::util::get_both_ranges;
use std::mem::size_of;
// TODO: change this struct to not use the Octet class, since it's binary not GF(256)
pub trait BinaryMatrix: Clone {

@ -1,10 +1,11 @@
#[cfg(feature = "std")]
use std::ops::{Add, AddAssign, Div, Mul, Sub};
#[cfg(not(feature = "std"))]
use core::ops::{Add, AddAssign, Div, Mul, Sub};
#[cfg(feature = "serde_support")]
use serde::{Deserialize, Serialize};
use std::ops::Add;
use std::ops::AddAssign;
use std::ops::Div;
use std::ops::Mul;
use std::ops::Sub;
// As defined in section 5.7.3
#[rustfmt::skip]
@ -71,13 +72,16 @@ pub const OCTET_MUL: [[u8; 256]; 256] = calculate_octet_mul_table();
// See "Screaming Fast Galois Field Arithmetic Using Intel SIMD Instructions" by Plank et al.
// Further adapted to AVX2
#[cfg(any(feature = "std", test))]
pub const OCTET_MUL_HI_BITS: [[u8; 32]; 256] = calculate_octet_mul_hi_table();
#[cfg(any(feature = "std", test))]
pub const OCTET_MUL_LOW_BITS: [[u8; 32]; 256] = calculate_octet_mul_low_table();
const fn const_mul(x: usize, y: usize) -> u8 {
return OCT_EXP[OCT_LOG[x] as usize + OCT_LOG[y] as usize];
}
#[cfg(any(feature = "std", test))]
const fn calculate_octet_mul_hi_table() -> [[u8; 32]; 256] {
let mut result = [[0; 32]; 256];
let mut i = 1;
@ -93,6 +97,7 @@ const fn calculate_octet_mul_hi_table() -> [[u8; 32]; 256] {
return result;
}
#[cfg(any(feature = "std", test))]
const fn calculate_octet_mul_low_table() -> [[u8; 32]; 256] {
let mut result = [[0; 32]; 256];
let mut i = 1;

@ -1,3 +1,9 @@
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::octet::Octet;
use crate::octets::{add_assign, fused_addassign_mul_scalar_binary, mulassign_scalar};
use crate::octets::{fused_addassign_mul_scalar, BinaryOctetVec};
@ -47,7 +53,7 @@ impl DenseOctetMatrix {
self.height
}
#[cfg(test)]
#[cfg(all(test, feature = "std"))]
pub fn width(&self) -> usize {
self.width
}

@ -1,21 +1,33 @@
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::octet::Octet;
use crate::octet::OCTET_MUL;
#[cfg(any(
target_arch = "x86",
target_arch = "x86_64",
target_arch = "arm",
target_arch = "aarch64"
#[cfg(all(
any(
target_arch = "x86",
target_arch = "x86_64",
target_arch = "arm",
target_arch = "aarch64",
),
feature = "std"
))]
use crate::octet::OCTET_MUL_HI_BITS;
#[cfg(any(
target_arch = "x86",
target_arch = "x86_64",
target_arch = "arm",
target_arch = "aarch64"
#[cfg(all(
any(
target_arch = "x86",
target_arch = "x86_64",
target_arch = "arm",
target_arch = "aarch64",
),
feature = "std"
))]
use crate::octet::OCTET_MUL_LOW_BITS;
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", feature = "std"))]
use std::arch::is_aarch64_feature_detected;
// An octet vec containing only binary values, which are bit-packed for efficiency
@ -89,7 +101,7 @@ pub fn fused_addassign_mul_scalar_binary(
);
assert_eq!(octets.len(), other.len());
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
{
if is_x86_feature_detected!("avx2") && is_x86_feature_detected!("bmi1") {
unsafe {
@ -97,7 +109,7 @@ pub fn fused_addassign_mul_scalar_binary(
}
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", feature = "std"))]
{
if is_aarch64_feature_detected!("neon") {
unsafe {
@ -105,7 +117,7 @@ pub fn fused_addassign_mul_scalar_binary(
}
}
}
#[cfg(target_arch = "arm")]
#[cfg(all(target_arch = "arm", feature = "std"))]
{
// TODO: enable when stable
// if is_arm_feature_detected!("neon") {
@ -195,7 +207,7 @@ unsafe fn fused_addassign_mul_scalar_binary_neon(
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "avx2")]
#[target_feature(enable = "bmi1")]
unsafe fn fused_addassign_mul_scalar_binary_avx2(
@ -318,7 +330,7 @@ unsafe fn mulassign_scalar_neon(octets: &mut [u8], scalar: &Octet) {
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "avx2")]
unsafe fn mulassign_scalar_avx2(octets: &mut [u8], scalar: &Octet) {
#[cfg(target_arch = "x86")]
@ -360,7 +372,7 @@ unsafe fn mulassign_scalar_avx2(octets: &mut [u8], scalar: &Octet) {
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "ssse3")]
unsafe fn mulassign_scalar_ssse3(octets: &mut [u8], scalar: &Octet) {
#[cfg(target_arch = "x86")]
@ -401,7 +413,7 @@ unsafe fn mulassign_scalar_ssse3(octets: &mut [u8], scalar: &Octet) {
}
pub fn mulassign_scalar(octets: &mut [u8], scalar: &Octet) {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
{
if is_x86_feature_detected!("avx2") {
unsafe {
@ -414,7 +426,7 @@ pub fn mulassign_scalar(octets: &mut [u8], scalar: &Octet) {
}
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", feature = "std"))]
{
if is_aarch64_feature_detected!("neon") {
unsafe {
@ -422,7 +434,7 @@ pub fn mulassign_scalar(octets: &mut [u8], scalar: &Octet) {
}
}
}
#[cfg(target_arch = "arm")]
#[cfg(all(target_arch = "arm", feature = "std"))]
{
// TODO: enable when stable
// if is_arm_feature_detected!("neon") {
@ -493,7 +505,7 @@ unsafe fn fused_addassign_mul_scalar_neon(octets: &mut [u8], other: &[u8], scala
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "avx2")]
unsafe fn fused_addassign_mul_scalar_avx2(octets: &mut [u8], other: &[u8], scalar: &Octet) {
#[cfg(target_arch = "x86")]
@ -542,7 +554,7 @@ unsafe fn fused_addassign_mul_scalar_avx2(octets: &mut [u8], other: &[u8], scala
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "ssse3")]
unsafe fn fused_addassign_mul_scalar_ssse3(octets: &mut [u8], other: &[u8], scalar: &Octet) {
#[cfg(target_arch = "x86")]
@ -602,7 +614,7 @@ pub fn fused_addassign_mul_scalar(octets: &mut [u8], other: &[u8], scalar: &Octe
);
assert_eq!(octets.len(), other.len());
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
{
if is_x86_feature_detected!("avx2") {
unsafe {
@ -615,7 +627,7 @@ pub fn fused_addassign_mul_scalar(octets: &mut [u8], other: &[u8], scalar: &Octe
}
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", feature = "std"))]
{
if is_aarch64_feature_detected!("neon") {
unsafe {
@ -623,7 +635,7 @@ pub fn fused_addassign_mul_scalar(octets: &mut [u8], other: &[u8], scalar: &Octe
}
}
}
#[cfg(target_arch = "arm")]
#[cfg(all(target_arch = "arm", feature = "std"))]
{
// TODO: enable when stable
// if is_arm_feature_detected!("neon") {
@ -681,7 +693,7 @@ unsafe fn store_neon(ptr: *mut uint8x16_t, value: uint8x16_t) {
*(ptr as *mut u64).add(1) = vgetq_lane_u64(reinterp, 1);
}
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", feature = "std"))]
// TODO: enable when stable
// #[cfg(any(target_arch = "arm", target_arch = "aarch64"))]
// #[target_feature(enable = "neon")]
@ -724,7 +736,7 @@ unsafe fn add_assign_neon(octets: &mut [u8], other: &[u8]) {
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "avx2")]
unsafe fn add_assign_avx2(octets: &mut [u8], other: &[u8]) {
#[cfg(target_arch = "x86")]
@ -764,7 +776,7 @@ unsafe fn add_assign_avx2(octets: &mut [u8], other: &[u8]) {
}
}
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
#[target_feature(enable = "ssse3")]
unsafe fn add_assign_ssse3(octets: &mut [u8], other: &[u8]) {
#[cfg(target_arch = "x86")]
@ -805,7 +817,7 @@ unsafe fn add_assign_ssse3(octets: &mut [u8], other: &[u8]) {
}
pub fn add_assign(octets: &mut [u8], other: &[u8]) {
#[cfg(any(target_arch = "x86", target_arch = "x86_64"))]
#[cfg(all(any(target_arch = "x86", target_arch = "x86_64"), feature = "std"))]
{
if is_x86_feature_detected!("avx2") {
unsafe {
@ -818,7 +830,7 @@ pub fn add_assign(octets: &mut [u8], other: &[u8]) {
}
}
}
#[cfg(target_arch = "aarch64")]
#[cfg(all(target_arch = "aarch64", feature = "std"))]
{
if is_aarch64_feature_detected!("neon") {
unsafe {
@ -826,7 +838,7 @@ pub fn add_assign(octets: &mut [u8], other: &[u8]) {
}
}
}
#[cfg(target_arch = "arm")]
#[cfg(all(target_arch = "arm", feature = "std"))]
{
// TODO: enable when stable
// if is_arm_feature_detected!("neon") {
@ -838,9 +850,11 @@ pub fn add_assign(octets: &mut [u8], other: &[u8]) {
return add_assign_fallback(octets, other);
}
#[cfg(feature = "std")]
#[cfg(test)]
mod tests {
use rand::Rng;
use std::vec::Vec;
use crate::octet::Octet;
use crate::octets::mulassign_scalar;

@ -1,3 +1,9 @@
#[cfg(feature = "std")]
use std::vec::Vec;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::octet::Octet;
use crate::symbol::Symbol;
use crate::util::get_both_indices;
@ -53,9 +59,11 @@ pub fn perform_op(op: &SymbolOps, symbols: &mut Vec<Symbol>) {
}
}
#[cfg(feature = "std")]
#[cfg(test)]
mod tests {
use rand::Rng;
use std::vec::Vec;
use crate::octet::Octet;
use crate::operation_vector::{perform_op, SymbolOps};

@ -1,3 +1,12 @@
#[cfg(feature = "std")]
use std::{mem, mem::size_of, u16, vec::Vec};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
#[cfg(not(feature = "std"))]
use core::{mem, mem::size_of, u16};
use crate::arraymap::UndirectedGraph;
use crate::arraymap::{U16ArrayMap, U32VecMap};
use crate::graph::ConnectedComponentGraph;
@ -12,7 +21,6 @@ use crate::systematic_constants::num_intermediate_symbols;
use crate::systematic_constants::num_ldpc_symbols;
use crate::systematic_constants::num_pi_symbols;
use crate::util::get_both_indices;
use std::mem::size_of;
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
enum RowOp {
@ -339,7 +347,7 @@ impl FirstPhaseRowSelectionStats {
// There's no need for special handling of HDPC rows, since Errata 2 guarantees we won't
// select any, and they're excluded in the first_phase solver
let mut chosen = None;
let mut chosen_original_degree = std::u16::MAX;
let mut chosen_original_degree = u16::MAX;
// Fast path for r=1, since this is super common
if r == 1 {
assert_ne!(0, self.rows_with_single_one.len());
@ -1311,7 +1319,7 @@ impl<T: BinaryMatrix> IntermediateSymbolDecoder<T> {
reorder.push(*i);
}
let mut operation_vector = std::mem::take(&mut self.deferred_D_ops);
let mut operation_vector = mem::take(&mut self.deferred_D_ops);
operation_vector.push(SymbolOps::Reorder { order: reorder });
return (Some(result), Some(operation_vector));
}
@ -1328,6 +1336,7 @@ pub fn fused_inverse_mul_symbols<T: BinaryMatrix>(
IntermediateSymbolDecoder::new(matrix, hdpc_rows, symbols, num_source_symbols).execute()
}
#[cfg(feature = "std")]
#[cfg(test)]
mod tests {
use super::IntermediateSymbolDecoder;
@ -1339,6 +1348,7 @@ mod tests {
extended_source_block_symbols, num_ldpc_symbols, num_lt_symbols,
MAX_SOURCE_SYMBOLS_PER_BLOCK,
};
use std::vec::Vec;
#[test]
fn operations_per_symbol() {

@ -1,3 +1,5 @@
use std::vec::Vec;
use crate::base::{EncodingPacket, ObjectTransmissionInformation};
use crate::decoder::Decoder as DecoderNative;
use crate::encoder::Encoder as EncoderNative;

@ -1,3 +1,12 @@
#[cfg(feature = "std")]
use std::{mem::size_of, vec::Vec};
#[cfg(not(feature = "std"))]
use core::mem::size_of;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::arraymap::{ImmutableListMap, ImmutableListMapBuilder};
use crate::iterators::OctetIter;
use crate::matrix::BinaryMatrix;
@ -5,7 +14,6 @@ use crate::octet::Octet;
use crate::octets::BinaryOctetVec;
use crate::sparse_vec::SparseBinaryVec;
use crate::util::get_both_indices;
use std::mem::size_of;
// Stores a matrix in sparse representation, with an optional dense block for the right most columns
// The logical storage is as follows:

@ -1,6 +1,13 @@
#[cfg(feature = "std")]
use std::{cmp::Ordering, mem::size_of, vec::Vec};
#[cfg(not(feature = "std"))]
use core::{cmp::Ordering, mem::size_of};
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::octet::Octet;
use std::cmp::Ordering;
use std::mem::size_of;
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
pub struct SparseBinaryVec {

@ -1,10 +1,18 @@
#[cfg(feature = "std")]
use std::{ops::AddAssign, vec::Vec};
#[cfg(not(feature = "std"))]
use core::ops::AddAssign;
#[cfg(not(feature = "std"))]
use alloc::vec::Vec;
use crate::octet::Octet;
use crate::octets::add_assign;
use crate::octets::fused_addassign_mul_scalar;
use crate::octets::mulassign_scalar;
#[cfg(feature = "serde_support")]
use serde::{Deserialize, Serialize};
use std::ops::AddAssign;
/// Elementary unit of data, for encoding/decoding purposes.
#[derive(Clone, Debug, PartialEq, PartialOrd, Eq, Ord, Hash)]
@ -58,9 +66,11 @@ impl<'a> AddAssign<&'a Symbol> for Symbol {
}
}
#[cfg(feature = "std")]
#[cfg(test)]
mod tests {
use rand::Rng;
use std::vec::Vec;
use crate::symbol::Symbol;

@ -31,3 +31,16 @@ pub fn get_both_indices<T>(vector: &mut [T], i: usize, j: usize) -> (&mut T, &mu
return (&mut last[0], &mut first[j]);
}
}
// This should eventually be replaced by <https://doc.rust-lang.org/std/primitive.u64.html#method.div_ceil>
// once that method is stabilized, and this function removed.
// Callers are assumed to guarantee that:
// (1) the result does not overflow u32;
// (2) `denom` is not `0`.
// TODO: this is definitely not always the case! Let's do something about it.
pub fn int_div_ceil(num: u64, denom: u64) -> u32 {
if num % denom == 0 {
(num / denom) as u32
} else {
(num / denom + 1) as u32
}
}
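A quick sanity sketch of the helper above (not part of the diff; the function is copied locally so the snippet stands alone), showing it agrees with the float-based ceiling division it replaces:

```rust
pub fn int_div_ceil(num: u64, denom: u64) -> u32 {
    if num % denom == 0 {
        (num / denom) as u32
    } else {
        (num / denom + 1) as u32
    }
}

fn main() {
    // A 10_000-byte transfer with 1_400-byte symbols needs ceil(10_000 / 1_400) = 8 symbols.
    assert_eq!(int_div_ceil(10_000, 1_400), 8);
    // Exact division takes the first branch.
    assert_eq!(int_div_ceil(9_800, 1_400), 7);
    // Same result as the former float expression.
    assert_eq!((10_000_f64 / 1_400_f64).ceil() as u32, 8);
}
```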

@ -1,3 +1,5 @@
use std::vec::Vec;
use crate::base::{EncodingPacket, ObjectTransmissionInformation};
use crate::decoder::Decoder as DecoderNative;
use crate::encoder::Encoder as EncoderNative;