From 0fe55b4a7e20db9234cf19d6e23c5b83e6a27a79 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:08:50 +0200 Subject: [PATCH 01/42] Add old PineAPPL v0 code --- pineappl_v0/Cargo.toml | 43 + pineappl_v0/README.md | 9 + pineappl_v0/src/bin.rs | 1237 ++++++++++++++ pineappl_v0/src/boc.rs | 769 +++++++++ pineappl_v0/src/convert.rs | 9 + pineappl_v0/src/convolutions.rs | 422 +++++ pineappl_v0/src/empty_subgrid.rs | 130 ++ pineappl_v0/src/evolution.rs | 801 +++++++++ pineappl_v0/src/fk_table.rs | 441 +++++ pineappl_v0/src/grid.rs | 2180 ++++++++++++++++++++++++ pineappl_v0/src/import_only_subgrid.rs | 785 +++++++++ pineappl_v0/src/lagrange_subgrid.rs | 1490 ++++++++++++++++ pineappl_v0/src/lib.rs | 51 + pineappl_v0/src/ntuple_subgrid.rs | 198 +++ pineappl_v0/src/packed_array.rs | 708 ++++++++ pineappl_v0/src/pids.rs | 902 ++++++++++ pineappl_v0/src/sparse_array3.rs | 1135 ++++++++++++ pineappl_v0/src/subgrid.rs | 362 ++++ pineappl_v0/tests/drell_yan_lo.rs | 822 +++++++++ 19 files changed, 12494 insertions(+) create mode 100644 pineappl_v0/Cargo.toml create mode 100644 pineappl_v0/README.md create mode 100644 pineappl_v0/src/bin.rs create mode 100644 pineappl_v0/src/boc.rs create mode 100644 pineappl_v0/src/convert.rs create mode 100644 pineappl_v0/src/convolutions.rs create mode 100644 pineappl_v0/src/empty_subgrid.rs create mode 100644 pineappl_v0/src/evolution.rs create mode 100644 pineappl_v0/src/fk_table.rs create mode 100644 pineappl_v0/src/grid.rs create mode 100644 pineappl_v0/src/import_only_subgrid.rs create mode 100644 pineappl_v0/src/lagrange_subgrid.rs create mode 100644 pineappl_v0/src/lib.rs create mode 100644 pineappl_v0/src/ntuple_subgrid.rs create mode 100644 pineappl_v0/src/packed_array.rs create mode 100644 pineappl_v0/src/pids.rs create mode 100644 pineappl_v0/src/sparse_array3.rs create mode 100644 pineappl_v0/src/subgrid.rs create mode 100644 pineappl_v0/tests/drell_yan_lo.rs diff --git a/pineappl_v0/Cargo.toml 
b/pineappl_v0/Cargo.toml new file mode 100644 index 000000000..c279937e9 --- /dev/null +++ b/pineappl_v0/Cargo.toml @@ -0,0 +1,43 @@ +[package] +authors = ["Christopher Schwan "] +description = "PineAPPL is not an extension of APPLgrid" +name = "pineappl" +readme = "README.md" + +categories.workspace = true +edition.workspace = true +keywords.workspace = true +license.workspace = true +repository.workspace = true +rust-version.workspace = true +version.workspace = true + +[lints] +workspace = true + +[dependencies] +anyhow = "1.0.48" +arrayvec = "0.7.2" +bincode = "1.3.3" +bitflags = "2.4.2" +enum_dispatch = "0.3.7" +float-cmp = "0.9.0" +git-version = "0.3.5" +itertools = "0.10.1" +lz4_flex = "0.9.2" +ndarray = { features = ["serde"], version = "0.15.4" } +rustc-hash = "1.1.0" +serde = { features = ["derive"], version = "1.0.130" } +thiserror = "1.0.30" + +[dev-dependencies] +anyhow = "1.0.48" +lhapdf = { package = "managed-lhapdf", version = "0.3.2" } +num-complex = "0.4.4" +rand = { default-features = false, version = "0.8.4" } +rand_pcg = { default-features = false, version = "0.3.1" } +serde_yaml = "0.9.13" +ndarray-npy = "0.8.1" + +[features] +static = ["lhapdf/static"] diff --git a/pineappl_v0/README.md b/pineappl_v0/README.md new file mode 100644 index 000000000..f925b9c2f --- /dev/null +++ b/pineappl_v0/README.md @@ -0,0 +1,9 @@ +[![Rust](https://github.com/NNPDF/pineappl/workflows/Rust/badge.svg)](https://github.com/NNPDF/pineappl/actions?query=workflow%3ARust) +[![codecov](https://codecov.io/gh/NNPDF/pineappl/branch/master/graph/badge.svg)](https://codecov.io/gh/NNPDF/pineappl) +[![Documentation](https://docs.rs/pineappl/badge.svg)](https://docs.rs/pineappl) +[![crates.io](https://img.shields.io/crates/v/pineappl.svg)](https://crates.io/crates/pineappl) + +# PineAPPL + +PineAPPL is a library for recording and storing predictions for high-energy +physics observables independently of their parton distribution functions. 
diff --git a/pineappl_v0/src/bin.rs b/pineappl_v0/src/bin.rs new file mode 100644 index 000000000..3d45a597e --- /dev/null +++ b/pineappl_v0/src/bin.rs @@ -0,0 +1,1237 @@ +//! Module that contains helpers for binning observables + +use super::convert::{f64_from_usize, usize_from_f64}; +use float_cmp::approx_eq; +use itertools::izip; +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use std::f64; +use std::ops::Range; +use std::str::FromStr; +use thiserror::Error; + +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +enum Limits { + Equal { left: f64, right: f64, bins: usize }, + Unequal { limits: Vec }, +} + +/// Error type which is returned when two `BinLimits` objects are merged which are not +/// connected/non-consecutive. +#[derive(Debug, Error)] +pub enum MergeBinError { + /// Returned when two `BinLimits` objects `a` and `b` were tried to be merged using + /// `a.merge(b)`, but when the right-most limit of `a` does not match the left-most limit of + /// `b`. + #[error("can not merge bins which end at {lhs} with bins that start at {rhs}")] + NonConsecutiveBins { + /// right-most limit of the `BinLimits` object that is being merged into. + lhs: f64, + /// left-most limit of the `BinLimits` object that is being merged. + rhs: f64, + }, + + /// Returned by [`BinRemapper::merge_bins`] whenever it can not merge bins. + #[error("can not merge bins with indices {0:?}")] + NonConsecutiveRange(Range), + + /// Returned by [`BinLimits::merge_bins`] whenever the range is outside the available bins. + #[error("tried to merge bins with indices {range:?}, but there are only {bins} bins")] + InvalidRange { + /// Range given to [`BinLimits::merge_bins`]. + range: Range, + /// Number of bins. + bins: usize, + }, + + /// Returned by [`BinRemapper::merge`] whenever the dimensions of two `BinRemapper` are not the + /// same. 
+ #[error("tried to merge bins with different dimensions {lhs} and {rhs}")] + IncompatibleDimensions { + /// Dimension of the bins of the first `BinRemapper`. + lhs: usize, + /// Dimension of the bins of the second `BinRemapper`. + rhs: usize, + }, +} + +/// Structure representing bin limits. +#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +pub struct BinLimits(Limits); + +/// Error type that is returned by the constructor of `BinRemapper`. +#[derive(Debug, Error)] +pub enum BinRemapperNewError { + /// Returned if the lengths of the normalization and limits vectors do not allow to determine a + /// well-defined number of dimensions. + #[error("could not determine the dimensions from a normalization vector with length {normalizations_len} and limits vector with length {limits_len}")] + DimensionUnknown { + /// Length of the normalization vector. + normalizations_len: usize, + /// Length of the limits vector. + limits_len: usize, + }, + /// Returned if bins overlap. + #[error("the bin limits for the bins with indices {} overlap with other bins", overlaps.iter().map(ToString::to_string).join(","))] + OverlappingBins { + /// Indices of the bins that overlap with other bins. + overlaps: Vec, + }, +} + +/// Structure for remapping bin limits. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct BinRemapper { + normalizations: Vec, + limits: Vec<(f64, f64)>, +} + +/// Captures all information about the bins in a grid. +#[derive(Debug)] +pub struct BinInfo<'a> { + limits: &'a BinLimits, + remapper: Option<&'a BinRemapper>, +} + +/// Error type returned by [`BinRemapper::from_str`] +#[derive(Debug, Error)] +pub enum ParseBinRemapperError { + /// An error that occured while parsing the string in [`BinRemapper::from_str`]. + #[error("{0}")] + Error(String), + /// An error that occured while constructing the remapper with [`BinRemapper::new`]. 
+ #[error("{source}")] + BinRemapperNewError { + // TODO: enable #[backtrace] whenever the feature is stable + /// The error returned by [`BinRemapper::new`]. + source: BinRemapperNewError, + }, +} + +impl FromStr for BinRemapper { + type Err = ParseBinRemapperError; + + fn from_str(s: &str) -> Result { + let remaps: Result>>, Self::Err> = s + .split(';') + .map(|string| { + string + .split('|') + .map(|string| { + string + .split_once(':') + .map_or(Ok(string), |(lhs, rhs)| { + match (lhs.trim().parse::(), rhs.trim().parse::()) { + (Err(lhs), Err(rhs)) => Err(ParseBinRemapperError::Error(format!( + "unable to parse 'N:M' syntax from: '{string}' (N: '{lhs}', M: '{rhs}')" + ))), + // skip :N specification + (Err(_), Ok(_)) => Ok(lhs), + // skip N: specification + (Ok(_), Err(_)) => Ok(rhs), + // skip N:M specification + (Ok(_), Ok(_)) => Ok(""), + } + })? + .split(',') + .filter_map(|string| { + let string = string.trim(); + if string.is_empty() { + None + } else { + Some(string.parse::().map_err(|err| { + ParseBinRemapperError::Error(format!( + "unable to parse limit '{string}': '{err}')" + )) + })) + } + }) + .collect() + }) + .collect() + }) + .collect(); + let mut remaps = remaps?; + + if let Some(first) = remaps.first() { + if first.len() != 1 { + return Err(ParseBinRemapperError::Error( + "'|' syntax not meaningful for first dimension".to_owned(), + )); + } + } + + // go over `remaps` again, and repeat previous entries as requested with the `|` syntax + for vec in &mut remaps { + for i in 1..vec.len() { + if vec[i].is_empty() { + if vec[i - 1].is_empty() { + return Err(ParseBinRemapperError::Error( + "empty repetition with '|'".to_owned(), + )); + } + + vec[i] = vec[i - 1].clone(); + } + } + } + + // go over `remaps` again, this time remove bin as requested with the `:N` or `N:` syntax + for (vec, string) in remaps.iter_mut().zip(s.split(';')) { + for (vec, string) in vec.iter_mut().zip(string.split('|')) { + let (lhs, rhs) = { + if let Some((lhs, rhs)) = 
string.split_once(':') { + (lhs.parse::(), rhs.parse::()) + } else { + // there's no colon + continue; + } + }; + + if let Ok(num) = rhs { + vec.truncate(vec.len() - num); + } + + if let Ok(num) = lhs { + vec.drain(0..num); + } + + if vec.len() <= 1 { + return Err(ParseBinRemapperError::Error( + "no limits due to ':' syntax".to_owned(), + )); + } + } + } + + let dimensions = remaps.len(); + let mut normalizations = Vec::new(); + let mut limits = Vec::new(); + let mut buffer = Vec::with_capacity(dimensions); + let mut pipe_indices = vec![0; dimensions]; + let mut last_indices = vec![0; dimensions]; + + 'looop: for indices in remaps + .iter() + .map(|vec| 0..vec.iter().map(|vec| vec.len() - 1).max().unwrap()) + .multi_cartesian_product() + { + // calculate `pipe_indices`, which stores the indices for the second dimension of `remaps` + for d in 0..dimensions - 1 { + if indices[d] > last_indices[d] { + for dp in d + 1..dimensions { + if remaps[dp].len() != 1 { + pipe_indices[dp] += 1; + } + } + } + } + + last_indices.clone_from(&indices); + + let mut normalization = 1.0; + + for (remap, &pipe_index, &i) in izip!(&remaps, &pipe_indices, &indices) { + if let Some(r) = remap.get(pipe_index) { + if r.len() <= (i + 1) { + buffer.clear(); + + // this index doesn't exist + continue 'looop; + } + + let left = r[i]; + let right = r[i + 1]; + + buffer.push((left, right)); + normalization *= right - left; + } else { + return Err(ParseBinRemapperError::Error( + "missing '|' specification: number of variants too small".to_owned(), + )); + } + } + + limits.append(&mut buffer); + normalizations.push(normalization); + } + + Self::new(normalizations, limits) + .map_err(|err| ParseBinRemapperError::BinRemapperNewError { source: err }) + } +} + +impl<'a> BinInfo<'a> { + /// Constructor. + #[must_use] + pub const fn new(limits: &'a BinLimits, remapper: Option<&'a BinRemapper>) -> Self { + Self { limits, remapper } + } + + /// Return the bin limits for the bin with index `bin`. 
+ #[must_use] + pub fn bin_limits(&self, bin: usize) -> Vec<(f64, f64)> { + // TODO: make return type a Cow + self.remapper.map_or_else( + || { + let limits = &self.limits.limits()[bin..=bin + 1]; + vec![(limits[0], limits[1])] + }, + |remapper| { + let dim = remapper.dimensions(); + remapper.limits()[bin * dim..(bin + 1) * dim].to_vec() + }, + ) + } + + /// Returns the number of bins. + #[must_use] + pub fn bins(&self) -> usize { + self.limits.bins() + } + + /// Returns the number of dimensions. + #[must_use] + pub fn dimensions(&self) -> usize { + self.remapper.map_or(1, BinRemapper::dimensions) + } + + /// Return the index of the bin corresponding to `limits`. If no bin is found `None` is + /// returned. + #[must_use] + pub fn find_bin(&self, limits: &[(f64, f64)]) -> Option { + (0..self.bins()) + .map(|bin| self.bin_limits(bin)) + .position(|lim| lim == limits) + } + + /// Returns all left-limits for the specified dimension. If the dimension does not exist, an + /// empty vector is returned. + #[must_use] + pub fn left(&self, dimension: usize) -> Vec { + if dimension >= self.dimensions() { + vec![] + } else { + self.remapper.map_or_else( + || { + self.limits + .limits() + .iter() + .take(self.bins()) + .copied() + .collect() + }, + |remapper| { + remapper + .limits() + .iter() + .skip(dimension) + .step_by(self.dimensions()) + .take(self.bins()) + .map(|tuple| tuple.0) + .collect() + }, + ) + } + } + + /// Returns all right-limits for the specified dimension. If the dimension does not exist, an + /// empty vector is returned. 
+ #[must_use] + pub fn right(&self, dimension: usize) -> Vec { + if dimension >= self.dimensions() { + vec![] + } else { + self.remapper.map_or_else( + || { + self.limits + .limits() + .iter() + .skip(1) + .take(self.bins()) + .copied() + .collect() + }, + |remapper| { + remapper + .limits() + .iter() + .skip(dimension) + .step_by(self.dimensions()) + .take(self.bins()) + .map(|tuple| tuple.1) + .collect() + }, + ) + } + } + + /// For each bin return a vector of `(left, right)` limits for each dimension. + #[must_use] + pub fn limits(&self) -> Vec> { + self.remapper.map_or_else( + || { + self.limits + .limits() + .windows(2) + .map(|window| vec![(window[0], window[1])]) + .collect() + }, + |remapper| { + remapper + .limits() + .to_vec() + .chunks_exact(self.dimensions()) + .map(<[(f64, f64)]>::to_vec) + .collect() + }, + ) + } + + /// Returns all normalization factors. + #[must_use] + pub fn normalizations(&self) -> Vec { + self.remapper.map_or_else( + || self.limits.bin_sizes(), + |remapper| remapper.normalizations().to_vec(), + ) + } + + /// Returns a vector of half-open intervals that show how multi-dimensional bins can be + /// efficiently sliced into one-dimensional histograms. + #[must_use] + pub fn slices(&self) -> Vec<(usize, usize)> { + // TODO: convert this to Vec> + self.remapper + .map_or_else(|| vec![(0, self.limits.bins())], BinRemapper::slices) + } +} + +impl PartialEq> for BinInfo<'_> { + fn eq(&self, other: &BinInfo) -> bool { + (self.limits() == other.limits()) && (self.normalizations() == other.normalizations()) + } +} + +impl BinRemapper { + /// Create a new `BinRemapper` object with the specified number of bins and dimensions and + /// limits. + /// + /// # Errors + /// + /// Returns an error if the length of `limits` is not a multiple of the length of + /// `normalizations`, or if the limits of at least two bins overlap. 
+ pub fn new( + normalizations: Vec, + limits: Vec<(f64, f64)>, + ) -> Result { + if limits.len() % normalizations.len() == 0 { + let dimensions = limits.len() / normalizations.len(); + let mut overlaps = Vec::new(); + + for (i, bin_i) in limits.chunks_exact(dimensions).enumerate() { + for (j, bin_j) in limits.chunks_exact(dimensions).enumerate().skip(i + 1) { + if bin_i.iter().zip(bin_j).all(|((l1, r1), (l2, r2))| { + ((l2 >= l1) && (l2 < r1)) || ((l1 >= l2) && (l1 < r2)) + }) { + overlaps.push(j); + } + } + } + + overlaps.sort_unstable(); + overlaps.dedup(); + + if overlaps.is_empty() { + Ok(Self { + normalizations, + limits, + }) + } else { + Err(BinRemapperNewError::OverlappingBins { overlaps }) + } + } else { + Err(BinRemapperNewError::DimensionUnknown { + normalizations_len: normalizations.len(), + limits_len: limits.len(), + }) + } + } + + /// Return the number of bins. + #[must_use] + pub fn bins(&self) -> usize { + self.normalizations.len() + } + + /// Return the number of dimensions. + #[must_use] + pub fn dimensions(&self) -> usize { + self.limits.len() / self.normalizations.len() + } + + /// Return tuples of left and right bin limits for all dimensions and all bins. + #[must_use] + pub fn limits(&self) -> &[(f64, f64)] { + &self.limits + } + + /// Merges the bins for the corresponding range together in a single one. + /// + /// # Errors + /// + /// When `range` refers to non-consecutive bins, an error is returned. 
+ pub fn merge_bins(&mut self, range: Range) -> Result<(), MergeBinError> { + if self + .slices() + .iter() + .any(|&(start, end)| (start <= range.start) && (range.end <= end)) + { + for bin in range.start + 1..range.end { + self.normalizations[range.start] += self.normalizations[bin]; + } + + let dim = self.dimensions(); + + self.normalizations.drain(range.start + 1..range.end); + self.limits[dim * (range.start + 1) - 1].1 = self.limits[dim * range.end - 1].1; + self.limits.drain(dim * (range.start + 1)..dim * range.end); + + Ok(()) + } else { + Err(MergeBinError::NonConsecutiveRange(range)) + } + } + + /// Merge the `BinRemapper` of `other` into `self` on the right-hand-side. + /// + /// # Errors + /// + /// If the dimensions of both remappers are not the same an error is returned. + pub fn merge(&mut self, other: &Self) -> Result<(), MergeBinError> { + let lhs_dim = self.dimensions(); + let rhs_dim = other.dimensions(); + + if lhs_dim != rhs_dim { + return Err(MergeBinError::IncompatibleDimensions { + lhs: lhs_dim, + rhs: rhs_dim, + }); + } + + // TODO: we shouldn't allow overlapping bins + self.normalizations.extend_from_slice(&other.normalizations); + self.limits.extend_from_slice(&other.limits); + + Ok(()) + } + + /// Return the normalization factors for all bins. + #[must_use] + pub fn normalizations(&self) -> &[f64] { + &self.normalizations + } + + /// Returns a vector of half-open intervals that show how multi-dimensional bins can be + /// efficiently sliced into one-dimensional histograms. 
+ #[must_use] + pub fn slices(&self) -> Vec<(usize, usize)> { + if self.dimensions() == 1 { + vec![(0, self.bins())] + } else { + self.limits() + .iter() + .enumerate() + .filter_map(|(index, x)| { + ((index % self.dimensions()) != (self.dimensions() - 1)).then_some(x) + }) + .collect::>() + .chunks_exact(self.dimensions() - 1) + .enumerate() + .dedup_by_with_count(|(_, x), (_, y)| x == y) + .map(|(count, (index, _))| (index, index + count)) + .collect() + } + } + + /// Deletes all bins whose corresponding indices are in one of the ranges of `bins`. + pub fn delete_bins(&mut self, bins: &[Range]) { + let dim = self.dimensions(); + + for range in bins.iter().cloned().rev() { + self.normalizations.drain(range); + } + + for range in bins.iter().rev() { + self.limits.drain((range.start * dim)..(range.end * dim)); + } + } +} + +impl PartialEq for BinRemapper { + fn eq(&self, other: &Self) -> bool { + (self.limits == other.limits) && (self.normalizations == other.normalizations) + } +} + +impl BinLimits { + /// Constructor for `BinLimits`. + /// + /// # Panics + /// + /// TODO + #[must_use] + pub fn new(mut limits: Vec) -> Self { + limits.sort_by(|left, right| left.partial_cmp(right).unwrap()); + + if limits + .iter() + .zip(limits.iter().skip(1)) + .map(|(current, next)| next - current) + .collect::>() + .windows(2) + .all(|val| approx_eq!(f64, val[0], val[1], ulps = 8)) + { + Self(Limits::Equal { + left: *limits.first().unwrap(), + right: *limits.last().unwrap(), + bins: limits.len() - 1, + }) + } else { + Self(Limits::Unequal { limits }) + } + } + + /// Returns the number of bins. + #[must_use] + pub fn bins(&self) -> usize { + match &self.0 { + Limits::Equal { bins, .. } => *bins, + Limits::Unequal { limits } => limits.len() - 1, + } + } + + /// Returns the bin index for observable `value`. If the value over- or underflows, the return + /// value is `None`. 
+ /// + /// # Panics + /// + /// TODO + #[must_use] + pub fn index(&self, value: f64) -> Option { + match &self.0 { + Limits::Equal { left, right, bins } => { + if value < *left || value >= *right { + None + } else { + Some(usize_from_f64( + (value - left) / (right - left) * f64_from_usize(*bins), + )) + } + } + Limits::Unequal { limits } => { + match limits.binary_search_by(|left| left.partial_cmp(&value).unwrap()) { + Err(0) => None, + Err(index) if index == limits.len() => None, + Ok(index) if index == (limits.len() - 1) => None, + Ok(index) => Some(index), + Err(index) => Some(index - 1), + } + } + } + } + + /// Returns the left-most bin limit + /// + /// # Panics + /// + /// TODO + #[must_use] + pub fn left(&self) -> f64 { + match &self.0 { + Limits::Unequal { limits } => *limits.first().unwrap(), + Limits::Equal { left, .. } => *left, + } + } + + /// Returns the limits in a `Vec`. + /// + /// # Examples + /// + /// ```rust + /// use pineappl::bin::BinLimits; + /// + /// // example with equally sized bins + /// let equal_bins = BinLimits::new(vec![0.25, 0.5, 0.75, 1.0]); + /// assert_eq!(equal_bins.limits(), vec![0.25, 0.5, 0.75, 1.0]); + /// + /// // example with unequally sized bins + /// let unequal_bins = BinLimits::new(vec![0.125, 0.25, 1.0, 1.5]); + /// assert_eq!(unequal_bins.limits(), vec![0.125, 0.25, 1.0, 1.5]); + /// ``` + #[must_use] + pub fn limits(&self) -> Vec { + match &self.0 { + Limits::Equal { left, right, bins } => (0..=*bins) + .map(|b| (*right - *left).mul_add(f64_from_usize(b) / f64_from_usize(*bins), *left)) + .collect(), + Limits::Unequal { limits } => limits.clone(), + } + } + + /// Merges the bins for the corresponding range together in a single one. + /// + /// # Errors + /// + /// When `bins` contains any indices that do not correspond to bins this method returns an + /// error. 
+ pub fn merge_bins(&mut self, range: Range) -> Result<(), MergeBinError> { + if range.end > self.bins() { + return Err(MergeBinError::InvalidRange { + range, + bins: self.bins(), + }); + } + + let mut new_limits = self.limits(); + new_limits.drain(range.start + 1..range.end); + *self = Self::new(new_limits); + + Ok(()) + } + + /// Returns the size for each bin. + /// + /// # Examples + /// + /// ```rust + /// use pineappl::bin::BinLimits; + /// + /// // example with equally sized bins + /// let equal_bins = BinLimits::new(vec![0.25, 0.5, 0.75, 1.0]); + /// assert_eq!(equal_bins.bin_sizes(), vec![0.25, 0.25, 0.25]); + /// + /// // example with unequally sized bins + /// let unequal_bins = BinLimits::new(vec![0.125, 0.25, 1.0, 1.5]); + /// assert_eq!(unequal_bins.bin_sizes(), vec![0.125, 0.75, 0.5]); + /// ``` + #[must_use] + pub fn bin_sizes(&self) -> Vec { + match &self.0 { + Limits::Equal { left, right, bins } => { + vec![(*right - *left) / f64_from_usize(*bins); *bins] + } + Limits::Unequal { limits } => limits.windows(2).map(|x| x[1] - x[0]).collect(), + } + } + + /// Merge the limits of `other` into `self` on the right-hand-side. If both limits are + /// non-consecutive, an error is returned. + /// + /// # Errors + /// + /// If the right-most limit of `self` is different from the left-most limit of `other`, the + /// bins are non-consecutive and an error is returned. 
+ /// + /// # Panics + /// + /// TODO + pub fn merge(&mut self, other: &Self) -> Result<(), MergeBinError> { + if !approx_eq!(f64, self.right(), other.left(), ulps = 8) { + return Err(MergeBinError::NonConsecutiveBins { + lhs: self.right(), + rhs: other.left(), + }); + } + + let mut limits = self.limits(); + let add_limits = other.limits(); + + // average over the shared limit + *limits.last_mut().unwrap() = + 0.5 * (*limits.last().unwrap() + *add_limits.first().unwrap()); + // add the new limits + limits.extend_from_slice(&add_limits[1..]); + + // use the constructor to get a valid state + *self = Self::new(limits); + + Ok(()) + } + + /// Returns the right-most bin limit + /// + /// # Panics + /// + /// TODO + #[must_use] + pub fn right(&self) -> f64 { + match &self.0 { + Limits::Unequal { limits } => *limits.last().unwrap(), + Limits::Equal { right, .. } => *right, + } + } + + /// Delete `bins` number of bins from the start. + pub fn delete_bins_left(&mut self, bins: usize) { + let mut limits = self.limits(); + limits.drain(..bins); + *self = Self::new(limits); + } + + /// Delete `bins` number of bins from the end. 
+ pub fn delete_bins_right(&mut self, bins: usize) { + let mut limits = self.limits(); + limits.drain((limits.len() - bins)..); + *self = Self::new(limits); + } +} + +#[cfg(test)] +mod test { + use super::*; + use std::iter; + + #[test] + fn bin_limits_merge() { + let mut limits = BinLimits::new(vec![0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0]); + + // right merge + limits + .merge(&BinLimits::new(vec![ + 1.0, + 1.0 + 1.0 / 3.0, + 1.0 + 2.0 / 3.0, + 2.0, + ])) + .unwrap(); + + assert_eq!(limits.left(), 0.0); + assert_eq!(limits.right(), 2.0); + assert_eq!(limits.bins(), 6); + + let non_consecutive_bins = BinLimits::new(vec![3.0, 4.0]); + + assert!(limits.merge(&non_consecutive_bins).is_err()); + + assert_eq!(limits.left(), 0.0); + assert_eq!(limits.right(), 2.0); + assert_eq!(limits.bins(), 6); + + // left merge + assert!(limits + .merge(&BinLimits::new(vec![ + -1.0, + -1.0 + 1.0 / 3.0, + -1.0 + 2.0 / 3.0, + 0.0 + ])) + .is_err()); + + assert_eq!(limits.left(), 0.0); + assert_eq!(limits.right(), 2.0); + assert_eq!(limits.bins(), 6); + } + + #[test] + fn bin_info_without_remapper() { + let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5]); + let info = BinInfo::new(&limits, None); + + assert_eq!(info.bins(), 4); + assert_eq!(info.dimensions(), 1); + assert_eq!(info.left(0), vec![0.0, 0.125, 0.25, 0.375]); + assert_eq!(info.right(0), vec![0.125, 0.25, 0.375, 0.5]); + assert_eq!(info.normalizations(), vec![0.125; 4]); + + assert_eq!(info.left(1), vec![]); + assert_eq!(info.right(1), vec![]); + + assert_eq!(info.slices(), [(0, 4)]); + } + + #[test] + fn bin_info_with_remapper() { + let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5]); + let remapper = BinRemapper::new( + vec![1.0; 4], + vec![ + (0.0, 0.5), + (0.25, 0.75), + (1.0, 2.0), + (0.5, 1.0), + (0.75, 1.0), + (2.0, 5.0), + (1.0, 2.0), + (1.75, 2.0), + (5.0, 5.5), + (2.5, 3.0), + (2.0, 2.5), + (6.0, 8.0), + ], + ) + .unwrap(); + let info = BinInfo::new(&limits, Some(&remapper)); + + assert_ne!(info, 
BinInfo::new(&limits, None)); + assert_eq!(info, BinInfo::new(&limits, Some(&remapper))); + + assert_eq!(info.bins(), 4); + assert_eq!(info.dimensions(), 3); + assert_eq!(info.left(0), vec![0.0, 0.5, 1.0, 2.5]); + assert_eq!(info.left(1), vec![0.25, 0.75, 1.75, 2.0]); + assert_eq!(info.left(2), vec![1.0, 2.0, 5.0, 6.0]); + assert_eq!(info.right(0), vec![0.5, 1.0, 2.0, 3.0]); + assert_eq!(info.right(1), vec![0.75, 1.0, 2.0, 2.5]); + assert_eq!(info.right(2), vec![2.0, 5.0, 5.5, 8.0]); + assert_eq!(info.normalizations(), vec![1.0; 4]); + + assert_eq!(info.left(3), vec![]); + assert_eq!(info.right(3), vec![]); + + assert_eq!(info.slices(), [(0, 1), (1, 2), (2, 3), (3, 4)]); + } + + #[test] + fn bin_info_slices() { + let limits = BinLimits::new( + iter::successors(Some(0.0), |n| Some(n + 1.0)) + .take(11) + .collect(), + ); + let remapper = BinRemapper::new( + vec![1.0; 10], + vec![ + (0.0, 1.0), + (0.0, 1.0), + (0.0, 1.0), + (0.0, 1.0), + (0.0, 1.0), + (1.0, 2.0), + (0.0, 1.0), + (0.0, 1.0), + (2.0, 3.0), + (0.0, 1.0), + (1.0, 2.0), + (0.0, 1.0), + (0.0, 1.0), + (1.0, 2.0), + (1.0, 2.0), + (0.0, 1.0), + (1.0, 2.0), + (2.0, 3.0), + (1.0, 2.0), + (1.0, 2.0), + (0.0, 1.0), + (1.0, 2.0), + (1.0, 2.0), + (1.0, 2.0), + (1.0, 2.0), + (1.0, 2.0), + (2.0, 3.0), + (1.0, 2.0), + (1.0, 2.0), + (3.0, 4.0), + ], + ) + .unwrap(); + let info = BinInfo::new(&limits, Some(&remapper)); + + assert_eq!(info.slices(), [(0, 3), (3, 6), (6, 10)]); + } + + #[test] + fn bin_info_trivial_slices() { + let limits = BinLimits::new( + iter::successors(Some(0.0), |x| Some(x + 1.0)) + .take(11) + .collect(), + ); + let remapper = BinRemapper::new( + vec![1.0; 10], + iter::successors(Some((0.0, 1.0)), |x| Some((x.0 + 1.0, x.1 + 1.0))) + .take(10) + .collect(), + ) + .unwrap(); + let info = BinInfo::new(&limits, Some(&remapper)); + + assert_eq!(info.slices(), [(0, 10)]); + } + + #[test] + fn bin_limits() { + // first check BinLimits with exactly representable bin sizes + let limits = 
BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]); + + assert_eq!(limits.bins(), 8); + assert_eq!(limits.index(-0.1), None); + assert_eq!(limits.index(0.1), Some(0)); + assert_eq!(limits.index(0.2), Some(1)); + assert_eq!(limits.index(0.3), Some(2)); + assert_eq!(limits.index(0.4), Some(3)); + assert_eq!(limits.index(0.55), Some(4)); + assert_eq!(limits.index(0.65), Some(5)); + assert_eq!(limits.index(0.8), Some(6)); + assert_eq!(limits.index(0.9), Some(7)); + assert_eq!(limits.index(1.1), None); + + // check bin limits that are equally sized, with values on the limits + let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]); + assert_eq!(limits.index(0.0), Some(0)); + assert_eq!(limits.index(0.125), Some(1)); + assert_eq!(limits.index(0.25), Some(2)); + assert_eq!(limits.index(0.375), Some(3)); + assert_eq!(limits.index(0.5), Some(4)); + assert_eq!(limits.index(0.625), Some(5)); + assert_eq!(limits.index(0.75), Some(6)); + assert_eq!(limits.index(0.875), Some(7)); + assert_eq!(limits.index(1.0), None); + + // now, check with bin sizes that are not exactly representable + let limits = BinLimits::new(vec![0.0, 0.1, 0.2, 0.3, 0.4, 0.5]); + + assert_eq!(limits.bins(), 5); + assert_eq!(limits.index(-1.0), None); + assert_eq!(limits.index(0.05), Some(0)); + assert_eq!(limits.index(0.15), Some(1)); + assert_eq!(limits.index(0.25), Some(2)); + assert_eq!(limits.index(0.35), Some(3)); + assert_eq!(limits.index(0.45), Some(4)); + assert_eq!(limits.index(1.1), None); + + // check the special case of one bin + let limits = BinLimits::new(vec![0.0, 1.0]); + assert_eq!(limits.bins(), 1); + assert_eq!(limits.index(-0.1), None); + assert_eq!(limits.index(0.5), Some(0)); + assert_eq!(limits.index(1.1), None); + + // check bin limits that are unequally sized, with ascending bin sizes + let limits = BinLimits::new(vec![0.0, 0.1, 0.3, 0.6, 1.0]); + assert_eq!(limits.bins(), 4); + assert_eq!(limits.index(-1.0), None); + 
assert_eq!(limits.index(0.05), Some(0)); + assert_eq!(limits.index(0.2), Some(1)); + assert_eq!(limits.index(0.4), Some(2)); + assert_eq!(limits.index(0.9), Some(3)); + assert_eq!(limits.index(1.3), None); + + // check bin limits that are unequally sized, with values on the limits + let limits = BinLimits::new(vec![0.0, 0.25, 0.75, 0.875, 1.0]); + assert_eq!(limits.index(0.0), Some(0)); + assert_eq!(limits.index(0.25), Some(1)); + assert_eq!(limits.index(0.75), Some(2)); + assert_eq!(limits.index(0.875), Some(3)); + assert_eq!(limits.index(1.0), None); + + // check bin limits that are unequally sized, with descending bin sizes + let limits = BinLimits::new(vec![0.0, 0.4, 0.7, 0.9, 1.0]); + assert_eq!(limits.bins(), 4); + assert_eq!(limits.index(-1.0), None); + assert_eq!(limits.index(0.2), Some(0)); + assert_eq!(limits.index(0.5), Some(1)); + assert_eq!(limits.index(0.8), Some(2)); + assert_eq!(limits.index(0.95), Some(3)); + assert_eq!(limits.index(1.3), None); + } + + #[test] + fn merge_bins() { + let mut limits = BinLimits::new(vec![0.0, 0.4, 0.7, 0.9, 1.0]); + limits.merge_bins(0..4).unwrap(); + + assert_eq!(limits.bins(), 1); + assert_eq!(limits.index(-1.0), None); + assert_eq!(limits.index(0.2), Some(0)); + assert_eq!(limits.index(0.5), Some(0)); + assert_eq!(limits.index(0.8), Some(0)); + assert_eq!(limits.index(0.95), Some(0)); + assert_eq!(limits.index(1.3), None); + } + + #[test] + fn merge_bins_error() { + let mut limits = BinLimits::new(vec![0.0, 0.4, 0.7, 0.9, 1.0]); + assert!(limits.merge_bins(0..5).is_err()); + } + + #[test] + fn bin_remapper() { + let remapper = BinRemapper::new( + vec![1.0; 4], + vec![ + (0.0, 0.5), + (0.25, 0.75), + (0.5, 1.0), + (0.75, 1.0), + (1.0, 2.0), + (1.75, 2.0), + (2.5, 3.0), + (2.0, 2.5), + ], + ) + .unwrap(); + + assert_ne!( + remapper, + BinRemapper::new( + vec![1.0; 4], + vec![(0.0, 1.0), (1.0, 2.0), (2.0, 3.0), (4.0, 5.0)] + ) + .unwrap() + ); + + assert!(matches!( + BinRemapper::new(vec![1.0; 8], vec![(0.0, 1.0); 
2]), + Err(BinRemapperNewError::DimensionUnknown{normalizations_len, limits_len}) + if (normalizations_len == 8) && (limits_len == 2) + )); + + assert_eq!(remapper.bins(), 4); + assert_eq!(remapper.dimensions(), 2); + assert_eq!( + remapper.limits(), + &[ + (0.0, 0.5), + (0.25, 0.75), + (0.5, 1.0), + (0.75, 1.0), + (1.0, 2.0), + (1.75, 2.0), + (2.5, 3.0), + (2.0, 2.5) + ] + ); + assert_eq!(remapper.normalizations(), vec![1.0; 4]); + } + + #[test] + fn bin_remapper_merge_bins() { + let mut remapper = BinRemapper::new( + vec![1.0; 4], + vec![(0.0, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)], + ) + .unwrap(); + + remapper.merge_bins(0..4).unwrap(); + assert_eq!(remapper.bins(), 1); + assert_eq!(remapper.dimensions(), 1); + assert_eq!(remapper.limits(), [(0.0, 1.0)]); + assert_eq!(remapper.normalizations(), [4.0]); + assert_eq!(remapper.slices(), [(0, 1)]); + } + + //#[test] + //#[ignore] // FIXME: there's a bug in the `slices` method + //#[should_panic] + //fn bin_remapper_merge_bins_panic() { + // let mut remapper = + // BinRemapper::new(vec![1.0; 3], vec![(0.0, 0.25), (0.5, 0.75), (0.75, 1.0)]).unwrap(); + + // //assert_eq!(remapper.slices(), [(0, 1), (1, 3)]); + // remapper.merge_bins(0..3).unwrap(); + //} + + #[test] + fn limit_parsing_failure() { + assert_eq!( + BinRemapper::from_str("0,1,2,x").unwrap_err().to_string(), + "unable to parse limit 'x': 'invalid float literal')" + ); + } + + #[test] + fn pipe_syntax_first_dimension() { + assert_eq!( + BinRemapper::from_str("|0,1,2").unwrap_err().to_string(), + "'|' syntax not meaningful for first dimension" + ); + } + + #[test] + fn pipe_syntax_first_empty() { + assert_eq!( + BinRemapper::from_str("0,1,2;0,2,4;||") + .unwrap_err() + .to_string(), + "empty repetition with '|'" + ); + } + + #[test] + fn colon_syntax_bad_string() { + assert_eq!( + BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|::") + .unwrap_err() + .to_string(), + "unable to parse 'N:M' syntax from: '::' (N: 'cannot parse integer from empty string', M: 
'invalid digit found in string')" + ); + } + + #[test] + fn colon_syntax_bad_lhs() { + assert_eq!( + BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|2.5:|:3|:3") + .unwrap_err() + .to_string(), + "unable to parse 'N:M' syntax from: '2.5:' (N: 'invalid digit found in string', M: 'cannot parse integer from empty string')" + ); + } + + #[test] + fn colon_syntax_bad_rhs() { + assert_eq!( + BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|:2.5|:3|:3") + .unwrap_err() + .to_string(), + "unable to parse 'N:M' syntax from: ':2.5' (N: 'cannot parse integer from empty string', M: 'invalid digit found in string')" + ); + } + + #[test] + fn colon_syntax_no_limits() { + assert_eq!( + BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|:4|:3|:3") + .unwrap_err() + .to_string(), + "no limits due to ':' syntax" + ); + } + + #[test] + fn pipe_syntax_too_few_pipes() { + assert_eq!( + BinRemapper::from_str("0,1,2;0,2,4;1,2,3|4,5,6|7,8,9") + .unwrap_err() + .to_string(), + "missing '|' specification: number of variants too small" + ); + } + + #[test] + fn bin_remapper_new_dimension_unknown() { + assert_eq!( + BinRemapper::new( + vec![1.0, 1.0, 1.0], + vec![(1.0, 2.0), (2.0, 3.0), (3.0, 4.0), (4.0, 5.0)], + ) + .unwrap_err() + .to_string(), + "could not determine the dimensions from a normalization vector with length 3 and limits vector with length 4" + ); + } + + #[test] + fn bin_remapper_new_overlapping_bins() { + assert_eq!( + BinRemapper::new( + vec![1.0, 1.0, 1.0], + vec![(1.0, 2.0), (2.0, 3.0), (1.0, 2.0)], + ) + .unwrap_err() + .to_string(), + "the bin limits for the bins with indices 2 overlap with other bins" + ); + } +} diff --git a/pineappl_v0/src/boc.rs b/pineappl_v0/src/boc.rs new file mode 100644 index 000000000..62599b098 --- /dev/null +++ b/pineappl_v0/src/boc.rs @@ -0,0 +1,769 @@ +//! Module containing structures for the 3 dimensions of a [`Grid`]: bins, [`Order`] and channels +//! (`boc`). 
+ +use float_cmp::approx_eq; +use itertools::Itertools; +use serde::{Deserialize, Serialize}; +use std::cmp::Ordering; +use std::str::FromStr; +use thiserror::Error; + +/// Error type keeping information if [`Order::from_str`] went wrong. +#[derive(Debug, Error, Eq, PartialEq)] +#[error("{0}")] +pub struct ParseOrderError(String); + +// TODO: when possible change the types from `u32` to `u8` to change `try_into` to `into` + +/// Coupling powers for each grid. +#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +pub struct Order { + /// Exponent of the strong coupling. + pub alphas: u32, + /// Exponent of the electromagnetic coupling. + pub alpha: u32, + /// Exponent of the logarithm of the scale factor of the renormalization scale. + pub logxir: u32, + /// Exponent of the logarithm of the scale factor of the factorization scale. + pub logxif: u32, +} + +impl FromStr for Order { + type Err = ParseOrderError; + + fn from_str(s: &str) -> Result { + let mut result = Self { + alphas: 0, + alpha: 0, + logxir: 0, + logxif: 0, + }; + + for tuple in s + .split(|c: char| c.is_ascii_digit()) + .filter(|s| !s.is_empty()) + .zip( + s.split(|c: char| !c.is_ascii_digit()) + .filter(|s| !s.is_empty()) + .map(str::parse), + ) + { + match tuple { + ("as", Ok(num)) => { + result.alphas = num; + } + ("a", Ok(num)) => { + result.alpha = num; + } + ("lr", Ok(num)) => { + result.logxir = num; + } + ("lf", Ok(num)) => { + result.logxif = num; + } + (label, Err(err)) => { + return Err(ParseOrderError(format!( + "error while parsing exponent of '{label}': {err}" + ))); + } + (label, Ok(_)) => { + return Err(ParseOrderError(format!("unknown coupling: '{label}'"))); + } + } + } + + Ok(result) + } +} + +impl Ord for Order { + fn cmp(&self, other: &Self) -> Ordering { + // sort leading orders before next-to-leading orders, then the lowest power in alpha, the + // rest lexicographically + (self.alphas + self.alpha) + .cmp(&(other.alphas + other.alpha)) + .then((self.alpha,
self.logxir, self.logxif).cmp(&( + other.alpha, + other.logxir, + other.logxif, + ))) + } +} + +impl PartialOrd for Order { + fn partial_cmp(&self, other: &Self) -> Option { + Some(self.cmp(other)) + } +} + +impl Order { + /// Constructor. This function mainly exists to have a way of constructing `Order` that is less + /// verbose. + #[must_use] + pub const fn new(alphas: u32, alpha: u32, logxir: u32, logxif: u32) -> Self { + Self { + alphas, + alpha, + logxir, + logxif, + } + } + + /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolve`], + /// [`Grid::evolve`] or [`Grid::evolve_info`]. The selection of `orders` is controlled using + /// the `max_as` and `max_al` parameters, for instance setting `max_as = 1` and `max_al = 0` + /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` + /// and `max_al = 2` would select all NLOs, and the NNLO QCD. + /// + /// [`Grid::convolve`]: super::grid::Grid::convolve + /// [`Grid::evolve`]: super::grid::Grid::evolve + /// [`Grid::evolve_info`]: super::grid::Grid::evolve_info + /// + /// # Example + /// + /// In the case of Drell—Yan, there are the following orders: + /// + /// - exactly one leading order (LO), + /// - two next-to-leading orders (NLO), which are + /// - the NLO QCD and + /// - the NLO EW, and + /// - three next-to-next-to-leading orders (NNLO), + /// - the NNLO QCD, + /// - the NNLO EW, and finally + /// - the mixed NNLO QCD—EW. 
+ /// + /// ```rust + /// use pineappl::boc::Order; + /// + /// let orders = [ + /// Order::new(0, 2, 0, 0), // LO : alpha^2 + /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 + /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 + /// Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 + /// Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + /// ]; + /// + /// // LO EW + /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [true, false, false, false, false, false]); + /// // LO QCD + /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false]); + /// // LO + /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, false, false, false, false, false]); + /// // NLO QCD + /// assert_eq!(Order::create_mask(&orders, 2, 0, false), [true, true, false, false, false, false]); + /// // NLO EW + /// assert_eq!(Order::create_mask(&orders, 0, 2, false), [true, false, true, false, false, false]); + /// // NNLO QCD + /// assert_eq!(Order::create_mask(&orders, 3, 0, false), [true, true, false, true, false, false]); + /// // NNLO EW + /// assert_eq!(Order::create_mask(&orders, 0, 3, false), [true, false, true, false, false, true]); + /// ``` + /// + /// Orders containing non-zero powers of logarithms can be selected as well if `logs` is set to + /// `true`: + /// + /// ```rust + /// use pineappl::boc::Order; + /// + /// let orders = [ + /// Order::new(0, 2, 0, 0), // LO : alpha^2 + /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 + /// Order::new(1, 2, 1, 0), // NLO QCD : alphas alpha^2 logxif + /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// Order::new(0, 3, 1, 0), // NLO EW : alpha^3 logxif + /// ]; + /// + /// assert_eq!(Order::create_mask(&orders, 0, 2, true), [true, false, false, true, true]); + /// ``` + /// + /// For the more complicated example of top-pair production one can see the difference between + /// the selection for different LOs: + /// + 
/// ```rust + /// use pineappl::boc::Order; + /// + /// let orders = [ + /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 + /// Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha + /// Order::new(0, 2, 0, 0), // LO EW : alpha^2 + /// Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 + /// Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha + /// Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 + /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + /// ]; + /// + /// // LO EW + /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [false, false, true, false, false, false, false]); + /// // LO QCD + /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false, false]); + /// // LO + /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, true, true, false, false, false, false]); + /// ``` + #[must_use] + pub fn create_mask(orders: &[Self], max_as: u32, max_al: u32, logs: bool) -> Vec { + // smallest sum of alphas and alpha + let lo = orders + .iter() + .map(|Self { alphas, alpha, .. }| alphas + alpha) + .min() + .unwrap_or_default(); + + // all leading orders, without logarithms + let leading_orders: Vec<_> = orders + .iter() + .filter(|Self { alphas, alpha, .. }| alphas + alpha == lo) + .cloned() + .collect(); + + let lo_as = leading_orders + .iter() + .map(|Self { alphas, .. }| *alphas) + .max() + .unwrap_or_default(); + let lo_al = leading_orders + .iter() + .map(|Self { alpha, .. 
}| *alpha) + .max() + .unwrap_or_default(); + + let max = max_as.max(max_al); + let min = max_as.min(max_al); + + orders + .iter() + .map( + |&Self { + alphas, + alpha, + logxir, + logxif, + }| { + if !logs && (logxir > 0 || logxif > 0) { + return false; + } + + let pto = alphas + alpha - lo; + + alphas + alpha < min + lo + || (alphas + alpha < max + lo + && match max_as.cmp(&max_al) { + Ordering::Greater => lo_as + pto == alphas, + Ordering::Less => lo_al + pto == alpha, + Ordering::Equal => false, + }) + }, + ) + .collect() + } +} + +/// This structure represents a channel. Each channel consists of a tuple containing in the +/// following order, the particle ID of the first incoming parton, then the particle ID of the +/// second parton, and finally a numerical factor that will multiply the result for this specific +/// combination. +#[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize)] +pub struct Channel { + entry: Vec<(i32, i32, f64)>, +} + +impl Channel { + /// Constructor for `Channel`. Note that `entry` must be non-empty, otherwise this function + /// panics. 
+ /// + /// # Examples + /// + /// Ordering of the arguments doesn't matter: + /// + /// ```rust + /// use pineappl::boc::Channel; + /// + /// let entry1 = Channel::new(vec![(2, 2, 1.0), (4, 4, 1.0)]); + /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); + /// + /// // checks that the ordering doesn't matter + /// assert_eq!(entry1, entry2); + /// ``` + /// + /// Same arguments are merged together: + /// + /// ```rust + /// use pineappl::boc::Channel; + /// + /// let entry1 = Channel::new(vec![(1, 1, 1.0), (1, 1, 3.0), (3, 3, 1.0), (1, 1, 6.0)]); + /// let entry2 = Channel::new(vec![(1, 1, 10.0), (3, 3, 1.0)]); + /// + /// assert_eq!(entry1, entry2); + /// ``` + /// + /// # Panics + /// + /// Creating an empty channel panics: + /// + /// ```rust,should_panic + /// use pineappl::boc::Channel; + /// + /// let _ = Channel::new(vec![]); + /// ``` + #[must_use] + pub fn new(mut entry: Vec<(i32, i32, f64)>) -> Self { + assert!(!entry.is_empty()); + + // sort `entry` because the ordering doesn't matter and because it makes it easier to + // compare `Channel` objects with each other + entry.sort_by(|x, y| (x.0, x.1).cmp(&(y.0, y.1))); + + Self { + entry: entry + .into_iter() + .coalesce(|lhs, rhs| { + // sum the factors of repeated elements + if (lhs.0, lhs.1) == (rhs.0, rhs.1) { + Ok((lhs.0, lhs.1, lhs.2 + rhs.2)) + } else { + Err((lhs, rhs)) + } + }) + // filter zeros + // TODO: find a better way than to hardcode the epsilon limit + .filter(|&(_, _, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) + .collect(), + } + } + + /// Translates `entry` into a different basis using `translator`.
+ /// + /// # Examples + /// + /// ```rust + /// use pineappl::boc::Channel; + /// use pineappl::channel; + /// + /// let entry = Channel::translate(&channel![103, 11, 1.0], &|evol_id| match evol_id { + /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], + /// _ => vec![(evol_id, 1.0)], + /// }); + /// + /// assert_eq!(entry, channel![2, 11, 1.0; -2, 11, -1.0; 1, 11, -1.0; -1, 11, 1.0]); + /// ``` + pub fn translate(entry: &Self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { + let mut tuples = Vec::new(); + + for &(a, b, factor) in &entry.entry { + for (aid, af) in translator(a) { + for (bid, bf) in translator(b) { + tuples.push((aid, bid, factor * af * bf)); + } + } + } + + Self::new(tuples) + } + + /// Returns a tuple representation of this entry. + /// + /// # Examples + /// + /// ```rust + /// use pineappl::channel; + /// use pineappl::boc::Channel; + /// + /// let entry = channel![4, 4, 1.0; 2, 2, 1.0]; + /// + /// assert_eq!(entry.entry(), [(2, 2, 1.0), (4, 4, 1.0)]); + /// ``` + #[must_use] + pub fn entry(&self) -> &[(i32, i32, f64)] { + &self.entry + } + + /// Creates a new object with the initial states transposed. + #[must_use] + pub fn transpose(&self) -> Self { + Self::new(self.entry.iter().map(|(a, b, c)| (*b, *a, *c)).collect()) + } + + /// If `other` is the same channel when only comparing PIDs and neglecting the factors, return + /// the number `f1 / f2`, where `f1` is the factor from `self` and `f2` is the factor from + /// `other`. 
+ /// + /// # Examples + /// + /// ```rust + /// use pineappl::boc::Channel; + /// + /// let entry1 = Channel::new(vec![(2, 2, 2.0), (4, 4, 2.0)]); + /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); + /// let entry3 = Channel::new(vec![(3, 4, 1.0), (2, 2, 1.0)]); + /// let entry4 = Channel::new(vec![(4, 3, 1.0), (2, 3, 2.0)]); + /// + /// assert_eq!(entry1.common_factor(&entry2), Some(2.0)); + /// assert_eq!(entry1.common_factor(&entry3), None); + /// assert_eq!(entry1.common_factor(&entry4), None); + /// ``` + #[must_use] + pub fn common_factor(&self, other: &Self) -> Option { + if self.entry.len() != other.entry.len() { + return None; + } + + let result: Option> = self + .entry + .iter() + .zip(&other.entry) + .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) + .collect(); + + result.and_then(|factors| { + if factors + .windows(2) + .all(|win| approx_eq!(f64, win[0], win[1], ulps = 4)) + { + factors.first().copied() + } else { + None + } + }) + } +} + +/// Error type keeping information if [`Channel::from_str`] went wrong. +#[derive(Debug, Error)] +#[error("{0}")] +pub struct ParseChannelError(String); + +impl FromStr for Channel { + type Err = ParseChannelError; + + fn from_str(s: &str) -> Result { + Ok(Self::new( + s.split('+') + .map(|sub| { + sub.split_once('*').map_or_else( + || Err(ParseChannelError(format!("missing '*' in '{sub}'"))), + |(factor, pids)| { + let tuple = pids.split_once(',').map_or_else( + || Err(ParseChannelError(format!("missing ',' in '{pids}'"))), + |(a, b)| { + Ok(( + a.trim() + .strip_prefix('(') + .ok_or_else(|| { + ParseChannelError(format!( + "missing '(' in '{pids}'" + )) + })? + .trim() + .parse::() + .map_err(|err| ParseChannelError(err.to_string()))?, + b.trim() + .strip_suffix(')') + .ok_or_else(|| { + ParseChannelError(format!( + "missing ')' in '{pids}'" + )) + })? 
+ .trim() + .parse::() + .map_err(|err| ParseChannelError(err.to_string()))?, + )) + }, + )?; + + Ok(( + tuple.0, + tuple.1, + str::parse::(factor.trim()) + .map_err(|err| ParseChannelError(err.to_string()))?, + )) + }, + ) + }) + .collect::>()?, + )) + } +} + +/// Helper macro to quickly generate a `Channel` at compile time. +/// +/// # Examples +/// +/// In the following example `entry1` and `entry2` represent the same values: +/// +/// ```rust +/// use pineappl::channel; +/// +/// let entry1 = channel![2, 2, 1.0; 4, 4, 1.0]; +/// let entry2 = channel![4, 4, 1.0; 2, 2, 1.0]; +/// +/// assert_eq!(entry1, entry2); +/// ``` +#[macro_export] +macro_rules! channel { + ($a:expr, $b:expr, $factor:expr $(; $c:expr, $d:expr, $fac:expr)*) => { + $crate::boc::Channel::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) + }; +} + +#[cfg(test)] +mod tests { + use super::{Channel, Order, ParseOrderError}; + use crate::pids; + + #[test] + fn order_from_str() { + assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0))); + assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0))); + assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0))); + assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1))); + assert_eq!( + "ab12".parse::(), + Err(ParseOrderError("unknown coupling: 'ab'".to_owned())) + ); + assert_eq!( + "ab123456789000000".parse::(), + Err(ParseOrderError( + "error while parsing exponent of 'ab': number too large to fit in target type" + .to_owned() + )) + ); + } + + #[test] + fn order_cmp() { + let mut orders = [ + Order::new(1, 2, 1, 0), + Order::new(1, 2, 0, 1), + Order::new(1, 2, 0, 0), + Order::new(0, 3, 1, 0), + Order::new(0, 3, 0, 1), + Order::new(0, 3, 0, 0), + Order::new(0, 2, 0, 0), + ]; + + orders.sort(); + + assert_eq!(orders[0], Order::new(0, 2, 0, 0)); + assert_eq!(orders[1], Order::new(1, 2, 0, 0)); + assert_eq!(orders[2], Order::new(1, 2, 0, 1)); + assert_eq!(orders[3], Order::new(1, 2, 1, 0)); + assert_eq!(orders[4], Order::new(0, 3, 0, 0)); + assert_eq!(orders[5], 
Order::new(0, 3, 0, 1)); + assert_eq!(orders[6], Order::new(0, 3, 1, 0)); + } + + #[test] + fn order_create_mask() { + // Drell—Yan orders + let orders = [ + Order::new(0, 2, 0, 0), // LO : alpha^2 + Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 + Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 + Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + ]; + + assert_eq!( + Order::create_mask(&orders, 0, 0, false), + [false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 1, false), + [true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 2, false), + [true, false, true, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 3, false), + [true, false, true, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 1, 0, false), + [true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 1, false), + [true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 2, false), + [true, false, true, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 3, false), + [true, false, true, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 2, 0, false), + [true, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 1, false), + [true, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 2, false), + [true, true, true, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 3, false), + [true, true, true, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 3, 0, false), + [true, true, false, true, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 1, false), + [true, true, false, true, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 2, 
false), + [true, true, true, true, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 3, false), + [true, true, true, true, true, true] + ); + + // Top-pair production orders + let orders = [ + Order::new(2, 0, 0, 0), // LO QCD : alphas^2 + Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha + Order::new(0, 2, 0, 0), // LO EW : alpha^2 + Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 + Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha + Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 + Order::new(0, 3, 0, 0), // NLO EW : alpha^3 + Order::new(4, 0, 0, 0), // NNLO QCD : alphas^4 + Order::new(3, 1, 0, 0), // NNLO QCD—EW : alphas^3 alpha + Order::new(2, 2, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 + Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 + Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 + ]; + + assert_eq!( + Order::create_mask(&orders, 0, 0, false), + [false, false, false, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 1, false), + [false, false, true, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 2, false), + [false, false, true, false, false, false, true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 0, 3, false), + [false, false, true, false, false, false, true, false, false, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 1, 0, false), + [true, false, false, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 1, false), + [true, true, true, false, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 2, false), + [true, true, true, false, false, false, true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 1, 3, false), + [true, true, true, false, false, false, true, false, false, 
false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 2, 0, false), + [true, false, false, true, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 1, false), + [true, true, true, true, false, false, false, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 2, false), + [true, true, true, true, true, true, true, false, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 2, 3, false), + [true, true, true, true, true, true, true, false, false, false, false, true] + ); + assert_eq!( + Order::create_mask(&orders, 3, 0, false), + [true, false, false, true, false, false, false, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 1, false), + [true, true, true, true, false, false, false, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 2, false), + [true, true, true, true, true, true, true, true, false, false, false, false] + ); + assert_eq!( + Order::create_mask(&orders, 3, 3, false), + [true, true, true, true, true, true, true, true, true, true, true, true] + ); + } + + #[test] + fn channel_translate() { + let channel = Channel::translate(&channel![103, 203, 2.0], &pids::evol_to_pdg_mc_ids); + + assert_eq!( + channel, + channel![ 2, 2, 2.0; 2, -2, -2.0; 2, 1, -2.0; 2, -1, 2.0; + -2, 2, 2.0; -2, -2, -2.0; -2, 1, -2.0; -2, -1, 2.0; + 1, 2, -2.0; 1, -2, 2.0; 1, 1, 2.0; 1, -1, -2.0; + -1, 2, -2.0; -1, -2, 2.0; -1, 1, 2.0; -1, -1, -2.0] + ); + } + + #[test] + fn channel_from_str() { + assert_eq!( + str::parse::(" 1 * ( 2 , -2) + 2* (4,-4)").unwrap(), + channel![2, -2, 1.0; 4, -4, 2.0] + ); + + assert_eq!( + str::parse::("* ( 2, -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "cannot parse float from empty string" + ); + + assert_eq!( + str::parse::(" 1 ( 2 -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing '*' in ' 1 ( 2 -2) '" + ); + + assert_eq!( + 
str::parse::(" 1 * ( 2 -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing ',' in ' ( 2 -2) '" + ); + + assert_eq!( + str::parse::(" 1 * 2, -2) + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing '(' in ' 2, -2) '" + ); + + assert_eq!( + str::parse::(" 1 * ( 2, -2 + 2* (4,-4)") + .unwrap_err() + .to_string(), + "missing ')' in ' ( 2, -2 '" + ); + } +} diff --git a/pineappl_v0/src/convert.rs b/pineappl_v0/src/convert.rs new file mode 100644 index 000000000..fe1a85186 --- /dev/null +++ b/pineappl_v0/src/convert.rs @@ -0,0 +1,9 @@ +#[allow(clippy::cast_possible_truncation)] +#[allow(clippy::cast_sign_loss)] +pub fn usize_from_f64(x: f64) -> usize { + x.max(0.0) as usize +} + +pub fn f64_from_usize(x: usize) -> f64 { + f64::from(u32::try_from(x).unwrap()) +} diff --git a/pineappl_v0/src/convolutions.rs b/pineappl_v0/src/convolutions.rs new file mode 100644 index 000000000..ea726c440 --- /dev/null +++ b/pineappl_v0/src/convolutions.rs @@ -0,0 +1,422 @@ +//! Module for everything related to luminosity functions. + +use super::grid::Grid; +use super::pids; +use super::subgrid::{Mu2, Subgrid}; +use rustc_hash::FxHashMap; + +enum Pdfs<'a> { + Two { + xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, + xfx1_cache: FxHashMap<(i32, usize, usize), f64>, + xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, + xfx2_cache: FxHashMap<(i32, usize, usize), f64>, + }, + One { + xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, + xfx_cache: FxHashMap<(i32, usize, usize), f64>, + }, +} + +impl<'a> Pdfs<'a> { + pub fn clear(&mut self) { + match self { + Self::One { xfx_cache, .. } => xfx_cache.clear(), + Self::Two { + xfx1_cache, + xfx2_cache, + .. + } => { + xfx1_cache.clear(); + xfx2_cache.clear(); + } + } + } +} + +/// A cache for evaluating PDFs. Methods like [`Grid::convolve`] accept instances of this `struct` +/// instead of the PDFs themselves. 
+pub struct LumiCache<'a> { + pdfs: Pdfs<'a>, + alphas: &'a mut dyn FnMut(f64) -> f64, + alphas_cache: Vec, + mur2_grid: Vec, + muf2_grid: Vec, + x_grid: Vec, + imur2: Vec, + imuf2: Vec, + ix1: Vec, + ix2: Vec, + pdg1: i32, + pdg2: i32, + cc1: i32, + cc2: i32, +} + +impl<'a> LumiCache<'a> { + /// Construct a luminosity cache with two PDFs, `xfx1` and `xfx2`. The types of hadrons the + /// PDFs correspond to must be given as `pdg1` and `pdg2`. The function to evaluate the + /// strong coupling must be given as `alphas`. The grid that the cache will be used with must + /// be given as `grid`; this parameter determines which of the initial states are hadronic, and + /// if an initial states is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some + /// of the PDFs must be charge-conjugated, this is automatically done in this function. + pub fn with_two( + pdg1: i32, + xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, + pdg2: i32, + xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, + alphas: &'a mut dyn FnMut(f64) -> f64, + ) -> Self { + Self { + pdfs: Pdfs::Two { + xfx1, + xfx1_cache: FxHashMap::default(), + xfx2, + xfx2_cache: FxHashMap::default(), + }, + alphas, + alphas_cache: vec![], + mur2_grid: vec![], + muf2_grid: vec![], + x_grid: vec![], + imur2: Vec::new(), + imuf2: Vec::new(), + ix1: Vec::new(), + ix2: Vec::new(), + pdg1, + pdg2, + cc1: 0, + cc2: 0, + } + } + + /// Construct a luminosity cache with a single PDF `xfx`. The type of hadron the PDF + /// corresponds to must be given as `pdg`. The function to evaluate the strong coupling must be + /// given as `alphas`. The grid that the cache should be used with must be given as `grid`; + /// this parameter determines which of the initial states are hadronic, and if an initial + /// states is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some of the PDFs + /// must be charge-conjugated, this is automatically done in this function. 
+ pub fn with_one( + pdg: i32, + xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, + alphas: &'a mut dyn FnMut(f64) -> f64, + ) -> Self { + Self { + pdfs: Pdfs::One { + xfx, + xfx_cache: FxHashMap::default(), + }, + alphas, + alphas_cache: vec![], + mur2_grid: vec![], + muf2_grid: vec![], + x_grid: vec![], + imur2: Vec::new(), + imuf2: Vec::new(), + ix1: Vec::new(), + ix2: Vec::new(), + pdg1: pdg, + pdg2: pdg, + cc1: 0, + cc2: 0, + } + } + + pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64)]) -> Result<(), ()> { + let convolutions = grid.convolutions(); + + // TODO: the following code only works with exactly two convolutions + assert_eq!(convolutions.len(), 2); + + // do we have to charge-conjugate the initial states? + let cc1 = if let Some(pid) = convolutions[0].pid() { + if self.pdg1 == pid { + 1 + } else if self.pdg1 == pids::charge_conjugate_pdg_pid(pid) { + -1 + } else { + // TODO: return a proper error + return Err(()); + } + } else { + 0 + }; + let cc2 = if let Some(pid) = convolutions[1].pid() { + if self.pdg2 == pid { + 1 + } else if self.pdg2 == pids::charge_conjugate_pdg_pid(pid) { + -1 + } else { + // TODO: return a proper error + return Err(()); + } + } else { + 0 + }; + + // TODO: try to avoid calling clear + self.clear(); + + let mut x_grid: Vec<_> = grid + .subgrids() + .iter() + .filter_map(|subgrid| { + if subgrid.is_empty() { + None + } else { + let mut vec = subgrid.x1_grid().into_owned(); + vec.extend_from_slice(&subgrid.x2_grid()); + Some(vec) + } + }) + .flatten() + .collect(); + x_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + x_grid.dedup(); + + let mut mur2_grid: Vec<_> = grid + .subgrids() + .iter() + .filter_map(|subgrid| { + if subgrid.is_empty() { + None + } else { + Some(subgrid.mu2_grid().into_owned()) + } + }) + .flatten() + .flat_map(|Mu2 { ren, .. 
}| { + xi.iter() + .map(|(xir, _)| xir * xir * ren) + .collect::>() + }) + .collect(); + mur2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + mur2_grid.dedup(); + + let mut muf2_grid: Vec<_> = grid + .subgrids() + .iter() + .filter_map(|subgrid| { + if subgrid.is_empty() { + None + } else { + Some(subgrid.mu2_grid().into_owned()) + } + }) + .flatten() + .flat_map(|Mu2 { fac, .. }| { + xi.iter() + .map(|(_, xif)| xif * xif * fac) + .collect::>() + }) + .collect(); + muf2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + muf2_grid.dedup(); + + self.alphas_cache = mur2_grid.iter().map(|&mur2| (self.alphas)(mur2)).collect(); + + self.mur2_grid = mur2_grid; + self.muf2_grid = muf2_grid; + self.x_grid = x_grid; + self.cc1 = cc1; + self.cc2 = cc2; + + Ok(()) + } + + /// Return the PDF (multiplied with `x`) for the first initial state. + pub fn xfx1(&mut self, pdg_id: i32, ix1: usize, imu2: usize) -> f64 { + let ix1 = self.ix1[ix1]; + let x = self.x_grid[ix1]; + if self.cc1 == 0 { + x + } else { + let imuf2 = self.imuf2[imu2]; + let muf2 = self.muf2_grid[imuf2]; + let pid = if self.cc1 == 1 { + pdg_id + } else { + pids::charge_conjugate_pdg_pid(pdg_id) + }; + let (xfx, xfx_cache) = match &mut self.pdfs { + Pdfs::One { xfx, xfx_cache, .. } => (xfx, xfx_cache), + Pdfs::Two { + xfx1, xfx1_cache, .. + } => (xfx1, xfx1_cache), + }; + *xfx_cache + .entry((pid, ix1, imuf2)) + .or_insert_with(|| xfx(pid, x, muf2)) + } + } + + /// Return the PDF (multiplied with `x`) for the second initial state. + pub fn xfx2(&mut self, pdg_id: i32, ix2: usize, imu2: usize) -> f64 { + let ix2 = self.ix2[ix2]; + let x = self.x_grid[ix2]; + if self.cc2 == 0 { + x + } else { + let imuf2 = self.imuf2[imu2]; + let muf2 = self.muf2_grid[imuf2]; + let pid = if self.cc2 == 1 { + pdg_id + } else { + pids::charge_conjugate_pdg_pid(pdg_id) + }; + let (xfx, xfx_cache) = match &mut self.pdfs { + Pdfs::One { xfx, xfx_cache, .. 
} => (xfx, xfx_cache), + Pdfs::Two { + xfx2, xfx2_cache, .. + } => (xfx2, xfx2_cache), + }; + *xfx_cache + .entry((pid, ix2, imuf2)) + .or_insert_with(|| xfx(pid, x, muf2)) + } + } + + /// Return the strong coupling for the renormalization scale set with [`LumiCache::set_grids`], + /// in the grid `mu2_grid` at the index `imu2`. + #[must_use] + pub fn alphas(&self, imu2: usize) -> f64 { + self.alphas_cache[self.imur2[imu2]] + } + + /// Clears the cache. + pub fn clear(&mut self) { + self.alphas_cache.clear(); + self.pdfs.clear(); + self.mur2_grid.clear(); + self.muf2_grid.clear(); + self.x_grid.clear(); + } + + /// Set the grids. + pub fn set_grids( + &mut self, + mu2_grid: &[Mu2], + x1_grid: &[f64], + x2_grid: &[f64], + xir: f64, + xif: f64, + ) { + self.imur2 = mu2_grid + .iter() + .map(|Mu2 { ren, .. }| { + self.mur2_grid + .iter() + .position(|&mur2| mur2 == xir * xir * ren) + .unwrap_or_else(|| unreachable!()) + }) + .collect(); + self.imuf2 = mu2_grid + .iter() + .map(|Mu2 { fac, .. }| { + self.muf2_grid + .iter() + .position(|&muf2| muf2 == xif * xif * fac) + .unwrap_or_else(|| unreachable!()) + }) + .collect(); + self.ix1 = x1_grid + .iter() + .map(|x1| { + self.x_grid + .iter() + .position(|x| x1 == x) + .unwrap_or_else(|| unreachable!()) + }) + .collect(); + + self.ix2 = x2_grid + .iter() + .map(|x2| { + self.x_grid + .iter() + .position(|x| x2 == x) + .unwrap_or_else(|| unreachable!()) + }) + .collect(); + } +} + +/// Data type that indentifies different types of convolutions. +#[derive(Debug, Eq, PartialEq)] +pub enum Convolution { + // TODO: eventually get rid of this value + /// No convolution. + None, + /// Unpolarized parton distribution function. The integer denotes the type of hadron with a PDG + /// MC ID. + UnpolPDF(i32), + /// Polarized parton distribution function. The integer denotes the type of hadron with a PDG + /// MC ID. + PolPDF(i32), + /// Unpolarized fragmentation function. 
The integer denotes the type of hadron with a PDG MC + /// ID. + UnpolFF(i32), + /// Polarized fragmentation function. The integer denotes the type of hadron with a PDG MC ID. + PolFF(i32), +} + +impl Convolution { + /// Return the convolution if the PID is charged conjugated. + #[must_use] + pub const fn charge_conjugate(&self) -> Self { + match *self { + Self::None => Self::None, + Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolPDF(pid) => Self::PolPDF(pids::charge_conjugate_pdg_pid(pid)), + Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), + Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), + } + } + + /// Return the PID of the convolution if it has any. + #[must_use] + pub const fn pid(&self) -> Option<i32> { + match *self { + Self::None => None, + Self::UnpolPDF(pid) | Self::PolPDF(pid) | Self::UnpolFF(pid) | Self::PolFF(pid) => { + Some(pid) + } + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn convolution_charge_conjugate() { + assert_eq!(Convolution::None.charge_conjugate(), Convolution::None); + assert_eq!( + Convolution::UnpolPDF(2212).charge_conjugate(), + Convolution::UnpolPDF(-2212) + ); + assert_eq!( + Convolution::PolPDF(2212).charge_conjugate(), + Convolution::PolPDF(-2212) + ); + assert_eq!( + Convolution::UnpolFF(2212).charge_conjugate(), + Convolution::UnpolFF(-2212) + ); + assert_eq!( + Convolution::PolFF(2212).charge_conjugate(), + Convolution::PolFF(-2212) + ); + } + + #[test] + fn convolution_pid() { + assert_eq!(Convolution::None.pid(), None); + assert_eq!(Convolution::UnpolPDF(2212).pid(), Some(2212)); + assert_eq!(Convolution::PolPDF(2212).pid(), Some(2212)); + assert_eq!(Convolution::UnpolFF(2212).pid(), Some(2212)); + assert_eq!(Convolution::PolFF(2212).pid(), Some(2212)); + } +} diff --git a/pineappl_v0/src/empty_subgrid.rs b/pineappl_v0/src/empty_subgrid.rs new file mode 100644 index 000000000..79640e655 --- /dev/null +++
b/pineappl_v0/src/empty_subgrid.rs @@ -0,0 +1,130 @@ +//! TODO + +use super::grid::Ntuple; +use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use serde::{Deserialize, Serialize}; +use std::borrow::Cow; +use std::iter; + +/// A subgrid type that is always empty. +#[derive(Clone, Default, Deserialize, Serialize)] +pub struct EmptySubgridV1; + +impl Subgrid for EmptySubgridV1 { + fn convolve( + &self, + _: &[f64], + _: &[f64], + _: &[Mu2], + _: &mut dyn FnMut(usize, usize, usize) -> f64, + ) -> f64 { + 0.0 + } + + fn fill(&mut self, _: &Ntuple) { + panic!("EmptySubgridV1 doesn't support the fill operation"); + } + + fn mu2_grid(&self) -> Cow<[Mu2]> { + Cow::Borrowed(&[]) + } + + fn x1_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&[]) + } + + fn x2_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&[]) + } + + fn is_empty(&self) -> bool { + true + } + + fn merge(&mut self, subgrid: &mut SubgridEnum, _: bool) { + assert!( + subgrid.is_empty(), + "EmptySubgridV1 doesn't support the merge operation for non-empty subgrids" + ); + } + + fn scale(&mut self, _: f64) {} + + fn symmetrize(&mut self) {} + + fn clone_empty(&self) -> SubgridEnum { + Self.into() + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + Box::new(iter::empty()) + } + + fn stats(&self) -> Stats { + Stats { + total: 0, + allocated: 0, + zeros: 0, + overhead: 0, + bytes_per_value: 0, + } + } + + fn static_scale(&self) -> Option { + None + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn create_empty() { + let mut subgrid = EmptySubgridV1; + assert_eq!(subgrid.convolve(&[], &[], &[], &mut |_, _, _| 0.0), 0.0,); + assert!(subgrid.is_empty()); + subgrid.merge(&mut EmptySubgridV1.into(), false); + subgrid.scale(2.0); + subgrid.symmetrize(); + assert!(subgrid.clone_empty().is_empty()); + assert_eq!( + subgrid.stats(), + Stats { + total: 0, + allocated: 0, + zeros: 0, + overhead: 0, + bytes_per_value: 0, + } + ); + assert_eq!(subgrid.static_scale(), None); + } + + #[test] 
+ #[should_panic(expected = "EmptySubgridV1 doesn't support the fill operation")] + fn fill() { + let mut subgrid = EmptySubgridV1; + subgrid.fill(&Ntuple { + x1: 0.0, + x2: 0.0, + q2: 0.0, + weight: 0.0, + }); + } + + #[test] + fn q2_grid() { + assert!(EmptySubgridV1.mu2_grid().is_empty()); + } + + #[test] + fn x1_grid() { + assert!(EmptySubgridV1.x1_grid().is_empty()); + } + + #[test] + fn x2_grid() { + assert!(EmptySubgridV1.x2_grid().is_empty()); + } +} diff --git a/pineappl_v0/src/evolution.rs b/pineappl_v0/src/evolution.rs new file mode 100644 index 000000000..83e57c3d2 --- /dev/null +++ b/pineappl_v0/src/evolution.rs @@ -0,0 +1,801 @@ +//! Supporting classes and functions for [`Grid::evolve`]. + +use super::boc::{Channel, Order}; +use super::channel; +use super::convolutions::Convolution; +use super::grid::{Grid, GridError}; +use super::import_only_subgrid::ImportOnlySubgridV2; +use super::pids::PidBasis; +use super::sparse_array3::SparseArray3; +use super::subgrid::{Mu2, Subgrid, SubgridEnum}; +use float_cmp::approx_eq; +use itertools::izip; +use itertools::Itertools; +use ndarray::linalg; +use ndarray::{s, Array1, Array2, Array3, ArrayView1, ArrayView4, Axis}; +use std::iter; + +/// Number of ULPS used to de-duplicate grid values in [`Grid::evolve_info`]. +pub(crate) const EVOLVE_INFO_TOL_ULPS: i64 = 256; + +/// Number of ULPS used to search for grid values in this module. This value must be a large-enough +/// multiple of [`EVOLVE_INFO_TOL_ULPS`], because otherwise similar values are not found in +/// [`Grid::evolve`]. See for details. +const EVOLUTION_TOL_ULPS: i64 = 4 * EVOLVE_INFO_TOL_ULPS; + +/// This structure captures the information needed to create an evolution kernel operator (EKO) for +/// a specific [`Grid`]. +pub struct EvolveInfo { + /// Squared factorization scales of the `Grid`. + pub fac1: Vec<f64>, + /// Particle identifiers of the `Grid`. + pub pids1: Vec<i32>, + /// `x`-grid coordinates of the `Grid`.
+ pub x1: Vec<f64>, + /// Renormalization scales of the `Grid`. + pub ren1: Vec<f64>, +} + +/// Information about the evolution kernel operator (EKO) passed to [`Grid::evolve`] as `operator`, +/// which is used to convert a [`Grid`] into an [`FkTable`]. The dimensions of the EKO must +/// correspond to the values given in [`fac1`], [`pids0`], [`x0`], [`pids1`] and [`x1`], exactly in +/// this order. Members with a `1` are defined at the squared factorization scales given in +/// [`fac1`] (often called process scales) and are found in the [`Grid`] that [`Grid::evolve`] is +/// called with. Members with a `0` are defined at the squared factorization scale [`fac0`] (often +/// called fitting scale or starting scale) and are found in the [`FkTable`] resulting from +/// [`Grid::evolve`]. +/// +/// The EKO may convert a `Grid` from a basis given by the particle identifiers [`pids1`] to a +/// possibly different basis given by [`pids0`]. This basis must also be identified using +/// [`pid_basis`], which tells [`FkTable::convolve`] how to perform a convolution. The members +/// [`ren1`] and [`alphas`] must be the strong couplings given at the respective renormalization +/// scales. Finally, [`xir`] and [`xif`] can be used to vary the renormalization and factorization +/// scales, respectively, around their central values. +/// +/// [`FkTable::convolve`]: super::fk_table::FkTable::convolve +/// [`FkTable`]: super::fk_table::FkTable +/// [`alphas`]: Self::alphas +/// [`fac0`]: Self::fac0 +/// [`fac1`]: Self::fac1 +/// [`pid_basis`]: Self::pid_basis +/// [`pids0`]: Self::pids0 +/// [`pids1`]: Self::pids1 +/// [`ren1`]: Self::ren1 +/// [`x0`]: Self::x0 +/// [`x1`]: Self::x1 +/// [`xif`]: Self::xif +/// [`xir`]: Self::xir +pub struct OperatorInfo { + /// Squared factorization scale of the `FkTable`. + pub fac0: f64, + /// Particle identifiers of the `FkTable`. + pub pids0: Vec<i32>, + /// `x`-grid coordinates of the `FkTable` + pub x0: Vec<f64>, + /// Squared factorization scales of the `Grid`.
+ pub fac1: Vec<f64>, + /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than + /// given here, the contributions of them are silently ignored. + pub pids1: Vec<i32>, + /// `x`-grid coordinates of the `Grid`. + pub x1: Vec<f64>, + + /// Renormalization scales of the `Grid`. + pub ren1: Vec<f64>, + /// Strong couplings corresponding to the order given in [`ren1`](Self::ren1). + pub alphas: Vec<f64>, + /// Multiplicative factor for the central renormalization scale. + pub xir: f64, + /// Multiplicative factor for the central factorization scale. + pub xif: f64, + /// Particle ID basis for `FkTable`. + pub pid_basis: PidBasis, +} + +/// Information about the evolution kernel operator slice (EKO) passed to +/// [`Grid::evolve_with_slice_iter`](super::grid::Grid::evolve_with_slice_iter) as `operator`, +/// which is used to convert a [`Grid`] into an [`FkTable`](super::fk_table::FkTable). The +/// dimensions of the EKO must correspond to the values given in [`fac1`](Self::fac1), +/// [`pids0`](Self::pids0), [`x0`](Self::x0), [`pids1`](Self::pids1) and [`x1`](Self::x1), exactly +/// in this order. Members with a `1` are defined at the squared factorization scale given as +/// `fac1` (often called process scale) and are found in the [`Grid`] that +/// `Grid::evolve_with_slice_iter` is called with. Members with a `0` are defined at the squared +/// factorization scale [`fac0`](Self::fac0) (often called fitting scale or starting scale) and are +/// found in the `FkTable` resulting from [`Grid::evolve`]. +/// +/// The EKO slice may convert a `Grid` from a basis given by the particle identifiers `pids1` to a +/// possibly different basis given by `pids0`. This basis must also be identified using +/// [`pid_basis`](Self::pid_basis), which tells +/// [`FkTable::convolve`](super::fk_table::FkTable::convolve) how to perform a convolution. +#[derive(Clone)] +pub struct OperatorSliceInfo { + /// Squared factorization scale of the `FkTable`.
+ pub fac0: f64, + /// Particle identifiers of the `FkTable`. + pub pids0: Vec<i32>, + /// `x`-grid coordinates of the `FkTable` + pub x0: Vec<f64>, + /// Squared factorization scale of the slice of `Grid` that should be evolved. + pub fac1: f64, + /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than + /// given here, the contributions of them are silently ignored. + pub pids1: Vec<i32>, + /// `x`-grid coordinates of the `Grid`. + pub x1: Vec<f64>, + + /// Particle ID basis for `FkTable`. + pub pid_basis: PidBasis, +} + +/// A mapping of squared renormalization scales in `ren1` to strong couplings in `alphas`. The +/// ordering of both members defines the mapping. +pub struct AlphasTable { + /// Renormalization scales of the `Grid`. + pub ren1: Vec<f64>, + /// Strong couplings corresponding to the order given in [`ren1`](Self::ren1). + pub alphas: Vec<f64>, +} + +impl AlphasTable { + /// Create an `AlphasTable` for `grid`, varying the renormalization scale by `xir` for the + /// strong couplings given by `alphas`. The only argument of `alphas` must be the squared + /// renormalization scale. + pub fn from_grid(grid: &Grid, xir: f64, alphas: &dyn Fn(f64) -> f64) -> Self { + let mut ren1: Vec<_> = grid + .subgrids() + .iter() + .flat_map(|subgrid| { + subgrid + .mu2_grid() + .iter() + .map(|Mu2 { ren, .. }| xir * xir * ren) + .collect::<Vec<_>>() + }) + .collect(); + // UNWRAP: if we can't sort numbers the grid is fishy + ren1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + ren1.dedup(); + let ren1 = ren1; + let alphas: Vec<_> = ren1.iter().map(|&mur2| alphas(mur2)).collect(); + + Self { ren1, alphas } + } +} + +fn gluon_has_pid_zero(grid: &Grid) -> bool { + // if there are any PID zero particles ...
+ grid.channels() + .iter() + .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) + // and if the particle IDs are encoded using PDG MC IDs + && grid.pid_basis() == PidBasis::Pdg +} + +type Pid01IndexTuples = Vec<(usize, usize)>; +type Pid01Tuples = Vec<(i32, i32)>; + +fn pid_slices( + operator: &ArrayView4<f64>, + info: &OperatorSliceInfo, + gluon_has_pid_zero: bool, + pid1_nonzero: &dyn Fn(i32) -> bool, +) -> Result<(Pid01IndexTuples, Pid01Tuples), GridError> { + // list of all non-zero PID indices + let pid_indices: Vec<_> = (0..operator.dim().2) + .cartesian_product(0..operator.dim().0) + .filter(|&(pid0_idx, pid1_idx)| { + // 1) at least one element of the operator must be non-zero, and 2) the pid must be + // contained in some channel + operator + .slice(s![pid1_idx, .., pid0_idx, ..]) + .iter() + .any(|&value| value != 0.0) + && pid1_nonzero(if gluon_has_pid_zero && info.pids1[pid1_idx] == 21 { + 0 + } else { + info.pids1[pid1_idx] + }) + }) + .collect(); + + if pid_indices.is_empty() { + return Err(GridError::EvolutionFailure( + "no non-zero operator found; result would be an empty FkTable".to_owned(), + )); + } + + // list of all non-zero (pid0, pid1) combinations + let pids = pid_indices + .iter() + .map(|&(pid0_idx, pid1_idx)| { + ( + info.pids0[pid0_idx], + if gluon_has_pid_zero && info.pids1[pid1_idx] == 21 { + 0 + } else { + info.pids1[pid1_idx] + }, + ) + }) + .collect(); + + Ok((pid_indices, pids)) +} + +fn channels0_with_one(pids: &[(i32, i32)]) -> Vec<i32> { + let mut pids0: Vec<_> = pids.iter().map(|&(pid0, _)| pid0).collect(); + pids0.sort_unstable(); + pids0.dedup(); + + pids0 +} + +fn operator_slices( + operator: &ArrayView4<f64>, + info: &OperatorSliceInfo, + pid_indices: &[(usize, usize)], + x1: &[f64], +) -> Result<Vec<Array2<f64>>, GridError> { + // permutation between the grid x values and the operator x1 values + let x1_indices: Vec<_> = x1 + .iter() + .map(|&x1p| { + info.x1 + .iter() + .position(|&x1| approx_eq!(f64, x1p, x1, ulps =
EVOLUTION_TOL_ULPS)) + .ok_or_else(|| { + GridError::EvolutionFailure(format!("no operator for x = {x1p} found")) + }) + }) + // TODO: use `try_collect` once stabilized + .collect::<Result<_, _>>()?; + + // create the corresponding operators accessible in the form [muf2, x0, x1] + let operators: Vec<_> = pid_indices + .iter() + .map(|&(pid0_idx, pid1_idx)| { + operator + .slice(s![pid1_idx, .., pid0_idx, ..]) + .select(Axis(0), &x1_indices) + .reversed_axes() + .as_standard_layout() + .into_owned() + }) + .collect(); + + Ok(operators) +} + +type X1aX1bOp2Tuple = (Vec<Vec<f64>>, Option<Array2<f64>>); + +fn ndarray_from_subgrid_orders_slice( + fac1: f64, + subgrids: &ArrayView1<SubgridEnum>, + orders: &[Order], + order_mask: &[bool], + (xir, xif): (f64, f64), + alphas_table: &AlphasTable, +) -> Result<X1aX1bOp2Tuple, GridError> { + // TODO: skip empty subgrids + + let mut x1_a: Vec<_> = subgrids + .iter() + .enumerate() + .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) + .flat_map(|(_, subgrid)| subgrid.x1_grid().into_owned()) + .collect(); + let mut x1_b: Vec<_> = subgrids + .iter() + .enumerate() + .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) + .flat_map(|(_, subgrid)| subgrid.x2_grid().into_owned()) + .collect(); + + x1_a.sort_by(f64::total_cmp); + x1_a.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); + x1_b.sort_by(f64::total_cmp); + x1_b.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); + + let mut array = Array2::<f64>::zeros((x1_a.len(), x1_b.len())); + let mut zero = true; + + // add subgrids for different orders, but the same bin and lumi, using the right + // couplings + for (subgrid, order) in subgrids + .iter() + .zip(orders.iter()) + .zip(order_mask.iter().chain(iter::repeat(&true))) + .filter_map(|((subgrid, order), &enabled)| { + (enabled && !subgrid.is_empty()).then_some((subgrid, order)) + }) + { + let mut logs = 1.0; + + if order.logxir > 0 { + if approx_eq!(f64, xir, 1.0, ulps = 4) { + continue; + } + + logs *= (xir * xir).ln(); + } + + if
order.logxif > 0 { + if approx_eq!(f64, xif, 1.0, ulps = 4) { + continue; + } + + logs *= (xif * xif).ln(); + } + + // TODO: use `try_collect` once stabilized + let xa_indices: Vec<_> = subgrid + .x1_grid() + .iter() + .map(|&xa| { + x1_a.iter() + .position(|&x1a| approx_eq!(f64, x1a, xa, ulps = EVOLUTION_TOL_ULPS)) + .ok_or_else(|| { + GridError::EvolutionFailure(format!("no operator for x1 = {xa} found")) + }) + }) + .collect::<Result<_, _>>()?; + let xb_indices: Vec<_> = subgrid + .x2_grid() + .iter() + .map(|&xb| { + x1_b.iter() + .position(|&x1b| approx_eq!(f64, x1b, xb, ulps = EVOLUTION_TOL_ULPS)) + .ok_or_else(|| { + GridError::EvolutionFailure(format!("no operator for x1 = {xb} found")) + }) + }) + .collect::<Result<_, _>>()?; + + for ((ifac1, ix1, ix2), value) in subgrid.indexed_iter() { + let Mu2 { ren, fac } = subgrid.mu2_grid()[ifac1]; + + if !approx_eq!(f64, xif * xif * fac, fac1, ulps = EVOLUTION_TOL_ULPS) { + continue; + } + + let mur2 = xir * xir * ren; + + let als = if order.alphas == 0 { + 1.0 + } else if let Some(alphas) = alphas_table + .ren1 + .iter() + .zip(alphas_table.alphas.iter()) + .find_map(|(&ren1, &alphas)| { + approx_eq!(f64, ren1, mur2, ulps = EVOLUTION_TOL_ULPS).then(|| alphas) + }) + { + alphas.powi(order.alphas.try_into().unwrap()) + } else { + return Err(GridError::EvolutionFailure(format!( + "no alphas for mur2 = {mur2} found" + ))); + }; + + zero = false; + + array[[xa_indices[ix1], xb_indices[ix2]]] += als * logs * value; + } + } + + Ok((vec![x1_a, x1_b], (!zero).then_some(array))) +} + +pub(crate) fn evolve_slice_with_one( + grid: &Grid, + operator: &ArrayView4<f64>, + info: &OperatorSliceInfo, + order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, +) -> Result<(Array3<SubgridEnum>, Vec<Channel>), GridError> { + let gluon_has_pid_zero = gluon_has_pid_zero(grid); + let has_pdf1 = grid.convolutions()[0] != Convolution::None; + + let (pid_indices, pids) = pid_slices(operator, info, gluon_has_pid_zero, &|pid| { + grid.channels() + .iter() + .flat_map(Channel::entry)
+ .any(|&(a, b, _)| if has_pdf1 { a } else { b } == pid) + })?; + + let channels0 = channels0_with_one(&pids); + let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); + let new_axis = if has_pdf1 { 2 } else { 1 }; + + let mut last_x1 = Vec::new(); + let mut ops = Vec::new(); + + for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { + let mut tables = vec![Array1::zeros(info.x0.len()); channels0.len()]; + + for (subgrids_o, channel1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.channels()) { + let (mut x1, array) = ndarray_from_subgrid_orders_slice( + info.fac1, + &subgrids_o, + grid.orders(), + order_mask, + xi, + alphas_table, + )?; + + // skip over zero arrays to speed up evolution and avoid problems with NaNs + let Some(array) = array else { + continue; + }; + + let x1 = if has_pdf1 { x1.remove(0) } else { x1.remove(1) }; + + if x1.is_empty() { + continue; + } + + if (last_x1.len() != x1.len()) + || last_x1 + .iter() + .zip(x1.iter()) + .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) + { + ops = operator_slices(operator, info, &pid_indices, &x1)?; + last_x1 = x1; + } + + for (&pid1, &factor) in + channel1 + .entry() + .iter() + .map(|(a, b, f)| if has_pdf1 { (a, f) } else { (b, f) }) + { + for (fk_table, op) in + channels0 + .iter() + .zip(tables.iter_mut()) + .filter_map(|(&pid0, fk_table)| { + pids.iter() + .zip(ops.iter()) + .find_map(|(&(p0, p1), op)| { + (p0 == pid0 && p1 == pid1).then_some(op) + }) + .map(|op| (fk_table, op)) + }) + { + fk_table.scaled_add(factor, &op.dot(&array.index_axis(Axis(new_axis - 1), 0))); + } + } + } + + sub_fk_tables.extend(tables.into_iter().map(|table| { + ImportOnlySubgridV2::new( + SparseArray3::from_ndarray( + table + .insert_axis(Axis(0)) + .insert_axis(Axis(new_axis)) + .view(), + 0, + 1, + ), + vec![Mu2 { + // TODO: FK tables don't depend on the renormalization scale + //ren: -1.0, + ren: info.fac0, + fac: info.fac0, + }], + if has_pdf1 { info.x0.clone() } else { 
vec![1.0] }, + if has_pdf1 { vec![1.0] } else { info.x0.clone() }, + ) + .into() + })); + } + + let pid = if grid.convolutions()[0] == Convolution::None { + grid.channels()[0].entry()[0].0 + } else { + grid.channels()[0].entry()[0].1 + }; + + Ok(( + Array1::from_iter(sub_fk_tables) + .into_shape((1, grid.bin_info().bins(), channels0.len())) + .unwrap(), + channels0 + .iter() + .map(|&a| { + channel![ + if has_pdf1 { a } else { pid }, + if has_pdf1 { pid } else { a }, + 1.0 + ] + }) + .collect(), + )) +} + +pub(crate) fn evolve_slice_with_two( + grid: &Grid, + operator: &ArrayView4, + info: &OperatorSliceInfo, + order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, +) -> Result<(Array3, Vec), GridError> { + let gluon_has_pid_zero = gluon_has_pid_zero(grid); + + // TODO: generalize by iterating up to `n` + let (pid_indices, pids01): (Vec<_>, Vec<_>) = (0..2) + .map(|d| { + pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { + grid.channels() + .iter() + .flat_map(Channel::entry) + .any(|tuple| match d { + // TODO: `Channel::entry` should return a tuple of a `Vec` and an `f64` + 0 => tuple.0 == pid1, + 1 => tuple.1 == pid1, + _ => unreachable!(), + }) + }) + }) + .collect::, _>>()? 
+ .into_iter() + .unzip(); + + let mut channels0: Vec<_> = pids01 + .iter() + .map(|pids| pids.iter().map(|&(pid0, _)| pid0)) + .multi_cartesian_product() + .collect(); + channels0.sort_unstable(); + channels0.dedup(); + let channels0 = channels0; + + let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); + + // TODO: generalize to `n` + let mut last_x1 = vec![Vec::new(); 2]; + let mut operators = vec![Vec::new(); 2]; + + for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) { + let mut tables = vec![Array2::zeros((info.x0.len(), info.x0.len())); channels0.len()]; + + for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) { + let (x1, array) = ndarray_from_subgrid_orders_slice( + info.fac1, + &subgrids_o, + grid.orders(), + order_mask, + xi, + alphas_table, + )?; + + // skip over zero arrays to speed up evolution and avoid problems with NaNs + let Some(array) = array else { + continue; + }; + + for (last_x1, x1, pid_indices, operators) in + izip!(&mut last_x1, x1, &pid_indices, &mut operators) + { + if (last_x1.len() != x1.len()) + || last_x1 + .iter() + .zip(x1.iter()) + .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) + { + *operators = operator_slices(operator, info, pid_indices, &x1)?; + *last_x1 = x1; + } + } + + let mut tmp = Array2::zeros((last_x1[0].len(), info.x0.len())); + + for (pids1, factor) in channel1 + .entry() + .iter() + .map(|&(pida1, pidb1, factor)| ([pida1, pidb1], factor)) + { + for (fk_table, ops) in + channels0 + .iter() + .zip(tables.iter_mut()) + .filter_map(|(pids0, fk_table)| { + izip!(pids0, &pids1, &pids01, &operators) + .map(|(&pid0, &pid1, pids, operators)| { + pids.iter().zip(operators).find_map(|(&(p0, p1), op)| { + ((p0 == pid0) && (p1 == pid1)).then_some(op) + }) + }) + // TODO: avoid using `collect` + .collect::>>() + .map(|ops| (fk_table, ops)) + }) + { + linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); + 
linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, fk_table); + } + } + } + + sub_fk_tables.extend(tables.into_iter().map(|table| { + ImportOnlySubgridV2::new( + SparseArray3::from_ndarray(table.insert_axis(Axis(0)).view(), 0, 1), + vec![Mu2 { + // TODO: FK tables don't depend on the renormalization scale + //ren: -1.0, + ren: info.fac0, + fac: info.fac0, + }], + info.x0.clone(), + info.x0.clone(), + ) + .into() + })); + } + + Ok(( + Array1::from_iter(sub_fk_tables) + .into_shape((1, grid.bin_info().bins(), channels0.len())) + .unwrap(), + channels0 + .iter() + .map(|c| channel![c[0], c[1], 1.0]) + .collect(), + )) +} + +pub(crate) fn evolve_slice_with_two2( + grid: &Grid, + operators: &[ArrayView4], + infos: &[OperatorSliceInfo], + order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, +) -> Result<(Array3, Vec), GridError> { + let gluon_has_pid_zero = gluon_has_pid_zero(grid); + + // TODO: implement matching of different scales for different EKOs + let mut fac1_scales: Vec<_> = infos.iter().map(|info| info.fac1).collect(); + fac1_scales.sort_by(f64::total_cmp); + assert!(fac1_scales.windows(2).all(|scales| approx_eq!( + f64, + scales[0], + scales[1], + ulps = EVOLUTION_TOL_ULPS + ))); + let fac1 = fac1_scales[0]; + + // TODO: generalize by iterating up to `n` + let (pid_indices, pids01): (Vec<_>, Vec<_>) = izip!(0..2, operators, infos) + .map(|(d, operator, info)| { + pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { + grid.channels() + .iter() + .flat_map(Channel::entry) + .any(|tuple| match d { + // TODO: `Channel::entry` should return a tuple of a `Vec` and an `f64` + 0 => tuple.0 == pid1, + 1 => tuple.1 == pid1, + _ => unreachable!(), + }) + }) + }) + .collect::, _>>()? 
+ .into_iter() + .unzip(); + + let mut channels0: Vec<_> = pids01 + .iter() + .map(|pids| pids.iter().map(|&(pid0, _)| pid0)) + .multi_cartesian_product() + .collect(); + channels0.sort_unstable(); + channels0.dedup(); + let channels0 = channels0; + + let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); + + // TODO: generalize to `n` + let mut last_x1 = vec![Vec::new(); 2]; + let mut eko_slices = vec![Vec::new(); 2]; + + for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) { + assert_eq!(infos[0].x0.len(), infos[1].x0.len()); + + let mut tables = + vec![Array2::zeros((infos[0].x0.len(), infos[1].x0.len())); channels0.len()]; + + for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) { + let (x1, array) = ndarray_from_subgrid_orders_slice( + fac1, + &subgrids_o, + grid.orders(), + order_mask, + xi, + alphas_table, + )?; + + // skip over zero arrays to speed up evolution and avoid problems with NaNs + let Some(array) = array else { + continue; + }; + + for (last_x1, x1, pid_indices, slices, operator, info) in izip!( + &mut last_x1, + x1, + &pid_indices, + &mut eko_slices, + operators, + infos + ) { + if (last_x1.len() != x1.len()) + || last_x1 + .iter() + .zip(x1.iter()) + .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) + { + *slices = operator_slices(operator, info, pid_indices, &x1)?; + *last_x1 = x1; + } + } + + let mut tmp = Array2::zeros((last_x1[0].len(), infos[1].x0.len())); + + for (pids1, factor) in channel1 + .entry() + .iter() + .map(|&(pida1, pidb1, factor)| ([pida1, pidb1], factor)) + { + for (fk_table, ops) in + channels0 + .iter() + .zip(tables.iter_mut()) + .filter_map(|(pids0, fk_table)| { + izip!(pids0, &pids1, &pids01, &eko_slices) + .map(|(&pid0, &pid1, pids, slices)| { + pids.iter().zip(slices).find_map(|(&(p0, p1), op)| { + ((p0 == pid0) && (p1 == pid1)).then_some(op) + }) + }) + // TODO: avoid using `collect` + .collect::>>() + .map(|ops| (fk_table, ops)) + 
}) + { + // tmp = array * ops[1]^T + linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); + // fk_table += factor * ops[0] * tmp + linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, fk_table); + } + } + } + + sub_fk_tables.extend(tables.into_iter().map(|table| { + ImportOnlySubgridV2::new( + SparseArray3::from_ndarray(table.insert_axis(Axis(0)).view(), 0, 1), + vec![Mu2 { + // TODO: FK tables don't depend on the renormalization scale + //ren: -1.0, + ren: infos[0].fac0, + fac: infos[0].fac0, + }], + infos[0].x0.clone(), + infos[1].x0.clone(), + ) + .into() + })); + } + + Ok(( + Array1::from_iter(sub_fk_tables) + .into_shape((1, grid.bin_info().bins(), channels0.len())) + .unwrap(), + channels0 + .iter() + .map(|c| channel![c[0], c[1], 1.0]) + .collect(), + )) +} diff --git a/pineappl_v0/src/fk_table.rs b/pineappl_v0/src/fk_table.rs new file mode 100644 index 000000000..b7d045194 --- /dev/null +++ b/pineappl_v0/src/fk_table.rs @@ -0,0 +1,441 @@ +//! Provides the [`FkTable`] type. + +use super::boc::Order; +use super::convolutions::{Convolution, LumiCache}; +use super::grid::{Grid, GridError}; +use super::subgrid::Subgrid; +use float_cmp::approx_eq; +use ndarray::Array4; +use std::collections::HashMap; +use std::fmt::{self, Display, Formatter}; +use std::io::Write; +use std::str::FromStr; +use thiserror::Error; + +/// Structure implementing FK tables. These are special [`Grid`]s, for which the following +/// additional guarantees are given: +/// +/// - all subgrids of the grid evaluate the PDFs at a single factorization scale given by +/// [`FkTable::muf2`]. +/// - all subgrids, for both hadronic initial states (if both initial states are hadronic), share +/// the same `x` grid. See [`FkTable::x_grid`]. +/// - the channel definitions are *simple*, meaning that every entry consists of a single pair of +/// partons with trivial factor `1.0`, and all tuples are distinct from each other. See +/// [`Grid::channels`]. 
+/// - the FK table's grid contains only a single [`Order`], whose exponents are all zero. +#[repr(transparent)] +pub struct FkTable { + grid: Grid, +} + +/// The error type returned when a conversion of a [`Grid`] to an [`FkTable`] fails. +#[derive(Debug, Error)] +pub enum TryFromGridError { + /// Error if the grid contains multiple scales instead of a single one. + #[error("multiple scales detected")] + MultipleScales, + /// Error if the channels are not simple. + #[error("complicated channel function detected")] + InvalidChannel, + /// Error if the order of the grid was not a single one with all zeros in the exponents. + #[error("multiple orders detected")] + NonTrivialOrder, +} + +/// The optimization assumptions for an [`FkTable`], needed for [`FkTable::optimize`]. Since FK +/// tables are typically stored at very small `Q2 = Q0`, the PDFs `f(x,Q0)` of heavy quarks are +/// typically set to zero at this scale or set to the same value as their anti-quark PDF. This is +/// used to optimize the size of FK tables. +#[derive(Debug, Clone, Copy, Eq, PartialEq)] +pub enum FkAssumptions { + /// All quark PDFs are non-zero at the FK table scale and completely independent. + Nf6Ind, + /// Like [`Nf6Ind`](Self::Nf6Ind), but the PDFs of top and anti-top quarks are the same at FK + /// table scale. + Nf6Sym, + /// Like [`Nf6Ind`](Self::Nf6Ind), but the PDFs of top and anti-top quarks are zero at FK table + /// scale. + Nf5Ind, + /// Like [`Nf5Ind`](Self::Nf5Ind), but the PDFs of bottom and anti-bottom quarks are the same + /// at FK table scale. + Nf5Sym, + /// Like [`Nf5Ind`](Self::Nf5Ind), but the PDFs of bottom and anti-bottom quarks are zero at FK + /// table scale. + Nf4Ind, + /// Like [`Nf4Ind`](Self::Nf4Ind), but the PDFs of charm and anti-charm quarks are the same at + /// FK table scale. PDF sets that make this assumption are NNPDF4.0 and NNPDF3.1 at fitting + /// scale. 
+ Nf4Sym, + /// Like [`Nf4Ind`](Self::Nf4Ind), but the PDFs of charm and anti-charm quarks are zero at FK + /// table scale. PDF sets that make this assumption are MSHT20 and NNPDF3.0 at fitting scale. + Nf3Ind, + /// Like [`Nf3Ind`](Self::Nf3Ind), but the PDFs of strange and anti-strange are the same at FK + /// table scale. A PDF set that makes this assumption is CT18 at fitting scale. + Nf3Sym, +} + +/// Error type when trying to construct [`FkAssumptions`] with a string. +#[derive(Debug, Eq, Error, PartialEq)] +#[error("unknown variant for FkAssumptions: {variant}")] +pub struct UnknownFkAssumption { + variant: String, +} + +impl Display for FkAssumptions { + fn fmt(&self, f: &mut Formatter) -> fmt::Result { + write!( + f, + "{}", + match self { + Self::Nf6Ind => "Nf6Ind", + Self::Nf6Sym => "Nf6Sym", + Self::Nf5Ind => "Nf5Ind", + Self::Nf5Sym => "Nf5Sym", + Self::Nf4Ind => "Nf4Ind", + Self::Nf4Sym => "Nf4Sym", + Self::Nf3Ind => "Nf3Ind", + Self::Nf3Sym => "Nf3Sym", + } + ) + } +} + +impl FromStr for FkAssumptions { + type Err = UnknownFkAssumption; + + fn from_str(s: &str) -> Result { + Ok(match s { + "Nf6Ind" => Self::Nf6Ind, + "Nf6Sym" => Self::Nf6Sym, + "Nf5Ind" => Self::Nf5Ind, + "Nf5Sym" => Self::Nf5Sym, + "Nf4Ind" => Self::Nf4Ind, + "Nf4Sym" => Self::Nf4Sym, + "Nf3Ind" => Self::Nf3Ind, + "Nf3Sym" => Self::Nf3Sym, + _ => { + return Err(UnknownFkAssumption { + variant: s.to_owned(), + }); + } + }) + } +} + +impl FkTable { + /// Returns the [`Grid`] object for this `FkTable`. + #[must_use] + pub const fn grid(&self) -> &Grid { + &self.grid + } + + // TODO: when trying to convert the following function to `const` as per clippy's suggestion, + // the compiler errors out with: 'the destructor for this type cannot be evaluated in constant + // functions' + + /// Converts the `FkTable` back to a [`Grid`]. 
+ #[must_use] + pub fn into_grid(self) -> Grid { + self.grid + } + + /// Returns the FK table represented as a four-dimensional array indexed by `bin`, `channel`, + /// `x1` and `x2`, in this order. + /// + /// # Panics + /// + /// TODO + #[must_use] + pub fn table(&self) -> Array4 { + let has_pdf1 = self.grid.convolutions()[0] != Convolution::None; + let has_pdf2 = self.grid.convolutions()[1] != Convolution::None; + let x_grid = self.x_grid(); + + let mut result = Array4::zeros(( + self.bins(), + self.grid.channels().len(), + if has_pdf1 { x_grid.len() } else { 1 }, + if has_pdf2 { x_grid.len() } else { 1 }, + )); + + for ((_, bin, channel), subgrid) in self.grid().subgrids().indexed_iter() { + let indices1 = if has_pdf1 { + subgrid + .x1_grid() + .iter() + .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) + .collect::>() + .unwrap() + } else { + vec![0] + }; + let indices2 = if has_pdf2 { + subgrid + .x2_grid() + .iter() + .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) + .collect::>() + .unwrap() + } else { + vec![0] + }; + + for ((_, ix1, ix2), value) in subgrid.indexed_iter() { + result[[bin, channel, indices1[ix1], indices2[ix2]]] = value; + } + } + + result + } + + /// Returns the number of bins for this `FkTable`. + #[must_use] + pub fn bins(&self) -> usize { + self.grid.bin_info().bins() + } + + /// Extract the normalizations for each bin. + #[must_use] + pub fn bin_normalizations(&self) -> Vec { + self.grid.bin_info().normalizations() + } + + /// Extract the number of dimensions for bins. + #[must_use] + pub fn bin_dimensions(&self) -> usize { + self.grid.bin_info().dimensions() + } + + /// Extract the left edges of a specific bin dimension. + #[must_use] + pub fn bin_left(&self, dimension: usize) -> Vec { + self.grid.bin_info().left(dimension) + } + + /// Extract the right edges of a specific bin dimension. 
+ #[must_use] + pub fn bin_right(&self, dimension: usize) -> Vec { + self.grid.bin_info().right(dimension) + } + + /// Access meta data + #[must_use] + pub const fn key_values(&self) -> Option<&HashMap> { + self.grid.key_values() + } + + /// Return the channel definition for this `FkTable`. All factors are `1.0`. + #[must_use] + pub fn channels(&self) -> Vec<(i32, i32)> { + self.grid + .channels() + .iter() + .map(|entry| (entry.entry()[0].0, entry.entry()[0].1)) + .collect() + } + + /// Returns the single `muf2` scale of this `FkTable`. + #[must_use] + pub fn muf2(&self) -> f64 { + if let &[muf2] = &self.grid.evolve_info(&[true]).fac1[..] { + muf2 + } else { + // every `FkTable` has only a single factorization scale + unreachable!() + } + } + + /// Returns the x grid that all subgrids for all hadronic initial states share. + #[must_use] + pub fn x_grid(&self) -> Vec { + self.grid.evolve_info(&[true]).x1 + } + + /// Propagate write to grid + /// + /// # Errors + /// + /// TODO + pub fn write(&self, writer: impl Write) -> Result<(), GridError> { + self.grid.write(writer) + } + + /// Propagate `write_lz4` to `Grid`. + /// + /// # Errors + /// + /// See [`Grid::write_lz4`]. + pub fn write_lz4(&self, writer: impl Write) -> Result<(), GridError> { + self.grid.write_lz4(writer) + } + + /// Convolve the FK-table. This method has fewer arguments than [`Grid::convolve`], because + /// FK-tables have all orders merged together and do not support scale variations. + pub fn convolve( + &self, + lumi_cache: &mut LumiCache, + bin_indices: &[usize], + channel_mask: &[bool], + ) -> Vec { + self.grid + .convolve(lumi_cache, &[], bin_indices, channel_mask, &[(1.0, 1.0)]) + } + + /// Set a metadata key-value pair + pub fn set_key_value(&mut self, key: &str, value: &str) { + self.grid.set_key_value(key, value); + } + + /// Optimizes the storage of FK tables based of assumptions of the PDFs at the FK table's + /// scale. 
+ /// + /// # Panics + /// + /// TODO + pub fn optimize(&mut self, assumptions: FkAssumptions) { + let mut add = Vec::new(); + + match assumptions { + FkAssumptions::Nf6Ind => { + // nothing to do here + } + FkAssumptions::Nf6Sym => { + add.push((235, 200)); + } + FkAssumptions::Nf5Ind => { + add.extend_from_slice(&[(235, 200), (135, 100)]); + } + FkAssumptions::Nf5Sym => { + add.extend_from_slice(&[(235, 200), (135, 100), (224, 200)]); + } + FkAssumptions::Nf4Ind => { + add.extend_from_slice(&[(235, 200), (135, 100), (224, 200), (124, 100)]); + } + FkAssumptions::Nf4Sym => { + add.extend_from_slice(&[ + (235, 200), + (135, 100), + (224, 200), + (124, 100), + (215, 200), + ]); + } + FkAssumptions::Nf3Ind => { + add.extend_from_slice(&[ + (235, 200), + (135, 100), + (224, 200), + (124, 100), + (215, 200), + (115, 100), + ]); + } + FkAssumptions::Nf3Sym => { + add.extend_from_slice(&[ + (235, 200), + (135, 100), + (224, 200), + (124, 100), + (215, 200), + (115, 100), + (208, 200), + ]); + } + } + + self.grid.rewrite_channels(&add, &[]); + + // store the assumption so that we can check it later on + self.grid + .set_key_value("fk_assumptions", &assumptions.to_string()); + self.grid.optimize(); + } +} + +impl TryFrom for FkTable { + type Error = TryFromGridError; + + fn try_from(grid: Grid) -> Result { + let mut muf2 = -1.0; + + if grid.orders() + != [Order { + alphas: 0, + alpha: 0, + logxir: 0, + logxif: 0, + }] + { + return Err(TryFromGridError::NonTrivialOrder); + } + + for subgrid in grid.subgrids() { + if subgrid.is_empty() { + continue; + } + + let mu2_grid = subgrid.mu2_grid(); + + if mu2_grid.len() > 1 { + return Err(TryFromGridError::MultipleScales); + } + + if muf2 < 0.0 { + muf2 = mu2_grid[0].fac; + } else if muf2 != mu2_grid[0].fac { + return Err(TryFromGridError::MultipleScales); + } + } + + for channel in grid.channels() { + let entry = channel.entry(); + + if entry.len() != 1 || entry[0].2 != 1.0 { + return Err(TryFromGridError::InvalidChannel); + } + } 
+ + if (1..grid.channels().len()) + .any(|i| grid.channels()[i..].contains(&grid.channels()[i - 1])) + { + return Err(TryFromGridError::InvalidChannel); + } + + Ok(Self { grid }) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn fk_assumptions_try_from() { + assert_eq!(FkAssumptions::from_str("Nf6Ind"), Ok(FkAssumptions::Nf6Ind)); + assert_eq!(FkAssumptions::from_str("Nf6Sym"), Ok(FkAssumptions::Nf6Sym)); + assert_eq!(FkAssumptions::from_str("Nf5Ind"), Ok(FkAssumptions::Nf5Ind)); + assert_eq!(FkAssumptions::from_str("Nf5Sym"), Ok(FkAssumptions::Nf5Sym)); + assert_eq!(FkAssumptions::from_str("Nf4Ind"), Ok(FkAssumptions::Nf4Ind)); + assert_eq!(FkAssumptions::from_str("Nf4Sym"), Ok(FkAssumptions::Nf4Sym)); + assert_eq!(FkAssumptions::from_str("Nf3Ind"), Ok(FkAssumptions::Nf3Ind)); + assert_eq!(FkAssumptions::from_str("Nf3Sym"), Ok(FkAssumptions::Nf3Sym)); + assert_eq!( + FkAssumptions::from_str("XXXXXX"), + Err(UnknownFkAssumption { + variant: "XXXXXX".to_owned() + }) + ); + } + + #[test] + fn fk_assumptions_display() { + assert_eq!(format!("{}", FkAssumptions::Nf6Ind), "Nf6Ind"); + assert_eq!(format!("{}", FkAssumptions::Nf6Sym), "Nf6Sym"); + assert_eq!(format!("{}", FkAssumptions::Nf5Ind), "Nf5Ind"); + assert_eq!(format!("{}", FkAssumptions::Nf5Sym), "Nf5Sym"); + assert_eq!(format!("{}", FkAssumptions::Nf4Ind), "Nf4Ind"); + assert_eq!(format!("{}", FkAssumptions::Nf4Sym), "Nf4Sym"); + assert_eq!(format!("{}", FkAssumptions::Nf3Ind), "Nf3Ind"); + assert_eq!(format!("{}", FkAssumptions::Nf3Sym), "Nf3Sym"); + } +} diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs new file mode 100644 index 000000000..c347891d4 --- /dev/null +++ b/pineappl_v0/src/grid.rs @@ -0,0 +1,2180 @@ +//! Module containing all traits and supporting structures for grids. 
use super::bin::{BinInfo, BinLimits, BinRemapper};
use super::boc::{Channel, Order};
use super::convolutions::{Convolution, LumiCache};
use super::empty_subgrid::EmptySubgridV1;
use super::evolution::{self, AlphasTable, EvolveInfo, OperatorInfo, OperatorSliceInfo};
use super::fk_table::FkTable;
use super::import_only_subgrid::ImportOnlySubgridV2;
use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2};
use super::ntuple_subgrid::NtupleSubgridV1;
use super::pids::{self, PidBasis};
use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams};
use bitflags::bitflags;
use float_cmp::{approx_eq, assert_approx_eq};
use git_version::git_version;
use lz4_flex::frame::{FrameDecoder, FrameEncoder};
use ndarray::{s, Array3, ArrayView3, ArrayView5, ArrayViewMut3, Axis, CowArray, Dimension, Ix4};
use serde::{Deserialize, Serialize, Serializer};
use std::borrow::Cow;
use std::collections::{BTreeMap, HashMap};
use std::io::{self, BufRead, BufReader, BufWriter, Read, Write};
use std::iter;
use std::mem;
use std::ops::Range;
use thiserror::Error;

/// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a
/// corresponding `weight`. The type `W` can either be a `f64` or `()`, which is used when multiple
/// weights should be signaled.
#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)]
pub struct Ntuple<W> {
    /// Momentum fraction of the first parton.
    pub x1: f64,
    /// Momentum fraction of the second parton.
    pub x2: f64,
    /// Squared scale.
    pub q2: f64,
    /// Weight of this entry.
    pub weight: W,
}

/// Error returned when merging two grids fails.
#[derive(Debug, Error)]
pub enum GridError {
    /// Returned when trying to merge two `Grid` objects with incompatible bin limits.
    #[error(transparent)]
    InvalidBinLimits(super::bin::MergeBinError),
    /// Returned if the number of bins in the grid and in the remapper do not agree.
    #[error("the remapper has {remapper_bins} bins, but the grid has {grid_bins}")]
    BinNumberMismatch {
        /// Number of bins in the grid.
        grid_bins: usize,
        /// Number of bins in the remapper.
        remapper_bins: usize,
    },
    /// Returned when it was tried to merge bins that are non-consecutive.
    #[error(transparent)]
    MergeBinError(super::bin::MergeBinError),
    /// Returned when trying to construct a `Grid` using an unknown subgrid type.
    #[error("tried constructing a Grid with unknown Subgrid type `{0}`")]
    UnknownSubgridType(String),
    /// Returned when failed to read a Grid.
    #[error(transparent)]
    ReadFailure(bincode::Error),
    /// Returned when failed to write a Grid.
    #[error(transparent)]
    WriteFailure(bincode::Error),
    /// Returned while performing IO operations.
    #[error(transparent)]
    IoFailure(io::Error),
    /// Returned when trying to read a `PineAPPL` file with file format version that is not
    /// supported.
    #[error("the file version is {file_version}, but supported is only {supported_version}")]
    FileVersionMismatch {
        /// File format version of the file read.
        file_version: u64,
        /// Maximum supported file format version for this library.
        supported_version: u64,
    },
    /// Returned from [`Grid::evolve`] if the evolution failed.
    #[error("failed to evolve grid: {0}")]
    EvolutionFailure(String),
    /// Errors that do not originate from this crate itself.
    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

// NOTE: the `Mmv*` types are part of the serialized file format; their field and variant names
// must not be changed
#[derive(Clone, Deserialize, Serialize)]
struct Mmv1;

#[derive(Clone, Deserialize, Serialize)]
struct Mmv2 {
    remapper: Option<BinRemapper>,
    key_value_db: HashMap<String, String>,
}

// serialize the key-value pairs in a stable (sorted) order
fn ordered_map_serialize<S>(
    value: &HashMap<String, String>,
    serializer: S,
) -> Result<S::Ok, S::Error>
where
    S: Serializer,
{
    let ordered: BTreeMap<_, _> = value.iter().collect();
    ordered.serialize(serializer)
}

#[derive(Clone, Deserialize, Serialize)]
struct Mmv3 {
    remapper: Option<BinRemapper>,
    // order the HashMap before serializing it to make the output stable
    #[serde(serialize_with = "ordered_map_serialize")]
    key_value_db: HashMap<String, String>,
    subgrid_template: SubgridEnum,
}

impl Default for Mmv2 {
    fn default() -> Self {
        Self {
            remapper: None,
            key_value_db: [
                (
                    "pineappl_gitversion".to_owned(),
                    git_version!(
                        args = ["--always", "--dirty", "--long", "--tags"],
                        cargo_prefix = "cargo:",
                        fallback = "unknown"
                    )
                    .to_owned(),
                ),
                // by default we assume there are protons in the initial state
                ("initial_state_1".to_owned(), "2212".to_owned()),
                ("initial_state_2".to_owned(), "2212".to_owned()),
            ]
            .iter()
            .cloned()
            .collect(),
        }
    }
}

impl Mmv3 {
    fn new(subgrid_template: SubgridEnum) -> Self {
        Self {
            remapper: None,
            key_value_db: [
                (
                    "pineappl_gitversion".to_owned(),
                    git_version!(
                        args = ["--always", "--dirty", "--long", "--tags"],
                        cargo_prefix = "cargo:",
                        fallback = "unknown"
                    )
                    .to_owned(),
                ),
                // by default we assume there are unpolarized protons in the initial state
                // do not change these to the new metadata to not break backwards compatibility
                ("initial_state_1".to_owned(), "2212".to_owned()),
                ("initial_state_2".to_owned(), "2212".to_owned()),
            ]
            .iter()
            .cloned()
            .collect(),
            subgrid_template,
        }
    }
}

// ALLOW: fixing the warning will break the file format
#[allow(clippy::large_enum_variant)]
#[derive(Clone, Deserialize, Serialize)]
enum MoreMembers {
    V1(Mmv1),
    V2(Mmv2),
    V3(Mmv3),
}

impl MoreMembers {
    // upgrade the in-memory representation to at least V2 so that metadata can be stored
    fn upgrade(&mut self) {
        match self {
            Self::V1(_) => {
                *self = Self::V2(Mmv2::default());
            }
            Self::V2(_) | Self::V3(_) => {}
        }
    }
}

bitflags! {
    /// Bitflags for optimizing a [`Grid`]. See [`Grid::optimize_using`].
    #[derive(Clone, Copy)]
    #[repr(transparent)]
    pub struct GridOptFlags: u32 {
        /// Change the [`Subgrid`] type to optimize storage efficiency.
        const OPTIMIZE_SUBGRID_TYPE = 0b1;
        /// Recognize whether a subgrid was filled with events with a static scale and if this is
        /// the case, optimize it by undoing the interpolation in the scale. This flag requires
        /// [`Self::OPTIMIZE_SUBGRID_TYPE`] to be active.
        const STATIC_SCALE_DETECTION = 0b10;
        /// If two channels differ by transposition of the two initial states and the functions
        /// this grid is convolved with are the same for both initial states, this will merge one
        /// channel into the other, with the correct transpositions.
        const SYMMETRIZE_CHANNELS = 0b100;
        /// Remove all orders ([`Grid::orders`]), which do not contain any non-zero subgrids.
        const STRIP_EMPTY_ORDERS = 0b1000;
        /// Merge the subgrids of channels which have the same definition.
        const MERGE_SAME_CHANNELS = 0b10000;
        /// Remove all channels ([`Grid::channels`]), which do not contain any non-zero subgrids.
        const STRIP_EMPTY_CHANNELS = 0b10_0000;
    }
}

/// Main data structure of `PineAPPL`. This structure contains a `Subgrid` for each `LumiEntry`,
/// bin, and coupling order it was created with.
#[derive(Clone, Deserialize, Serialize)]
pub struct Grid {
    // indexed by (order, bin, channel)
    subgrids: Array3<SubgridEnum>,
    channels: Vec<Channel>,
    bin_limits: BinLimits,
    orders: Vec<Order>,
    subgrid_params: SubgridParams,
    more_members: MoreMembers,
}

impl Grid {
    /// Constructor.
+ #[must_use] + pub fn new( + channels: Vec, + orders: Vec, + bin_limits: Vec, + subgrid_params: SubgridParams, + ) -> Self { + Self { + subgrids: Array3::from_shape_simple_fn( + (orders.len(), bin_limits.len() - 1, channels.len()), + || EmptySubgridV1.into(), + ), + orders, + channels, + bin_limits: BinLimits::new(bin_limits), + more_members: MoreMembers::V3(Mmv3::new( + LagrangeSubgridV2::new(&subgrid_params, &ExtraSubgridParams::from(&subgrid_params)) + .into(), + )), + subgrid_params, + } + } + + /// Constructor. This function can be used like `new`, but the additional parameter + /// `subgrid_type` selects the underlying `Subgrid` type. Supported values are: + /// - `LagrangeSubgrid` + /// - `LagrangeSparseSubgrid` + /// - `NtupleSubgrid` + /// + /// # Errors + /// + /// If `subgrid_type` is none of the values listed above, an error is returned. + pub fn with_subgrid_type( + channels: Vec, + orders: Vec, + bin_limits: Vec, + subgrid_params: SubgridParams, + extra: ExtraSubgridParams, + subgrid_type: &str, + ) -> Result { + let subgrid_template: SubgridEnum = match subgrid_type { + "LagrangeSubgrid" | "LagrangeSubgridV2" => { + LagrangeSubgridV2::new(&subgrid_params, &extra).into() + } + "LagrangeSubgridV1" => LagrangeSubgridV1::new(&subgrid_params).into(), + "NtupleSubgrid" => NtupleSubgridV1::new().into(), + "LagrangeSparseSubgrid" => LagrangeSparseSubgridV1::new(&subgrid_params).into(), + _ => return Err(GridError::UnknownSubgridType(subgrid_type.to_owned())), + }; + + Ok(Self { + subgrids: Array3::from_shape_simple_fn( + (orders.len(), bin_limits.len() - 1, channels.len()), + || EmptySubgridV1.into(), + ), + orders, + channels, + bin_limits: BinLimits::new(bin_limits), + subgrid_params, + more_members: MoreMembers::V3(Mmv3::new(subgrid_template)), + }) + } + + /// Return by which convention the particle IDs are encoded. 
+ #[must_use] + pub fn pid_basis(&self) -> PidBasis { + if let Some(key_values) = self.key_values() { + if let Some(lumi_id_types) = key_values.get("lumi_id_types") { + match lumi_id_types.as_str() { + "pdg_mc_ids" => return PidBasis::Pdg, + "evol" => return PidBasis::Evol, + _ => unimplemented!("unknown particle ID convention {lumi_id_types}"), + } + } + } + + // if there's no basis explicitly set we're assuming to use PDG IDs + PidBasis::Pdg + } + + /// Set the convention by which PIDs of channels are interpreted. + pub fn set_pid_basis(&mut self, pid_basis: PidBasis) { + match pid_basis { + PidBasis::Pdg => self.set_key_value("lumi_id_types", "pdg_mc_ids"), + PidBasis::Evol => self.set_key_value("lumi_id_types", "evol"), + } + } + + fn pdg_channels(&self) -> Cow<[Channel]> { + match self.pid_basis() { + PidBasis::Evol => self + .channels + .iter() + .map(|entry| Channel::translate(entry, &pids::evol_to_pdg_mc_ids)) + .collect(), + PidBasis::Pdg => Cow::Borrowed(self.channels()), + } + } + + /// Perform a convolution using the PDFs and strong coupling in `lumi_cache`, and only + /// selecting only the orders, bins and channels corresponding to `order_mask`, `bin_indices` + /// and `channel_mask`. A variation of the scales is performed using the factors in `xi`; the + /// first factor varies the renormalization scale, the second the factorization scale. Note + /// that for the variation to be trusted all non-zero log-grids must be contained. 
+ /// + /// # Panics + /// + /// TODO + pub fn convolve( + &self, + lumi_cache: &mut LumiCache, + order_mask: &[bool], + bin_indices: &[usize], + channel_mask: &[bool], + xi: &[(f64, f64)], + ) -> Vec { + lumi_cache.setup(self, xi).unwrap(); + + let bin_indices = if bin_indices.is_empty() { + (0..self.bin_info().bins()).collect() + } else { + bin_indices.to_vec() + }; + let mut bins = vec![0.0; bin_indices.len() * xi.len()]; + let normalizations = self.bin_info().normalizations(); + let pdg_channels = self.pdg_channels(); + + for (xi_index, &(xir, xif)) in xi.iter().enumerate() { + for ((ord, bin, chan), subgrid) in self.subgrids.indexed_iter() { + let order = &self.orders[ord]; + + if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) { + continue; + } + + if (!order_mask.is_empty() && !order_mask[ord]) + || (!channel_mask.is_empty() && !channel_mask[chan]) + { + continue; + } + + let Some(bin_index) = bin_indices.iter().position(|&index| index == bin) else { + continue; + }; + + if subgrid.is_empty() { + continue; + } + + let channel = &pdg_channels[chan]; + let mu2_grid = subgrid.mu2_grid(); + let x1_grid = subgrid.x1_grid(); + let x2_grid = subgrid.x2_grid(); + + lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); + + let mut value = + subgrid.convolve(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { + let x1 = x1_grid[ix1]; + let x2 = x2_grid[ix2]; + let mut lumi = 0.0; + + for entry in channel.entry() { + let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); + let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); + lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); + } + + let alphas = lumi_cache.alphas(imu2); + + lumi *= alphas.powi(order.alphas.try_into().unwrap()); + lumi + }); + + if order.logxir > 0 { + value *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); + } + + if order.logxif > 0 { + value *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); + } + + bins[xi_index + xi.len() * bin_index] += value / 
normalizations[bin]; + } + } + + bins + } + + /// Convolutes a single subgrid `(order, bin, channel)` with the PDFs strong coupling given by + /// `xfx1`, `xfx2` and `alphas`. The convolution result is fully differentially, such that the + /// axes of the result correspond to the values given by the subgrid `q2`, `x1` and `x2` grid + /// values. + /// + /// # Panics + /// + /// TODO + pub fn convolve_subgrid( + &self, + lumi_cache: &mut LumiCache, + ord: usize, + bin: usize, + channel: usize, + xir: f64, + xif: f64, + ) -> Array3 { + lumi_cache.setup(self, &[(xir, xif)]).unwrap(); + + let normalizations = self.bin_info().normalizations(); + let pdg_channels = self.pdg_channels(); + + let subgrid = &self.subgrids[[ord, bin, channel]]; + let order = &self.orders[ord]; + + let channel = &pdg_channels[channel]; + let mu2_grid = subgrid.mu2_grid(); + let x1_grid = subgrid.x1_grid(); + let x2_grid = subgrid.x2_grid(); + + lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); + + let mut array = Array3::zeros((mu2_grid.len(), x1_grid.len(), x2_grid.len())); + + for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { + let x1 = x1_grid[ix1]; + let x2 = x2_grid[ix2]; + let mut lumi = 0.0; + + for entry in channel.entry() { + let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); + let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); + lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); + } + + let alphas = lumi_cache.alphas(imu2); + + lumi *= alphas.powi(order.alphas.try_into().unwrap()); + + array[[imu2, ix1, ix2]] = lumi * value; + } + + if order.logxir > 0 { + array *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); + } + + if order.logxif > 0 { + array *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); + } + + array /= normalizations[bin]; + array + } + + /// Fills the grid with an ntuple for the given `order`, `observable`, and `channel`. 
+ /// + /// # Panics + /// + /// TODO + pub fn fill(&mut self, order: usize, observable: f64, channel: usize, ntuple: &Ntuple) { + if let Some(bin) = self.bin_limits.index(observable) { + let subgrid = &mut self.subgrids[[order, bin, channel]]; + if let SubgridEnum::EmptySubgridV1(_) = subgrid { + if let MoreMembers::V3(mmv3) = &self.more_members { + *subgrid = mmv3.subgrid_template.clone_empty(); + } else { + unreachable!(); + } + } + + subgrid.fill(ntuple); + } + } + + /// Construct a `Grid` by deserializing it from `reader`. Reading is buffered. + /// + /// # Errors + /// + /// If reading from the compressed or uncompressed stream fails an error is returned. + pub fn read(reader: impl Read) -> Result { + let mut reader = BufReader::new(reader); + let buffer = reader.fill_buf().map_err(GridError::IoFailure)?; + let magic_bytes: [u8; 4] = buffer[0..4].try_into().unwrap_or_else(|_| unreachable!()); + + if u32::from_le_bytes(magic_bytes) == 0x18_4D_22_04 { + Self::read_uncompressed(FrameDecoder::new(reader)) + } else { + Self::read_uncompressed(reader) + } + } + + fn read_uncompressed(mut reader: impl BufRead) -> Result { + let magic_bytes: [u8; 16] = reader.fill_buf().map_err(GridError::IoFailure)?[0..16] + .try_into() + .unwrap_or_else(|_| unreachable!()); + + let file_version = if &magic_bytes[0..8] == b"PineAPPL" { + reader.consume(16); + u64::from_le_bytes( + magic_bytes[8..16] + .try_into() + .unwrap_or_else(|_| unreachable!()), + ) + } else { + 0 + }; + + if file_version != 0 { + return Err(GridError::FileVersionMismatch { + file_version, + supported_version: 0, + }); + } + + bincode::deserialize_from(reader).map_err(GridError::ReadFailure) + } + + /// Serializes `self` into `writer`. Writing is buffered. + /// + /// # Errors + /// + /// If writing fails an error is returned. 
+ pub fn write(&self, writer: impl Write) -> Result<(), GridError> { + let mut writer = BufWriter::new(writer); + let file_header = b"PineAPPL\0\0\0\0\0\0\0\0"; + + // first write PineAPPL file header + writer.write(file_header).map_err(GridError::IoFailure)?; + + // then serialize + bincode::serialize_into(writer, self).map_err(GridError::WriteFailure) + } + + /// Serializes `self` into `writer`, using LZ4 compression. Writing is buffered. + /// + /// # Errors + /// + /// If writing or compression fails an error is returned. + /// + /// # Panics + /// + /// TODO + pub fn write_lz4(&self, writer: impl Write) -> Result<(), GridError> { + let mut encoder = FrameEncoder::new(writer); + self.write(&mut encoder)?; + // TODO: get rid of the unwrap call and return the error + encoder.try_finish().unwrap(); + + Ok(()) + } + + /// Fills the grid with events for the parton momentum fractions `x1` and `x2`, the scale `q2`, + /// and the `order` and `observable`. The events are stored in `weights` and their ordering + /// corresponds to the ordering of [`Grid::channels`]. + pub fn fill_all( + &mut self, + order: usize, + observable: f64, + ntuple: &Ntuple<()>, + weights: &[f64], + ) { + for (channel, weight) in weights.iter().enumerate() { + self.fill( + order, + observable, + channel, + &Ntuple { + x1: ntuple.x1, + x2: ntuple.x2, + q2: ntuple.q2, + weight: *weight, + }, + ); + } + } + + /// Return the channels for this `Grid`. + #[must_use] + pub fn channels(&self) -> &[Channel] { + &self.channels + } + + /// Merges the bins for the corresponding range together in a single one. + /// + /// # Errors + /// + /// When the given bins are non-consecutive, an error is returned. 
+ pub fn merge_bins(&mut self, bins: Range) -> Result<(), GridError> { + self.bin_limits + .merge_bins(bins.clone()) + .map_err(GridError::MergeBinError)?; + + if let Some(remapper) = self.remapper_mut() { + remapper + .merge_bins(bins.clone()) + .map_err(GridError::MergeBinError)?; + } + + let bin_count = self.bin_info().bins(); + let mut old_subgrids = mem::replace( + &mut self.subgrids, + Array3::from_shape_simple_fn( + (self.orders.len(), bin_count, self.channels.len()), + || EmptySubgridV1.into(), + ), + ); + + for ((order, bin, channel), subgrid) in old_subgrids.indexed_iter_mut() { + if subgrid.is_empty() { + continue; + } + + if bins.contains(&bin) { + let new_subgrid = &mut self.subgrids[[order, bins.start, channel]]; + + if new_subgrid.is_empty() { + mem::swap(new_subgrid, subgrid); + } else { + new_subgrid.merge(subgrid, false); + } + } else { + let new_bin = if bin > bins.start { + bin - (bins.end - bins.start) + 1 + } else { + bin + }; + + mem::swap(&mut self.subgrids[[order, new_bin, channel]], subgrid); + } + } + + Ok(()) + } + + /// Merges the non-empty `Subgrid`s contained in `other` into `self`. + /// + /// # Errors + /// + /// If the bin limits of `self` and `other` are different and if the bin limits of `other` can + /// not be merged with `self` an error is returned. 
+ /// + /// # Panics + /// + /// TODO + pub fn merge(&mut self, mut other: Self) -> Result<(), GridError> { + let mut new_orders: Vec = Vec::new(); + let mut new_bins = 0; + let mut new_entries: Vec = Vec::new(); + + if self.bin_info() != other.bin_info() { + let lhs_bins = self.bin_info().bins(); + new_bins = other.bin_info().bins(); + + let lhs_remapper = self.remapper_mut(); + let rhs_remapper = other.remapper(); + + if let Some(lhs) = lhs_remapper { + if let Some(rhs) = rhs_remapper { + lhs.merge(rhs).map_err(GridError::MergeBinError)?; + + let a = u32::try_from(lhs_bins).unwrap_or_else(|_| unreachable!()); + let b = u32::try_from(lhs_bins + new_bins).unwrap_or_else(|_| unreachable!()); + + self.bin_limits = BinLimits::new((0..=b).map(f64::from).collect()); + other.bin_limits = BinLimits::new((a..=b).map(f64::from).collect()); + } else { + // Return an error + todo!(); + } + } else if rhs_remapper.is_none() { + self.bin_limits + .merge(&other.bin_limits) + .map_err(GridError::InvalidBinLimits)?; + } else { + // Return an error + todo!(); + } + } + + for ((i, _, k), _) in other + .subgrids + .indexed_iter_mut() + .filter(|((_, _, _), subgrid)| !subgrid.is_empty()) + { + let other_order = &other.orders[i]; + let other_entry = &other.channels[k]; + + if !self + .orders + .iter() + .chain(new_orders.iter()) + .any(|x| x == other_order) + { + new_orders.push(other_order.clone()); + } + + if !self + .channels() + .iter() + .chain(new_entries.iter()) + .any(|y| y == other_entry) + { + new_entries.push(other_entry.clone()); + } + } + + if !new_orders.is_empty() || !new_entries.is_empty() || (new_bins != 0) { + self.increase_shape(&(new_orders.len(), new_bins, new_entries.len())); + } + + self.orders.append(&mut new_orders); + self.channels.append(&mut new_entries); + + let bin_indices: Vec<_> = (0..other.bin_info().bins()) + .map(|bin| { + self.bin_info() + .find_bin(&other.bin_info().bin_limits(bin)) + .unwrap_or_else(|| panic!("failed for {bin}")) + }) + .collect(); 
+ + for ((i, j, k), subgrid) in other + .subgrids + .indexed_iter_mut() + .filter(|((_, _, _), subgrid)| !subgrid.is_empty()) + { + let other_order = &other.orders[i]; + let other_entry = &other.channels[k]; + + let self_i = self.orders.iter().position(|x| x == other_order).unwrap(); + let self_j = bin_indices[j]; + let self_k = self.channels.iter().position(|y| y == other_entry).unwrap(); + + if self.subgrids[[self_i, self_j, self_k]].is_empty() { + mem::swap(&mut self.subgrids[[self_i, self_j, self_k]], subgrid); + } else { + self.subgrids[[self_i, self_j, self_k]].merge(&mut *subgrid, false); + } + } + + Ok(()) + } + + /// Return a vector containing the type of convolutions performed with this grid. + /// + /// # Panics + /// + /// Panics if the metadata key--value pairs `convolution_particle_1` and `convolution_type_1`, + /// or `convolution_particle_2` and `convolution_type_2` are not correctly set. + #[must_use] + pub fn convolutions(&self) -> Vec { + self.key_values().map_or_else( + // if there isn't any metadata, we assume two unpolarized proton-PDFs are used + || vec![Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)], + |kv| { + // the current file format only supports exactly two convolutions + (1..=2) + .map(|index| { + // if there are key-value pairs `convolution_particle_1` and + // `convolution_type_1` and the same with a higher index, we convert this + // metadata into `Convolution` + match ( + kv.get(&format!("convolution_particle_{index}")) + .map(|s| s.parse::()), + kv.get(&format!("convolution_type_{index}")) + .map(String::as_str), + ) { + (_, Some("None")) => Convolution::None, + (Some(Ok(pid)), Some("UnpolPDF")) => Convolution::UnpolPDF(pid), + (Some(Ok(pid)), Some("PolPDF")) => Convolution::PolPDF(pid), + (Some(Ok(pid)), Some("UnpolFF")) => Convolution::UnpolFF(pid), + (Some(Ok(pid)), Some("PolFF")) => Convolution::PolFF(pid), + (None, None) => { + // if these key-value pairs are missing use the old metadata + match kv + 
.get(&format!("initial_state_{index}")) + .map(|s| s.parse::()) + { + Some(Ok(pid)) => { + let condition = !self.channels().iter().all(|entry| { + entry.entry().iter().all(|&channels| match index { + 1 => channels.0 == pid, + 2 => channels.1 == pid, + _ => unreachable!(), + }) + }); + + if condition { + Convolution::UnpolPDF(pid) + } else { + Convolution::None + } + } + None => Convolution::UnpolPDF(2212), + Some(Err(err)) => panic!("metadata 'initial_state_{index}' could not be parsed: {err}"), + } + } + (None, Some(_)) => { + panic!("metadata 'convolution_type_{index}' is missing") + } + (Some(_), None) => { + panic!("metadata 'convolution_particle_{index}' is missing") + } + (Some(Ok(_)), Some(type_)) => { + panic!("metadata 'convolution_type_{index} = {type_}' is unknown") + } + (Some(Err(err)), Some(_)) => panic!( + "metadata 'convolution_particle_{index}' could not be parsed: {err}" + ), + } + }) + .collect() + }, + ) + } + + /// Set the convolution type for this grid for the corresponding `index`. 
+ pub fn set_convolution(&mut self, index: usize, convolution: Convolution) { + // remove outdated metadata + self.key_values_mut() + .remove(&format!("initial_state_{}", index + 1)); + + let (type_, particle) = match convolution { + Convolution::UnpolPDF(pid) => ("UnpolPDF".to_owned(), pid.to_string()), + Convolution::PolPDF(pid) => ("PolPDF".to_owned(), pid.to_string()), + Convolution::UnpolFF(pid) => ("UnpolFF".to_owned(), pid.to_string()), + Convolution::PolFF(pid) => ("PolFF".to_owned(), pid.to_string()), + Convolution::None => ("None".to_owned(), String::new()), + }; + + self.set_key_value(&format!("convolution_type_{}", index + 1), &type_); + self.set_key_value(&format!("convolution_particle_{}", index + 1), &particle); + + // update the remaining metadata + for (index, convolution) in self.convolutions().into_iter().enumerate() { + if self + .key_values() + // UNWRAP: we set some key-values before so there must be a storage + .unwrap_or_else(|| unreachable!()) + .get(&format!("initial_state_{}", index + 1)) + .is_some() + { + self.set_convolution(index, convolution); + } + } + } + + fn increase_shape(&mut self, new_dim: &(usize, usize, usize)) { + let old_dim = self.subgrids.raw_dim().into_pattern(); + let mut new_subgrids = Array3::from_shape_simple_fn( + ( + old_dim.0 + new_dim.0, + old_dim.1 + new_dim.1, + old_dim.2 + new_dim.2, + ), + || EmptySubgridV1.into(), + ); + + for ((i, j, k), subgrid) in self.subgrids.indexed_iter_mut() { + mem::swap(&mut new_subgrids[[i, j, k]], subgrid); + } + + mem::swap(&mut self.subgrids, &mut new_subgrids); + } + + /// Scale all subgrids by `factor`. + pub fn scale(&mut self, factor: f64) { + self.subgrids + .iter_mut() + .for_each(|subgrid| subgrid.scale(factor)); + } + + /// Scales each subgrid by a factor which is the product of the given values `alphas`, `alpha`, + /// `logxir`, and `logxif`, each raised to the corresponding powers for each subgrid. 
In + /// addition, every subgrid is scaled by a factor `global` independently of its order. + /// + /// # Panics + /// + /// TODO + pub fn scale_by_order( + &mut self, + alphas: f64, + alpha: f64, + logxir: f64, + logxif: f64, + global: f64, + ) { + for ((i, _, _), subgrid) in self.subgrids.indexed_iter_mut() { + let order = &self.orders[i]; + let factor = global + * alphas.powi(order.alphas.try_into().unwrap()) + * alpha.powi(order.alpha.try_into().unwrap()) + * logxir.powi(order.logxir.try_into().unwrap()) + * logxif.powi(order.logxif.try_into().unwrap()); + + subgrid.scale(factor); + } + } + + /// Scales each subgrid by a bin-dependent factor given in `factors`. If a bin does not have a + /// corresponding entry in `factors` it is not rescaled. If `factors` has more entries than + /// there are bins the superfluous entries do not have an effect. + pub fn scale_by_bin(&mut self, factors: &[f64]) { + for ((_, bin, _), subgrid) in self.subgrids.indexed_iter_mut() { + if let Some(&factor) = factors.get(bin) { + subgrid.scale(factor); + } + } + } + + /// Returns the subgrid parameters. + #[must_use] + pub fn orders(&self) -> &[Order] { + &self.orders + } + + /// Return a mutable reference to the subgrid parameters. + #[must_use] + pub fn orders_mut(&mut self) -> &mut [Order] { + &mut self.orders + } + + /// Return a mutable reference to the grid's channels. + pub fn channels_mut(&mut self) -> &mut [Channel] { + &mut self.channels + } + + /// Return all subgrids as an `ArrayView3`. + #[must_use] + pub fn subgrids(&self) -> ArrayView3 { + self.subgrids.view() + } + + /// Return all subgrids as an `ArrayViewMut3`. + #[must_use] + pub fn subgrids_mut(&mut self) -> ArrayViewMut3 { + self.subgrids.view_mut() + } + + /// Sets a remapper. A remapper can change the dimensions and limits of each bin in this grid. + /// This is useful because many Monte Carlo integrators and also `PineAPPL` do not support + /// multi-dimensional bins. 
To work around the problem the multi-dimensional bins can be + /// projected to one-dimensional bins, and the remapper can be used to restore the multi + /// dimensionality. Furthermore, it allows to normalize each bin separately, and independently + /// of the bin widths. + /// + /// # Errors + /// + /// Returns an error if the number of bins in the grid and in the remapper do not agree. + /// + /// # Panics + /// + /// TODO + pub fn set_remapper(&mut self, remapper: BinRemapper) -> Result<(), GridError> { + if remapper.bins() != self.bin_info().bins() { + return Err(GridError::BinNumberMismatch { + grid_bins: self.bin_info().bins(), + remapper_bins: remapper.bins(), + }); + } + + self.more_members.upgrade(); + + match &mut self.more_members { + MoreMembers::V1(_) => unreachable!(), + MoreMembers::V2(mmv2) => mmv2.remapper = Some(remapper), + MoreMembers::V3(mmv3) => mmv3.remapper = Some(remapper), + } + + Ok(()) + } + + /// Return the currently set remapper, if there is any. + #[must_use] + pub const fn remapper(&self) -> Option<&BinRemapper> { + match &self.more_members { + MoreMembers::V1(_) => None, + MoreMembers::V2(mmv2) => mmv2.remapper.as_ref(), + MoreMembers::V3(mmv3) => mmv3.remapper.as_ref(), + } + } + + fn remapper_mut(&mut self) -> Option<&mut BinRemapper> { + match &mut self.more_members { + MoreMembers::V1(_) => None, + MoreMembers::V2(mmv2) => mmv2.remapper.as_mut(), + MoreMembers::V3(mmv3) => mmv3.remapper.as_mut(), + } + } + + /// Returns all information about the bins in this grid. + #[must_use] + pub const fn bin_info(&self) -> BinInfo { + BinInfo::new(&self.bin_limits, self.remapper()) + } + + /// Calls [`Self::optimize_using`] with all possible optimization options + /// ([`GridOptFlags::all`]). + pub fn optimize(&mut self) { + self.optimize_using(GridOptFlags::all()); + } + + /// Optimizes the internal datastructures for space efficiency. The parameter `flags` + /// determines which optimizations are applied, see [`GridOptFlags`]. 
+ pub fn optimize_using(&mut self, flags: GridOptFlags) { + if flags.contains(GridOptFlags::OPTIMIZE_SUBGRID_TYPE) { + let ssd = flags.contains(GridOptFlags::STATIC_SCALE_DETECTION); + self.optimize_subgrid_type(ssd); + } + if flags.contains(GridOptFlags::SYMMETRIZE_CHANNELS) { + self.symmetrize_channels(); + } + if flags.contains(GridOptFlags::STRIP_EMPTY_ORDERS) { + self.strip_empty_orders(); + } + if flags.contains(GridOptFlags::MERGE_SAME_CHANNELS) { + self.merge_same_channels(); + } + if flags.contains(GridOptFlags::STRIP_EMPTY_CHANNELS) { + self.strip_empty_channels(); + } + } + + fn optimize_subgrid_type(&mut self, static_scale_detection: bool) { + for subgrid in &mut self.subgrids { + match subgrid { + // replace empty subgrids of any type with `EmptySubgridV1` + _ if subgrid.is_empty() => { + *subgrid = EmptySubgridV1.into(); + } + // can't be optimized without losing information + SubgridEnum::NtupleSubgridV1(_) => continue, + _ => { + // TODO: this requires a `pub(crate)` in `LagrangeSubgridV2`; we should + // replace this with a method + if !static_scale_detection { + if let SubgridEnum::LagrangeSubgridV2(subgrid) = subgrid { + // disable static-scale detection + subgrid.static_q2 = -1.0; + } + } + + let mut new_subgrid = ImportOnlySubgridV2::from(&*subgrid).into(); + mem::swap(subgrid, &mut new_subgrid); + } + } + } + } + + /// Try to deduplicate channels by detecting pairs of them that contain the same subgrids. The + /// numerical equality is tested using a tolerance of `ulps`, given in [units of least + /// precision](https://docs.rs/float-cmp/latest/float_cmp/index.html#some-explanation). 
+ pub fn dedup_channels(&mut self, ulps: i64) { + let mut indices: Vec = (0..self.channels.len()).collect(); + + while let Some(index) = indices.pop() { + if let Some(other_index) = indices.iter().copied().find(|&other_index| { + let (mut a, mut b) = self + .subgrids + .multi_slice_mut((s![.., .., other_index], s![.., .., index])); + + // TODO: use `Iterator::eq_by` once stablizied + for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { + let mut it_a = lhs.indexed_iter(); + let mut it_b = rhs.indexed_iter(); + + loop { + let a = it_a.next(); + let b = it_b.next(); + + match (a, b) { + (Some((tuple_a, value_a)), Some((tuple_b, value_b))) => { + if tuple_a != tuple_b { + return false; + } + + let u = ulps; + if !approx_eq!(f64, value_a, value_b, ulps = u) { + return false; + } + } + (None, None) => break, + _ => return false, + } + } + } + + true + }) { + let old_channel = self.channels.remove(index).entry().to_vec(); + let mut new_channel = self.channels[other_index].entry().to_vec(); + new_channel.extend(old_channel); + self.channels[other_index] = Channel::new(new_channel); + self.subgrids.remove_index(Axis(2), index); + } + } + } + + fn merge_same_channels(&mut self) { + let mut indices: Vec<_> = (0..self.channels.len()).rev().collect(); + + // merge channels that are the same + while let Some(index) = indices.pop() { + if let Some((other_index, factor)) = indices.iter().find_map(|&i| { + self.channels[i] + .common_factor(&self.channels[index]) + .map(|factor| (i, factor)) + }) { + let (mut a, mut b) = self + .subgrids + .multi_slice_mut((s![.., .., other_index], s![.., .., index])); + + // check if in all cases the limits are compatible with merging + for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { + if !rhs.is_empty() { + rhs.scale(1.0 / factor); + if lhs.is_empty() { + // we can't merge into an EmptySubgridV1 + *lhs = rhs.clone_empty(); + } + lhs.merge(rhs, false); + + *rhs = EmptySubgridV1.into(); + } + } + } + } + } + + fn strip_empty_channels(&mut self) { 
+ let mut keep_channel_indices = vec![]; + let mut new_channel_entries = vec![]; + + // only keep channels that have non-zero factors and for which at least one subgrid is + // non-empty + for (channel, entry) in self.channels.iter().enumerate() { + if !entry.entry().iter().all(|&(_, _, factor)| factor == 0.0) + && !self + .subgrids + .slice(s![.., .., channel]) + .iter() + .all(Subgrid::is_empty) + { + keep_channel_indices.push(channel); + new_channel_entries.push(entry.clone()); + } + } + + // only keep the previously selected subgrids + let new_subgrids = Array3::from_shape_fn( + ( + self.orders.len(), + self.bin_info().bins(), + keep_channel_indices.len(), + ), + |(order, bin, new_channel)| { + mem::replace( + &mut self.subgrids[[order, bin, keep_channel_indices[new_channel]]], + EmptySubgridV1.into(), + ) + }, + ); + + self.channels = new_channel_entries; + self.subgrids = new_subgrids; + } + + fn strip_empty_orders(&mut self) { + let mut indices: Vec<_> = (0..self.orders().len()).collect(); + + while let Some(index) = indices.pop() { + if self + .subgrids + .slice(s![index, .., ..]) + .iter() + .all(Subgrid::is_empty) + { + self.orders.remove(index); + self.subgrids.remove_index(Axis(0), index); + } + } + } + + fn symmetrize_channels(&mut self) { + let convolutions = self.convolutions(); + if convolutions[0] != convolutions[1] { + return; + } + + let mut indices: Vec = (0..self.channels.len()).rev().collect(); + + while let Some(index) = indices.pop() { + let channel_entry = &self.channels[index]; + + if *channel_entry == channel_entry.transpose() { + // check if in all cases the limits are compatible with merging + self.subgrids + .slice_mut(s![.., .., index]) + .iter_mut() + .for_each(|subgrid| { + if !subgrid.is_empty() && (subgrid.x1_grid() == subgrid.x2_grid()) { + subgrid.symmetrize(); + } + }); + } else if let Some((j, &other_index)) = indices + .iter() + .enumerate() + .find(|(_, i)| self.channels[**i] == channel_entry.transpose()) + { + 
indices.remove(j); + + // check if in all cases the limits are compatible with merging + let (mut a, mut b) = self + .subgrids + .multi_slice_mut((s![.., .., index], s![.., .., other_index])); + + for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { + if !rhs.is_empty() { + if lhs.is_empty() { + // we can't merge into an EmptySubgridV1 + *lhs = rhs.clone_empty(); + } + + lhs.merge(rhs, true); + *rhs = EmptySubgridV1.into(); + } + } + } + } + } + + /// Upgrades the internal data structures to their latest versions. + pub fn upgrade(&mut self) { + self.more_members.upgrade(); + } + + /// Returns a map with key-value pairs, if there are any stored in this grid. + #[must_use] + pub const fn key_values(&self) -> Option<&HashMap> { + match &self.more_members { + MoreMembers::V3(mmv3) => Some(&mmv3.key_value_db), + MoreMembers::V2(mmv2) => Some(&mmv2.key_value_db), + MoreMembers::V1(_) => None, + } + } + + /// Returns a map with key-value pairs, if there are any stored in this grid. + /// + /// # Panics + /// + /// TODO + #[must_use] + pub fn key_values_mut(&mut self) -> &mut HashMap { + self.more_members.upgrade(); + + match &mut self.more_members { + MoreMembers::V1(_) => unreachable!(), + MoreMembers::V2(mmv2) => &mut mmv2.key_value_db, + MoreMembers::V3(mmv3) => &mut mmv3.key_value_db, + } + } + + /// Sets a specific key-value pair in this grid. + /// + /// # Panics + /// + /// TODO + pub fn set_key_value(&mut self, key: &str, value: &str) { + self.key_values_mut() + .insert(key.to_owned(), value.to_owned()); + } + + /// Returns information for the generation of evolution operators that are being used in + /// [`Grid::evolve`] with the parameter `order_mask`. 
+ #[must_use] + pub fn evolve_info(&self, order_mask: &[bool]) -> EvolveInfo { + use super::evolution::EVOLVE_INFO_TOL_ULPS; + + let has_pdf1 = self.convolutions()[0] != Convolution::None; + let has_pdf2 = self.convolutions()[1] != Convolution::None; + + let mut ren1 = Vec::new(); + let mut fac1 = Vec::new(); + let mut x1 = Vec::new(); + let mut pids1 = Vec::new(); + + for (channel, subgrid) in self + .subgrids() + .indexed_iter() + .filter_map(|(tuple, subgrid)| { + (!subgrid.is_empty() && (order_mask.is_empty() || order_mask[tuple.0])) + .then_some((tuple.2, subgrid)) + }) + { + ren1.extend(subgrid.mu2_grid().iter().map(|Mu2 { ren, .. }| *ren)); + ren1.sort_by(f64::total_cmp); + ren1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); + + fac1.extend(subgrid.mu2_grid().iter().map(|Mu2 { fac, .. }| *fac)); + fac1.sort_by(f64::total_cmp); + fac1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); + + if has_pdf1 { + x1.extend(subgrid.x1_grid().iter().copied()); + } + if has_pdf2 { + x1.extend(subgrid.x2_grid().iter()); + } + + x1.sort_by(f64::total_cmp); + x1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); + + if has_pdf1 { + pids1.extend(self.channels()[channel].entry().iter().map(|(a, _, _)| a)); + } + if has_pdf2 { + pids1.extend(self.channels()[channel].entry().iter().map(|(_, b, _)| b)); + } + + pids1.sort_unstable(); + pids1.dedup(); + } + + EvolveInfo { + fac1, + pids1, + x1, + ren1, + } + } + + /// Converts this `Grid` into an [`FkTable`] using an evolution kernel operator (EKO) given as + /// `operator`. The dimensions and properties of this operator must be described using `info`. + /// The parameter `order_mask` can be used to include or exclude orders from this operation, + /// and must correspond to the ordering given by [`Grid::orders`]. Orders that are not given + /// are enabled, and in particular if `order_mask` is empty all orders are activated. 
+ /// + /// # Errors + /// + /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is + /// incompatible with this `Grid`. + #[deprecated(since = "0.7.4", note = "use evolve_with_slice_iter instead")] + pub fn evolve( + &self, + operator: ArrayView5, + info: &OperatorInfo, + order_mask: &[bool], + ) -> Result { + self.evolve_with_slice_iter( + info.fac1 + .iter() + .zip(operator.axis_iter(Axis(0))) + .map(|(&fac1, op)| { + Ok::<_, GridError>(( + OperatorSliceInfo { + fac0: info.fac0, + pids0: info.pids0.clone(), + x0: info.x0.clone(), + fac1, + pids1: info.pids1.clone(), + x1: info.x1.clone(), + pid_basis: info.pid_basis, + }, + CowArray::from(op), + )) + }), + order_mask, + (info.xir, info.xif), + &AlphasTable { + ren1: info.ren1.clone(), + alphas: info.alphas.clone(), + }, + ) + } + + // TODO: + // - try to find a better solution than to require that E must be convertible into + // anyhow::Error + + /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] + /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter + /// `order_mask` can be used to include or exclude orders from this operation, and must + /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are + /// enabled, and in particular if `order_mask` is empty all orders are activated. + /// + /// # Errors + /// + /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is + /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` + /// return an error. 
+ pub fn evolve_with_slice_iter<'a, E: Into>( + &self, + slices: impl IntoIterator), E>>, + order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, + ) -> Result { + use super::evolution::EVOLVE_INFO_TOL_ULPS; + + let mut lhs: Option = None; + // Q2 slices we use + let mut used_op_fac1 = Vec::new(); + // Q2 slices we encounter, but possibly don't use + let mut op_fac1 = Vec::new(); + // Q2 slices needed by the grid + let grid_fac1: Vec<_> = self + .evolve_info(order_mask) + .fac1 + .into_iter() + .map(|fac| xi.1 * xi.1 * fac) + .collect(); + + for result in slices { + let (info, operator) = result.map_err(|err| GridError::Other(err.into()))?; + + op_fac1.push(info.fac1); + + // it's possible that due to small numerical differences we get two slices which are + // almost the same. We have to skip those in order not to evolve the 'same' slice twice + if used_op_fac1 + .iter() + .any(|&fac| approx_eq!(f64, fac, info.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) + { + continue; + } + + // skip slices that the grid doesn't use + if !grid_fac1 + .iter() + .any(|&fac| approx_eq!(f64, fac, info.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) + { + continue; + } + + let op_info_dim = ( + info.pids1.len(), + info.x1.len(), + info.pids0.len(), + info.x0.len(), + ); + + if operator.dim() != op_info_dim { + return Err(GridError::EvolutionFailure(format!( + "operator information {:?} does not match the operator's dimensions: {:?}", + op_info_dim, + operator.dim(), + ))); + } + + let view = operator.view(); + + let (subgrids, channels) = if self.convolutions()[0] != Convolution::None + && self.convolutions()[1] != Convolution::None + { + evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) + } else { + evolution::evolve_slice_with_one(self, &view, &info, order_mask, xi, alphas_table) + }?; + + let mut rhs = Self { + subgrids, + channels, + bin_limits: self.bin_limits.clone(), + orders: vec![Order::new(0, 0, 0, 0)], + subgrid_params: SubgridParams::default(), 
+ more_members: self.more_members.clone(), + }; + + // TODO: use a new constructor to set this information + rhs.set_pid_basis(info.pid_basis); + + if let Some(lhs) = &mut lhs { + lhs.merge(rhs)?; + } else { + lhs = Some(rhs); + } + + used_op_fac1.push(info.fac1); + } + + // UNWRAP: if we can't compare two numbers there's a bug + op_fac1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + + // make sure we've evolved all slices + if let Some(muf2) = grid_fac1.into_iter().find(|&grid_mu2| { + !used_op_fac1 + .iter() + .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) + }) { + return Err(GridError::EvolutionFailure(format!( + "no operator for muf2 = {muf2} found in {op_fac1:?}" + ))); + } + + // TODO: convert this unwrap into error + let grid = lhs.unwrap(); + + // UNWRAP: merging evolved slices should be a proper FkTable again + Ok(FkTable::try_from(grid).unwrap_or_else(|_| unreachable!())) + } + + /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] + /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter + /// `order_mask` can be used to include or exclude orders from this operation, and must + /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are + /// enabled, and in particular if `order_mask` is empty all orders are activated. + /// + /// # Errors + /// + /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is + /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` + /// return an error. 
+ pub fn evolve_with_slice_iter2<'a, E: Into>( + &self, + slices_a: impl IntoIterator), E>>, + slices_b: impl IntoIterator), E>>, + order_mask: &[bool], + xi: (f64, f64), + alphas_table: &AlphasTable, + ) -> Result { + use super::evolution::EVOLVE_INFO_TOL_ULPS; + use itertools::izip; + + let mut lhs: Option = None; + // Q2 slices we use + let mut used_op_fac1 = Vec::new(); + // Q2 slices we encounter, but possibly don't use + let mut op_fac1 = Vec::new(); + // Q2 slices needed by the grid + let grid_fac1: Vec<_> = self + .evolve_info(order_mask) + .fac1 + .into_iter() + .map(|fac| xi.1 * xi.1 * fac) + .collect(); + + // TODO: simplify the ugly repetition below by offloading some ops into fn + for (result_a, result_b) in izip!(slices_a, slices_b) { + // Operate on `slices_a` + let (info_a, operator_a) = result_a.map_err(|err| GridError::Other(err.into()))?; + // Operate on `slices_b` + let (info_b, operator_b) = result_b.map_err(|err| GridError::Other(err.into()))?; + + // TODO: what if the scales of the EKOs don't agree? Is there an ordering problem? + assert_approx_eq!(f64, info_a.fac1, info_b.fac1, ulps = EVOLVE_INFO_TOL_ULPS); + + // also the PID bases must be the same + assert_eq!(info_a.pid_basis, info_b.pid_basis); + + op_fac1.push(info_a.fac1); + + // it's possible that due to small numerical differences we get two slices which are + // almost the same. 
We have to skip those in order not to evolve the 'same' slice twice + if used_op_fac1 + .iter() + .any(|&fac| approx_eq!(f64, fac, info_a.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) + { + continue; + } + + // skip slices that the grid doesn't use + if !grid_fac1 + .iter() + .any(|&fac| approx_eq!(f64, fac, info_a.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) + { + continue; + } + + let op_info_dim_a = ( + info_a.pids1.len(), + info_a.x1.len(), + info_a.pids0.len(), + info_a.x0.len(), + ); + + if operator_a.dim() != op_info_dim_a { + return Err(GridError::EvolutionFailure(format!( + "operator information {:?} does not match the operator's dimensions: {:?}", + op_info_dim_a, + operator_a.dim(), + ))); + } + + let op_info_dim_b = ( + info_b.pids1.len(), + info_b.x1.len(), + info_b.pids0.len(), + info_b.x0.len(), + ); + + if operator_b.dim() != op_info_dim_b { + return Err(GridError::EvolutionFailure(format!( + "operator information {:?} does not match the operator's dimensions: {:?}", + op_info_dim_b, + operator_b.dim(), + ))); + } + + let views = [operator_a.view(), operator_b.view()]; + let infos = [info_a, info_b]; + + let (subgrids, channels) = if self.convolutions()[0] != Convolution::None + && self.convolutions()[1] != Convolution::None + { + evolution::evolve_slice_with_two2( + self, + &views, + &infos, + order_mask, + xi, + alphas_table, + ) + } else { + evolution::evolve_slice_with_one( + self, + &views[0], + &infos[1], + order_mask, + xi, + alphas_table, + ) + }?; + + let mut rhs = Self { + subgrids, + channels, + bin_limits: self.bin_limits.clone(), + orders: vec![Order::new(0, 0, 0, 0)], + subgrid_params: SubgridParams::default(), + more_members: self.more_members.clone(), + }; + + // TODO: use a new constructor to set this information + rhs.set_pid_basis(infos[0].pid_basis); + + if let Some(lhs) = &mut lhs { + lhs.merge(rhs)?; + } else { + lhs = Some(rhs); + } + + used_op_fac1.push(infos[0].fac1); + } + + // UNWRAP: if we can't compare two numbers there's a bug + 
op_fac1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); + + // make sure we've evolved all slices + if let Some(muf2) = grid_fac1.into_iter().find(|&grid_mu2| { + !used_op_fac1 + .iter() + .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) + }) { + return Err(GridError::EvolutionFailure(format!( + "no operator for muf2 = {muf2} found in {op_fac1:?}" + ))); + } + + // TODO: convert this unwrap into error + let grid = lhs.unwrap(); + + // UNWRAP: merging evolved slices should be a proper FkTable again + Ok(FkTable::try_from(grid).unwrap_or_else(|_| unreachable!())) + } + + /// Deletes bins with the corresponding `bin_indices`. Repeated indices and indices larger or + /// equal the bin length are ignored. + pub fn delete_bins(&mut self, bin_indices: &[usize]) { + let mut bin_indices: Vec<_> = bin_indices + .iter() + .copied() + // ignore indices corresponding to bin that don't exist + .filter(|&index| index < self.bin_info().bins()) + .collect(); + + // sort and remove repeated indices + bin_indices.sort_unstable(); + bin_indices.dedup(); + let bin_indices = bin_indices; + + let mut bin_ranges: Vec> = Vec::new(); + + // convert indices into consecutive ranges + for &bin_index in &bin_indices { + match bin_ranges.last_mut() { + Some(range) if range.end == bin_index => range.end += 1, + _ => bin_ranges.push(bin_index..(bin_index + 1)), + } + } + + let bin_ranges = bin_ranges; + let mut ranges = bin_ranges.as_slice(); + let old_limits = self.bin_limits.limits(); + + // remove the bins from the right first, so as not to invalidate any indices + if let Some((range, remainder)) = ranges.split_last() { + if range.end == self.bin_info().bins() { + self.bin_limits.delete_bins_right(range.end - range.start); + ranges = remainder; + } + } + + // indices on the left aren't affected by removal of bins to their right + if let Some((range, remainder)) = ranges.split_first() { + if range.start == 0 { + 
self.bin_limits.delete_bins_left(range.end); + ranges = remainder; + } + } + + if !ranges.is_empty() { + // if there's no remapper we need to store the bin limits in a new remapper + if self.remapper_mut().is_none() { + self.set_remapper( + BinRemapper::new( + old_limits.windows(2).map(|win| win[1] - win[0]).collect(), + old_limits.windows(2).map(|win| (win[0], win[1])).collect(), + ) + .unwrap_or_else(|_| unreachable!()), + ) + .unwrap_or_else(|_| unreachable!()); + } + + // the following should not be needed, but let's set these limits to integer values + self.bin_limits = BinLimits::new( + iter::successors(Some(0.0), |x| Some(x + 1.0)) + .take(old_limits.len() - bin_indices.len()) + .collect(), + ); + } + + if let Some(remapper) = self.remapper_mut() { + remapper.delete_bins(&bin_ranges); + } + + for &bin_index in bin_indices.iter().rev() { + self.subgrids.remove_index(Axis(1), bin_index); + } + } + + /// Change the particle ID convention. + pub fn rotate_pid_basis(&mut self, pid_basis: PidBasis) { + match (self.pid_basis(), pid_basis) { + (PidBasis::Pdg, PidBasis::Evol) => { + self.channels = self + .channels() + .iter() + .map(|channel| Channel::translate(channel, &pids::pdg_mc_pids_to_evol)) + .collect(); + + self.set_pid_basis(PidBasis::Evol); + } + (PidBasis::Evol, PidBasis::Pdg) => { + self.channels = self + .channels() + .iter() + .map(|channel| Channel::translate(channel, &pids::evol_to_pdg_mc_ids)) + .collect(); + + self.set_pid_basis(PidBasis::Pdg); + } + (PidBasis::Evol, PidBasis::Evol) | (PidBasis::Pdg, PidBasis::Pdg) => { + // here's nothing to do + } + } + } + + /// Deletes channels with the corresponding `channel_indices`. Repeated indices and indices + /// larger or equal than the number of channels are ignored. 
+ pub fn delete_channels(&mut self, channel_indices: &[usize]) { + let mut channel_indices: Vec<_> = channel_indices + .iter() + .copied() + // ignore indices corresponding to bin that don't exist + .filter(|&index| index < self.channels().len()) + .collect(); + + // sort and remove repeated indices + channel_indices.sort_unstable(); + channel_indices.dedup(); + channel_indices.reverse(); + let channel_indices = channel_indices; + + for index in channel_indices { + self.channels.remove(index); + self.subgrids.remove_index(Axis(2), index); + } + } + + pub(crate) fn rewrite_channels(&mut self, add: &[(i32, i32)], del: &[i32]) { + self.channels = self + .channels() + .iter() + .map(|entry| { + Channel::new( + entry + .entry() + .iter() + .map(|(a, b, f)| { + ( + // if `a` is to be added to another pid replace it with this pid + add.iter().fold( + *a, + |id, &(source, target)| if id == source { target } else { id }, + ), + // if `b` is to be added to another pid replace it with this pid + add.iter().fold( + *b, + |id, &(source, target)| if id == source { target } else { id }, + ), + // if any of the pids `a` or `b` are to b deleted set the factor to + // zero + if del.iter().any(|id| id == a || id == b) { + 0.0 + } else { + *f + }, + ) + }) + .collect(), + ) + }) + .collect(); + } + + /// Splits the grid such that each channel contains only a single tuple of PIDs. 
+ pub fn split_channels(&mut self) { + let indices: Vec<_> = self + .channels() + .iter() + .enumerate() + .flat_map(|(index, entry)| iter::repeat(index).take(entry.entry().len())) + .collect(); + + self.subgrids = self.subgrids.select(Axis(2), &indices); + self.channels = self + .channels() + .iter() + .flat_map(|entry| { + entry + .entry() + .iter() + .copied() + .map(move |entry| Channel::new(vec![entry])) + }) + .collect(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::channel; + use std::fs::File; + + #[test] + fn grid_with_subgrid_type() { + let subgrid_type = String::from("Idontexist"); + let result = Grid::with_subgrid_type( + vec![], + vec![], + vec![], + SubgridParams::default(), + ExtraSubgridParams::default(), + &subgrid_type, + ); + + matches!(result, Err(GridError::UnknownSubgridType(x)) if x == subgrid_type); + } + + #[test] + fn grid_merge_empty_subgrids() { + let mut grid = Grid::new( + vec![ + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], + ], + vec![Order::new(0, 2, 0, 0)], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 1); + + let other = Grid::new( + vec![ + // differently ordered than `grid` + channel![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + ], + vec![Order::new(1, 2, 0, 0), Order::new(1, 2, 0, 1)], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + // merging with empty subgrids should not change the grid + grid.merge(other).unwrap(); + + assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 1); + } + + #[test] + fn grid_merge_orders() { + let mut grid = Grid::new( + vec![ + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], + ], + vec![Order::new(0, 2, 0, 0)], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + 
assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 1); + + let mut other = Grid::new( + vec![ + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], + ], + vec![ + Order::new(1, 2, 0, 0), + Order::new(1, 2, 0, 1), + Order::new(0, 2, 0, 0), + ], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + other.fill_all( + 0, + 0.1, + &Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: (), + }, + &[1.0, 2.0], + ); + other.fill_all( + 1, + 0.1, + &Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: (), + }, + &[1.0, 2.0], + ); + + // merge with four non-empty subgrids + grid.merge(other).unwrap(); + + assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 3); + } + + #[test] + fn grid_merge_channels_entries() { + let mut grid = Grid::new( + vec![ + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], + ], + vec![Order::new(0, 2, 0, 0)], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 1); + + let mut other = Grid::new( + vec![channel![22, 22, 1.0], channel![2, 2, 1.0; 4, 4, 1.0]], + vec![Order::new(0, 2, 0, 0)], + vec![0.0, 0.25, 0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + // fill the photon-photon entry + other.fill( + 0, + 0.1, + 0, + &Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: 3.0, + }, + ); + + grid.merge(other).unwrap(); + + assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 3); + assert_eq!(grid.orders().len(), 1); + } + + #[test] + fn grid_merge_bins() { + let mut grid = Grid::new( + vec![ + channel![2, 2, 1.0; 4, 4, 1.0], + channel![1, 1, 1.0; 3, 3, 1.0], + ], + vec![Order::new(0, 2, 0, 0)], + vec![0.0, 0.25, 0.5], + SubgridParams::default(), + ); + + assert_eq!(grid.bin_info().bins(), 2); 
+ assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 1); + + let mut other = Grid::new( + vec![ + // channels are differently sorted + channel![1, 1, 1.0; 3, 3, 1.0], + channel![2, 2, 1.0; 4, 4, 1.0], + ], + vec![Order::new(0, 2, 0, 0)], + vec![0.5, 0.75, 1.0], + SubgridParams::default(), + ); + + other.fill_all( + 0, + 0.1, + &Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: (), + }, + &[2.0, 3.0], + ); + + grid.merge(other).unwrap(); + + assert_eq!(grid.bin_info().bins(), 4); + assert_eq!(grid.channels().len(), 2); + assert_eq!(grid.orders().len(), 1); + } + + // TODO: convolve_subgrid, merge_bins, subgrid, set_subgrid + + #[test] + fn grid_convolutions() { + let mut grid = Grid::new( + vec![channel![21, 21, 1.0]], + vec![Order { + alphas: 0, + alpha: 0, + logxir: 0, + logxif: 0, + }], + vec![0.0, 1.0], + SubgridParams::default(), + ); + + // by default we assume unpolarized proton PDFs are used + assert_eq!( + grid.convolutions(), + [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] + ); + + grid.set_convolution(0, Convolution::UnpolPDF(-2212)); + grid.set_convolution(1, Convolution::UnpolPDF(-2212)); + + assert_eq!( + grid.convolutions(), + [Convolution::UnpolPDF(-2212), Convolution::UnpolPDF(-2212)] + ); + } + + #[test] + fn evolve_info() { + let grid = + Grid::read(File::open("../test-data/LHCB_WP_7TEV.pineappl.lz4").unwrap()).unwrap(); + let info = grid.evolve_info(&[]); + + assert_eq!(info.fac1.len(), 1); + assert_approx_eq!(f64, info.fac1[0], 6456.443904000001, ulps = 64); + + assert_eq!(info.pids1, [-3, -1, 2, 4, 21, 22]); + + assert_eq!(info.x1.len(), 50); + assert_approx_eq!(f64, info.x1[0], 1.9999999999999954e-7, ulps = 64); + + assert_approx_eq!(f64, info.x1[1], 3.034304765867952e-7, ulps = 64); + assert_approx_eq!(f64, info.x1[2], 4.6035014748963906e-7, ulps = 64); + assert_approx_eq!(f64, info.x1[3], 6.984208530700364e-7, ulps = 64); + assert_approx_eq!(f64, info.x1[4], 1.0596094959101024e-6, ulps = 64); 
+ assert_approx_eq!(f64, info.x1[5], 1.607585498470808e-6, ulps = 64); + assert_approx_eq!(f64, info.x1[6], 2.438943292891682e-6, ulps = 64); + assert_approx_eq!(f64, info.x1[7], 3.7002272069854957e-6, ulps = 64); + assert_approx_eq!(f64, info.x1[8], 5.613757716930151e-6, ulps = 64); + assert_approx_eq!(f64, info.x1[9], 8.516806677573355e-6, ulps = 64); + assert_approx_eq!(f64, info.x1[10], 1.292101569074731e-5, ulps = 64); + assert_approx_eq!(f64, info.x1[11], 1.9602505002391748e-5, ulps = 64); + assert_approx_eq!(f64, info.x1[12], 2.97384953722449e-5, ulps = 64); + assert_approx_eq!(f64, info.x1[13], 4.511438394964044e-5, ulps = 64); + assert_approx_eq!(f64, info.x1[14], 6.843744918967896e-5, ulps = 64); + assert_approx_eq!(f64, info.x1[15], 0.00010381172986576898, ulps = 64); + assert_approx_eq!(f64, info.x1[16], 0.00015745605600841445, ulps = 64); + assert_approx_eq!(f64, info.x1[17], 0.00023878782918561914, ulps = 64); + assert_approx_eq!(f64, info.x1[18], 0.00036205449638139736, ulps = 64); + assert_approx_eq!(f64, info.x1[19], 0.0005487795323670796, ulps = 64); + assert_approx_eq!(f64, info.x1[20], 0.0008314068836488144, ulps = 64); + assert_approx_eq!(f64, info.x1[21], 0.0012586797144272762, ulps = 64); + assert_approx_eq!(f64, info.x1[22], 0.0019034634022867384, ulps = 64); + assert_approx_eq!(f64, info.x1[23], 0.0028738675812817515, ulps = 64); + assert_approx_eq!(f64, info.x1[24], 0.004328500638820811, ulps = 64); + assert_approx_eq!(f64, info.x1[25], 0.006496206194633799, ulps = 64); + assert_approx_eq!(f64, info.x1[26], 0.009699159574043398, ulps = 64); + assert_approx_eq!(f64, info.x1[27], 0.014375068581090129, ulps = 64); + assert_approx_eq!(f64, info.x1[28], 0.02108918668378717, ulps = 64); + assert_approx_eq!(f64, info.x1[29], 0.030521584007828916, ulps = 64); + assert_approx_eq!(f64, info.x1[30], 0.04341491741702269, ulps = 64); + assert_approx_eq!(f64, info.x1[31], 0.060480028754447364, ulps = 64); + assert_approx_eq!(f64, info.x1[32], 
0.08228122126204893, ulps = 64); + assert_approx_eq!(f64, info.x1[33], 0.10914375746330703, ulps = 64); + assert_approx_eq!(f64, info.x1[34], 0.14112080644440345, ulps = 64); + assert_approx_eq!(f64, info.x1[35], 0.17802566042569432, ulps = 64); + assert_approx_eq!(f64, info.x1[36], 0.2195041265003886, ulps = 64); + assert_approx_eq!(f64, info.x1[37], 0.2651137041582823, ulps = 64); + assert_approx_eq!(f64, info.x1[38], 0.31438740076927585, ulps = 64); + assert_approx_eq!(f64, info.x1[39], 0.3668753186482242, ulps = 64); + assert_approx_eq!(f64, info.x1[40], 0.4221667753589648, ulps = 64); + assert_approx_eq!(f64, info.x1[41], 0.4798989029610255, ulps = 64); + assert_approx_eq!(f64, info.x1[42], 0.5397572337880445, ulps = 64); + assert_approx_eq!(f64, info.x1[43], 0.601472197967335, ulps = 64); + assert_approx_eq!(f64, info.x1[44], 0.6648139482473823, ulps = 64); + assert_approx_eq!(f64, info.x1[45], 0.7295868442414312, ulps = 64); + assert_approx_eq!(f64, info.x1[46], 0.7956242522922756, ulps = 64); + assert_approx_eq!(f64, info.x1[47], 0.8627839323906108, ulps = 64); + assert_approx_eq!(f64, info.x1[48], 0.9309440808717544, ulps = 64); + assert_approx_eq!(f64, info.x1[49], 1.0, ulps = 64); + + assert_eq!(info.ren1.len(), 1); + assert_approx_eq!(f64, info.ren1[0], 6456.443904000001, ulps = 64); + } +} diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs new file mode 100644 index 000000000..04624c09a --- /dev/null +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -0,0 +1,785 @@ +//! TODO + +use super::grid::Ntuple; +use super::sparse_array3::SparseArray3; +use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use serde::{Deserialize, Serialize}; +use std::borrow::Cow; +use std::mem; + +/// TODO +#[derive(Clone, Deserialize, Serialize)] +pub struct ImportOnlySubgridV1 { + array: SparseArray3, + q2_grid: Vec, + x1_grid: Vec, + x2_grid: Vec, +} + +impl ImportOnlySubgridV1 { + /// Constructor. 
+ #[must_use] + pub fn new( + array: SparseArray3, + q2_grid: Vec, + x1_grid: Vec, + x2_grid: Vec, + ) -> Self { + Self { + array, + q2_grid, + x1_grid, + x2_grid, + } + } + + /// Return the array containing the numerical values of the grid. + pub fn array_mut(&mut self) -> &mut SparseArray3 { + &mut self.array + } +} + +impl Subgrid for ImportOnlySubgridV1 { + fn convolve( + &self, + _: &[f64], + _: &[f64], + _: &[Mu2], + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, + ) -> f64 { + self.array + .indexed_iter() + .map(|((imu2, ix1, ix2), sigma)| sigma * lumi(ix1, ix2, imu2)) + .sum() + } + + fn fill(&mut self, _: &Ntuple) { + panic!("ImportOnlySubgridV1 doesn't support the fill operation"); + } + + fn mu2_grid(&self) -> Cow<[Mu2]> { + self.q2_grid + .iter() + .copied() + .map(|q2| Mu2 { ren: q2, fac: q2 }) + .collect() + } + + fn x1_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&self.x1_grid) + } + + fn x2_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&self.x2_grid) + } + + fn is_empty(&self) -> bool { + self.array.is_empty() + } + + fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { + if let SubgridEnum::ImportOnlySubgridV1(other_grid) = other { + if self.array.is_empty() && !transpose { + mem::swap(&mut self.array, &mut other_grid.array); + } else { + // TODO: the general case isn't implemented + assert!(self.x1_grid() == other_grid.x1_grid()); + assert!(self.x2_grid() == other_grid.x2_grid()); + + for (other_index, mu2) in other_grid.mu2_grid().iter().enumerate() { + // the following should always be the case + assert_eq!(mu2.ren, mu2.fac); + let q2 = &mu2.ren; + + let index = match self + .q2_grid + .binary_search_by(|val| val.partial_cmp(q2).unwrap()) + { + Ok(index) => index, + Err(index) => { + self.q2_grid.insert(index, *q2); + self.array.increase_x_at(index); + index + } + }; + + for ((_, j, k), value) in other_grid + .array + .indexed_iter() + .filter(|&((i, _, _), _)| i == other_index) + { + let (j, k) = if transpose { (k, j) } else { (j, k) 
}; + self.array[[index, j, k]] += value; + } + } + } + } else { + todo!(); + } + } + + fn scale(&mut self, factor: f64) { + if factor == 0.0 { + self.array.clear(); + } else { + self.array.iter_mut().for_each(|x| *x *= factor); + } + } + + fn symmetrize(&mut self) { + let mut new_array = + SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len()); + + for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { + new_array[[i, j, k]] = sigma; + } + // do not change the diagonal entries (k==j) + for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { + new_array[[i, k, j]] += sigma; + } + + mem::swap(&mut self.array, &mut new_array); + } + + fn clone_empty(&self) -> SubgridEnum { + Self { + array: SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len()), + q2_grid: self.q2_grid.clone(), + x1_grid: self.x1_grid.clone(), + x2_grid: self.x2_grid.clone(), + } + .into() + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + Box::new(self.array.indexed_iter()) + } + + fn stats(&self) -> Stats { + Stats { + total: self.q2_grid.len() * self.x1_grid.len() * self.x2_grid.len(), + allocated: self.array.len() + self.array.zeros(), + zeros: self.array.zeros(), + overhead: self.array.overhead(), + bytes_per_value: mem::size_of::(), + } + } + + fn static_scale(&self) -> Option { + if let &[static_scale] = self.q2_grid.as_slice() { + Some(Mu2 { + ren: static_scale, + fac: static_scale, + }) + } else { + None + } + } +} + +/// TODO +#[derive(Clone, Deserialize, Serialize)] +pub struct ImportOnlySubgridV2 { + array: SparseArray3, + mu2_grid: Vec, + x1_grid: Vec, + x2_grid: Vec, +} + +impl ImportOnlySubgridV2 { + /// Constructor. + #[must_use] + pub fn new( + array: SparseArray3, + mu2_grid: Vec, + x1_grid: Vec, + x2_grid: Vec, + ) -> Self { + Self { + array, + mu2_grid, + x1_grid, + x2_grid, + } + } + + /// Return the array containing the numerical values of the grid. 
+ pub fn array_mut(&mut self) -> &mut SparseArray3 { + &mut self.array + } +} + +impl Subgrid for ImportOnlySubgridV2 { + fn convolve( + &self, + _: &[f64], + _: &[f64], + _: &[Mu2], + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, + ) -> f64 { + self.array + .indexed_iter() + .map(|((imu2, ix1, ix2), sigma)| sigma * lumi(ix1, ix2, imu2)) + .sum() + } + + fn fill(&mut self, _: &Ntuple) { + panic!("ImportOnlySubgridV2 doesn't support the fill operation"); + } + + fn mu2_grid(&self) -> Cow<[Mu2]> { + Cow::Borrowed(&self.mu2_grid) + } + + fn x1_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&self.x1_grid) + } + + fn x2_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&self.x2_grid) + } + + fn is_empty(&self) -> bool { + self.array.is_empty() + } + + fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { + if let SubgridEnum::ImportOnlySubgridV2(other_grid) = other { + if self.array.is_empty() && !transpose { + mem::swap(&mut self.array, &mut other_grid.array); + } else { + let rhs_x1 = if transpose { + other_grid.x2_grid() + } else { + other_grid.x1_grid() + }; + let rhs_x2 = if transpose { + other_grid.x1_grid() + } else { + other_grid.x2_grid() + }; + + if (self.x1_grid() != rhs_x1) || (self.x2_grid() != rhs_x2) { + let mut x1_grid = self.x1_grid.clone(); + let mut x2_grid = self.x2_grid.clone(); + + x1_grid.extend_from_slice(&rhs_x1); + x1_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); + x1_grid.dedup(); + x2_grid.extend_from_slice(&rhs_x2); + x2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); + x2_grid.dedup(); + + let mut array = + SparseArray3::new(self.array.dimensions().0, x1_grid.len(), x2_grid.len()); + + for ((i, j, k), value) in self.array.indexed_iter() { + let target_j = x1_grid + .iter() + .position(|&x| x == self.x1_grid[j]) + .unwrap_or_else(|| unreachable!()); + let target_k = x2_grid + .iter() + .position(|&x| x == self.x2_grid[k]) + .unwrap_or_else(|| unreachable!()); + + array[[i, target_j, target_k]] = value; + } + + self.array = array; + 
self.x1_grid = x1_grid; + self.x2_grid = x2_grid; + } + + for (other_index, mu2) in other_grid.mu2_grid().iter().enumerate() { + let index = match self + .mu2_grid + .binary_search_by(|val| val.partial_cmp(mu2).unwrap()) + { + Ok(index) => index, + Err(index) => { + self.mu2_grid.insert(index, mu2.clone()); + self.array.increase_x_at(index); + index + } + }; + + for ((_, j, k), value) in other_grid + .array + .indexed_iter() + .filter(|&((i, _, _), _)| i == other_index) + { + let (j, k) = if transpose { (k, j) } else { (j, k) }; + let target_j = self + .x1_grid + .iter() + .position(|&x| x == rhs_x1[j]) + .unwrap_or_else(|| unreachable!()); + let target_k = self + .x2_grid + .iter() + .position(|&x| x == rhs_x2[k]) + .unwrap_or_else(|| unreachable!()); + + self.array[[index, target_j, target_k]] += value; + } + } + } + } else { + todo!(); + } + } + + fn scale(&mut self, factor: f64) { + if factor == 0.0 { + self.array.clear(); + } else { + self.array.iter_mut().for_each(|x| *x *= factor); + } + } + + fn symmetrize(&mut self) { + let mut new_array = + SparseArray3::new(self.mu2_grid.len(), self.x1_grid.len(), self.x2_grid.len()); + + for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { + new_array[[i, j, k]] = sigma; + } + // do not change the diagonal entries (k==j) + for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { + new_array[[i, k, j]] += sigma; + } + + mem::swap(&mut self.array, &mut new_array); + } + + fn clone_empty(&self) -> SubgridEnum { + Self { + array: SparseArray3::new(self.mu2_grid.len(), self.x1_grid.len(), self.x2_grid.len()), + mu2_grid: self.mu2_grid.clone(), + x1_grid: self.x1_grid.clone(), + x2_grid: self.x2_grid.clone(), + } + .into() + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + Box::new(self.array.indexed_iter()) + } + + fn stats(&self) -> Stats { + Stats { + total: self.mu2_grid.len() * self.x1_grid.len() * self.x2_grid.len(), + allocated: self.array.len() + 
self.array.zeros(), + zeros: self.array.zeros(), + overhead: self.array.overhead(), + bytes_per_value: mem::size_of::(), + } + } + + fn static_scale(&self) -> Option { + if let [static_scale] = self.mu2_grid.as_slice() { + Some(static_scale.clone()) + } else { + None + } + } +} + +impl From<&SubgridEnum> for ImportOnlySubgridV2 { + fn from(subgrid: &SubgridEnum) -> Self { + // find smallest ranges + let (mu2_range, x1_range, x2_range) = subgrid.indexed_iter().fold( + ( + subgrid.mu2_grid().len()..0, + subgrid.x1_grid().len()..0, + subgrid.x2_grid().len()..0, + ), + |prev, ((imu2, ix1, ix2), _)| { + ( + prev.0.start.min(imu2)..prev.0.end.max(imu2 + 1), + prev.1.start.min(ix1)..prev.1.end.max(ix1 + 1), + prev.2.start.min(ix2)..prev.2.end.max(ix2 + 1), + ) + }, + ); + + let (mu2_grid, static_scale) = subgrid.static_scale().map_or_else( + || (subgrid.mu2_grid()[mu2_range.clone()].to_vec(), false), + |scale| (vec![scale], true), + ); + let x1_grid = subgrid.x1_grid()[x1_range.clone()].to_vec(); + let x2_grid = subgrid.x2_grid()[x2_range.clone()].to_vec(); + + let mut array = SparseArray3::new(mu2_grid.len(), x1_grid.len(), x2_grid.len()); + + for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { + // if there's a static scale we want every value to be added to same grid point + let index = if static_scale { + 0 + } else { + imu2 - mu2_range.start + }; + + array[[index, ix1 - x1_range.start, ix2 - x2_range.start]] += value; + } + + Self { + array, + mu2_grid, + x1_grid, + x2_grid, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::lagrange_subgrid::LagrangeSubgridV2; + use crate::subgrid::{ExtraSubgridParams, SubgridParams}; + use float_cmp::assert_approx_eq; + use rand::distributions::{Distribution, Uniform}; + use rand::Rng; + use rand_pcg::Pcg64; + + #[test] + fn test_v1() { + let x = vec![ + 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, + ]; + let mut grid1: SubgridEnum = ImportOnlySubgridV1::new( + SparseArray3::new(1, 
10, 10), + vec![0.0], + x.clone(), + x.clone(), + ) + .into(); + + assert_eq!( + grid1.stats(), + Stats { + total: 100, + allocated: 0, + zeros: 0, + overhead: 2, + bytes_per_value: 8, + } + ); + + let mu2 = vec![Mu2 { ren: 0.0, fac: 0.0 }]; + + assert_eq!(grid1.mu2_grid().as_ref(), mu2); + assert_eq!(grid1.x1_grid().as_ref(), x); + assert_eq!(grid1.x2_grid(), grid1.x1_grid()); + + assert!(grid1.is_empty()); + + // only use exactly representable numbers here so that we can avoid using approx_eq + if let SubgridEnum::ImportOnlySubgridV1(ref mut x) = grid1 { + x.array_mut()[[0, 1, 2]] = 1.0; + x.array_mut()[[0, 1, 3]] = 2.0; + x.array_mut()[[0, 4, 3]] = 4.0; + x.array_mut()[[0, 7, 1]] = 8.0; + } else { + unreachable!(); + } + + assert!(!grid1.is_empty()); + + assert_eq!(grid1.indexed_iter().next(), Some(((0, 1, 2), 1.0))); + assert_eq!(grid1.indexed_iter().nth(1), Some(((0, 1, 3), 2.0))); + assert_eq!(grid1.indexed_iter().nth(2), Some(((0, 4, 3), 4.0))); + assert_eq!(grid1.indexed_iter().nth(3), Some(((0, 7, 1), 8.0))); + + // symmetric luminosity function + let lumi = + &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; + + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); + + // create grid with transposed entries, but different q2 + let mut grid2: SubgridEnum = ImportOnlySubgridV1::new( + SparseArray3::new(1, 10, 10), + vec![1.0], + x.clone(), + x.clone(), + ) + .into(); + if let SubgridEnum::ImportOnlySubgridV1(ref mut x) = grid2 { + x.array_mut()[[0, 2, 1]] = 1.0; + x.array_mut()[[0, 3, 1]] = 2.0; + x.array_mut()[[0, 3, 4]] = 4.0; + x.array_mut()[[0, 1, 7]] = 8.0; + } else { + unreachable!(); + } + assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); + + assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); + assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); + assert_eq!(grid2.indexed_iter().nth(2), Some(((0, 3, 1), 2.0))); + assert_eq!(grid2.indexed_iter().nth(3), Some(((0, 3, 4), 
4.0))); + + grid1.merge(&mut grid2, false); + + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + + let mut grid1 = { + let mut g = grid1.clone_empty(); + g.merge(&mut grid1, false); + g + }; + + // the luminosity function is symmetric, so after symmetrization the result must be + // unchanged + grid1.symmetrize(); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + + grid1.scale(2.0); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); + + assert_eq!( + grid1.stats(), + Stats { + total: 200, + allocated: 14, + zeros: 6, + overhead: 42, + bytes_per_value: 8, + } + ); + } + + #[test] + fn test_v2() { + let x = vec![ + 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, + ]; + let mut grid1: SubgridEnum = ImportOnlySubgridV2::new( + SparseArray3::new(1, 10, 10), + vec![Mu2 { ren: 0.0, fac: 0.0 }], + x.clone(), + x.clone(), + ) + .into(); + + let mu2 = vec![Mu2 { ren: 0.0, fac: 0.0 }]; + + assert_eq!(grid1.mu2_grid().as_ref(), mu2); + assert_eq!(grid1.x1_grid().as_ref(), x); + assert_eq!(grid1.x2_grid(), grid1.x1_grid()); + + assert!(grid1.is_empty()); + + // only use exactly representable numbers here so that we can avoid using approx_eq + if let SubgridEnum::ImportOnlySubgridV2(ref mut x) = grid1 { + x.array_mut()[[0, 1, 2]] = 1.0; + x.array_mut()[[0, 1, 3]] = 2.0; + x.array_mut()[[0, 4, 3]] = 4.0; + x.array_mut()[[0, 7, 1]] = 8.0; + } else { + unreachable!(); + } + + assert!(!grid1.is_empty()); + + assert_eq!(grid1.indexed_iter().next(), Some(((0, 1, 2), 1.0))); + assert_eq!(grid1.indexed_iter().nth(1), Some(((0, 1, 3), 2.0))); + assert_eq!(grid1.indexed_iter().nth(2), Some(((0, 4, 3), 4.0))); + assert_eq!(grid1.indexed_iter().nth(3), Some(((0, 7, 1), 8.0))); + + // symmetric luminosity function + let lumi = + &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; + + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); + + // create grid with transposed 
entries, but different q2 + let mut grid2: SubgridEnum = ImportOnlySubgridV2::new( + SparseArray3::new(1, 10, 10), + vec![Mu2 { ren: 1.0, fac: 1.0 }], + x.clone(), + x.clone(), + ) + .into(); + if let SubgridEnum::ImportOnlySubgridV2(ref mut x) = grid2 { + x.array_mut()[[0, 2, 1]] = 1.0; + x.array_mut()[[0, 3, 1]] = 2.0; + x.array_mut()[[0, 3, 4]] = 4.0; + x.array_mut()[[0, 1, 7]] = 8.0; + } else { + unreachable!(); + } + assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); + + assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); + assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); + assert_eq!(grid2.indexed_iter().nth(2), Some(((0, 3, 1), 2.0))); + assert_eq!(grid2.indexed_iter().nth(3), Some(((0, 3, 4), 4.0))); + + grid1.merge(&mut grid2, false); + + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + + let mut grid1 = { + let mut g = grid1.clone_empty(); + g.merge(&mut grid1, false); + g + }; + + // the luminosity function is symmetric, so after symmetrization the result must be + // unchanged + grid1.symmetrize(); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); + + grid1.scale(2.0); + assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); + + assert_eq!( + grid1.stats(), + Stats { + total: 200, + allocated: 14, + zeros: 6, + overhead: 42, + bytes_per_value: 8, + } + ); + } + + #[test] + #[should_panic(expected = "ImportOnlySubgridV1 doesn't support the fill operation")] + fn fill_panic_v1() { + let mut grid = + ImportOnlySubgridV1::new(SparseArray3::new(1, 1, 1), vec![1.0], vec![1.0], vec![1.0]); + + grid.fill(&Ntuple { + x1: 0.0, + x2: 0.0, + q2: 0.0, + weight: 1.0, + }); + } + + #[test] + #[should_panic(expected = "ImportOnlySubgridV2 doesn't support the fill operation")] + fn fill_panic_v2() { + let mut grid = ImportOnlySubgridV2::new( + SparseArray3::new(1, 1, 1), + vec![Mu2 { ren: 1.0, fac: 1.0 }], + vec![1.0], + vec![1.0], + ); + + grid.fill(&Ntuple { + x1: 0.0, + x2: 0.0, 
+ q2: 0.0, + weight: 1.0, + }); + } + + #[test] + fn from_lagrange_subgrid_v2() { + let mut lagrange = + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); + + // by default this should have 40 grid points + assert_eq!(lagrange.mu2_grid().len(), 40); + + // only `q2` are important: they're not static and fall between two grid points + lagrange.fill(&Ntuple { + x1: 0.25, + x2: 0.5, + q2: 10000.0, + weight: 1.0, + }); + lagrange.fill(&Ntuple { + x1: 0.0625, + x2: 0.125, + q2: 10001.0, + weight: 1.0, + }); + lagrange.fill(&Ntuple { + x1: 0.5, + x2: 0.0625, + q2: 10002.0, + weight: 1.0, + }); + lagrange.fill(&Ntuple { + x1: 0.1, + x2: 0.2, + q2: 10003.0, + weight: 1.0, + }); + + let x1 = lagrange.x1_grid().to_vec(); + let x2 = lagrange.x2_grid().to_vec(); + let mu2 = lagrange.mu2_grid().to_vec(); + + let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; + let reference = lagrange.convolve(&x1, &x2, &mu2, lumi); + + let imported = ImportOnlySubgridV2::from(&lagrange.into()); + let test = imported.convolve(&x1, &x2, &mu2, lumi); + + // make sure the conversion did not change the results + assert_approx_eq!(f64, reference, test, ulps = 8); + + // all unneccessary grid points should be gone; since we are inserting between two + // interpolation grid points, the imported grid should have as many interpolation grid + // points as its interpolation order + assert_eq!(imported.mu2_grid().len(), 4); + } + + #[test] + fn merge_with_different_x_grids() { + let mut params = SubgridParams::default(); + let mut grid1 = LagrangeSubgridV2::new(¶ms, &ExtraSubgridParams::default()); + + // change parameters of the second grid to force non-trivial merging + params.set_x_min(0.2); + params.set_x_max(0.5); + + let mut grid2 = LagrangeSubgridV2::new(¶ms, &ExtraSubgridParams::default()); + let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); + let q2_range = Uniform::new(1e4, 1e8); + + for _ in 0..1000 { + 
grid1.fill(&Ntuple { + x1: rng.gen(), + x2: rng.gen(), + q2: q2_range.sample(&mut rng), + weight: 1.0, + }); + grid2.fill(&Ntuple { + x1: rng.gen(), + x2: rng.gen(), + q2: q2_range.sample(&mut rng), + weight: 1.0, + }); + } + + let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; + let result1 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); + let result2 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); + + let mut grid1: SubgridEnum = ImportOnlySubgridV2::from(&grid1.into()).into(); + let mut grid2: SubgridEnum = ImportOnlySubgridV2::from(&grid2.into()).into(); + + let result3 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); + let result4 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); + + // conversion from LangrangeSubgridV2 to ImportOnlySubgridV2 shouldn't change the results + assert!((result3 / result1 - 1.0).abs() < 1e-13); + assert!((result4 / result2 - 1.0).abs() < 1e-13); + + grid1.merge(&mut grid2, false); + + let result5 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); + + // merging the two grids should give the sum of the two results + assert!((result5 / (result3 + result4) - 1.0).abs() < 1e-12); + } +} diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs new file mode 100644 index 000000000..f3ccf2a5e --- /dev/null +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -0,0 +1,1490 @@ +//! Module containing the Lagrange-interpolation subgrid. 
+ +use super::convert::{f64_from_usize, usize_from_f64}; +use super::grid::Ntuple; +use super::sparse_array3::SparseArray3; +use super::subgrid::{ + ExtraSubgridParams, Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter, SubgridParams, +}; +use arrayvec::ArrayVec; +use ndarray::Array3; +use serde::{Deserialize, Serialize}; +use std::borrow::Cow; +use std::iter; +use std::mem; + +fn weightfun(x: f64) -> f64 { + (x.sqrt() / (1.0 - 0.99 * x)).powi(3) +} + +fn fx(y: f64) -> f64 { + let mut yp = y; + + for _ in 0..100 { + let x = (-yp).exp(); + let delta = y - yp - 5.0 * (1.0 - x); + if (delta).abs() < 1e-12 { + return x; + } + let deriv = -1.0 - 5.0 * x; + yp -= delta / deriv; + } + + unreachable!(); +} + +fn fy(x: f64) -> f64 { + (1.0 - x).mul_add(5.0, -x.ln()) +} + +fn ftau(q2: f64) -> f64 { + (q2 / 0.0625).ln().ln() +} + +fn fq2(tau: f64) -> f64 { + 0.0625 * tau.exp().exp() +} + +fn fi(i: usize, n: usize, u: f64) -> f64 { + let mut factorials = 1; + let mut product = 1.0; + for z in 0..i { + product *= u - f64_from_usize(z); + factorials *= i - z; + } + for z in i + 1..=n { + product *= f64_from_usize(z) - u; + factorials *= z - i; + } + product / f64_from_usize(factorials) +} + +/// Subgrid which uses Lagrange-interpolation. +#[derive(Clone, Deserialize, Serialize)] +pub struct LagrangeSubgridV1 { + grid: Option>, + ntau: usize, + ny: usize, + yorder: usize, + tauorder: usize, + itaumin: usize, + itaumax: usize, + reweight: bool, + ymin: f64, + ymax: f64, + taumin: f64, + taumax: f64, +} + +impl LagrangeSubgridV1 { + /// Constructor. 
+ #[must_use] + pub fn new(subgrid_params: &SubgridParams) -> Self { + Self { + grid: None, + ntau: subgrid_params.q2_bins(), + ny: subgrid_params.x_bins(), + yorder: subgrid_params.x_order(), + tauorder: subgrid_params.q2_order(), + itaumin: 0, + itaumax: 0, + reweight: subgrid_params.reweight(), + ymin: fy(subgrid_params.x_max()), + ymax: fy(subgrid_params.x_min()), + taumin: ftau(subgrid_params.q2_min()), + taumax: ftau(subgrid_params.q2_max()), + } + } + + fn deltay(&self) -> f64 { + (self.ymax - self.ymin) / f64_from_usize(self.ny - 1) + } + + fn deltatau(&self) -> f64 { + (self.taumax - self.taumin) / f64_from_usize(self.ntau - 1) + } + + fn gety(&self, iy: usize) -> f64 { + f64_from_usize(iy).mul_add(self.deltay(), self.ymin) + } + + fn gettau(&self, iy: usize) -> f64 { + f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) + } + + fn increase_tau(&mut self, new_itaumin: usize, new_itaumax: usize) { + let min_diff = self.itaumin - new_itaumin; + + let mut new_grid = Array3::zeros((new_itaumax - new_itaumin, self.ny, self.ny)); + + for ((i, j, k), value) in self.grid.as_ref().unwrap().indexed_iter() { + new_grid[[i + min_diff, j, k]] = *value; + } + + self.itaumin = new_itaumin; + self.itaumax = new_itaumax; + + mem::swap(&mut self.grid, &mut Some(new_grid)); + } +} + +impl Subgrid for LagrangeSubgridV1 { + fn convolve( + &self, + x1: &[f64], + x2: &[f64], + _: &[Mu2], + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, + ) -> f64 { + self.grid.as_ref().map_or(0.0, |grid| { + grid.indexed_iter() + .map(|((imu2, ix1, ix2), &sigma)| { + if sigma == 0.0 { + 0.0 + } else { + let mut value = sigma * lumi(ix1, ix2, imu2 + self.itaumin); + if self.reweight { + value *= weightfun(x1[ix1]) * weightfun(x2[ix2]); + } + value + } + }) + .sum() + }) + } + + fn fill(&mut self, ntuple: &Ntuple) { + if ntuple.weight == 0.0 { + return; + } + + let y1 = fy(ntuple.x1); + let y2 = fy(ntuple.x2); + let tau = ftau(ntuple.q2); + + if (y2 < self.ymin) + || (y2 > self.ymax) + || 
(y1 < self.ymin) + || (y1 > self.ymax) + || (tau < self.taumin) + || (tau > self.taumax) + { + return; + } + + let k1 = usize_from_f64((y1 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) + .min(self.ny - 1 - self.yorder); + let k2 = usize_from_f64((y2 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) + .min(self.ny - 1 - self.yorder); + + let u_y1 = (y1 - self.gety(k1)) / self.deltay(); + let u_y2 = (y2 - self.gety(k2)) / self.deltay(); + + let fi1: ArrayVec<_, 8> = (0..=self.yorder) + .map(|i| fi(i, self.yorder, u_y1)) + .collect(); + let fi2: ArrayVec<_, 8> = (0..=self.yorder) + .map(|i| fi(i, self.yorder, u_y2)) + .collect(); + + let k3 = usize_from_f64( + (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), + ) + .min(self.ntau - 1 - self.tauorder); + + let u_tau = (tau - self.gettau(k3)) / self.deltatau(); + + let factor = if self.reweight { + 1.0 / (weightfun(ntuple.x1) * weightfun(ntuple.x2)) + } else { + 1.0 + }; + + let size = self.tauorder + 1; + let ny = self.ny; + + if self.grid.is_none() { + self.itaumin = k3; + self.itaumax = k3 + size; + } else if k3 < self.itaumin || k3 + size > self.itaumax { + self.increase_tau(self.itaumin.min(k3), self.itaumax.max(k3 + size)); + } + + for i3 in 0..=self.tauorder { + let fi3i3 = fi(i3, self.tauorder, u_tau); + + for (i1, fi1i1) in fi1.iter().enumerate() { + for (i2, fi2i2) in fi2.iter().enumerate() { + let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; + + let grid = self + .grid + .get_or_insert_with(|| Array3::zeros((size, ny, ny))); + + grid[[k3 + i3 - self.itaumin, k1 + i1, k2 + i2]] += fillweight; + } + } + } + } + + fn mu2_grid(&self) -> Cow<[Mu2]> { + (0..self.ntau) + .map(|itau| { + let q2 = fq2(self.gettau(itau)); + Mu2 { ren: q2, fac: q2 } + }) + .collect() + } + + fn x1_grid(&self) -> Cow<[f64]> { + (0..self.ny).map(|iy| fx(self.gety(iy))).collect() + } + + fn x2_grid(&self) -> Cow<[f64]> { + self.x1_grid() + } + + fn is_empty(&self) -> 
bool { + self.grid.is_none() + } + + fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { + let x1_equal = self.x1_grid() == other.x1_grid(); + let x2_equal = self.x2_grid() == other.x2_grid(); + + if let SubgridEnum::LagrangeSubgridV1(other_grid) = other { + if let Some(other_grid_grid) = &mut other_grid.grid { + if self.grid.is_some() { + // TODO: the general case isn't implemented + assert!(x1_equal); + assert!(x2_equal); + + let new_itaumin = self.itaumin.min(other_grid.itaumin); + let new_itaumax = self.itaumax.max(other_grid.itaumax); + let offset = other_grid.itaumin.saturating_sub(self.itaumin); + + // TODO: we need much more checks here if there subgrids are compatible at all + + if (self.itaumin != new_itaumin) || (self.itaumax != new_itaumax) { + self.increase_tau(new_itaumin, new_itaumax); + } + + let self_grid = self.grid.as_mut().unwrap(); + + if transpose { + for ((i, k, j), value) in other_grid_grid.indexed_iter() { + self_grid[[i + offset, j, k]] += value; + } + } else { + for ((i, j, k), value) in other_grid_grid.indexed_iter() { + self_grid[[i + offset, j, k]] += value; + } + } + } else { + self.grid = other_grid.grid.take(); + self.itaumin = other_grid.itaumin; + self.itaumax = other_grid.itaumax; + + if transpose { + if let Some(grid) = &mut self.grid { + grid.swap_axes(1, 2); + } + } + } + } + } else { + todo!(); + } + } + + fn scale(&mut self, factor: f64) { + if factor == 0.0 { + self.grid = None; + } else if let Some(self_grid) = &mut self.grid { + self_grid.iter_mut().for_each(|x| *x *= factor); + } + } + + fn symmetrize(&mut self) { + if let Some(grid) = self.grid.as_mut() { + let (i_size, j_size, k_size) = grid.dim(); + + for i in 0..i_size { + for j in 0..j_size { + for k in j + 1..k_size { + grid[[i, j, k]] += grid[[i, k, j]]; + grid[[i, k, j]] = 0.0; + } + } + } + } + } + + fn clone_empty(&self) -> SubgridEnum { + Self { + grid: None, + ntau: self.ntau, + ny: self.ny, + yorder: self.yorder, + tauorder: self.tauorder, + 
itaumin: 0, + itaumax: 0, + reweight: self.reweight, + ymin: self.ymin, + ymax: self.ymax, + taumin: self.taumin, + taumax: self.taumax, + } + .into() + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + self.grid.as_ref().map_or_else( + || Box::new(iter::empty()) as Box>, + |grid| { + Box::new(grid.indexed_iter().filter(|(_, &value)| value != 0.0).map( + |(tuple, &value)| { + ( + (self.itaumin + tuple.0, tuple.1, tuple.2), + value + * if self.reweight { + weightfun(fx(self.gety(tuple.1))) + * weightfun(fx(self.gety(tuple.2))) + } else { + 1.0 + }, + ) + }, + )) + }, + ) + } + + fn stats(&self) -> Stats { + let (non_zeros, zeros) = self.grid.as_ref().map_or((0, 0), |array| { + array.iter().fold((0, 0), |mut result, value| { + if *value == 0.0 { + result.0 += 1; + } else { + result.1 += 1; + } + result + }) + }); + + Stats { + total: non_zeros + zeros, + allocated: non_zeros + zeros, + zeros, + overhead: 0, + bytes_per_value: mem::size_of::(), + } + } + + fn static_scale(&self) -> Option { + if let [static_scale] = self.mu2_grid().as_ref() { + Some(static_scale.clone()) + } else { + None + } + } +} + +/// Subgrid which uses Lagrange-interpolation. +#[derive(Clone, Deserialize, Serialize)] +pub struct LagrangeSubgridV2 { + grid: Option>, + ntau: usize, + ny1: usize, + ny2: usize, + y1order: usize, + y2order: usize, + tauorder: usize, + itaumin: usize, + itaumax: usize, + reweight1: bool, + reweight2: bool, + y1min: f64, + y1max: f64, + y2min: f64, + y2max: f64, + taumin: f64, + taumax: f64, + pub(crate) static_q2: f64, +} + +impl LagrangeSubgridV2 { + /// Constructor. 
+ #[must_use] + pub fn new(subgrid_params: &SubgridParams, extra_params: &ExtraSubgridParams) -> Self { + Self { + grid: None, + ntau: subgrid_params.q2_bins(), + ny1: subgrid_params.x_bins(), + ny2: extra_params.x2_bins(), + y1order: subgrid_params.x_order(), + y2order: extra_params.x2_order(), + tauorder: subgrid_params.q2_order(), + itaumin: 0, + itaumax: 0, + reweight1: subgrid_params.reweight(), + reweight2: extra_params.reweight2(), + y1min: fy(subgrid_params.x_max()), + y1max: fy(subgrid_params.x_min()), + y2min: fy(extra_params.x2_max()), + y2max: fy(extra_params.x2_min()), + taumin: ftau(subgrid_params.q2_min()), + taumax: ftau(subgrid_params.q2_max()), + static_q2: 0.0, + } + } + + fn deltay1(&self) -> f64 { + (self.y1max - self.y1min) / f64_from_usize(self.ny1 - 1) + } + + fn deltay2(&self) -> f64 { + (self.y1max - self.y2min) / f64_from_usize(self.ny2 - 1) + } + + fn deltatau(&self) -> f64 { + (self.taumax - self.taumin) / f64_from_usize(self.ntau - 1) + } + + fn gety1(&self, iy: usize) -> f64 { + if self.y1min == self.y1max { + debug_assert_eq!(iy, 0); + self.y1min + } else { + f64_from_usize(iy).mul_add(self.deltay1(), self.y1min) + } + } + + fn gety2(&self, iy: usize) -> f64 { + if self.y2min == self.y2max { + debug_assert_eq!(iy, 0); + self.y2min + } else { + f64_from_usize(iy).mul_add(self.deltay2(), self.y2min) + } + } + + fn gettau(&self, iy: usize) -> f64 { + if self.taumin == self.taumax { + debug_assert_eq!(iy, 0); + self.taumin + } else { + f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) + } + } + + fn increase_tau(&mut self, new_itaumin: usize, new_itaumax: usize) { + let min_diff = self.itaumin - new_itaumin; + + let mut new_grid = Array3::zeros((new_itaumax - new_itaumin, self.ny1, self.ny2)); + + for ((i, j, k), value) in self.grid.as_ref().unwrap().indexed_iter() { + new_grid[[i + min_diff, j, k]] = *value; + } + + self.itaumin = new_itaumin; + self.itaumax = new_itaumax; + + mem::swap(&mut self.grid, &mut Some(new_grid)); + } 
}

impl Subgrid for LagrangeSubgridV2 {
    /// Convolves the subgrid with the luminosity function `lumi`; an empty subgrid
    /// yields 0.0. The `mu2` slice argument is unused because the tau nodes are
    /// implicit in this subgrid type.
    fn convolve(
        &self,
        x1: &[f64],
        x2: &[f64],
        _: &[Mu2],
        lumi: &mut dyn FnMut(usize, usize, usize) -> f64,
    ) -> f64 {
        self.grid.as_ref().map_or(0.0, |grid| {
            grid.indexed_iter()
                .map(|((imu2, ix1, ix2), &sigma)| {
                    if sigma == 0.0 {
                        0.0
                    } else {
                        // `imu2` indexes the allocated tau slice; shift by `itaumin`
                        // to obtain the absolute tau index
                        let mut value = sigma * lumi(ix1, ix2, imu2 + self.itaumin);
                        if self.reweight1 {
                            value *= weightfun(x1[ix1]);
                        }
                        if self.reweight2 {
                            value *= weightfun(x2[ix2]);
                        }
                        value
                    }
                })
                .sum()
        })
    }

    /// Fills one Monte-Carlo event into the grid using Lagrange interpolation; events
    /// with zero weight or with kinematics outside the grid limits are dropped.
    // NOTE(review): the generic argument of `Ntuple` was lost in extraction and has
    // been reconstructed as `Ntuple<f64>`, consistent with `stats()` in this file.
    fn fill(&mut self, ntuple: &Ntuple<f64>) {
        if ntuple.weight == 0.0 {
            return;
        }

        let y1 = fy(ntuple.x1);
        let y2 = fy(ntuple.x2);
        let tau = ftau(ntuple.q2);

        // track whether all filled events share one and the same q2
        if self.static_q2 == 0.0 {
            self.static_q2 = ntuple.q2;
        } else if (self.static_q2 != -1.0) && (self.static_q2 != ntuple.q2) {
            self.static_q2 = -1.0;
        }

        if (y2 < self.y2min)
            || (y2 > self.y2max)
            || (y1 < self.y1min)
            || (y1 > self.y1max)
            || (tau < self.taumin)
            || (tau > self.taumax)
        {
            return;
        }

        // left-most node indices of the interpolation windows, clamped to the grid
        let k1 =
            usize_from_f64((y1 - self.y1min) / self.deltay1() - f64_from_usize(self.y1order / 2))
                .min(self.ny1 - 1 - self.y1order);
        let k2 =
            usize_from_f64((y2 - self.y2min) / self.deltay2() - f64_from_usize(self.y2order / 2))
                .min(self.ny2 - 1 - self.y2order);

        // fractional positions inside the windows
        let u_y1 = (y1 - self.gety1(k1)) / self.deltay1();
        let u_y2 = (y2 - self.gety2(k2)) / self.deltay2();

        let fi1: ArrayVec<_, 8> = (0..=self.y1order)
            .map(|i| fi(i, self.y1order, u_y1))
            .collect();
        let fi2: ArrayVec<_, 8> = (0..=self.y2order)
            .map(|i| fi(i, self.y2order, u_y2))
            .collect();

        let k3 = usize_from_f64(
            (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2),
        )
        .min(self.ntau - 1 - self.tauorder);

        let u_tau = (tau - self.gettau(k3)) / self.deltatau();

        // undo the reweighting applied later in `convolve`/`indexed_iter`
        let factor = 1.0
            / (if self.reweight1 {
                weightfun(ntuple.x1)
            } else {
                1.0
            } * if self.reweight2 {
                weightfun(ntuple.x2)
            } else {
                1.0
            });

        let size = self.tauorder + 1;
        let ny1 = self.ny1;
        let ny2 = self.ny2;

        // allocate or grow the tau slice so it covers [k3, k3 + size)
        if self.grid.is_none() {
            self.itaumin = k3;
            self.itaumax = k3 + size;
        } else if k3 < self.itaumin || k3 + size > self.itaumax {
            self.increase_tau(self.itaumin.min(k3), self.itaumax.max(k3 + size));
        }

        for i3 in 0..=self.tauorder {
            let fi3i3 = fi(i3, self.tauorder, u_tau);

            for (i1, fi1i1) in fi1.iter().enumerate() {
                for (i2, fi2i2) in fi2.iter().enumerate() {
                    let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight;

                    let grid = self
                        .grid
                        .get_or_insert_with(|| Array3::zeros((size, ny1, ny2)));

                    grid[[k3 + i3 - self.itaumin, k1 + i1, k2 + i2]] += fillweight;
                }
            }
        }
    }

    /// Returns the (ren, fac) scale nodes corresponding to the full tau grid.
    fn mu2_grid(&self) -> Cow<[Mu2]> {
        (0..self.ntau)
            .map(|itau| {
                let q2 = fq2(self.gettau(itau));
                Mu2 { ren: q2, fac: q2 }
            })
            .collect()
    }

    /// Returns the x-nodes of the first hadron.
    fn x1_grid(&self) -> Cow<[f64]> {
        (0..self.ny1).map(|iy| fx(self.gety1(iy))).collect()
    }

    /// Returns the x-nodes of the second hadron.
    fn x2_grid(&self) -> Cow<[f64]> {
        (0..self.ny2).map(|iy| fx(self.gety2(iy))).collect()
    }

    fn is_empty(&self) -> bool {
        self.grid.is_none()
    }

    /// Adds the contents of `other` (only another `LagrangeSubgridV2`) into `self`,
    /// optionally transposing the two x-axes.
    fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) {
        let x1_equal = self.x1_grid() == other.x1_grid();
        let x2_equal = self.x2_grid() == other.x2_grid();

        if let SubgridEnum::LagrangeSubgridV2(other_grid) = other {
            if let Some(other_grid_grid) = &mut other_grid.grid {
                if self.grid.is_some() {
                    // TODO: the general case isn't implemented
                    assert!(x1_equal);
                    assert!(x2_equal);

                    let new_itaumin = self.itaumin.min(other_grid.itaumin);
                    let new_itaumax = self.itaumax.max(other_grid.itaumax);
                    let offset = other_grid.itaumin.saturating_sub(self.itaumin);

                    // TODO: we need many more checks here whether the subgrids are compatible at all

                    if (self.itaumin != new_itaumin) || (self.itaumax != new_itaumax) {
                        self.increase_tau(new_itaumin, new_itaumax);
                    }

                    // merging invalidates the static-scale detection unless both agree
                    if (other_grid.static_q2 == -1.0) || (self.static_q2 != other_grid.static_q2) {
                        self.static_q2 = -1.0;
                    }

                    let self_grid = self.grid.as_mut().unwrap();

                    if transpose {
                        for ((i, k, j), value) in other_grid_grid.indexed_iter() {
                            self_grid[[i + offset, j, k]] += value;
                        }
                    } else {
                        for ((i, j, k), value) in other_grid_grid.indexed_iter() {
                            self_grid[[i + offset, j, k]] += value;
                        }
                    }
                } else {
                    // `self` is empty: steal the other grid wholesale
                    self.grid = other_grid.grid.take();
                    self.itaumin = other_grid.itaumin;
                    self.itaumax = other_grid.itaumax;
                    self.static_q2 = other_grid.static_q2;

                    if transpose {
                        if let Some(grid) = &mut self.grid {
                            grid.swap_axes(1, 2);
                        }
                    }
                }
            }
        } else {
            todo!();
        }
    }

    /// Multiplies all entries by `factor`; scaling with 0.0 empties the subgrid.
    fn scale(&mut self, factor: f64) {
        if factor == 0.0 {
            self.grid = None;
        } else if let Some(self_grid) = &mut self.grid {
            self_grid.iter_mut().for_each(|x| *x *= factor);
        }
    }

    /// Folds the lower x1/x2 triangle into the upper one, zeroing the lower entries.
    fn symmetrize(&mut self) {
        if let Some(grid) = self.grid.as_mut() {
            let (i_size, j_size, k_size) = grid.dim();

            for i in 0..i_size {
                for j in 0..j_size {
                    for k in j + 1..k_size {
                        grid[[i, j, k]] += grid[[i, k, j]];
                        grid[[i, k, j]] = 0.0;
                    }
                }
            }
        }
    }

    /// Returns an empty subgrid with the same interpolation parameters.
    fn clone_empty(&self) -> SubgridEnum {
        Self {
            grid: None,
            ntau: self.ntau,
            ny1: self.ny1,
            ny2: self.ny2,
            y1order: self.y1order,
            y2order: self.y2order,
            tauorder: self.tauorder,
            itaumin: 0,
            itaumax: 0,
            reweight1: self.reweight1,
            reweight2: self.reweight2,
            y1min: self.y1min,
            y1max: self.y1max,
            y2min: self.y2min,
            y2max: self.y2max,
            taumin: self.taumin,
            taumax: self.taumax,
            static_q2: 0.0,
        }
        .into()
    }

    /// Iterates over the non-zero entries with absolute (tau, x1, x2) indices and the
    /// reweighting applied.
    // NOTE(review): the boxed-iterator cast type was lost in extraction; it has been
    // reconstructed from the item type produced by the closure below.
    fn indexed_iter(&self) -> SubgridIndexedIter {
        self.grid.as_ref().map_or_else(
            || {
                Box::new(iter::empty())
                    as Box<dyn Iterator<Item = ((usize, usize, usize), f64)>>
            },
            |grid| {
                Box::new(grid.indexed_iter().filter(|(_, &value)| value != 0.0).map(
                    |(tuple, &value)| {
                        (
                            (self.itaumin + tuple.0, tuple.1, tuple.2),
                            value
                                * if self.reweight1 {
                                    weightfun(fx(self.gety1(tuple.1)))
                                } else {
                                    1.0
                                }
                                * if self.reweight2 {
                                    weightfun(fx(self.gety2(tuple.2)))
                                } else {
                                    1.0
                                },
                        )
                    },
                ))
            },
        )
    }

    fn stats(&self) ->
Stats { + let (non_zeros, zeros) = self.grid.as_ref().map_or((0, 0), |array| { + array.iter().fold((0, 0), |mut result, value| { + if *value == 0.0 { + result.0 += 1; + } else { + result.1 += 1; + } + result + }) + }); + + Stats { + total: non_zeros + zeros, + allocated: non_zeros + zeros, + zeros, + overhead: 0, + bytes_per_value: mem::size_of::(), + } + } + + fn static_scale(&self) -> Option { + (self.static_q2 > 0.0).then_some(Mu2 { + ren: self.static_q2, + fac: self.static_q2, + }) + } +} + +/// Subgrid which uses Lagrange-interpolation, but also stores its contents in a space-efficient +/// structure. +#[derive(Clone, Deserialize, Serialize)] +pub struct LagrangeSparseSubgridV1 { + array: SparseArray3, + ntau: usize, + ny: usize, + yorder: usize, + tauorder: usize, + reweight: bool, + ymin: f64, + ymax: f64, + taumin: f64, + taumax: f64, +} + +impl LagrangeSparseSubgridV1 { + /// Constructor. + #[must_use] + pub fn new(subgrid_params: &SubgridParams) -> Self { + Self { + array: SparseArray3::new( + subgrid_params.q2_bins(), + subgrid_params.x_bins(), + subgrid_params.x_bins(), + ), + ntau: subgrid_params.q2_bins(), + ny: subgrid_params.x_bins(), + yorder: subgrid_params.x_order(), + tauorder: subgrid_params.q2_order(), + reweight: subgrid_params.reweight(), + ymin: fy(subgrid_params.x_max()), + ymax: fy(subgrid_params.x_min()), + taumin: ftau(subgrid_params.q2_min()), + taumax: ftau(subgrid_params.q2_max()), + } + } + + fn deltay(&self) -> f64 { + (self.ymax - self.ymin) / f64_from_usize(self.ny - 1) + } + + fn deltatau(&self) -> f64 { + (self.taumax - self.taumin) / f64_from_usize(self.ntau - 1) + } + + fn gety(&self, iy: usize) -> f64 { + f64_from_usize(iy).mul_add(self.deltay(), self.ymin) + } + + fn gettau(&self, iy: usize) -> f64 { + f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) + } +} + +impl Subgrid for LagrangeSparseSubgridV1 { + fn convolve( + &self, + x1: &[f64], + x2: &[f64], + _: &[Mu2], + lumi: &mut dyn FnMut(usize, usize, usize) -> 
f64, + ) -> f64 { + self.array + .indexed_iter() + .map(|((imu2, ix1, ix2), sigma)| { + let mut value = sigma * lumi(ix1, ix2, imu2); + if self.reweight { + value *= weightfun(x1[ix1]) * weightfun(x2[ix2]); + } + value + }) + .sum() + } + + fn fill(&mut self, ntuple: &Ntuple) { + if ntuple.weight == 0.0 { + return; + } + + let y1 = fy(ntuple.x1); + let y2 = fy(ntuple.x2); + let tau = ftau(ntuple.q2); + + if (y2 < self.ymin) + || (y2 > self.ymax) + || (y1 < self.ymin) + || (y1 > self.ymax) + || (tau < self.taumin) + || (tau > self.taumax) + { + return; + } + + let k1 = usize_from_f64((y1 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) + .min(self.ny - 1 - self.yorder); + let k2 = usize_from_f64((y2 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) + .min(self.ny - 1 - self.yorder); + + let u_y1 = (y1 - self.gety(k1)) / self.deltay(); + let u_y2 = (y2 - self.gety(k2)) / self.deltay(); + + let fi1: ArrayVec<_, 8> = (0..=self.yorder) + .map(|i| fi(i, self.yorder, u_y1)) + .collect(); + let fi2: ArrayVec<_, 8> = (0..=self.yorder) + .map(|i| fi(i, self.yorder, u_y2)) + .collect(); + + let k3 = usize_from_f64( + (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), + ) + .min(self.ntau - 1 - self.tauorder); + + let u_tau = (tau - self.gettau(k3)) / self.deltatau(); + + let factor = if self.reweight { + 1.0 / (weightfun(ntuple.x1) * weightfun(ntuple.x2)) + } else { + 1.0 + }; + + for i3 in 0..=self.tauorder { + let fi3i3 = fi(i3, self.tauorder, u_tau); + + for (i1, fi1i1) in fi1.iter().enumerate() { + for (i2, fi2i2) in fi2.iter().enumerate() { + let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; + + self.array[[k3 + i3, k1 + i1, k2 + i2]] += fillweight; + } + } + } + } + + fn mu2_grid(&self) -> Cow<[Mu2]> { + (0..self.ntau) + .map(|itau| { + let q2 = fq2(self.gettau(itau)); + Mu2 { ren: q2, fac: q2 } + }) + .collect() + } + + fn x1_grid(&self) -> Cow<[f64]> { + (0..self.ny).map(|iy| 
fx(self.gety(iy))).collect() + } + + fn x2_grid(&self) -> Cow<[f64]> { + self.x1_grid() + } + + fn is_empty(&self) -> bool { + self.array.is_empty() + } + + fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { + if let SubgridEnum::LagrangeSparseSubgridV1(other_grid) = other { + if self.array.is_empty() && !transpose { + mem::swap(&mut self.array, &mut other_grid.array); + } else { + // TODO: the general case isn't implemented + assert!(self.x1_grid() == other_grid.x1_grid()); + assert!(self.x2_grid() == other_grid.x2_grid()); + + // TODO: we need much more checks here if there subgrids are compatible at all + + if transpose { + for ((i, k, j), value) in other_grid.array.indexed_iter() { + self.array[[i, j, k]] += value; + } + } else { + for ((i, j, k), value) in other_grid.array.indexed_iter() { + self.array[[i, j, k]] += value; + } + } + } + } else { + todo!(); + } + } + + fn scale(&mut self, factor: f64) { + if factor == 0.0 { + self.array.clear(); + } else { + self.array.iter_mut().for_each(|x| *x *= factor); + } + } + + fn symmetrize(&mut self) { + let mut new_array = SparseArray3::new(self.ntau, self.ny, self.ny); + + for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { + new_array[[i, j, k]] = sigma; + } + for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { + new_array[[i, k, j]] += sigma; + } + + mem::swap(&mut self.array, &mut new_array); + } + + fn clone_empty(&self) -> SubgridEnum { + Self { + array: SparseArray3::new(self.ntau, self.ny, self.ny), + ntau: self.ntau, + ny: self.ny, + yorder: self.yorder, + tauorder: self.tauorder, + reweight: self.reweight, + ymin: self.ymin, + ymax: self.ymax, + taumin: self.taumin, + taumax: self.taumax, + } + .into() + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + Box::new(self.array.indexed_iter().map(|(tuple, value)| { + ( + tuple, + value + * if self.reweight { + weightfun(fx(self.gety(tuple.1))) * weightfun(fx(self.gety(tuple.2))) 
+ } else { + 1.0 + }, + ) + })) + } + + fn stats(&self) -> Stats { + Stats { + total: self.ntau * self.ny * self.ny, + allocated: self.array.len() + self.array.zeros(), + zeros: self.array.zeros(), + overhead: self.array.overhead(), + bytes_per_value: mem::size_of::(), + } + } + + fn static_scale(&self) -> Option { + if let [static_scale] = self.mu2_grid().as_ref() { + Some(static_scale.clone()) + } else { + None + } + } +} + +impl From<&LagrangeSubgridV1> for LagrangeSparseSubgridV1 { + fn from(subgrid: &LagrangeSubgridV1) -> Self { + Self { + array: subgrid.grid.as_ref().map_or_else( + || SparseArray3::new(subgrid.ntau, subgrid.ny, subgrid.ny), + |grid| SparseArray3::from_ndarray(grid.view(), subgrid.itaumin, subgrid.ntau), + ), + ntau: subgrid.ntau, + ny: subgrid.ny, + yorder: subgrid.yorder, + tauorder: subgrid.tauorder, + reweight: subgrid.reweight, + ymin: subgrid.ymin, + ymax: subgrid.ymax, + taumin: subgrid.taumin, + taumax: subgrid.taumax, + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use float_cmp::assert_approx_eq; + + fn test_q2_slice_methods(mut grid: G) -> G { + grid.fill(&Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid.fill(&Ntuple { + x1: 0.9, + x2: 0.1, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid.fill(&Ntuple { + x1: 0.009, + x2: 0.01, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid.fill(&Ntuple { + x1: 0.009, + x2: 0.5, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + + // the grid must not be empty + assert!(!grid.is_empty()); + + let x1 = grid.x1_grid(); + let x2 = grid.x2_grid(); + let mu2 = grid.mu2_grid(); + + let reference = grid.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + + let mut test = 0.0; + + // check `reference` against manually calculated result from q2 slices + for ((_, ix1, ix2), value) in grid.indexed_iter() { + test += value / (x1[ix1] * x2[ix2]); + } + + assert_approx_eq!(f64, test, reference, ulps = 8); + + grid + } + + fn 
test_merge_method(mut grid1: G, mut grid2: G, mut grid3: G) + where + SubgridEnum: From, + { + grid1.fill(&Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid1.fill(&Ntuple { + x1: 0.9, + x2: 0.1, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid1.fill(&Ntuple { + x1: 0.009, + x2: 0.01, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid1.fill(&Ntuple { + x1: 0.009, + x2: 0.5, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + + assert!(!grid1.is_empty()); + assert!(grid2.is_empty()); + + let x1 = grid1.x1_grid().into_owned(); + let x2 = grid1.x2_grid().into_owned(); + let mu2 = grid1.mu2_grid().into_owned(); + + let reference = + grid1.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + + // merge filled grid into empty one + grid2.merge(&mut grid1.into(), false); + assert!(!grid2.is_empty()); + + let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + + assert_approx_eq!(f64, reference, merged, ulps = 8); + + grid3.fill(&Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid3.fill(&Ntuple { + x1: 0.9, + x2: 0.1, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid3.fill(&Ntuple { + x1: 0.009, + x2: 0.01, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + grid3.fill(&Ntuple { + x1: 0.009, + x2: 0.5, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + + grid2.merge(&mut grid3.into(), false); + + let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + + assert_approx_eq!(f64, 2.0 * reference, merged, ulps = 8); + } + + fn test_empty_subgrid(mut grid: G) { + // this following events should be skipped + + // q2 is too large + grid.fill(&Ntuple { + x1: 0.5, + x2: 0.5, + q2: 2e+8, + weight: 1.0, + }); + // q2 is too small + grid.fill(&Ntuple { + x1: 0.5, + x2: 0.5, + q2: 5e+1, + weight: 1.0, + }); + // x1 is too large + grid.fill(&Ntuple { + x1: 1.1, + x2: 0.5, + q2: 1e+3, + weight: 1.0, + }); + // x1 is too small + 
grid.fill(&Ntuple { + x1: 0.5, + x2: 1e-7, + q2: 1e+3, + weight: 1.0, + }); + // x1 is too large + grid.fill(&Ntuple { + x1: 0.5, + x2: 1.1, + q2: 1e+3, + weight: 1.0, + }); + // x1 is too small + grid.fill(&Ntuple { + x1: 1e-7, + x2: 0.5, + q2: 1e+3, + weight: 1.0, + }); + + let x1 = grid.x1_grid(); + let x2 = grid.x2_grid(); + let mu2 = grid.mu2_grid(); + + let result = grid.convolve(&x1, &x2, &mu2, &mut |_, _, _| 1.0); + + assert_eq!(result, 0.0); + } + + #[test] + fn q2_slice_v1() { + let subgrid = test_q2_slice_methods(LagrangeSubgridV1::new(&SubgridParams::default())); + + assert_eq!( + subgrid.stats(), + Stats { + total: 10000, + allocated: 10000, + zeros: 256, + overhead: 0, + bytes_per_value: 8 + } + ); + } + + #[test] + fn q2_slice_v2() { + let subgrid = test_q2_slice_methods(LagrangeSubgridV2::new( + &SubgridParams::default(), + &ExtraSubgridParams::default(), + )); + + assert_eq!( + subgrid.stats(), + Stats { + total: 10000, + allocated: 10000, + zeros: 256, + overhead: 0, + bytes_per_value: 8 + } + ); + } + + #[test] + fn sparse_q2_slice() { + let subgrid = + test_q2_slice_methods(LagrangeSparseSubgridV1::new(&SubgridParams::default())); + + assert_eq!( + subgrid.stats(), + Stats { + total: 100000, + allocated: 432, + zeros: 176, + overhead: 402, + bytes_per_value: 8 + } + ); + } + + #[test] + fn fill_zero_v1() { + let mut subgrid = LagrangeSubgridV1::new(&SubgridParams::default()); + + subgrid.fill(&Ntuple { + x1: 0.5, + x2: 0.5, + q2: 1000.0, + weight: 0.0, + }); + + assert!(subgrid.is_empty()); + assert_eq!(subgrid.indexed_iter().count(), 0); + } + + #[test] + fn fill_zero_v1_sparse() { + let mut subgrid = LagrangeSparseSubgridV1::new(&SubgridParams::default()); + + subgrid.fill(&Ntuple { + x1: 0.5, + x2: 0.5, + q2: 1000.0, + weight: 0.0, + }); + + assert!(subgrid.is_empty()); + assert_eq!(subgrid.indexed_iter().count(), 0); + } + + #[test] + fn fill_zero_v2() { + let mut subgrid = + LagrangeSubgridV2::new(&SubgridParams::default(), 
&ExtraSubgridParams::default()); + + subgrid.fill(&Ntuple { + x1: 0.5, + x2: 0.5, + q2: 1000.0, + weight: 0.0, + }); + + assert!(subgrid.is_empty()); + assert_eq!(subgrid.indexed_iter().count(), 0); + } + + #[test] + fn from() { + // check conversion of empty grids + let mut dense = LagrangeSubgridV1::new(&SubgridParams::default()); + assert!(dense.is_empty()); + let sparse = LagrangeSparseSubgridV1::from(&dense); + assert!(sparse.is_empty()); + + let mu2 = dense.mu2_grid().into_owned(); + let x1 = dense.x1_grid().into_owned(); + let x2 = dense.x2_grid().into_owned(); + + assert_eq!(mu2, *sparse.mu2_grid()); + assert_eq!(x1, *sparse.x1_grid()); + assert_eq!(x2, *sparse.x2_grid()); + + // check conversion of a filled grid + dense.fill(&Ntuple { + x1: 0.1, + x2: 0.2, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + dense.fill(&Ntuple { + x1: 0.9, + x2: 0.1, + q2: 90.0_f64.powi(2), + weight: 1.0, + }); + + assert!(!dense.is_empty()); + + let sparse = LagrangeSparseSubgridV1::from(&dense); + assert!(!sparse.is_empty()); + + let reference = + dense.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + let converted = + sparse.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); + + assert_approx_eq!(f64, reference, converted, ulps = 8); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn merge_dense_v1_with_sparse() { + let mut dense = LagrangeSubgridV1::new(&SubgridParams::default()); + let sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); + + dense.merge(&mut sparse.into(), false); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn merge_dense_v1_with_dense_v2() { + let mut one = LagrangeSubgridV1::new(&SubgridParams::default()); + let two = LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); + + one.merge(&mut two.into(), false); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn merge_dense_v2_with_dense_v1() { + let 
mut two = + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); + let one = LagrangeSubgridV1::new(&SubgridParams::default()); + + two.merge(&mut one.into(), false); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn merge_dense_v2_with_sparse() { + let mut dense = + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); + let sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); + + dense.merge(&mut sparse.into(), false); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn merge_sparse_with_dense_v1() { + let mut sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); + let dense = LagrangeSubgridV1::new(&SubgridParams::default()); + + sparse.merge(&mut dense.into(), false); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn merge_sparse_with_dense_v2() { + let mut sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); + let dense = + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); + + sparse.merge(&mut dense.into(), false); + } + + #[test] + fn merge_dense_v1() { + test_merge_method( + LagrangeSubgridV1::new(&SubgridParams::default()), + LagrangeSubgridV1::new(&SubgridParams::default()), + LagrangeSubgridV1::new(&SubgridParams::default()), + ); + } + + #[test] + fn merge_dense_v2() { + test_merge_method( + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), + ); + } + + #[test] + fn merge_sparse() { + test_merge_method( + LagrangeSparseSubgridV1::new(&SubgridParams::default()), + LagrangeSparseSubgridV1::new(&SubgridParams::default()), + LagrangeSparseSubgridV1::new(&SubgridParams::default()), + ); + } + + #[test] + fn empty_v1() { + 
test_empty_subgrid(LagrangeSubgridV1::new(&SubgridParams::default())); + } + + #[test] + fn empty_v2() { + test_empty_subgrid(LagrangeSubgridV2::new( + &SubgridParams::default(), + &ExtraSubgridParams::default(), + )); + } + + #[test] + fn empty_sparse() { + test_empty_subgrid(LagrangeSparseSubgridV1::new(&SubgridParams::default())); + } +} diff --git a/pineappl_v0/src/lib.rs b/pineappl_v0/src/lib.rs new file mode 100644 index 000000000..5a0387999 --- /dev/null +++ b/pineappl_v0/src/lib.rs @@ -0,0 +1,51 @@ +//! `PineAPPL` is not an extension of `APPLgrid`. +//! +//! # Overview +//! +//! The main type of this crate is [`Grid`], which represents the interpolation grids that +//! `PineAPPL` implements. Roughly speaking, a `Grid` is a three-dimensional array of [`Subgrid`] +//! objects together with metadata. The three dimensions are +//! 1. (perturbative) orders, represented by the type [`Order`] and accessible by +//! [`Grid::orders()`], +//! 2. bins, whose limits can be accessed by [`Grid::bin_info()`], and +//! 3. channels, whose definition is returned by [`Grid::channels()`]. +//! +//! `Subgrid` is a `trait` and objects that implement it are of the type [`SubgridEnum`]. The +//! latter is an `enum` of different types that are optimized to different scenarios: fast event +//! filling, small storage profile, etc. +//! +//! [`Grid`]: grid::Grid +//! [`Grid::bin_info()`]: grid::Grid::bin_info +//! [`Grid::channels()`]: grid::Grid::channels +//! [`Grid::orders()`]: grid::Grid::orders +//! [`Subgrid`]: subgrid::Subgrid +//! [`SubgridEnum`]: subgrid::SubgridEnum +//! [`Order`]: order::Order +//! +//! ## Metadata +//! +//! Metadata is a collection of key--value pairs, in which both keys and values are `String` +//! objects. In metadata anything a user whishes can be stored. However, there are [special keys], +//! which have meaning to `PineAPPL` and/or its CLI `pineappl`. This metadata enables the CLI to +//! 
automatically generate plots that are correctly labeled, for instance. For more applications +//! see also the [CLI tutorial]. +//! +//! [special keys]: https://nnpdf.github.io/pineappl/docs/metadata.html +//! [CLI tutorial]: https://nnpdf.github.io/pineappl/docs/cli-tutorial.html + +mod convert; + +pub mod bin; +pub mod boc; +pub mod convolutions; +pub mod empty_subgrid; +pub mod evolution; +pub mod fk_table; +pub mod grid; +pub mod import_only_subgrid; +pub mod lagrange_subgrid; +pub mod ntuple_subgrid; +pub mod packed_array; +pub mod pids; +pub mod sparse_array3; +pub mod subgrid; diff --git a/pineappl_v0/src/ntuple_subgrid.rs b/pineappl_v0/src/ntuple_subgrid.rs new file mode 100644 index 000000000..282d9fffc --- /dev/null +++ b/pineappl_v0/src/ntuple_subgrid.rs @@ -0,0 +1,198 @@ +//! Provides an implementation of the `Grid` trait with n-tuples. + +use super::grid::Ntuple; +use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use serde::{Deserialize, Serialize}; +use std::borrow::Cow; +use std::mem; + +/// Structure holding a grid with an n-tuple as the storage method for weights. +#[derive(Clone, Default, Deserialize, Serialize)] +pub struct NtupleSubgridV1 { + ntuples: Vec>, +} + +impl NtupleSubgridV1 { + /// Constructor. 
+ #[must_use] + pub const fn new() -> Self { + Self { ntuples: vec![] } + } +} + +impl Subgrid for NtupleSubgridV1 { + fn convolve( + &self, + _: &[f64], + _: &[f64], + _: &[Mu2], + _: &mut dyn FnMut(usize, usize, usize) -> f64, + ) -> f64 { + panic!("NtupleSubgridV1 doesn't support the convolve operation"); + } + + fn fill(&mut self, ntuple: &Ntuple) { + if ntuple.weight == 0.0 { + return; + } + + self.ntuples.push(ntuple.clone()); + } + + fn mu2_grid(&self) -> Cow<[Mu2]> { + Cow::Borrowed(&[]) + } + + fn x1_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&[]) + } + + fn x2_grid(&self) -> Cow<[f64]> { + Cow::Borrowed(&[]) + } + + fn is_empty(&self) -> bool { + self.ntuples.is_empty() + } + + fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { + assert!(!transpose); + + if let SubgridEnum::NtupleSubgridV1(other_grid) = other { + self.ntuples.append(&mut other_grid.ntuples); + } else { + panic!("NtupleSubgridV1 doesn't support the merge operation with subgrid types other than itself"); + } + } + + fn scale(&mut self, factor: f64) { + self.ntuples.iter_mut().for_each(|t| t.weight *= factor); + } + + fn symmetrize(&mut self) {} + + fn clone_empty(&self) -> SubgridEnum { + Self::new().into() + } + + fn indexed_iter(&self) -> SubgridIndexedIter { + panic!("NtupleSubgridV1 doesn't support the indexed_iter operation"); + } + + fn stats(&self) -> Stats { + Stats { + total: self.ntuples.len(), + allocated: self.ntuples.len(), + zeros: 0, + overhead: 0, + bytes_per_value: mem::size_of::>(), + } + } + + fn static_scale(&self) -> Option { + todo!() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::lagrange_subgrid::LagrangeSubgridV2; + use crate::subgrid::{ExtraSubgridParams, SubgridParams}; + + #[test] + #[should_panic(expected = "NtupleSubgridV1 doesn't support the convolve operation")] + fn convolve() { + NtupleSubgridV1::new().convolve(&[], &[], &[], &mut |_, _, _| 0.0); + } + + #[test] + fn fill_zero() { + let mut subgrid = NtupleSubgridV1::new(); + 
+ subgrid.fill(&Ntuple { + x1: 0.5, + x2: 0.5, + q2: 1000.0, + weight: 0.0, + }); + + assert!(subgrid.is_empty()); + } + + #[test] + #[should_panic(expected = "NtupleSubgridV1 doesn't support the indexed_iter operation")] + fn indexed_iter() { + // `next` isn't called because `indexed_iter` panics, but it suppresses a warning about an + // unused result + NtupleSubgridV1::new().indexed_iter().next(); + } + + #[test] + fn stats() { + let subgrid = NtupleSubgridV1::new(); + assert_eq!( + subgrid.stats(), + Stats { + total: 0, + allocated: 0, + zeros: 0, + overhead: 0, + bytes_per_value: 32, + } + ); + } + + #[test] + #[should_panic(expected = "not yet implemented")] + fn static_scale() { + let subgrid = NtupleSubgridV1::new(); + subgrid.static_scale(); + } + + #[test] + #[should_panic( + expected = "NtupleSubgridV1 doesn't support the merge operation with subgrid types other than itself" + )] + fn merge_with_lagrange_subgrid() { + let mut subgrid = NtupleSubgridV1::new(); + let mut other = + LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()) + .into(); + subgrid.merge(&mut other, false); + } + + #[test] + fn test() { + let mut subgrid1: SubgridEnum = NtupleSubgridV1::new().into(); + + assert!(subgrid1.is_empty()); + + subgrid1.fill(&Ntuple { + x1: 0.0, + x2: 0.0, + q2: 0.0, + weight: 1.0, + }); + + assert!(!subgrid1.is_empty()); + + assert_eq!(subgrid1.mu2_grid().as_ref(), []); + assert_eq!(subgrid1.x1_grid().as_ref(), []); + assert_eq!(subgrid1.x2_grid().as_ref(), []); + + subgrid1.symmetrize(); + subgrid1.scale(2.0); + + let mut subgrid2: SubgridEnum = subgrid1.clone_empty(); + + subgrid2.fill(&Ntuple { + x1: 0.0, + x2: 0.0, + q2: 0.0, + weight: 1.0, + }); + + subgrid2.merge(&mut subgrid1, false); + } +} diff --git a/pineappl_v0/src/packed_array.rs b/pineappl_v0/src/packed_array.rs new file mode 100644 index 000000000..4f29764a7 --- /dev/null +++ b/pineappl_v0/src/packed_array.rs @@ -0,0 +1,708 @@ +//! 
Provides the [`PackedArray`] struct. + +use ndarray::ArrayView3; +use serde::{Deserialize, Serialize}; +use std::iter; +use std::mem; +use std::ops::{Index, IndexMut, MulAssign}; + +/// `D`-dimensional array similar to [`ndarray::ArrayBase`], except that `T::default()` is not +/// stored to save space. Instead, adjacent non-default elements are grouped together and the index +/// of their first element (`start_index`) and the length of the group (`lengths`) is stored. +#[derive(Clone, Deserialize, Serialize)] +pub struct PackedArray { + /// The actual values stored in the array. The length of `entries` is always the sum of the + /// elements in `lengths`. + entries: Vec, + /// The indices of the first elements in each group. `start_indices[i]` corresponds to the + /// group with index `i`. + start_indices: Vec, + /// The length of each group. `lengths[i]` corresponds to the group with index `i`. + lengths: Vec, + /// The shape (dimensions) of the array. + shape: Vec, +} + +impl PackedArray { + /// Constructs a new and empty `PackedArray` of shape `shape`. + #[must_use] + pub fn new(shape: [usize; D]) -> Self { + Self { + entries: vec![], + start_indices: vec![], + lengths: vec![], + shape: shape.to_vec(), + } + } + + /// Returns `true` if the array contains no element. + #[must_use] + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + /// Returns the shape of the array. + #[must_use] + pub fn shape(&self) -> &[usize] { + &self.shape + } + + /// Clears the contents of the array. + pub fn clear(&mut self) { + self.entries.clear(); + self.start_indices.clear(); + self.lengths.clear(); + } + + /// Returns the overhead of storing the `start_indices` and the `lengths` of the groups, in + /// units of `f64`. + #[must_use] + pub fn overhead(&self) -> usize { + ((self.start_indices.len() + self.lengths.len()) * mem::size_of::()) + / mem::size_of::() + } + + /// Returns the number of default (zero) elements that are explicitly stored in `entries`. 
If + /// there is one default element between adjacent groups, it is more economical to store the + /// one default element explicitly and merge the two groups, than to store the `start_indices` + /// and `lengths` of both groups. + #[must_use] + pub fn explicit_zeros(&self) -> usize { + self.entries.iter().filter(|x| **x == T::default()).count() + } + + /// Returns the number of non-default (non-zero) elements stored in the array. + #[must_use] + pub fn non_zeros(&self) -> usize { + self.entries.iter().filter(|x| **x != T::default()).count() + } + + /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of + /// an iterator element is `([usize; D], T)` where the first element of the tuple is the index + /// and the second element is the value. + pub fn indexed_iter(&self) -> impl Iterator + '_ { + self.start_indices + .iter() + .zip(&self.lengths) + .flat_map(|(&start_index, &length)| { + (start_index..(start_index + length)).map(|i| unravel_index(i, &self.shape)) + }) + .zip(&self.entries) + .filter(|&(_, entry)| *entry != Default::default()) + .map(|(indices, entry)| (indices, *entry)) + } +} + +impl, const D: usize> MulAssign for PackedArray { + fn mul_assign(&mut self, rhs: T) { + self.entries.iter_mut().for_each(|x| *x *= rhs); + } +} + +impl PackedArray { + /// Converts `array` into a `PackedArray`. + #[must_use] + pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { + let shape = array.shape(); + + let mut result = Self::new([xsize, shape[1], shape[2]]); + + for ((i, j, k), &entry) in array + .indexed_iter() + .filter(|(_, &entry)| entry != Default::default()) + { + result[[i + xstart, j, k]] = entry; + } + + result + } +} + +/// Converts a `multi_index` into a flat index. 
+fn ravel_multi_index(multi_index: &[usize; D], shape: &[usize]) -> usize { + assert_eq!(multi_index.len(), shape.len()); + + multi_index + .iter() + .zip(shape) + .fold(0, |acc, (i, d)| acc * d + i) +} + +/// Converts a flat `index` into a `multi_index`. +fn unravel_index(mut index: usize, shape: &[usize]) -> [usize; D] { + assert!(index < shape.iter().product()); + let mut indices = [0; D]; + for (i, d) in indices.iter_mut().zip(shape).rev() { + *i = index % d; + index /= d; + } + indices +} + +impl Index<[usize; D]> for PackedArray { + type Output = T; + + fn index(&self, index: [usize; D]) -> &Self::Output { + assert_eq!(index.len(), self.shape.len()); + assert!( + index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), + "index {:?} is out of bounds for array of shape {:?}", + index, + self.shape + ); + + let raveled_index = ravel_multi_index(&index, &self.shape); + let point = self.start_indices.partition_point(|&i| i <= raveled_index); + + assert!( + point > 0, + "entry at index {index:?} is implicitly set to the default value" + ); + + let start_index = self.start_indices[point - 1]; + let length = self.lengths[point - 1]; + + let point_entries = + self.lengths.iter().take(point - 1).sum::() + raveled_index - start_index; + + assert!( + raveled_index < (start_index + length), + "entry at index {index:?} is implicitly set to the default value" + ); + + &self.entries[point_entries] + } +} + +impl IndexMut<[usize; D]> + for PackedArray +{ + fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { + assert_eq!(index.len(), self.shape.len()); + + // Panic if the index value for any dimension is greater or equal than the length of this + // dimension. + assert!( + index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), + "index {:?} is out of bounds for array of shape {:?}", + index, + self.shape + ); + + // The insertion cases are: + // 1. this array already stores an element at `index`: + // -> we just have to update this element + // 2. 
this array does not store an element at `index`: + // a. the distance of the (raveled) `index` is `threshold_distance` away from the next + // or previous element that is already stored: + // -> we can merge the new element into already stored groups, potentially padding + // with `T::default()` elements + // b. the distance of the (raveled) `index` from the existing elements is greater than + // `threshold_distance`: + // -> we insert the element as a new group + + let raveled_index = ravel_multi_index(&index, &self.shape); + + // To determine which groups the new element is close to, `point` is the index of the + // start_index of the first group after the new element. `point` is 0 if no elements before + // the new element are stored, and point is `self.start_indices.len()` if no elements after + // the new element are stored. + let point = self.start_indices.partition_point(|&i| i <= raveled_index); + + // `point_entries` is the index of the first element of the next group, given in + // `self.entries`, i.e. the element at index `self.start_indices[point]`. + let point_entries = self.lengths.iter().take(point).sum::(); + + // Maximum distance for merging groups. If the new element is within `threshold_distance` + // of an existing group (i.e. there are `threshold_distance - 1` implicit elements + // between them), we merge the new element into the existing group. We choose 2 as the + // `threshold_distance` based on memory: in the case of `T` = `f64`, it is more economical + // to store one zero explicitly than to store the start_index and length of a new group. + let threshold_distance = 2; + + // If `point > 0`, there is at least one group preceding the new element. Thus, in the + // following we determine if we can insert the new element into this group. + if point > 0 { + // start_index and length of the group before the new element, i.e. 
the group + // (potentially) getting the new element + let start_index = self.start_indices[point - 1]; + let length = self.lengths[point - 1]; + + // Case 1: an element is already stored at this `index` + if raveled_index < start_index + length { + return &mut self.entries[point_entries - length + raveled_index - start_index]; + // Case 2a: the new element can be merged into the preceding group + } else if raveled_index < start_index + length + threshold_distance { + let distance = raveled_index - (start_index + length) + 1; + // Merging happens by increasing the length of the group + self.lengths[point - 1] += distance; + // and inserting the necessary number of default elements. + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance), + ); + + // If the new element is within `threshold_distance` of the *next* group, we merge + // the next group into this group. + if let Some(start_index_next) = self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + let distance_next = start_index_next - raveled_index; + + // Increase the length of this group + self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; + // and remove the next group. we don't have to manipulate `self.entries`, + // since the grouping of the elements is handled only by + // `self.start_indices` and `self.lengths` + self.lengths.remove(point); + self.start_indices.remove(point); + // Insert the default elements between the groups. + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance_next - 1), + ); + } + } + + return &mut self.entries[point_entries - 1 + distance]; + } + } + + // Case 2a: the new element can be merged into the next group. No `self.lengths.remove` and + // `self.start_indices.remove` here, since we are not merging two groups. 
+ if let Some(start_index_next) = self.start_indices.get(point) { + if raveled_index + threshold_distance >= *start_index_next { + let distance = start_index_next - raveled_index; + + self.start_indices[point] = raveled_index; + self.lengths[point] += distance; + self.entries.splice( + point_entries..point_entries, + iter::repeat(Default::default()).take(distance), + ); + return &mut self.entries[point_entries]; + } + } + + // Case 2b: we insert a new group of length 1 + self.start_indices.insert(point, raveled_index); + self.lengths.insert(point, 1); + self.entries.insert(point_entries, Default::default()); + + &mut self.entries[point_entries] + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ndarray::Array3; + use std::mem; + + #[test] + fn unravel_index() { + assert_eq!(super::unravel_index(0, &[3, 2]), [0, 0]); + assert_eq!(super::unravel_index(1, &[3, 2]), [0, 1]); + assert_eq!(super::unravel_index(2, &[3, 2]), [1, 0]); + assert_eq!(super::unravel_index(3, &[3, 2]), [1, 1]); + assert_eq!(super::unravel_index(4, &[3, 2]), [2, 0]); + assert_eq!(super::unravel_index(5, &[3, 2]), [2, 1]); + } + + #[test] + fn ravel_multi_index() { + assert_eq!(super::ravel_multi_index(&[0, 0], &[3, 2]), 0); + assert_eq!(super::ravel_multi_index(&[0, 1], &[3, 2]), 1); + assert_eq!(super::ravel_multi_index(&[1, 0], &[3, 2]), 2); + assert_eq!(super::ravel_multi_index(&[1, 1], &[3, 2]), 3); + assert_eq!(super::ravel_multi_index(&[2, 0], &[3, 2]), 4); + assert_eq!(super::ravel_multi_index(&[2, 1], &[3, 2]), 5); + } + + #[test] + fn index() { + let mut a = PackedArray::::new([4, 2]); + + a[[0, 0]] = 1.0; + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a.entries, vec![1.0]); + assert_eq!(a.start_indices, vec![0]); + assert_eq!(a.lengths, vec![1]); + + a[[3, 0]] = 2.0; + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a.entries, vec![1.0, 2.0]); + assert_eq!(a.start_indices, vec![0, 6]); + assert_eq!(a.lengths, vec![1, 1]); + + a[[3, 1]] = 3.0; + assert_eq!(a[[0, 
0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a.entries, vec![1.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0, 6]); + assert_eq!(a.lengths, vec![1, 2]); + + a[[2, 0]] = 3.5; + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a[[2, 0]], 3.5); + assert_eq!(a.entries, vec![1.0, 3.5, 0.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0, 4]); + assert_eq!(a.lengths, vec![1, 4]); + + a[[2, 0]] = 4.0; + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a[[2, 0]], 4.0); + assert_eq!(a.entries, vec![1.0, 4.0, 0.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0, 4]); + assert_eq!(a.lengths, vec![1, 4]); + + a[[1, 0]] = 5.0; + assert_eq!(a[[0, 0]], 1.0); + assert_eq!(a[[3, 0]], 2.0); + assert_eq!(a[[3, 1]], 3.0); + assert_eq!(a[[2, 0]], 4.0); + assert_eq!(a[[1, 0]], 5.0); + assert_eq!(a.entries, vec![1.0, 0.0, 5.0, 0.0, 4.0, 0.0, 2.0, 3.0]); + assert_eq!(a.start_indices, vec![0]); + assert_eq!(a.lengths, vec![8]); + } + + #[test] + fn iter() { + let mut a = PackedArray::::new([6, 5]); + a[[2, 2]] = 1; + a[[2, 4]] = 2; + a[[4, 1]] = 3; + a[[4, 4]] = 4; + a[[5, 0]] = 5; + assert_eq!( + a.indexed_iter().collect::>(), + &[ + ([2, 2], 1), + ([2, 4], 2), + ([4, 1], 3), + ([4, 4], 4), + ([5, 0], 5), + ] + ); + } + + #[test] + fn index_access() { + let mut array = PackedArray::new([40, 50, 50]); + + // after creation the array must be empty + assert_eq!(array.overhead(), 0); + assert!(array.is_empty()); + + // insert the first element + array[[5, 10, 10]] = 1.0; + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 1); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 2); + assert!(!array.is_empty()); + + // insert an element after the first one + array[[8, 10, 10]] = 2.0; + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 2); + 
assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 4); + assert!(!array.is_empty()); + + // insert an element before the first one + array[[1, 10, 10]] = 3.0; + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 3); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 6); + assert!(!array.is_empty()); + + array[[1, 10, 11]] = 4.0; + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 4); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 6); + assert!(!array.is_empty()); + + array[[1, 10, 9]] = 5.0; + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 5); + assert_eq!(array.explicit_zeros(), 0); + // dbg!(&array.start_indices); + // dbg!(&array.lengths); + assert_eq!(array.overhead(), 6); + assert!(!array.is_empty()); + + array[[1, 10, 0]] = 6.0; + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 6); + assert_eq!(array.explicit_zeros(), 0); + assert_eq!(array.overhead(), 8); + assert!(!array.is_empty()); + + array[[1, 10, 2]] = 7.0; + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 7); + assert_eq!(array.overhead(), 8); + assert!(!array.is_empty()); + + // 
check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array.explicit_zeros(), 1); + + array[[1, 15, 2]] = 8.0; + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 8); + assert_eq!(array.overhead(), 10); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array.explicit_zeros(), 1); + + array[[1, 15, 4]] = 9.0; + assert_eq!(array[[1, 15, 4]], 9.0); + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 9); + assert_eq!(array.overhead(), 10); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 15, 3]], 0.0); + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array.explicit_zeros(), 2); + + array[[1, 15, 0]] = 10.0; + assert_eq!(array[[1, 15, 0]], 10.0); + assert_eq!(array[[1, 15, 4]], 9.0); + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.non_zeros(), 10); + assert_eq!(array.overhead(), 10); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 15, 1]], 0.0); + assert_eq!(array[[1, 15, 3]], 0.0); + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array.explicit_zeros(), 3); + } + + #[test] + #[should_panic(expected = "index [40, 0, 50] is 
out of bounds for array of shape [40, 50, 50]")] + fn index_mut_panic_dim0() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[40, 0, 50]] = 1.0; + } + + #[test] + #[should_panic(expected = "index [0, 50, 0] is out of bounds for array of shape [40, 50, 50]")] + fn index_mut_panic_dim1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 50, 0]] = 1.0; + } + + #[test] + #[should_panic(expected = "index [0, 0, 50] is out of bounds for array of shape [40, 50, 50]")] + fn index_mut_panic_dim2() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 0, 50]] = 1.0; + } + + #[test] + #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] + fn index_panic_dim0_0() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "entry at index [2, 0, 0] is implicitly set to the default value")] + fn index_panic_dim0_1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[2, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "index [1, 50, 0] is out of bounds for array of shape [40, 50, 50]")] + fn index_panic_dim1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[1, 50, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] + fn index_panic_dim2_0() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 0, 1]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "entry at index [0, 0, 2] is implicitly set to the default value")] + fn index_panic_dim2_1() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[0, 0, 1]] = 1.0; + + assert_eq!(array[[0, 0, 2]], 0.0); + } + + #[test] + fn indexed_iter() { + let mut array = PackedArray::new([40, 50, 50]); + + // check empty 
iterator + assert_eq!(array.indexed_iter().next(), None); + + // insert an element + array[[2, 3, 4]] = 1.0; + + let mut iter = array.indexed_iter(); + + // check iterator with one element + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), None); + + mem::drop(iter); + + // insert another element + array[[2, 3, 6]] = 2.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); + assert_eq!(iter.next(), None); + + mem::drop(iter); + + // insert yet another element + array[[4, 5, 7]] = 3.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); + assert_eq!(iter.next(), Some(([4, 5, 7], 3.0))); + assert_eq!(iter.next(), None); + + mem::drop(iter); + + // insert at the very first position + array[[2, 0, 0]] = 4.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(([2, 0, 0], 4.0))); + assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); + assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); + assert_eq!(iter.next(), Some(([4, 5, 7], 3.0))); + assert_eq!(iter.next(), None); + } + + #[test] + fn clear() { + let mut array = PackedArray::new([40, 50, 50]); + + array[[3, 5, 1]] = 1.0; + array[[7, 8, 9]] = 2.0; + array[[9, 1, 4]] = 3.0; + + assert!(!array.is_empty()); + assert_eq!(array.non_zeros(), 3); + assert_eq!(array.explicit_zeros(), 0); + + array.clear(); + + assert!(array.is_empty()); + assert_eq!(array.non_zeros(), 0); + assert_eq!(array.explicit_zeros(), 0); + } + + #[test] + fn from_ndarray() { + let mut ndarray = Array3::zeros((2, 50, 50)); + + ndarray[[0, 4, 3]] = 1.0; + ndarray[[0, 4, 4]] = 2.0; + ndarray[[0, 4, 6]] = 3.0; + ndarray[[0, 5, 1]] = 4.0; + ndarray[[0, 5, 7]] = 5.0; + ndarray[[1, 3, 9]] = 6.0; + + let array = PackedArray::from_ndarray(ndarray.view(), 3, 40); + + assert_eq!(array[[3, 4, 3]], 1.0); + assert_eq!(array[[3, 4, 4]], 
2.0); + assert_eq!(array[[3, 4, 5]], 0.0); + assert_eq!(array[[3, 4, 6]], 3.0); + assert_eq!(array[[3, 5, 1]], 4.0); + assert_eq!(array[[3, 5, 7]], 5.0); + assert_eq!(array[[4, 3, 9]], 6.0); + + assert_eq!(array.explicit_zeros(), 1); + } +} diff --git a/pineappl_v0/src/pids.rs b/pineappl_v0/src/pids.rs new file mode 100644 index 000000000..8e23eaa49 --- /dev/null +++ b/pineappl_v0/src/pids.rs @@ -0,0 +1,902 @@ +//! TODO + +use std::str::FromStr; +use thiserror::Error; + +const EVOL_BASIS_IDS: [i32; 12] = [100, 103, 108, 115, 124, 135, 200, 203, 208, 215, 224, 235]; + +/// Particle ID bases. In `PineAPPL` every particle is identified using a particle identifier +/// (PID), which is represented as an `i32`. The values of this `enum` specify how this value is +/// interpreted. +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +pub enum PidBasis { + /// This basis uses the [particle data group](https://pdg.lbl.gov/) (PDG) PIDs. For a complete + /// definition see the section 'Monte Carlo Particle Numbering Scheme' of the PDG Review, for + /// instance the [2023 review](https://pdg.lbl.gov/2023/mcdata/mc_particle_id_contents.html). + Pdg, + /// This basis specifies the evolution basis, which is the same as [`PidBasis::Pdg`], except + /// the following values have a special meaning: `100`, `103`, `108`, `115`, `124`, `135`, + /// `200`, `203`, `208`, `215`, `224`, `235`. + Evol, +} + +impl FromStr for PidBasis { + type Err = UnknownPidBasis; + + fn from_str(s: &str) -> Result { + match s { + "Pdg" | "PDG" | "pdg_mc_ids" => Ok(Self::Pdg), + "Evol" | "EVOL" | "evol" => Ok(Self::Evol), + _ => Err(UnknownPidBasis { + basis: s.to_owned(), + }), + } + } +} + +impl PidBasis { + /// Return the charge-conjugated particle ID of `pid` given in the basis of `self`. The + /// returned tuple contains a factor that possibly arises during the charge conjugation. 
+ #[must_use] + pub const fn charge_conjugate(&self, pid: i32) -> (i32, f64) { + match (*self, pid) { + // TODO: in the general case we should allow to return a vector of tuples + (Self::Evol, 100 | 103 | 108 | 115 | 124 | 135) => (pid, 1.0), + (Self::Evol, 200 | 203 | 208 | 215 | 224 | 235) => (pid, -1.0), + (Self::Evol | Self::Pdg, _) => (charge_conjugate_pdg_pid(pid), 1.0), + } + } + + /// Given the particle IDs in `pids`, guess the [`PidBasis`]. + #[must_use] + pub fn guess(pids: &[i32]) -> Self { + // if we find more than 3 pids that are recognized to be from the evolution basis, declare + // it to be the evolution basis (that's a heuristic), otherwise PDG MC IDs + if pids + .iter() + .filter(|&pid| EVOL_BASIS_IDS.iter().any(|evol_pid| pid == evol_pid)) + .count() + > 3 + { + Self::Evol + } else { + Self::Pdg + } + } +} + +/// Error returned by [`PidBasis::from_str`] when passed with an unknown argument. +#[derive(Debug, Error)] +#[error("unknown PID basis: {basis}")] +pub struct UnknownPidBasis { + basis: String, +} + +/// Translates IDs from the evolution basis into IDs using PDG Monte Carlo IDs. 
+#[must_use] +pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { + match id { + 100 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, 1.0), + (-5, 1.0), + (6, 1.0), + (-6, 1.0), + ], + 103 => vec![(2, 1.0), (-2, 1.0), (1, -1.0), (-1, -1.0)], + 108 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, -2.0), + (-3, -2.0), + ], + 115 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, -3.0), + (-4, -3.0), + ], + 124 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, -4.0), + (-5, -4.0), + ], + 135 => vec![ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, 1.0), + (-5, 1.0), + (6, -5.0), + (-6, -5.0), + ], + 200 => vec![ + (1, 1.0), + (-1, -1.0), + (2, 1.0), + (-2, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, 1.0), + (-5, -1.0), + (6, 1.0), + (-6, -1.0), + ], + 203 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], + 208 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, -2.0), + (-3, 2.0), + ], + 215 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, -3.0), + (-4, 3.0), + ], + 224 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, -4.0), + (-5, 4.0), + ], + 235 => vec![ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, 1.0), + (-5, -1.0), + (6, -5.0), + (-6, 5.0), + ], + _ => vec![(id, 1.0)], + } +} + +/// Translates PDG Monte Carlo IDs to particle IDs from the evolution basis. 
+#[must_use] +pub fn pdg_mc_pids_to_evol(pid: i32) -> Vec<(i32, f64)> { + match pid { + -6 => vec![ + (100, 1.0 / 12.0), + (135, -1.0 / 12.0), + (200, -1.0 / 12.0), + (235, 1.0 / 12.0), + ], + -5 => vec![ + (100, 1.0 / 12.0), + (124, -1.0 / 10.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (224, 1.0 / 10.0), + (235, -1.0 / 60.0), + ], + -4 => vec![ + (100, 1.0 / 12.0), + (115, -1.0 / 8.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (215, 1.0 / 8.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + -3 => vec![ + (100, 1.0 / 12.0), + (108, -1.0 / 6.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (208, 1.0 / 6.0), + (215, -1.0 / 24.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + -2 => vec![ + (100, 1.0 / 12.0), + (103, 1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (203, -1.0 / 4.0), + (208, -1.0 / 12.0), + (215, -1.0 / 24.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + -1 => vec![ + (100, 1.0 / 12.0), + (103, -1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, -1.0 / 12.0), + (203, 1.0 / 4.0), + (208, -1.0 / 12.0), + (215, -1.0 / 24.0), + (224, -1.0 / 40.0), + (235, -1.0 / 60.0), + ], + 1 => vec![ + (100, 1.0 / 12.0), + (103, -1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (203, -1.0 / 4.0), + (208, 1.0 / 12.0), + (215, 1.0 / 24.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 2 => vec![ + (100, 1.0 / 12.0), + (103, 1.0 / 4.0), + (108, 1.0 / 12.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (203, 1.0 / 4.0), + (208, 1.0 / 12.0), + (215, 1.0 / 24.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 3 => vec![ + (100, 1.0 / 12.0), + (108, -1.0 / 6.0), + (115, 1.0 / 24.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (208, -1.0 
/ 6.0), + (215, 1.0 / 24.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 4 => vec![ + (100, 1.0 / 12.0), + (115, -1.0 / 8.0), + (124, 1.0 / 40.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (215, -1.0 / 8.0), + (224, 1.0 / 40.0), + (235, 1.0 / 60.0), + ], + 5 => vec![ + (100, 1.0 / 12.0), + (124, -1.0 / 10.0), + (135, 1.0 / 60.0), + (200, 1.0 / 12.0), + (224, -1.0 / 10.0), + (235, 1.0 / 60.0), + ], + 6 => vec![ + (100, 1.0 / 12.0), + (135, -1.0 / 12.0), + (200, 1.0 / 12.0), + (235, -1.0 / 12.0), + ], + _ => vec![(pid, 1.0)], + } +} + +/// Return the charge-conjugated PDG ID of `pid`. +#[must_use] +pub const fn charge_conjugate_pdg_pid(pid: i32) -> i32 { + match pid { + 21 | 22 => pid, + _ => -pid, + } +} + +/// Given `tuples` represting a linear combination of PDG MC IDs, return a PID for the `evol` +/// basis. The order of each tuple in `tuples` is not relevant. This function inverts +/// [`evol_to_pdg_mc_ids`]. If the inversion is not possible, `None` is returned. +#[must_use] +pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { + let mut tuples = tuples.to_vec(); + tuples.retain(|&(_, factor)| factor != 0.0); + tuples.sort_by_key(|&(id, _)| id); + let tuples = tuples; + + for &evol_pid in &EVOL_BASIS_IDS { + let mut evol_vec = evol_to_pdg_mc_ids(evol_pid); + evol_vec.sort_by_key(|&(id, _)| id); + let evol_vec = evol_vec; + + if evol_vec == tuples { + return Some(evol_pid); + } + } + + let non_zero: Vec<_> = tuples + .into_iter() + .filter(|&(_, factor)| factor != 0.0) + .collect(); + + if let &[(pid, factor)] = non_zero.as_slice() { + if factor == 1.0 { + return Some(pid); + } + } + + None +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::boc::Channel; + use crate::channel; + use float_cmp::assert_approx_eq; + + #[test] + fn test() { + // check photon + assert_eq!(evol_to_pdg_mc_ids(21), [(21, 1.0)]); + + // check gluon + assert_eq!(evol_to_pdg_mc_ids(22), [(22, 1.0)]); + + // check singlet + assert_eq!( + evol_to_pdg_mc_ids(100), + [ + 
(2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, 1.0), + (-5, 1.0), + (6, 1.0), + (-6, 1.0), + ] + ); + + // check T3 + assert_eq!( + evol_to_pdg_mc_ids(103), + [(2, 1.0), (-2, 1.0), (1, -1.0), (-1, -1.0)] + ); + + // check T8 + assert_eq!( + evol_to_pdg_mc_ids(108), + [ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, -2.0), + (-3, -2.0), + ], + ); + + // check T15 + assert_eq!( + evol_to_pdg_mc_ids(115), + [ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, -3.0), + (-4, -3.0), + ], + ); + + // check T24 + assert_eq!( + evol_to_pdg_mc_ids(124), + [ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, -4.0), + (-5, -4.0), + ], + ); + + // check T35 + assert_eq!( + evol_to_pdg_mc_ids(135), + [ + (2, 1.0), + (-2, 1.0), + (1, 1.0), + (-1, 1.0), + (3, 1.0), + (-3, 1.0), + (4, 1.0), + (-4, 1.0), + (5, 1.0), + (-5, 1.0), + (6, -5.0), + (-6, -5.0), + ], + ); + + // check valence + assert_eq!( + evol_to_pdg_mc_ids(200), + [ + (1, 1.0), + (-1, -1.0), + (2, 1.0), + (-2, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, 1.0), + (-5, -1.0), + (6, 1.0), + (-6, -1.0), + ], + ); + + // check V3 + assert_eq!( + evol_to_pdg_mc_ids(203), + [(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], + ); + + // check V8 + assert_eq!( + evol_to_pdg_mc_ids(208), + [ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, -2.0), + (-3, 2.0), + ], + ); + + // check V15 + assert_eq!( + evol_to_pdg_mc_ids(215), + [ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, -3.0), + (-4, 3.0), + ], + ); + + // check V24 + assert_eq!( + evol_to_pdg_mc_ids(224), + [ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, -4.0), + (-5, 4.0), + ], + ); + + // check V35 + assert_eq!( + evol_to_pdg_mc_ids(235), + [ + (2, 1.0), + (-2, -1.0), + (1, 1.0), + (-1, -1.0), + (3, 
1.0), + (-3, -1.0), + (4, 1.0), + (-4, -1.0), + (5, 1.0), + (-5, -1.0), + (6, -5.0), + (-6, 5.0), + ], + ); + } + + #[test] + fn test_pdg_mc_ids_to_evol() { + assert_eq!(pdg_mc_ids_to_evol(&[]), None); + + // check photon + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 1.0), + (-6, 0.0), + (-5, 0.0), + (-4, 0.0), + (-3, 0.0), + (-2, 0.0), + (-1, 0.0), + (21, 0.0), + (1, 0.0), + (2, 0.0), + (3, 0.0), + (4, 0.0), + (5, 0.0), + (6, 0.0), + ]), + Some(22) + ); + + // check gluon + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, 0.0), + (-3, 0.0), + (-2, 0.0), + (-1, 0.0), + (21, 1.0), + (1, 0.0), + (2, 0.0), + (3, 0.0), + (4, 0.0), + (5, 0.0), + (6, 0.0), + ]), + Some(21) + ); + + // check singlet + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 1.0), + (-5, 1.0), + (-4, 1.0), + (-3, 1.0), + (-2, 1.0), + (-1, 1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, 1.0), + (5, 1.0), + (6, 1.0), + ]), + Some(100) + ); + + // check T3 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, 0.0), + (-3, 0.0), + (-2, 1.0), + (-1, -1.0), + (21, 0.0), + (1, -1.0), + (2, 1.0), + (3, 0.0), + (4, 0.0), + (5, 0.0), + (6, 0.0), + ]), + Some(103) + ); + + // check T8 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, 0.0), + (-3, -2.0), + (-2, 1.0), + (-1, 1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, -2.0), + (4, 0.0), + (5, 0.0), + (6, 0.0), + ]), + Some(108) + ); + + // check T15 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, -3.0), + (-3, 1.0), + (-2, 1.0), + (-1, 1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, -3.0), + (5, 0.0), + (6, 0.0), + ]), + Some(115) + ); + + // check T24 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, -4.0), + (-4, 1.0), + (-3, 1.0), + (-2, 1.0), + (-1, 1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, 1.0), + (5, -4.0), + (6, 0.0), + ]), + Some(124) + ); + + // 
check T35 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, -5.0), + (-5, 1.0), + (-4, 1.0), + (-3, 1.0), + (-2, 1.0), + (-1, 1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, 1.0), + (5, 1.0), + (6, -5.0), + ]), + Some(135) + ); + + // check valence + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, -1.0), + (-5, -1.0), + (-4, -1.0), + (-3, -1.0), + (-2, -1.0), + (-1, -1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, 1.0), + (5, 1.0), + (6, 1.0), + ]), + Some(200) + ); + + // check V3 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, 0.0), + (-3, 0.0), + (-2, -1.0), + (-1, 1.0), + (21, 0.0), + (1, -1.0), + (2, 1.0), + (3, 0.0), + (4, 0.0), + (5, 0.0), + (6, 0.0), + ]), + Some(203) + ); + + // check V8 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, 0.0), + (-3, 2.0), + (-2, -1.0), + (-1, -1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, -2.0), + (4, 0.0), + (5, 0.0), + (6, 0.0), + ]), + Some(208) + ); + + // check V15 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 0.0), + (-4, 3.0), + (-3, -1.0), + (-2, -1.0), + (-1, -1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, -3.0), + (5, 0.0), + (6, 0.0), + ]), + Some(215) + ); + + // check V24 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 0.0), + (-5, 4.0), + (-4, -1.0), + (-3, -1.0), + (-2, -1.0), + (-1, -1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, 1.0), + (5, -4.0), + (6, 0.0), + ]), + Some(224) + ); + + // check V35 + assert_eq!( + pdg_mc_ids_to_evol(&[ + (22, 0.0), + (-6, 5.0), + (-5, -1.0), + (-4, -1.0), + (-3, -1.0), + (-2, -1.0), + (-1, -1.0), + (21, 0.0), + (1, 1.0), + (2, 1.0), + (3, 1.0), + (4, 1.0), + (5, 1.0), + (6, -5.0), + ]), + Some(235) + ); + } + + #[test] + fn pid_basis_guess() { + assert_eq!( + PidBasis::guess(&[22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]), + PidBasis::Pdg, + ); + + assert_eq!( + PidBasis::guess(&[ + 22, 100, 
200, 21, 100, 103, 108, 115, 124, 135, 203, 208, 215, 224, 235 + ]), + PidBasis::Evol, + ); + } + + #[test] + fn inverse_inverse_evol() { + for pid in [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6] { + let result = Channel::translate( + &Channel::translate(&channel![pid, pid, 1.0], &pdg_mc_pids_to_evol), + &evol_to_pdg_mc_ids, + ); + + assert_eq!(result.entry().len(), 1); + assert_eq!(result.entry()[0].0, pid); + assert_eq!(result.entry()[0].1, pid); + assert_approx_eq!(f64, result.entry()[0].2, 1.0, ulps = 8); + } + } +} diff --git a/pineappl_v0/src/sparse_array3.rs b/pineappl_v0/src/sparse_array3.rs new file mode 100644 index 000000000..1debae7e8 --- /dev/null +++ b/pineappl_v0/src/sparse_array3.rs @@ -0,0 +1,1135 @@ +//! Module containing the `SparseArray3` struct. + +use ndarray::{ArrayView3, Axis}; +use serde::{Deserialize, Serialize}; +use std::iter; +use std::mem; +use std::ops::{Index, IndexMut, Range}; +use std::slice::{Iter, IterMut}; + +/// Struct for a sparse three-dimensional array, which is optimized for the sparsity of +/// interpolation grids. 
+#[derive(Clone, Deserialize, Serialize)] +pub struct SparseArray3 { + entries: Vec, + indices: Vec<(usize, usize)>, + start: usize, + dimensions: (usize, usize, usize), +} + +// TODO: write panic messages + +impl Index<[usize; 3]> for SparseArray3 { + type Output = T; + + fn index(&self, mut index: [usize; 3]) -> &Self::Output { + // index too small + assert!(index[0] >= self.start); + + let dim1 = if self.dimensions.1 > self.dimensions.2 { + index.swap(1, 2); + self.dimensions.2 + } else { + self.dimensions.1 + }; + + // index too large + assert!(index[0] < (self.start + (self.indices.len() - 1) / dim1)); + + // index too large + assert!(index[1] < dim1); + + let forward = dim1 * (index[0] - self.start) + index[1]; + let indices_a = &self.indices[forward]; + let indices_b = &self.indices[forward + 1]; + + let zeros_left = indices_a.0; + let offset = indices_a.1; + let non_zeros = indices_b.1 - offset; + + // index too small + assert!(index[2] >= zeros_left); + + // index too large + assert!(index[2] < (non_zeros + zeros_left)); + + &self.entries[offset + (index[2] - zeros_left)] + } +} + +impl IndexMut<[usize; 3]> for SparseArray3 { + fn index_mut(&mut self, mut index: [usize; 3]) -> &mut Self::Output { + let dim1 = if self.dimensions.1 > self.dimensions.2 { + index.swap(1, 2); + self.dimensions.2 + } else { + self.dimensions.1 + }; + + let max_index0 = self.start + (self.indices.len() - 1) / dim1; + + if index[0] < self.start { + let elements = self.start - index[0]; + self.start = index[0]; + self.indices + .splice(0..0, iter::repeat((0, 0)).take(elements * dim1)); + } else if index[0] >= self.dimensions.0 { + panic!(); + } else if self.entries.is_empty() || (index[0] >= max_index0) { + let elements = if self.entries.is_empty() { + self.start = index[0]; + 1 + } else { + index[0] - max_index0 + 1 + }; + + let insert = self.indices.len() - 1; + self.indices.splice( + insert..insert, + iter::repeat((0, self.indices.last().unwrap().1)).take(elements * dim1), + ); 
+ } + + // index too large + assert!(index[1] < dim1); + + let forward = dim1 * (index[0] - self.start) + index[1]; + let indices_a = &self.indices[forward]; + let indices_b = &self.indices[forward + 1]; + + let zeros_left = indices_a.0; + let offset = indices_a.1; + let non_zeros = indices_b.1 - offset; + + let elements; + let insert; + + if index[2] < zeros_left { + elements = zeros_left - index[2]; + insert = offset; + self.indices[forward].0 -= elements; + } else if index[2] >= self.dimensions.2.max(self.dimensions.1) { + panic!(); + } else if non_zeros == 0 { + elements = 1; + insert = offset; + self.indices[forward].0 = index[2]; + } else if index[2] >= (zeros_left + non_zeros) { + elements = index[2] - (zeros_left + non_zeros) + 1; + insert = offset + non_zeros; + } else { + return &mut self.entries[offset + (index[2] - zeros_left)]; + } + + self.entries + .splice(insert..insert, iter::repeat(T::default()).take(elements)); + self.indices + .iter_mut() + .skip(forward + 1) + .for_each(|ix| ix.1 += elements); + + &mut self.entries[offset + (index[2] - self.indices[forward].0)] + } +} + +/// Immutable iterator over the elements of a `SparseArray3`. 
+pub struct IndexedIter<'a, T> { + entry_iter: Iter<'a, T>, + index_iter: Iter<'a, (usize, usize)>, + offset_a: Option<&'a (usize, usize)>, + offset_b: Option<&'a (usize, usize)>, + tuple: (usize, usize, usize), + dimensions: (usize, usize, usize), +} + +impl<'a, T: Copy + Default + PartialEq> Iterator for IndexedIter<'a, T> { + type Item = ((usize, usize, usize), T); + + fn next(&mut self) -> Option { + if let Some(element) = self.entry_iter.next() { + let offset_a = self.offset_a.unwrap(); + let offset_b = self.offset_b.unwrap(); + + if self.dimensions.1 > self.dimensions.2 { + self.tuple.1 = self.tuple.1.max(offset_a.0); + + if self.tuple.1 >= (offset_b.1 - offset_a.1 + offset_a.0) { + loop { + self.offset_a = self.offset_b; + self.offset_b = self.index_iter.next(); + + let offset_a = self.offset_a.unwrap(); + let offset_b = self.offset_b?; + + self.tuple.2 += 1; + + if self.tuple.2 >= self.dimensions.2 { + self.tuple.0 += 1; + self.tuple.2 = 0; + } + + if (offset_b.1 - offset_a.1) != 0 { + self.tuple.1 = offset_a.0; + break; + } + } + } + + if *element == T::default() { + self.tuple.1 += 1; + self.next() + } else { + let result = Some((self.tuple, *element)); + self.tuple.1 += 1; + result + } + } else { + self.tuple.2 = self.tuple.2.max(offset_a.0); + + if self.tuple.2 >= (offset_b.1 - offset_a.1 + offset_a.0) { + loop { + self.offset_a = self.offset_b; + self.offset_b = self.index_iter.next(); + + let offset_a = self.offset_a.unwrap(); + let offset_b = self.offset_b?; + + self.tuple.1 += 1; + + if self.tuple.1 >= self.dimensions.1 { + self.tuple.0 += 1; + self.tuple.1 = 0; + } + + if (offset_b.1 - offset_a.1) != 0 { + self.tuple.2 = offset_a.0; + break; + } + } + } + + if *element == T::default() { + self.tuple.2 += 1; + self.next() + } else { + let result = Some((self.tuple, *element)); + self.tuple.2 += 1; + result + } + } + } else { + None + } + } +} + +impl SparseArray3 { + /// Constructs a new and empty `SparseArray3` with the specified dimensions `nx`, 
`ny` and + /// `nz`. + #[must_use] + pub fn new(nx: usize, ny: usize, nz: usize) -> Self { + Self { + entries: vec![], + indices: vec![(0, 0)], + start: 0, + dimensions: (nx, ny, nz), + } + } + + /// Converts `array` into a `SparseArray3`. + #[must_use] + pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { + let (_, ny, nz) = array.dim(); + let array = if ny > nz { + let mut array = array; + array.swap_axes(1, 2); + array + } else { + array + }; + + let dimensions = (xsize, ny, nz); + let mut entries = vec![]; + let mut indices = vec![]; + + let mut offset = 0; + + for array2 in array.axis_iter(Axis(0)) { + for array1 in array2.axis_iter(Axis(0)) { + let start = array1.iter().position(|x| *x != T::default()); + + if let Some(start) = start { + let end = array1.iter().enumerate().skip(start).fold( + start, + |last_non_zero, (index, x)| { + if *x == T::default() { + last_non_zero + } else { + index + } + }, + ) + 1; + indices.push((start, offset)); + offset += end - start; + entries.splice( + entries.len()..entries.len(), + array1.iter().skip(start).take(end - start).cloned(), + ); + } else { + indices.push((0, offset)); + } + } + } + + indices.push((0, offset)); + + Self { + entries, + indices, + start: xstart, + dimensions, + } + } + + /// Clear the contents of the array. + pub fn clear(&mut self) { + self.entries.clear(); + self.indices.clear(); + self.indices.push((0, 0)); + self.start = 0; + } + + /// Returns the dimensions of this array. + #[must_use] + pub const fn dimensions(&self) -> (usize, usize, usize) { + self.dimensions + } + + /// Returns the overhead for storing the explicitly zero and non-zero elements. + #[must_use] + pub fn overhead(&self) -> usize { + (2 * self.indices.len() * mem::size_of::()) / mem::size_of::() + } + + /// Returns the number of default (zero) elements in this array. 
+ #[must_use] + pub fn zeros(&self) -> usize { + self.entries.iter().filter(|x| **x == T::default()).count() + } + + /// Returns the number of non-default (non-zero) elements in this array. + #[must_use] + pub fn len(&self) -> usize { + self.entries.iter().filter(|x| **x != T::default()).count() + } + + /// Returns `true` if the array contains no element. + #[must_use] + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } + + /// Return an indexed `Iterator` over the non-zero elements of this array. The iterator element + /// type is `((usize, usize, usize), T)`. + #[must_use] + pub fn indexed_iter(&self) -> IndexedIter<'_, T> { + let mut result = IndexedIter { + entry_iter: self.entries.iter(), + index_iter: self.indices.iter(), + offset_a: None, + offset_b: None, + tuple: (self.start, 0, 0), + dimensions: self.dimensions, + }; + + result.offset_a = result.index_iter.next(); + result.offset_b = result.index_iter.next(); + + result + } + + /// Return an iterator over the elements, including zero elements. + pub fn iter_mut(&mut self) -> IterMut<'_, T> { + self.entries.iter_mut() + } + + /// Return a half-open interval of indices that are filled for the first dimension. + #[must_use] + pub fn x_range(&self) -> Range { + self.start + ..(self.start + (self.indices.len() - 1) / self.dimensions.1.min(self.dimensions.2)) + } + + /// Increase the number of entries of the x-axis by one by inserting zeros at `x`. + pub fn increase_x_at(&mut self, x: usize) { + let dim1 = self.dimensions.1.min(self.dimensions.2); + let nx = (self.indices.len() - 1) / dim1; + + if x <= self.start { + self.start += 1; + } else if x < self.start + nx { + let at = (x - self.start) * dim1; + let offset = self.indices[at].1; + self.indices + .splice(at..at, iter::repeat((0, offset)).take(dim1)); + } else if x <= self.dimensions.0 { + // nothing to do here + } else { + self.dimensions.0 = x; + } + + self.dimensions.0 += 1; + } + + /// Removes all elements with the specified x coordinate. 
+ /// + /// # Panics + /// + /// TODO + pub fn remove_x(&mut self, x: usize) { + let dim1 = self.dimensions.1.min(self.dimensions.2); + let nx = (self.indices.len() - 1) / dim1; + + assert!((x >= self.start) && (x < self.start + nx)); + + let index_a = (x - self.start) * dim1; + let index_b = (x - self.start + 1) * dim1; + let offset_a = self.indices[index_a].1; + let offset_b = self.indices[index_b].1; + + self.entries.drain(offset_a..offset_b); + self.indices + .iter_mut() + .skip(index_b) + .for_each(|o| o.1 -= offset_b - offset_a); + + if (x != self.start) && (x != (self.start + nx - 1)) { + self.indices + .splice(index_a..index_b, iter::repeat((0, offset_a)).take(dim1)); + } else { + if x == self.start { + self.start += 1; + } + + self.indices.drain(index_a..index_b); + } + + if self.indices.last().unwrap().1 == 0 { + self.clear(); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ndarray::Array3; + + #[test] + fn index_access() { + let mut array = SparseArray3::new(40, 50, 50); + + // after creation the array must be empty + assert_eq!(array.x_range(), 0..0); + assert_eq!(array.overhead(), 2); + assert!(array.is_empty()); + + // insert the first element + array[[5, 10, 10]] = 1.0; + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 1); + assert_eq!(array.zeros(), 0); + assert_eq!(array.x_range(), 5..6); + assert_eq!(array.overhead(), 102); + assert!(!array.is_empty()); + + // insert an element after the first one + array[[8, 10, 10]] = 2.0; + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 2); + assert_eq!(array.zeros(), 0); + assert_eq!(array.x_range(), 5..9); + assert_eq!(array.overhead(), 402); + assert!(!array.is_empty()); + + // insert an element before the first one + array[[1, 10, 10]] = 3.0; + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 3); + assert_eq!(array.zeros(), 0); + 
assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + array[[1, 10, 11]] = 4.0; + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 4); + assert_eq!(array.zeros(), 0); + assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + array[[1, 10, 9]] = 5.0; + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 5); + assert_eq!(array.zeros(), 0); + assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + array[[1, 10, 0]] = 6.0; + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 6); + assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 2]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.zeros(), 8); + + // insert where previously a zero was + array[[1, 10, 2]] = 7.0; + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 7); + assert_eq!(array.x_range(), 1..9); 
+ assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.zeros(), 7); + + array[[1, 15, 2]] = 8.0; + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 8); + assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.zeros(), 7); + + array[[1, 15, 4]] = 9.0; + assert_eq!(array[[1, 15, 4]], 9.0); + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 9); + assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 15, 3]], 0.0); + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 
0.0); + assert_eq!(array.zeros(), 8); + + array[[1, 15, 0]] = 10.0; + assert_eq!(array[[1, 15, 0]], 10.0); + assert_eq!(array[[1, 15, 4]], 9.0); + assert_eq!(array[[1, 15, 2]], 8.0); + assert_eq!(array[[1, 10, 2]], 7.0); + assert_eq!(array[[1, 10, 0]], 6.0); + assert_eq!(array[[1, 10, 9]], 5.0); + assert_eq!(array[[1, 10, 11]], 4.0); + assert_eq!(array[[1, 10, 10]], 3.0); + assert_eq!(array[[8, 10, 10]], 2.0); + assert_eq!(array[[5, 10, 10]], 1.0); + assert_eq!(array.len(), 10); + assert_eq!(array.x_range(), 1..9); + assert_eq!(array.overhead(), 802); + assert!(!array.is_empty()); + + // check zeros + assert_eq!(array[[1, 15, 1]], 0.0); + assert_eq!(array[[1, 15, 3]], 0.0); + assert_eq!(array[[1, 10, 1]], 0.0); + assert_eq!(array[[1, 10, 3]], 0.0); + assert_eq!(array[[1, 10, 4]], 0.0); + assert_eq!(array[[1, 10, 5]], 0.0); + assert_eq!(array[[1, 10, 6]], 0.0); + assert_eq!(array[[1, 10, 7]], 0.0); + assert_eq!(array[[1, 10, 8]], 0.0); + assert_eq!(array.zeros(), 9); + } + + #[test] + #[should_panic(expected = "explicit panic")] + fn index_mut_panic_dim0() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[40, 0, 50]] = 1.0; + } + + #[test] + #[should_panic(expected = "assertion failed: index[1] < dim1")] + fn index_mut_panic_dim1() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[0, 50, 0]] = 1.0; + } + + #[test] + #[should_panic(expected = "explicit panic")] + fn index_mut_panic_dim2() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[0, 0, 50]] = 1.0; + } + + #[test] + #[should_panic(expected = "assertion failed: index[0] >= self.start")] + fn index_panic_dim0_0() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 0.0); + } + + #[test] + #[should_panic( + expected = "assertion failed: index[0] < (self.start + (self.indices.len() - 1) / dim1)" + )] + fn index_panic_dim0_1() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[1, 0, 0]] = 1.0; + + 
assert_eq!(array[[2, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "assertion failed: index[1] < dim1")] + fn index_panic_dim1() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[1, 0, 0]] = 1.0; + + assert_eq!(array[[1, 50, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "assertion failed: index[2] >= zeros_left")] + fn index_panic_dim2_0() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[0, 0, 1]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 0.0); + } + + #[test] + #[should_panic(expected = "assertion failed: index[2] < (non_zeros + zeros_left)")] + fn index_panic_dim2_1() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[0, 0, 1]] = 1.0; + + assert_eq!(array[[0, 0, 2]], 0.0); + } + + #[test] + fn indexed_iter() { + let mut array = SparseArray3::new(40, 50, 50); + + // check empty iterator + assert_eq!(array.indexed_iter().next(), None); + + // insert an element + array[[2, 3, 4]] = 1.0; + + let mut iter = array.indexed_iter(); + + // check iterator with one element + assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); + assert_eq!(iter.next(), None); + + // insert another element + array[[2, 3, 6]] = 2.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); + assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); + assert_eq!(iter.next(), None); + + // insert yet another element + array[[4, 5, 7]] = 3.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); + assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); + assert_eq!(iter.next(), Some(((4, 5, 7), 3.0))); + assert_eq!(iter.next(), None); + + // insert at the very first position + array[[2, 0, 0]] = 4.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((2, 0, 0), 4.0))); + assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); + assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); + assert_eq!(iter.next(), Some(((4, 5, 7), 3.0))); + assert_eq!(iter.next(), None); + } + + 
#[test] + fn iter_mut() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[3, 5, 1]] = 1.0; + array[[7, 8, 9]] = 2.0; + array[[7, 8, 13]] = 3.0; + array[[9, 1, 4]] = 4.0; + + let mut iter = array.iter_mut(); + + assert_eq!(iter.next(), Some(&mut 1.0)); + assert_eq!(iter.next(), Some(&mut 2.0)); + assert_eq!(iter.next(), Some(&mut 0.0)); + assert_eq!(iter.next(), Some(&mut 0.0)); + assert_eq!(iter.next(), Some(&mut 0.0)); + assert_eq!(iter.next(), Some(&mut 3.0)); + assert_eq!(iter.next(), Some(&mut 4.0)); + assert_eq!(iter.next(), None); + } + + #[test] + fn clear() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[3, 5, 1]] = 1.0; + array[[7, 8, 9]] = 2.0; + array[[9, 1, 4]] = 3.0; + + assert!(!array.is_empty()); + assert_eq!(array.len(), 3); + assert_eq!(array.zeros(), 0); + assert_eq!(array.x_range(), 3..10); + + array.clear(); + + assert!(array.is_empty()); + assert_eq!(array.len(), 0); + assert_eq!(array.zeros(), 0); + assert_eq!(array.x_range(), 0..0); + } + + #[test] + fn remove_x() { + let mut array = SparseArray3::new(40, 50, 50); + + array[[1, 5, 6]] = 1.0; + array[[1, 6, 5]] = 2.0; + array[[1, 2, 3]] = 3.0; + array[[1, 9, 3]] = 4.0; + array[[1, 8, 4]] = 5.0; + array[[2, 0, 0]] = 6.0; + array[[3, 4, 5]] = 7.0; + array[[3, 4, 6]] = 8.0; + array[[3, 4, 7]] = 9.0; + array[[4, 0, 2]] = 10.0; + array[[4, 0, 3]] = 11.0; + array[[5, 0, 1]] = 12.0; + array[[5, 0, 2]] = 13.0; + + assert_eq!(array.x_range(), 1..6); + assert_eq!(array.len(), 13); + assert_eq!(array.zeros(), 0); + + // remove the first five entries + array.remove_x(1); + + assert_eq!(array.x_range(), 2..6); + assert_eq!(array.len(), 8); + assert_eq!(array.zeros(), 0); + + // remove the last two entries + array.remove_x(5); + + assert_eq!(array.x_range(), 2..5); + assert_eq!(array.len(), 6); + assert_eq!(array.zeros(), 0); + + // remove the from the middle + array.remove_x(3); + + assert_eq!(array.x_range(), 2..5); + assert_eq!(array.len(), 3); + assert_eq!(array.zeros(), 0); + 
+ // remove also the rest + array.remove_x(4); + array.remove_x(2); + + assert_eq!(array.x_range(), 0..0); + assert_eq!(array.len(), 0); + assert_eq!(array.zeros(), 0); + } + + #[test] + #[should_panic(expected = "assertion failed: (x >= self.start) && (x < self.start + nx)")] + fn remove_x_panic() { + let mut array = SparseArray3::::new(40, 50, 50); + + array.remove_x(0); + } + + #[test] + fn increase_at_x() { + let mut array = SparseArray3::new(1, 50, 50); + + array[[0, 0, 0]] = 1.0; + array[[0, 2, 3]] = 2.0; + array[[0, 2, 4]] = 3.0; + array[[0, 2, 5]] = 4.0; + array[[0, 3, 0]] = 5.0; + array[[0, 49, 49]] = 6.0; + + assert_eq!(array.dimensions(), (1, 50, 50)); + assert_eq!(array[[0, 0, 0]], 1.0); + assert_eq!(array[[0, 2, 3]], 2.0); + assert_eq!(array[[0, 2, 4]], 3.0); + assert_eq!(array[[0, 2, 5]], 4.0); + assert_eq!(array[[0, 3, 0]], 5.0); + assert_eq!(array[[0, 49, 49]], 6.0); + + // increase at the end + array.increase_x_at(1); + + assert_eq!(array.dimensions(), (2, 50, 50)); + assert_eq!(array[[0, 0, 0]], 1.0); + assert_eq!(array[[0, 2, 3]], 2.0); + assert_eq!(array[[0, 2, 4]], 3.0); + assert_eq!(array[[0, 2, 5]], 4.0); + assert_eq!(array[[0, 3, 0]], 5.0); + assert_eq!(array[[0, 49, 49]], 6.0); + + array[[1, 5, 0]] = 7.0; + array[[1, 5, 5]] = 8.0; + array[[1, 6, 3]] = 9.0; + array[[1, 6, 0]] = 10.0; + + assert_eq!(array[[0, 0, 0]], 1.0); + assert_eq!(array[[0, 2, 3]], 2.0); + assert_eq!(array[[0, 2, 4]], 3.0); + assert_eq!(array[[0, 2, 5]], 4.0); + assert_eq!(array[[0, 3, 0]], 5.0); + assert_eq!(array[[0, 49, 49]], 6.0); + assert_eq!(array[[1, 5, 0]], 7.0); + assert_eq!(array[[1, 5, 5]], 8.0); + assert_eq!(array[[1, 6, 3]], 9.0); + assert_eq!(array[[1, 6, 0]], 10.0); + + // increase at the start + array.increase_x_at(0); + + assert_eq!(array.dimensions(), (3, 50, 50)); + assert_eq!(array[[1, 0, 0]], 1.0); + assert_eq!(array[[1, 2, 3]], 2.0); + assert_eq!(array[[1, 2, 4]], 3.0); + assert_eq!(array[[1, 2, 5]], 4.0); + assert_eq!(array[[1, 3, 0]], 5.0); + 
assert_eq!(array[[1, 49, 49]], 6.0); + assert_eq!(array[[2, 5, 0]], 7.0); + assert_eq!(array[[2, 5, 5]], 8.0); + assert_eq!(array[[2, 6, 3]], 9.0); + assert_eq!(array[[2, 6, 0]], 10.0); + + // increase at the end + array.increase_x_at(3); + + assert_eq!(array.dimensions(), (4, 50, 50)); + assert_eq!(array[[1, 0, 0]], 1.0); + assert_eq!(array[[1, 2, 3]], 2.0); + assert_eq!(array[[1, 2, 4]], 3.0); + assert_eq!(array[[1, 2, 5]], 4.0); + assert_eq!(array[[1, 3, 0]], 5.0); + assert_eq!(array[[1, 49, 49]], 6.0); + assert_eq!(array[[2, 5, 0]], 7.0); + assert_eq!(array[[2, 5, 5]], 8.0); + assert_eq!(array[[2, 6, 3]], 9.0); + assert_eq!(array[[2, 6, 0]], 10.0); + + // increase after the end + array.increase_x_at(5); + + assert_eq!(array.dimensions(), (6, 50, 50)); + assert_eq!(array[[1, 0, 0]], 1.0); + assert_eq!(array[[1, 2, 3]], 2.0); + assert_eq!(array[[1, 2, 4]], 3.0); + assert_eq!(array[[1, 2, 5]], 4.0); + assert_eq!(array[[1, 3, 0]], 5.0); + assert_eq!(array[[1, 49, 49]], 6.0); + assert_eq!(array[[2, 5, 0]], 7.0); + assert_eq!(array[[2, 5, 5]], 8.0); + assert_eq!(array[[2, 6, 3]], 9.0); + assert_eq!(array[[2, 6, 0]], 10.0); + + // increase in the middle + array.increase_x_at(2); + + assert_eq!(array.dimensions(), (7, 50, 50)); + assert_eq!(array[[1, 0, 0]], 1.0); + assert_eq!(array[[1, 2, 3]], 2.0); + assert_eq!(array[[1, 2, 4]], 3.0); + assert_eq!(array[[1, 2, 5]], 4.0); + assert_eq!(array[[1, 3, 0]], 5.0); + assert_eq!(array[[1, 49, 49]], 6.0); + assert_eq!(array[[3, 5, 0]], 7.0); + assert_eq!(array[[3, 5, 5]], 8.0); + assert_eq!(array[[3, 6, 3]], 9.0); + assert_eq!(array[[3, 6, 0]], 10.0); + } + + #[test] + fn from_ndarray() { + let mut ndarray = Array3::zeros((2, 50, 50)); + + ndarray[[0, 4, 3]] = 1.0; + ndarray[[0, 4, 4]] = 2.0; + ndarray[[0, 4, 6]] = 3.0; + ndarray[[0, 5, 1]] = 4.0; + ndarray[[0, 5, 7]] = 5.0; + ndarray[[1, 3, 9]] = 6.0; + + let array = SparseArray3::from_ndarray(ndarray.view(), 3, 40); + + assert_eq!(array[[3, 4, 3]], 1.0); + 
assert_eq!(array[[3, 4, 4]], 2.0); + assert_eq!(array[[3, 4, 5]], 0.0); + assert_eq!(array[[3, 4, 6]], 3.0); + assert_eq!(array[[3, 5, 1]], 4.0); + assert_eq!(array[[3, 5, 2]], 0.0); + assert_eq!(array[[3, 5, 3]], 0.0); + assert_eq!(array[[3, 5, 4]], 0.0); + assert_eq!(array[[3, 5, 5]], 0.0); + assert_eq!(array[[3, 5, 6]], 0.0); + assert_eq!(array[[3, 5, 7]], 5.0); + assert_eq!(array[[4, 3, 9]], 6.0); + + assert_eq!(array.len(), 6); + assert_eq!(array.zeros(), 6); + } + + #[test] + fn test_index_swap() { + let mut array = SparseArray3::new(5, 50, 2); + + array[[0, 0, 0]] = 1.0; + array[[0, 0, 1]] = 2.0; + array[[1, 2, 1]] = 3.0; + array[[1, 5, 1]] = 4.0; + array[[1, 6, 0]] = 5.0; + array[[1, 8, 0]] = 6.0; + array[[1, 9, 0]] = 7.0; + array[[2, 0, 0]] = 8.0; + array[[3, 2, 1]] = 9.0; + array[[3, 4, 0]] = 10.0; + array[[3, 4, 1]] = 11.0; + array[[4, 0, 0]] = 12.0; + array[[4, 0, 1]] = 13.0; + + assert_eq!(array[[0, 0, 0]], 1.0); + assert_eq!(array[[0, 0, 1]], 2.0); + assert_eq!(array[[1, 2, 1]], 3.0); + assert_eq!(array[[1, 5, 1]], 4.0); + assert_eq!(array[[1, 6, 0]], 5.0); + assert_eq!(array[[1, 8, 0]], 6.0); + assert_eq!(array[[1, 9, 0]], 7.0); + assert_eq!(array[[2, 0, 0]], 8.0); + assert_eq!(array[[3, 2, 1]], 9.0); + assert_eq!(array[[3, 4, 0]], 10.0); + assert_eq!(array[[3, 4, 1]], 11.0); + assert_eq!(array[[4, 0, 0]], 12.0); + assert_eq!(array[[4, 0, 1]], 13.0); + + assert_eq!(array.x_range(), 0..5); + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((0, 0, 0), 1.0))); + assert_eq!(iter.next(), Some(((0, 0, 1), 2.0))); + assert_eq!(iter.next(), Some(((1, 6, 0), 5.0))); + assert_eq!(iter.next(), Some(((1, 8, 0), 6.0))); + assert_eq!(iter.next(), Some(((1, 9, 0), 7.0))); + assert_eq!(iter.next(), Some(((1, 2, 1), 3.0))); + assert_eq!(iter.next(), Some(((1, 5, 1), 4.0))); + assert_eq!(iter.next(), Some(((2, 0, 0), 8.0))); + assert_eq!(iter.next(), Some(((3, 4, 0), 10.0))); + assert_eq!(iter.next(), Some(((3, 2, 1), 9.0))); + 
assert_eq!(iter.next(), Some(((3, 4, 1), 11.0))); + assert_eq!(iter.next(), Some(((4, 0, 0), 12.0))); + assert_eq!(iter.next(), Some(((4, 0, 1), 13.0))); + assert_eq!(iter.next(), None); + + let mut ndarray = Array3::zeros((5, 50, 2)); + + ndarray[[0, 0, 0]] = 1.0; + ndarray[[0, 0, 1]] = 2.0; + ndarray[[1, 2, 1]] = 3.0; + ndarray[[1, 5, 1]] = 4.0; + ndarray[[1, 6, 0]] = 5.0; + ndarray[[1, 8, 0]] = 6.0; + ndarray[[1, 9, 0]] = 7.0; + ndarray[[2, 0, 0]] = 8.0; + ndarray[[3, 2, 1]] = 9.0; + ndarray[[3, 4, 0]] = 10.0; + ndarray[[3, 4, 1]] = 11.0; + ndarray[[4, 0, 0]] = 12.0; + ndarray[[4, 0, 1]] = 13.0; + + let mut other = SparseArray3::from_ndarray(ndarray.view(), 0, 5); + + assert_eq!(other[[0, 0, 0]], 1.0); + assert_eq!(other[[0, 0, 1]], 2.0); + assert_eq!(other[[1, 2, 1]], 3.0); + assert_eq!(other[[1, 5, 1]], 4.0); + assert_eq!(other[[1, 6, 0]], 5.0); + assert_eq!(other[[1, 8, 0]], 6.0); + assert_eq!(other[[1, 9, 0]], 7.0); + assert_eq!(other[[2, 0, 0]], 8.0); + assert_eq!(other[[3, 2, 1]], 9.0); + assert_eq!(other[[3, 4, 0]], 10.0); + assert_eq!(other[[3, 4, 1]], 11.0); + assert_eq!(other[[4, 0, 0]], 12.0); + assert_eq!(other[[4, 0, 1]], 13.0); + + assert_eq!(other.x_range(), 0..5); + + other.remove_x(0); + + assert_eq!(other[[1, 2, 1]], 3.0); + assert_eq!(other[[1, 5, 1]], 4.0); + assert_eq!(other[[1, 6, 0]], 5.0); + assert_eq!(other[[1, 8, 0]], 6.0); + assert_eq!(other[[1, 9, 0]], 7.0); + assert_eq!(other[[2, 0, 0]], 8.0); + assert_eq!(other[[3, 2, 1]], 9.0); + assert_eq!(other[[3, 4, 0]], 10.0); + assert_eq!(other[[3, 4, 1]], 11.0); + assert_eq!(other[[4, 0, 0]], 12.0); + assert_eq!(other[[4, 0, 1]], 13.0); + + other.remove_x(3); + + assert_eq!(other[[1, 2, 1]], 3.0); + assert_eq!(other[[1, 5, 1]], 4.0); + assert_eq!(other[[1, 6, 0]], 5.0); + assert_eq!(other[[1, 8, 0]], 6.0); + assert_eq!(other[[1, 9, 0]], 7.0); + assert_eq!(other[[2, 0, 0]], 8.0); + assert_eq!(other[[4, 0, 0]], 12.0); + assert_eq!(other[[4, 0, 1]], 13.0); + + other.remove_x(4); + + 
assert_eq!(other[[1, 2, 1]], 3.0); + assert_eq!(other[[1, 5, 1]], 4.0); + assert_eq!(other[[1, 6, 0]], 5.0); + assert_eq!(other[[1, 8, 0]], 6.0); + assert_eq!(other[[1, 9, 0]], 7.0); + assert_eq!(other[[2, 0, 0]], 8.0); + } + + // https://github.com/NNPDF/pineappl/issues/220 + #[test] + fn regression_test_220() { + let mut array = SparseArray3::new(1, 2, 4); + + array[[0, 0, 0]] = 1.0; + + assert_eq!(array[[0, 0, 0]], 1.0); + + assert_eq!(array.x_range(), 0..1); + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((0, 0, 0), 1.0))); + assert_eq!(iter.next(), None); + + array.increase_x_at(0); + + array[[0, 0, 0]] = 2.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((0, 0, 0), 2.0))); + assert_eq!(iter.next(), Some(((1, 0, 0), 1.0))); + assert_eq!(iter.next(), None); + + array.increase_x_at(1); + + array[[1, 0, 0]] = 3.0; + + let mut iter = array.indexed_iter(); + + assert_eq!(iter.next(), Some(((0, 0, 0), 2.0))); + assert_eq!(iter.next(), Some(((1, 0, 0), 3.0))); + assert_eq!(iter.next(), Some(((2, 0, 0), 1.0))); + assert_eq!(iter.next(), None); + } +} diff --git a/pineappl_v0/src/subgrid.rs b/pineappl_v0/src/subgrid.rs new file mode 100644 index 000000000..1dce12abb --- /dev/null +++ b/pineappl_v0/src/subgrid.rs @@ -0,0 +1,362 @@ +//! Module containing the trait `Subgrid` and supporting structs. + +use super::empty_subgrid::EmptySubgridV1; +use super::grid::Ntuple; +use super::import_only_subgrid::{ImportOnlySubgridV1, ImportOnlySubgridV2}; +use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; +use super::ntuple_subgrid::NtupleSubgridV1; +use enum_dispatch::enum_dispatch; +use ndarray::Array3; +use serde::{Deserialize, Serialize}; +use std::borrow::Cow; + +/// Enum which lists all possible `Subgrid` variants possible. 
+#[enum_dispatch(Subgrid)] +#[derive(Clone, Deserialize, Serialize)] +pub enum SubgridEnum { + // WARNING: never change the order or content of this enum, only add to the end of it + /// Lagrange-interpolation subgrid. + LagrangeSubgridV1, + /// N-tuple subgrid. + NtupleSubgridV1, + /// Lagrange-interpolation subgrid. + LagrangeSparseSubgridV1, + /// Lagrange-interpolation subgrid with possibly different x1 and x2 bins. + LagrangeSubgridV2, + /// Import-only sparse subgrid with possibly different x1 and x2 bins. + ImportOnlySubgridV1, + /// Empty subgrid. + EmptySubgridV1, + /// Same as [`ImportOnlySubgridV1`], but with support for different renormalization and + /// factorization scales choices. + ImportOnlySubgridV2, +} + +/// Structure denoting renormalization and factorization scale values. +#[derive(Debug, Deserialize, Clone, PartialEq, PartialOrd, Serialize)] +pub struct Mu2 { + /// The (squared) renormalization scale value. + pub ren: f64, + /// The (squared) factorization scale value. + pub fac: f64, +} + +/// Size-related statistics for a subgrid. +#[derive(Debug, Eq, PartialEq)] +pub struct Stats { + /// Number of possible total entries for a subgrid. This number is the product of the lengths + /// of the slices returned by [`Subgrid::mu2_grid`], [`Subgrid::x1_grid`] and + /// [`Subgrid::x2_grid`]. + pub total: usize, + /// Number of allocated entries for a subgrid. This number is always smaller or equal than + /// [`Self::total`]. + pub allocated: usize, + /// Number of allocated zero entries for a subgrid. This number is always smaller or equal than + /// [`Self::allocated`] and contributes to [`Self::overhead`]. + pub zeros: usize, + /// The overhead of a [`Subgrid`] is the size of internal data not used to store grid values. + pub overhead: usize, + /// This value multiplied with any other member of this struct gives an approximate size in + /// bytes. + pub bytes_per_value: usize, +} + +/// Trait each subgrid must implement. 
+#[enum_dispatch] +pub trait Subgrid { + /// Return a slice of [`Mu2`] values corresponding to the (squared) renormalization and + /// factorization values of the grid. If the subgrid does not use a grid, this method should + /// return an empty slice. + fn mu2_grid(&self) -> Cow<[Mu2]>; + + /// Return a slice of values of `x1`. If the subgrid does not use a grid, this method should + /// return an empty slice. + fn x1_grid(&self) -> Cow<[f64]>; + + /// Return a slice of values of `x2`. If the subgrid does not use a grid, this method should + /// return an empty slice. + fn x2_grid(&self) -> Cow<[f64]>; + + /// Convolute the subgrid with a luminosity function, which takes indices as arguments that + /// correspond to the entries given in the slices `x1`, `x2` and `mu2`. + fn convolve( + &self, + x1: &[f64], + x2: &[f64], + mu2: &[Mu2], + lumi: &mut dyn FnMut(usize, usize, usize) -> f64, + ) -> f64; + + /// Fills the subgrid with `weight` for the parton momentum fractions `x1` and `x2`, and the + /// scale `q2`. Filling is currently only support where both renormalization and factorization + /// scale have the same value. + fn fill(&mut self, ntuple: &Ntuple); + + /// Returns true if `fill` was never called for this grid. + fn is_empty(&self) -> bool; + + /// Merges `other` into this subgrid. + fn merge(&mut self, other: &mut SubgridEnum, transpose: bool); + + /// Scale the subgrid by `factor`. + fn scale(&mut self, factor: f64); + + /// Assumes that the initial states for this grid are the same and uses this to optimize the + /// grid by getting rid of almost half of the entries. + fn symmetrize(&mut self); + + /// Returns an empty copy of the current subgrid. + fn clone_empty(&self) -> SubgridEnum; + + /// Return an iterator over all non-zero elements of the subgrid. + fn indexed_iter(&self) -> SubgridIndexedIter; + + /// Return statistics for this subgrid. + fn stats(&self) -> Stats; + + /// Return the static (single) scale, if this subgrid has one. 
+ fn static_scale(&self) -> Option; +} + +// this is needed in the Python interface +impl From<&SubgridEnum> for Array3 { + fn from(subgrid: &SubgridEnum) -> Self { + let mut result = Self::zeros(( + subgrid.mu2_grid().len(), + subgrid.x1_grid().len(), + subgrid.x2_grid().len(), + )); + + for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { + result[[imu2, ix1, ix2]] = value; + } + + result + } +} + +/// Type to iterate over the non-zero contents of a subgrid. The tuple contains the indices of the +/// `mu2_grid`, the `x1_grid` and finally the `x2_grid`. +pub type SubgridIndexedIter<'a> = Box + 'a>; + +/// Subgrid creation parameters for subgrids that perform interpolation. +#[derive(Clone, Debug, Deserialize, Serialize)] +pub struct SubgridParams { + q2_bins: usize, + q2_max: f64, + q2_min: f64, + q2_order: usize, + reweight: bool, + x_bins: usize, + x_max: f64, + x_min: f64, + x_order: usize, +} + +impl Default for SubgridParams { + fn default() -> Self { + Self { + q2_bins: 40, + q2_max: 1e8, + q2_min: 1e2, + q2_order: 3, + reweight: true, + x_bins: 50, + x_max: 1.0, + x_min: 2e-7, + x_order: 3, + } + } +} + +impl SubgridParams { + /// Returns the number of bins for the $Q^2$ axis. + #[must_use] + pub const fn q2_bins(&self) -> usize { + self.q2_bins + } + + /// Returns the upper limit of the $Q^2$ axis. + #[must_use] + pub const fn q2_max(&self) -> f64 { + self.q2_max + } + + /// Returns the lower limit of the $Q^2$ axis. + #[must_use] + pub const fn q2_min(&self) -> f64 { + self.q2_min + } + + /// Returns the interpolation order for the $Q^2$ axis. + #[must_use] + pub const fn q2_order(&self) -> usize { + self.q2_order + } + + /// Returns whether reweighting is enabled or not. + #[must_use] + pub const fn reweight(&self) -> bool { + self.reweight + } + + /// Sets the number of bins for the $Q^2$ axis. + pub fn set_q2_bins(&mut self, q2_bins: usize) { + self.q2_bins = q2_bins; + } + + /// Sets the upper limit of the $Q^2$ axis. 
+ pub fn set_q2_max(&mut self, q2_max: f64) { + self.q2_max = q2_max; + } + + /// Sets the lower limit of the $Q^2$ axis. + pub fn set_q2_min(&mut self, q2_min: f64) { + self.q2_min = q2_min; + } + + /// Sets the interpolation order for the $Q^2$ axis. + pub fn set_q2_order(&mut self, q2_order: usize) { + self.q2_order = q2_order; + } + + /// Sets the reweighting parameter. + pub fn set_reweight(&mut self, reweight: bool) { + self.reweight = reweight; + } + + /// Sets the number of bins for the $x$ axes. + pub fn set_x_bins(&mut self, x_bins: usize) { + self.x_bins = x_bins; + } + + /// Sets the upper limit of the $x$ axes. + pub fn set_x_max(&mut self, x_max: f64) { + self.x_max = x_max; + } + + /// Sets the lower limit of the $x$ axes. + pub fn set_x_min(&mut self, x_min: f64) { + self.x_min = x_min; + } + + /// Sets the interpolation order for the $x$ axes. + pub fn set_x_order(&mut self, x_order: usize) { + self.x_order = x_order; + } + + /// Returns the number of bins for the $x$ axes. + #[must_use] + pub const fn x_bins(&self) -> usize { + self.x_bins + } + + /// Returns the upper limit of the $x$ axes. + #[must_use] + pub const fn x_max(&self) -> f64 { + self.x_max + } + + /// Returns the lower limit of the $x$ axes. + #[must_use] + pub const fn x_min(&self) -> f64 { + self.x_min + } + + /// Returns the interpolation order for the $x$ axes. + #[must_use] + pub const fn x_order(&self) -> usize { + self.x_order + } +} + +/// Extra grid creation parameters when the limits for `x1` and `x2` are different. 
+#[derive(Deserialize, Serialize)] +pub struct ExtraSubgridParams { + reweight2: bool, + x2_bins: usize, + x2_max: f64, + x2_min: f64, + x2_order: usize, +} + +impl Default for ExtraSubgridParams { + fn default() -> Self { + Self { + reweight2: true, + x2_bins: 50, + x2_max: 1.0, + x2_min: 2e-7, + x2_order: 3, + } + } +} + +impl From<&SubgridParams> for ExtraSubgridParams { + fn from(subgrid_params: &SubgridParams) -> Self { + Self { + reweight2: subgrid_params.reweight(), + x2_bins: subgrid_params.x_bins(), + x2_max: subgrid_params.x_max(), + x2_min: subgrid_params.x_min(), + x2_order: subgrid_params.x_order(), + } + } +} + +impl ExtraSubgridParams { + /// Returns whether reweighting is enabled for the `x2` axis or not. + #[must_use] + pub const fn reweight2(&self) -> bool { + self.reweight2 + } + + /// Sets the reweighting parameter for the `x2` axis. + pub fn set_reweight2(&mut self, reweight2: bool) { + self.reweight2 = reweight2; + } + + /// Sets the number of bins for the `x2` axes. + pub fn set_x2_bins(&mut self, x_bins: usize) { + self.x2_bins = x_bins; + } + + /// Sets the upper limit of the `x2` axes. + pub fn set_x2_max(&mut self, x_max: f64) { + self.x2_max = x_max; + } + + /// Sets the lower limit of the `x2` axes. + pub fn set_x2_min(&mut self, x_min: f64) { + self.x2_min = x_min; + } + + /// Sets the interpolation order for the `x2` axes. + pub fn set_x2_order(&mut self, x_order: usize) { + self.x2_order = x_order; + } + + /// Returns the number of bins for the `x2` axes. + #[must_use] + pub const fn x2_bins(&self) -> usize { + self.x2_bins + } + + /// Returns the upper limit of the `x2` axes. + #[must_use] + pub const fn x2_max(&self) -> f64 { + self.x2_max + } + + /// Returns the lower limit of the `x2` axes. + #[must_use] + pub const fn x2_min(&self) -> f64 { + self.x2_min + } + + /// Returns the interpolation order for the `x2` axes. 
+ #[must_use] + pub const fn x2_order(&self) -> usize { + self.x2_order + } +} diff --git a/pineappl_v0/tests/drell_yan_lo.rs b/pineappl_v0/tests/drell_yan_lo.rs new file mode 100644 index 000000000..0b53bc2b0 --- /dev/null +++ b/pineappl_v0/tests/drell_yan_lo.rs @@ -0,0 +1,822 @@ +use anyhow::Result; +use float_cmp::assert_approx_eq; +use lhapdf::Pdf; +use num_complex::Complex; +use pineappl::bin::BinRemapper; +use pineappl::boc::Order; +use pineappl::channel; +use pineappl::convolutions::LumiCache; +use pineappl::grid::{Grid, GridOptFlags, Ntuple}; +use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; +use rand::Rng; +use rand_pcg::Pcg64; +use std::f64::consts::PI; +use std::io::Cursor; +use std::mem; + +// If equation numbers are given, they are from Stefan Dittmaier and Max Huber's paper: +// 'Radiative corrections to the neutral-current Drell–Yan process in the Standard Model and its +// minimal supersymmetric extension' (https://arxiv.org/abs/0911.2329) + +// Eq. (2.13) - gamma-gamma contribution to DY lepton pair production +fn int_photo(s: f64, t: f64, u: f64) -> f64 { + let alpha0: f64 = 1.0 / 137.03599911; + alpha0.powi(2) / 2.0 / s * (t / u + u / t) +} + +// Eq. (2.12) - quark-antiquark contribution to DY lepton pair production +fn int_quark(s: f64, t: f64, u: f64, qq: f64, i3_wq: f64) -> f64 { + let alphagf: f64 = 1.0 / 132.30818655547878; + let mw = 80.35198454966643; + let mz = 91.15348061918276; + let gw = 2.083799397775285; + let gz = 2.494266378772824; + + // lepton charge + let ql: f64 = -1.0; + // lepton weak isospin + let i3_wl = -0.5; + + // weak mixing angles + let cw = (Complex::new(mw * mw, -mw * gw) / Complex::new(mz * mz, -mz * gz)).sqrt(); + let sw = (Complex::new(1.0, 0.0) - cw * cw).sqrt(); + + // Eq. (2.8) + let chi_z = Complex::new(s, 0.0) / Complex::new(s - mz * mz, mz * gz); + + // Eq. 
(2.7) + let gp_qqz = -sw / cw * qq; + let gm_qqz = (i3_wq - sw * sw * qq) / (sw * cw); + let gp_llz = -sw / cw * ql; + let gm_llz = (i3_wl - sw * sw * ql) / (sw * cw); + + alphagf.powi(2) / 12.0 / s.powi(3) + * (2.0 * qq.powi(2) * ql.powi(2) * (t * t + u * u) + + 2.0 + * qq + * ql + * (((gp_qqz * gp_llz + gm_qqz * gm_llz) * u * u + + (gp_qqz * gm_llz + gm_qqz * gp_llz) * t * t) + * chi_z) + .re + + ((gp_qqz.norm_sqr() * gp_llz.norm_sqr() + gm_qqz.norm_sqr() * gm_llz.norm_sqr()) + * u + * u + + (gp_qqz.norm_sqr() * gm_llz.norm_sqr() + gm_qqz.norm_sqr() * gp_llz.norm_sqr()) + * t + * t) + * chi_z.norm_sqr()) +} + +struct Psp2to2 { + s: f64, + t: f64, + u: f64, + x1: f64, + x2: f64, + jacobian: f64, +} + +fn hadronic_pspgen(rng: &mut impl Rng, mmin: f64, mmax: f64) -> Psp2to2 { + let smin = mmin * mmin; + let smax = mmax * mmax; + + let mut jacobian = 1.0; + + let r1 = rng.gen::(); + let r2 = rng.gen::(); + let tau0 = smin / smax; + let tau = tau0.powf(r1); + let y = tau.powf(1.0 - r2); + let x1 = y; + let x2 = tau / y; + let s = tau * smax; + jacobian *= tau * tau0.ln().powi(2) * r1; + + // theta integration (in the CMS) + let cos_theta = 2.0 * rng.gen::() - 1.0; + jacobian *= 2.0; + + let t = -0.5 * s * (1.0 - cos_theta); + let u = -0.5 * s * (1.0 + cos_theta); + + // phi integration + jacobian *= 2.0 * PI; + + Psp2to2 { + s, + t, + u, + x1, + x2, + jacobian, + } +} + +fn fill_drell_yan_lo_grid( + rng: &mut impl Rng, + calls: u32, + subgrid_type: &str, + dynamic: bool, + reweight: bool, +) -> Result { + let channels = vec![ + // photons + channel![22, 22, 1.0], + // up-antiup + channel![2, -2, 1.0; 4, -4, 1.0], + // antiup-up + channel![-2, 2, 1.0; -4, 4, 1.0], + // down-antidown + channel![1, -1, 1.0; 3, -3, 1.0; 5, -5, 1.0], + // antidown-down + channel![-1, 1, 1.0; -3, 3, 1.0; -5, 5, 1.0], + ]; + + let orders = vec![ + // LO + Order { + alphas: 0, + alpha: 2, + logxir: 0, + logxif: 0, + }, + // NLO QCD - won't be filled + Order { + alphas: 1, + alpha: 2, + 
logxir: 0, + logxif: 0, + }, + Order { + alphas: 1, + alpha: 2, + logxir: 0, + logxif: 1, + }, + ]; + + // we bin in rapidity from 0 to 2.4 in steps of 0.1 + let bin_limits: Vec<_> = (0..=24).map(|x: u32| f64::from(x) / 10.0).collect(); + + let mut subgrid_params = SubgridParams::default(); + let mut extra = ExtraSubgridParams::default(); + + subgrid_params.set_q2_bins(30); + subgrid_params.set_q2_max(1e6); + subgrid_params.set_q2_min(1e2); + subgrid_params.set_q2_order(3); + subgrid_params.set_reweight(reweight); + subgrid_params.set_x_bins(50); + subgrid_params.set_x_max(1.0); + subgrid_params.set_x_min(2e-7); + subgrid_params.set_x_order(3); + extra.set_x2_bins(50); + extra.set_x2_max(1.0); + extra.set_x2_min(2e-7); + extra.set_x2_order(3); + extra.set_reweight2(reweight); + + // create the PineAPPL grid + let mut grid = Grid::with_subgrid_type( + channels, + orders, + bin_limits, + subgrid_params, + extra, + subgrid_type, + )?; + + // in GeV^2 pbarn + let hbarc2 = 3.893793721e8; + + for _ in 0..calls { + // generate a phase-space point + let Psp2to2 { + s, + t, + u, + x1, + x2, + mut jacobian, + } = hadronic_pspgen(rng, 10.0, 7000.0); + + let ptl = (t * u / s).sqrt(); + let mll = s.sqrt(); + let yll = 0.5 * (x1 / x2).ln(); + let ylp = (yll + (0.5 * mll / ptl).acosh()).abs(); + let ylm = (yll - (0.5 * mll / ptl).acosh()).abs(); + + jacobian *= hbarc2 / f64::from(calls); + + // cuts for LO for the invariant-mass slice containing the Z-peak from CMSDY2D11 + if (ptl < 14.0) + || (yll.abs() > 2.4) + || (ylp > 2.4) + || (ylm > 2.4) + || !(60.0..=120.0).contains(&mll) + { + continue; + } + + let q2 = if dynamic { mll * mll } else { 90.0 * 90.0 }; + + // LO photon-photon channel + let weight = jacobian * int_photo(s, t, u); + let pto = 0; + let channel = 0; + + grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); + + // LO up-antiup-type channel + let weight = jacobian * int_quark(s, t, u, 2.0 / 3.0, 0.5); + let pto = 0; + let channel = 1; + + 
grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); + + // LO antiup-up-type channel - swap (x1 <-> x2) and (t <-> u) + let weight = jacobian * int_quark(s, u, t, 2.0 / 3.0, 0.5); + let pto = 0; + let channel = 2; + + grid.fill( + pto, + yll.abs(), + channel, + &Ntuple { + x1: x2, + x2: x1, + q2, + weight, + }, + ); + + // LO down-antidown-type channel + let weight = jacobian * int_quark(s, t, u, -1.0 / 3.0, -0.5); + let pto = 0; + let channel = 3; + + grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); + + // LO antidown-down-type channel - swap (x1 <-> x2) and (t <-> u) + let weight = jacobian * int_quark(s, u, t, -1.0 / 3.0, -0.5); + let pto = 0; + let channel = 4; + + grid.fill( + pto, + yll.abs(), + channel, + &Ntuple { + x1: x2, + x2: x1, + q2, + weight, + }, + ); + } + + Ok(grid) +} + +fn perform_grid_tests( + subgrid_type: &str, + dynamic: bool, + reference: &[f64], + reference_after_ssd: &[f64], + x_grid: &[f64], + reweight: bool, +) -> Result<()> { + let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); + let mut grid = fill_drell_yan_lo_grid(&mut rng, 500_000, subgrid_type, dynamic, reweight)?; + + // TEST 1: `merge` and `scale` + grid.merge(fill_drell_yan_lo_grid( + &mut rng, + 500_000, + subgrid_type, + dynamic, + reweight, + )?)?; + grid.scale(0.5); + + // suppress LHAPDF banners + lhapdf::set_verbosity(0); + + let pdf_set = "NNPDF31_nlo_as_0118_luxqed"; + + let pdf = Pdf::with_setname_and_member(pdf_set, 0)?; + let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut alphas = |_| 0.0; + + // TEST 2: `read` and `write` + let mut file = Cursor::new(Vec::new()); + grid.write(&mut file)?; + file.set_position(0); + mem::drop(grid); + let grid = Grid::read(&mut file)?; + + // TEST 3: `write_lz4` + let mut file = Cursor::new(Vec::new()); + grid.write_lz4(&mut file)?; + file.set_position(0); + mem::drop(grid); + let mut grid = Grid::read(&mut file)?; + + // TEST 4: `scale_by_order` + 
grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); + grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); + + // TEST 5: `convolve` + let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); + let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + + for (result, reference) in bins.iter().zip(reference.iter()) { + assert_approx_eq!(f64, *result, *reference, ulps = 16); + } + + // TEST 5b: `convolve` with `LumiCache::with_two` + let mut xfx1 = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut xfx2 = |id, x, q2| pdf.xfx_q2(id, x, q2); + let mut alphas2 = |_| 0.0; + let mut lumi_cache2 = LumiCache::with_two(2212, &mut xfx1, 2212, &mut xfx2, &mut alphas2); + let bins2 = grid.convolve(&mut lumi_cache2, &[], &[], &[], &[(1.0, 1.0)]); + + for (result, reference) in bins2.iter().zip(reference.iter()) { + assert_approx_eq!(f64, *result, *reference, ulps = 16); + } + + mem::drop(lumi_cache2); + mem::drop(bins2); + + // TEST 6: `convolve_subgrid` + let bins: Vec<_> = (0..grid.bin_info().bins()) + .map(|bin| { + (0..grid.channels().len()) + .map(|channel| { + grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) + .sum() + }) + .sum() + }) + .collect(); + + for (result, reference) in bins.iter().zip(reference.iter()) { + assert_approx_eq!(f64, *result, *reference, ulps = 24); + } + + // TEST 7a: `optimize_using` - tests `symmetrize` for each subgrid type + grid.optimize_using(GridOptFlags::SYMMETRIZE_CHANNELS); + + // TEST 7b: `optimize` + grid.optimize(); + + assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().as_ref(), x_grid); + assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().as_ref(), x_grid); + + // TEST 8: `convolve_subgrid` for the optimized subgrids + let bins: Vec<_> = (0..grid.bin_info().bins()) + .map(|bin| { + (0..grid.channels().len()) + .map(|channel| { + grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) + .sum() + }) + .sum() + }) + .collect(); + + for (result, reference_after_ssd) in 
bins.iter().zip(reference_after_ssd.iter()) { + assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); + } + + let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + + for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { + assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); + } + + // TEST 9: `set_remapper` + + // make a two-dimensional distribution out of it + grid.set_remapper(BinRemapper::new( + vec![0.1; 24], + (0..24) + .flat_map(|index| { + let index = f64::from(index); + vec![(60.0, 120.0), (index * 0.1, (index + 1.0) * 0.1)] + }) + .collect::>(), + )?)?; + + // TEST 10: `merge_bins` + + // trivial merge: first bin is merged into first bin + grid.merge_bins(0..1)?; + + for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { + assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); + } + + // merge two bins with each other + for bin in 0..12 { + grid.merge_bins(bin..bin + 2)?; + } + + let merged2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + + for (result, reference_after_ssd) in merged2.iter().zip( + reference_after_ssd + .chunks_exact(2) + .map(|chunk| chunk.iter().sum::() / 2.0), + ) { + assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 32); + } + + // TEST 11: `delete_bins` + + // delete a few bins from the start + grid.delete_bins(&[0, 1]); + + let deleted = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + + assert_eq!(deleted.len(), 10); + + for (result, reference_after_ssd) in deleted.iter().zip( + reference_after_ssd + .chunks_exact(2) + .map(|chunk| chunk.iter().sum::() / 2.0) + .skip(2), + ) { + assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 32); + } + + // delete a few bins from the ending + grid.delete_bins(&[8, 9]); + + let deleted2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); + + assert_eq!(deleted2.len(), 8); + + for (result, reference_after_ssd) in 
deleted2.iter().zip( + reference_after_ssd + .chunks_exact(2) + .map(|chunk| chunk.iter().sum::() / 2.0) + .skip(2) + .take(6), + ) { + assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 16); + } + + Ok(()) +} + +fn generate_grid(subgrid_type: &str, dynamic: bool, reweight: bool) -> Result { + let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); + fill_drell_yan_lo_grid(&mut rng, 500_000, subgrid_type, dynamic, reweight) +} + +const STATIC_REFERENCE: [f64; 24] = [ + 269.89225458312495, + 266.2168804878282, + 290.0467478314624, + 258.0064918266305, + 239.54186548997865, + 300.17541324377703, + 258.8811221515799, + 238.4064950360576, + 242.5494601562957, + 236.34329830221077, + 230.63243720020898, + 190.03118557029666, + 213.22241277258763, + 177.75582251643334, + 168.07022695390958, + 151.59217101220256, + 143.81017491485716, + 97.09707327367487, + 91.38465432190982, + 73.94464862425771, + 63.859689262732104, + 48.595785504299926, + 27.94818010640803, + 9.343737799674852, +]; + +// numbers are slightly different from `STATIC_REFERENCE` because the static scale detection (SSD) +// removes the Q^2 interpolation error +const STATIC_REFERENCE_AFTER_SSD: [f64; 24] = [ + 269.89240546283145, + 266.2170285827742, + 290.04690782935967, + 258.0066322019259, + 239.54199362567599, + 300.17556967146095, + 258.88125430161745, + 238.40661279174125, + 242.54957458220744, + 236.34340283622035, + 230.63253265929194, + 190.03125927151245, + 213.2224910582812, + 177.7558806305883, + 168.07027678254747, + 151.59220685502618, + 143.81020355582885, + 97.09708758263099, + 91.38466242593998, + 73.94465114837278, + 63.859687905917, + 48.595781165174515, + 27.94817639459665, + 9.343735959243446, +]; + +const DYNAMIC_REFERENCE: [f64; 24] = [ + 269.9662650413552, + 266.2274509325408, + 290.039119030095, + 258.04801305108583, + 239.63561020879277, + 300.2475932636636, + 258.88126161648313, + 238.42709542929794, + 242.5724521248901, + 236.3541498865422, + 
230.64832146047578, + 189.999243811704, + 213.2896760201295, + 177.7280865940876, + 168.0886178280483, + 151.59285700593935, + 143.80051106343882, + 97.0715765765853, + 91.38479915098559, + 73.94713838892906, + 63.85622547082087, + 48.61296466751912, + 27.948404940991445, + 9.342761664545428, +]; + +const DYNAMIC_REFERENCE_NO_REWEIGHT: [f64; 24] = [ + 268.8874311488598, + 265.3130436782233, + 289.0614714145284, + 257.02578172672656, + 238.76378338813032, + 299.1756333696102, + 257.98748703027104, + 237.58099891213897, + 241.75215319366012, + 235.41757682699438, + 229.8671307486547, + 189.47964517011536, + 212.56055728623704, + 176.9591711445695, + 167.56523215346917, + 151.30532185043768, + 143.20366078799765, + 96.67453775369947, + 91.18334210163036, + 73.75879631942671, + 63.629606742074984, + 48.47126745674977, + 27.86328933386428, + 9.32654010506528, +]; + +#[test] +fn drell_yan_lagrange_static() -> Result<()> { + perform_grid_tests( + "LagrangeSubgrid", + false, + &STATIC_REFERENCE, + &STATIC_REFERENCE_AFTER_SSD, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn drell_yan_lagrange_v1_static() -> Result<()> { + perform_grid_tests( + "LagrangeSubgridV1", + false, + &STATIC_REFERENCE, + &STATIC_REFERENCE, // LagrangeSubgridV1 doesn't have static-scale detection + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn drell_yan_lagrange_v2_static() -> Result<()> { + perform_grid_tests( + "LagrangeSubgridV2", + false, + &STATIC_REFERENCE, + &STATIC_REFERENCE_AFTER_SSD, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn drell_yan_lagrange_dynamic() -> Result<()> { + 
perform_grid_tests( + "LagrangeSubgrid", + true, + &DYNAMIC_REFERENCE, + &DYNAMIC_REFERENCE, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn drell_yan_lagrange_v1_dynamic() -> Result<()> { + perform_grid_tests( + "LagrangeSubgridV1", + true, + &DYNAMIC_REFERENCE, + &DYNAMIC_REFERENCE, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn drell_yan_lagrange_v1_dynamic_no_reweight() -> Result<()> { + perform_grid_tests( + "LagrangeSubgridV1", + true, + &DYNAMIC_REFERENCE_NO_REWEIGHT, + &DYNAMIC_REFERENCE_NO_REWEIGHT, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + false, + ) +} + +#[test] +fn drell_yan_lagrange_v2_dynamic() -> Result<()> { + perform_grid_tests( + "LagrangeSubgridV2", + true, + &DYNAMIC_REFERENCE, + &DYNAMIC_REFERENCE, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn drell_yan_lagrange_v2_dynamic_no_reweight() -> Result<()> { + perform_grid_tests( + "LagrangeSubgridV2", + true, + &DYNAMIC_REFERENCE_NO_REWEIGHT, + &DYNAMIC_REFERENCE_NO_REWEIGHT, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + false, + ) +} + +#[test] +fn drell_yan_lagrange_sparse_dynamic() -> Result<()> { + perform_grid_tests( + "LagrangeSparseSubgrid", + true, + &DYNAMIC_REFERENCE, + &DYNAMIC_REFERENCE, + &[ + 0.030521584007828916, + 0.02108918668378717, + 0.014375068581090129, + 0.009699159574043398, + 0.006496206194633799, + 0.004328500638820811, + ], + true, + ) +} + +#[test] +fn 
grid_optimize() -> Result<()> { + let mut grid = generate_grid("LagrangeSubgridV2", false, false)?; + + assert_eq!(grid.orders().len(), 3); + assert_eq!(grid.channels().len(), 5); + assert!(matches!( + grid.subgrids()[[0, 0, 0]], + SubgridEnum::LagrangeSubgridV2 { .. } + )); + assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().len(), 50); + assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().len(), 50); + assert_eq!(grid.subgrids()[[0, 0, 0]].mu2_grid().len(), 30); + + let mut grid2 = grid.clone(); + grid2.optimize_using(GridOptFlags::OPTIMIZE_SUBGRID_TYPE); + + // `OPTIMIZE_SUBGRID_TYPE` changes the subgrid type ... + assert!(matches!( + grid2.subgrids()[[0, 0, 0]], + SubgridEnum::ImportOnlySubgridV2 { .. } + )); + // and the dimensions of the subgrid + assert_eq!(grid2.subgrids()[[0, 0, 0]].x1_grid().len(), 6); + assert_eq!(grid2.subgrids()[[0, 0, 0]].x2_grid().len(), 6); + assert_eq!(grid2.subgrids()[[0, 0, 0]].mu2_grid().len(), 4); + + grid.optimize_using(GridOptFlags::OPTIMIZE_SUBGRID_TYPE | GridOptFlags::STATIC_SCALE_DETECTION); + + assert!(matches!( + grid.subgrids()[[0, 0, 0]], + SubgridEnum::ImportOnlySubgridV2 { .. 
} + )); + // if `STATIC_SCALE_DETECTION` is present the `mu2_grid` dimension are better optimized + assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().len(), 6); + assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().len(), 6); + assert_eq!(grid.subgrids()[[0, 0, 0]].mu2_grid().len(), 1); + + // has no effect for this test + grid.optimize_using(GridOptFlags::SYMMETRIZE_CHANNELS); + + assert_eq!(grid.orders().len(), 3); + assert_eq!(grid.channels().len(), 5); + + grid.optimize_using(GridOptFlags::STRIP_EMPTY_ORDERS); + + assert_eq!(grid.orders().len(), 1); + assert_eq!(grid.channels().len(), 5); + + // has no effect for this test + grid.optimize_using(GridOptFlags::MERGE_SAME_CHANNELS); + + assert_eq!(grid.orders().len(), 1); + assert_eq!(grid.channels().len(), 5); + + grid.optimize_using(GridOptFlags::STRIP_EMPTY_CHANNELS); + + assert_eq!(grid.orders().len(), 1); + assert_eq!(grid.channels().len(), 3); + + Ok(()) +} From 34cefefaeb36bc992a78529d9a8f0b50c799219f Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:19:19 +0200 Subject: [PATCH 02/42] Replace `pineappl-v0.8.x` with new crate `pineappl_v0` --- Cargo.lock | 87 +++++++++++++++++------------------------- Cargo.toml | 1 + pineappl/Cargo.toml | 2 +- pineappl_v0/Cargo.toml | 8 ++-- 4 files changed, 40 insertions(+), 58 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index ea33ec9f5..7484567ab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -712,22 +712,13 @@ version = "0.4.20" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b5e6163cb8c49088c2c36f57875e58ccd8c87c7427f7fbd50ea6710b2f3f2e8f" -[[package]] -name = "lz4_flex" -version = "0.9.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1a8cbbb2831780bc3b9c15a41f5b49222ef756b6730a95f3decfdd15903eb5a3" -dependencies = [ - "twox-hash 1.6.3", -] - [[package]] name = "lz4_flex" version = "0.11.6" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = 
"373f5eceeeab7925e0c1098212f2fbc4d416adec9d35051a6ab251e824c1854a" dependencies = [ - "twox-hash 2.1.2", + "twox-hash", ] [[package]] @@ -807,6 +798,7 @@ checksum = "f85776816e34becd8bd9540818d7dc77bf28307f3b3dcc51cc82403c6931680c" dependencies = [ "byteorder", "ndarray", + "num-complex", "num-traits", "py_literal", "zip", @@ -930,27 +922,6 @@ dependencies = [ "sha2", ] -[[package]] -name = "pineappl" -version = "0.8.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "5041fcf611eb0c41f1f6562b498fabdd1319c8d4572fd137ac244ca4e73d999c" -dependencies = [ - "anyhow", - "arrayvec", - "bincode", - "bitflags 2.4.2", - "enum_dispatch", - "float-cmp", - "git-version", - "itertools", - "lz4_flex 0.9.5", - "ndarray", - "rustc-hash 1.1.0", - "serde", - "thiserror", -] - [[package]] name = "pineappl" version = "1.3.3" @@ -963,11 +934,11 @@ dependencies = [ "float-cmp", "git-version", "itertools", - "lz4_flex 0.11.6", + "lz4_flex", "managed-lhapdf", "ndarray", "num-complex", - "pineappl 0.8.3", + "pineappl_v0", "rand", "rand_pcg", "rayon", @@ -993,7 +964,7 @@ version = "1.3.3" dependencies = [ "itertools", "ndarray", - "pineappl 1.3.3", + "pineappl", ] [[package]] @@ -1012,11 +983,11 @@ dependencies = [ "float-cmp", "git-version", "itertools", - "lz4_flex 0.11.6", + "lz4_flex", "managed-lhapdf", "ndarray", "ndarray-npy", - "pineappl 1.3.3", + "pineappl", "pineappl_applgrid", "pineappl_fastnlo", "predicates", @@ -1044,10 +1015,35 @@ dependencies = [ "itertools", "ndarray", "numpy", - "pineappl 1.3.3", + "pineappl", "pyo3", ] +[[package]] +name = "pineappl_v0" +version = "1.3.3" +dependencies = [ + "anyhow", + "arrayvec", + "bincode", + "bitflags 2.4.2", + "enum_dispatch", + "float-cmp", + "git-version", + "itertools", + "lz4_flex", + "managed-lhapdf", + "ndarray", + "ndarray-npy", + "num-complex", + "rand", + "rand_pcg", + "rustc-hash 1.1.0", + "serde", + "serde_yaml", + "thiserror", +] + [[package]] name = "pkg-config" version = "0.3.29" @@ -1448,12 
+1444,6 @@ version = "1.13.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "3c5e1a9a646d36c3599cd173a41282daf47c44583ad367b8e6837255952e5c67" -[[package]] -name = "static_assertions" -version = "1.1.0" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2eb9349b6444b326872e140eb1cf5e7c522154d69e7a0ffb0fb81c06b37543f" - [[package]] name = "strsim" version = "0.11.1" @@ -1586,16 +1576,6 @@ dependencies = [ "winnow", ] -[[package]] -name = "twox-hash" -version = "1.6.3" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "97fee6b57c6a41524a810daee9286c02d7752c4253064d0b05472833a438f675" -dependencies = [ - "cfg-if", - "static_assertions", -] - [[package]] name = "twox-hash" version = "2.1.2" @@ -1921,5 +1901,6 @@ checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" dependencies = [ "byteorder", "crc32fast", + "flate2", "thiserror", ] diff --git a/Cargo.toml b/Cargo.toml index e4fde0026..dd0028991 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -6,6 +6,7 @@ members = [ "pineappl_cli", "pineappl_fastnlo", "pineappl_py", + "pineappl_v0", "xtask", ] default-members = [ diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index 75e1e3ca8..cf042b605 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -27,7 +27,7 @@ itertools = "0.10.1" lz4_flex = "0.11.6" ndarray = { features = ["serde"], version = "0.15.4" } # TODO: opt out of default features in this crate to match line above -pineappl-v0 = { package = "pineappl", version = "0.8.2" } +pineappl_v0 = { path = "../pineappl_v0", version = "=1.3.3" } rayon = "1.5.1" rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0.130" } diff --git a/pineappl_v0/Cargo.toml b/pineappl_v0/Cargo.toml index c279937e9..5a97cc1c0 100644 --- a/pineappl_v0/Cargo.toml +++ b/pineappl_v0/Cargo.toml @@ -1,11 +1,11 @@ [package] authors = ["Christopher Schwan "] description = "PineAPPL is not an extension of APPLgrid" 
-name = "pineappl" +name = "pineappl_v0" readme = "README.md" categories.workspace = true -edition.workspace = true +edition = "2021" keywords.workspace = true license.workspace = true repository.workspace = true @@ -24,7 +24,7 @@ enum_dispatch = "0.3.7" float-cmp = "0.9.0" git-version = "0.3.5" itertools = "0.10.1" -lz4_flex = "0.9.2" +lz4_flex = "0.11.6" ndarray = { features = ["serde"], version = "0.15.4" } rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0.130" } @@ -32,7 +32,7 @@ thiserror = "1.0.30" [dev-dependencies] anyhow = "1.0.48" -lhapdf = { package = "managed-lhapdf", version = "0.3.2" } +lhapdf = { package = "managed-lhapdf", version = "0.4.0" } num-complex = "0.4.4" rand = { default-features = false, version = "0.8.4" } rand_pcg = { default-features = false, version = "0.3.1" } From bedb693b4b454f681344ce09913ec6fbf4f692dd Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:22:23 +0200 Subject: [PATCH 03/42] Update also `pineappl_v0` version numbers for a release --- maintainer/make-release.sh | 1 + 1 file changed, 1 insertion(+) diff --git a/maintainer/make-release.sh b/maintainer/make-release.sh index e3afe71f1..372daa7f4 100755 --- a/maintainer/make-release.sh +++ b/maintainer/make-release.sh @@ -70,6 +70,7 @@ for crate in . 
${crates[@]}; do -e "s:^\(pineappl_applgrid = .*\)version = \".*\":\1version = \"=${version}\":" \ -e "s:^\(pineappl_cli = .*\)version = \".*\":\1version = \"=${version}\":" \ -e "s:^\(pineappl_fastnlo = .*\)version = \".*\":\1version = \"=${version}\":" \ + -e "s:^\(pineappl_v0 = .*\)version = \".*\":\1version = \"=${version}\":" \ ${crate}/Cargo.toml git add ${crate}/Cargo.toml done From 27aac447cc552f5086a6cff65e8cb59039618929 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:23:12 +0200 Subject: [PATCH 04/42] Fix compiler warnings --- pineappl_v0/src/empty_subgrid.rs | 8 ++++---- pineappl_v0/src/grid.rs | 8 ++++---- pineappl_v0/src/import_only_subgrid.rs | 16 ++++++++-------- pineappl_v0/src/lagrange_subgrid.rs | 24 ++++++++++++------------ pineappl_v0/src/ntuple_subgrid.rs | 8 ++++---- pineappl_v0/src/subgrid.rs | 8 ++++---- 6 files changed, 36 insertions(+), 36 deletions(-) diff --git a/pineappl_v0/src/empty_subgrid.rs b/pineappl_v0/src/empty_subgrid.rs index 79640e655..f53fe133c 100644 --- a/pineappl_v0/src/empty_subgrid.rs +++ b/pineappl_v0/src/empty_subgrid.rs @@ -25,15 +25,15 @@ impl Subgrid for EmptySubgridV1 { panic!("EmptySubgridV1 doesn't support the fill operation"); } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&[]) } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&[]) } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&[]) } @@ -56,7 +56,7 @@ impl Subgrid for EmptySubgridV1 { Self.into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(iter::empty()) } diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index c347891d4..36baf6100 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -313,7 +313,7 @@ impl Grid { } } - fn pdg_channels(&self) -> Cow<[Channel]> { + fn pdg_channels(&self) -> Cow<'_, 
[Channel]> { match self.pid_basis() { PidBasis::Evol => self .channels @@ -953,13 +953,13 @@ impl Grid { /// Return all subgrids as an `ArrayView3`. #[must_use] - pub fn subgrids(&self) -> ArrayView3 { + pub fn subgrids(&self) -> ArrayView3<'_, SubgridEnum> { self.subgrids.view() } /// Return all subgrids as an `ArrayViewMut3`. #[must_use] - pub fn subgrids_mut(&mut self) -> ArrayViewMut3 { + pub fn subgrids_mut(&mut self) -> ArrayViewMut3<'_, SubgridEnum> { self.subgrids.view_mut() } @@ -1016,7 +1016,7 @@ impl Grid { /// Returns all information about the bins in this grid. #[must_use] - pub const fn bin_info(&self) -> BinInfo { + pub const fn bin_info(&self) -> BinInfo<'_> { BinInfo::new(&self.bin_limits, self.remapper()) } diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs index 04624c09a..dae5c169d 100644 --- a/pineappl_v0/src/import_only_subgrid.rs +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -57,7 +57,7 @@ impl Subgrid for ImportOnlySubgridV1 { panic!("ImportOnlySubgridV1 doesn't support the fill operation"); } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { self.q2_grid .iter() .copied() @@ -65,11 +65,11 @@ impl Subgrid for ImportOnlySubgridV1 { .collect() } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&self.x1_grid) } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&self.x2_grid) } @@ -151,7 +151,7 @@ impl Subgrid for ImportOnlySubgridV1 { .into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(self.array.indexed_iter()) } @@ -227,15 +227,15 @@ impl Subgrid for ImportOnlySubgridV2 { panic!("ImportOnlySubgridV2 doesn't support the fill operation"); } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&self.mu2_grid) } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { 
Cow::Borrowed(&self.x1_grid) } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&self.x2_grid) } @@ -363,7 +363,7 @@ impl Subgrid for ImportOnlySubgridV2 { .into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(self.array.indexed_iter()) } diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs index f3ccf2a5e..ae5ec6eca 100644 --- a/pineappl_v0/src/lagrange_subgrid.rs +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -227,7 +227,7 @@ impl Subgrid for LagrangeSubgridV1 { } } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { (0..self.ntau) .map(|itau| { let q2 = fq2(self.gettau(itau)); @@ -236,11 +236,11 @@ impl Subgrid for LagrangeSubgridV1 { .collect() } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { (0..self.ny).map(|iy| fx(self.gety(iy))).collect() } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { self.x1_grid() } @@ -338,7 +338,7 @@ impl Subgrid for LagrangeSubgridV1 { .into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { self.grid.as_ref().map_or_else( || Box::new(iter::empty()) as Box>, |grid| { @@ -610,7 +610,7 @@ impl Subgrid for LagrangeSubgridV2 { } } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { (0..self.ntau) .map(|itau| { let q2 = fq2(self.gettau(itau)); @@ -619,11 +619,11 @@ impl Subgrid for LagrangeSubgridV2 { .collect() } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { (0..self.ny1).map(|iy| fx(self.gety1(iy))).collect() } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { (0..self.ny2).map(|iy| fx(self.gety2(iy))).collect() } @@ -732,7 +732,7 @@ impl Subgrid for LagrangeSubgridV2 { .into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { 
self.grid.as_ref().map_or_else( || Box::new(iter::empty()) as Box>, |grid| { @@ -922,7 +922,7 @@ impl Subgrid for LagrangeSparseSubgridV1 { } } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { (0..self.ntau) .map(|itau| { let q2 = fq2(self.gettau(itau)); @@ -931,11 +931,11 @@ impl Subgrid for LagrangeSparseSubgridV1 { .collect() } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { (0..self.ny).map(|iy| fx(self.gety(iy))).collect() } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { self.x1_grid() } @@ -1006,7 +1006,7 @@ impl Subgrid for LagrangeSparseSubgridV1 { .into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(self.array.indexed_iter().map(|(tuple, value)| { ( tuple, diff --git a/pineappl_v0/src/ntuple_subgrid.rs b/pineappl_v0/src/ntuple_subgrid.rs index 282d9fffc..b1bb0a5b3 100644 --- a/pineappl_v0/src/ntuple_subgrid.rs +++ b/pineappl_v0/src/ntuple_subgrid.rs @@ -39,15 +39,15 @@ impl Subgrid for NtupleSubgridV1 { self.ntuples.push(ntuple.clone()); } - fn mu2_grid(&self) -> Cow<[Mu2]> { + fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&[]) } - fn x1_grid(&self) -> Cow<[f64]> { + fn x1_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&[]) } - fn x2_grid(&self) -> Cow<[f64]> { + fn x2_grid(&self) -> Cow<'_, [f64]> { Cow::Borrowed(&[]) } @@ -75,7 +75,7 @@ impl Subgrid for NtupleSubgridV1 { Self::new().into() } - fn indexed_iter(&self) -> SubgridIndexedIter { + fn indexed_iter(&self) -> SubgridIndexedIter<'_> { panic!("NtupleSubgridV1 doesn't support the indexed_iter operation"); } diff --git a/pineappl_v0/src/subgrid.rs b/pineappl_v0/src/subgrid.rs index 1dce12abb..1fe8e286c 100644 --- a/pineappl_v0/src/subgrid.rs +++ b/pineappl_v0/src/subgrid.rs @@ -67,15 +67,15 @@ pub trait Subgrid { /// Return a slice of [`Mu2`] values corresponding to the (squared) renormalization and /// factorization values of the grid. 
If the subgrid does not use a grid, this method should /// return an empty slice. - fn mu2_grid(&self) -> Cow<[Mu2]>; + fn mu2_grid(&self) -> Cow<'_, [Mu2]>; /// Return a slice of values of `x1`. If the subgrid does not use a grid, this method should /// return an empty slice. - fn x1_grid(&self) -> Cow<[f64]>; + fn x1_grid(&self) -> Cow<'_, [f64]>; /// Return a slice of values of `x2`. If the subgrid does not use a grid, this method should /// return an empty slice. - fn x2_grid(&self) -> Cow<[f64]>; + fn x2_grid(&self) -> Cow<'_, [f64]>; /// Convolute the subgrid with a luminosity function, which takes indices as arguments that /// correspond to the entries given in the slices `x1`, `x2` and `mu2`. @@ -109,7 +109,7 @@ pub trait Subgrid { fn clone_empty(&self) -> SubgridEnum; /// Return an iterator over all non-zero elements of the subgrid. - fn indexed_iter(&self) -> SubgridIndexedIter; + fn indexed_iter(&self) -> SubgridIndexedIter<'_>; /// Return statistics for this subgrid. fn stats(&self) -> Stats; From 7a0d6efab2f62b4e76795d4ed1514a2b942fee84 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:30:45 +0200 Subject: [PATCH 05/42] Fix crate name --- pineappl_v0/tests/drell_yan_lo.rs | 12 ++++++------ 1 file changed, 6 insertions(+), 6 deletions(-) diff --git a/pineappl_v0/tests/drell_yan_lo.rs b/pineappl_v0/tests/drell_yan_lo.rs index 0b53bc2b0..440553932 100644 --- a/pineappl_v0/tests/drell_yan_lo.rs +++ b/pineappl_v0/tests/drell_yan_lo.rs @@ -2,12 +2,12 @@ use anyhow::Result; use float_cmp::assert_approx_eq; use lhapdf::Pdf; use num_complex::Complex; -use pineappl::bin::BinRemapper; -use pineappl::boc::Order; -use pineappl::channel; -use pineappl::convolutions::LumiCache; -use pineappl::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; +use pineappl_v0::bin::BinRemapper; +use pineappl_v0::boc::Order; +use pineappl_v0::channel; +use 
pineappl_v0::convolutions::LumiCache; +use pineappl_v0::grid::{Grid, GridOptFlags, Ntuple}; +use pineappl_v0::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; use rand::Rng; use rand_pcg::Pcg64; use std::f64::consts::PI; From a4f949a2615386e5eb2375b2a7cdf66898ebaf6e Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:39:18 +0200 Subject: [PATCH 06/42] Minimize dependencies --- Cargo.lock | 3 --- pineappl_v0/Cargo.toml | 2 +- 2 files changed, 1 insertion(+), 4 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 7484567ab..802d83a36 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -487,9 +487,6 @@ name = "float-cmp" version = "0.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "98de4bbd547a563b716d8dfa9aad1cb19bfab00f4fa09a6a4ed21dbcf44ce9c4" -dependencies = [ - "num-traits", -] [[package]] name = "form_urlencoded" diff --git a/pineappl_v0/Cargo.toml b/pineappl_v0/Cargo.toml index 5a97cc1c0..12fc1c4f5 100644 --- a/pineappl_v0/Cargo.toml +++ b/pineappl_v0/Cargo.toml @@ -21,7 +21,7 @@ arrayvec = "0.7.2" bincode = "1.3.3" bitflags = "2.4.2" enum_dispatch = "0.3.7" -float-cmp = "0.9.0" +float-cmp = { default-features = false, version = "0.9.0" } git-version = "0.3.5" itertools = "0.10.1" lz4_flex = "0.11.6" From eaa01fb94096497c4afdb0d3cc31c28a8651439b Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:39:45 +0200 Subject: [PATCH 07/42] Upgrade `pineappl_v0` to Rust 2024 --- pineappl_v0/Cargo.toml | 2 +- pineappl_v0/src/import_only_subgrid.rs | 8 ++++---- pineappl_v0/src/lagrange_subgrid.rs | 4 ++-- pineappl_v0/src/packed_array.rs | 2 +- pineappl_v0/tests/drell_yan_lo.rs | 6 +++--- 5 files changed, 11 insertions(+), 11 deletions(-) diff --git a/pineappl_v0/Cargo.toml b/pineappl_v0/Cargo.toml index 12fc1c4f5..6561e1a6c 100644 --- a/pineappl_v0/Cargo.toml +++ b/pineappl_v0/Cargo.toml @@ -5,7 +5,7 @@ name = "pineappl_v0" readme = "README.md" categories.workspace = true 
-edition = "2021" +edition.workspace = true keywords.workspace = true license.workspace = true repository.workspace = true diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs index dae5c169d..a4b5df098 100644 --- a/pineappl_v0/src/import_only_subgrid.rs +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -748,14 +748,14 @@ mod tests { for _ in 0..1000 { grid1.fill(&Ntuple { - x1: rng.gen(), - x2: rng.gen(), + x1: rng.r#gen(), + x2: rng.r#gen(), q2: q2_range.sample(&mut rng), weight: 1.0, }); grid2.fill(&Ntuple { - x1: rng.gen(), - x2: rng.gen(), + x1: rng.r#gen(), + x2: rng.r#gen(), q2: q2_range.sample(&mut rng), weight: 1.0, }); diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs index ae5ec6eca..9c125fbec 100644 --- a/pineappl_v0/src/lagrange_subgrid.rs +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -342,7 +342,7 @@ impl Subgrid for LagrangeSubgridV1 { self.grid.as_ref().map_or_else( || Box::new(iter::empty()) as Box>, |grid| { - Box::new(grid.indexed_iter().filter(|(_, &value)| value != 0.0).map( + Box::new(grid.indexed_iter().filter(|&(_, &value)| value != 0.0).map( |(tuple, &value)| { ( (self.itaumin + tuple.0, tuple.1, tuple.2), @@ -736,7 +736,7 @@ impl Subgrid for LagrangeSubgridV2 { self.grid.as_ref().map_or_else( || Box::new(iter::empty()) as Box>, |grid| { - Box::new(grid.indexed_iter().filter(|(_, &value)| value != 0.0).map( + Box::new(grid.indexed_iter().filter(|&(_, &value)| value != 0.0).map( |(tuple, &value)| { ( (self.itaumin + tuple.0, tuple.1, tuple.2), diff --git a/pineappl_v0/src/packed_array.rs b/pineappl_v0/src/packed_array.rs index 4f29764a7..41b6ab666 100644 --- a/pineappl_v0/src/packed_array.rs +++ b/pineappl_v0/src/packed_array.rs @@ -109,7 +109,7 @@ impl PackedArray { for ((i, j, k), &entry) in array .indexed_iter() - .filter(|(_, &entry)| entry != Default::default()) + .filter(|&(_, &entry)| entry != Default::default()) { result[[i + xstart, j, k]] = entry; } diff 
--git a/pineappl_v0/tests/drell_yan_lo.rs b/pineappl_v0/tests/drell_yan_lo.rs index 440553932..e5c99e246 100644 --- a/pineappl_v0/tests/drell_yan_lo.rs +++ b/pineappl_v0/tests/drell_yan_lo.rs @@ -83,8 +83,8 @@ fn hadronic_pspgen(rng: &mut impl Rng, mmin: f64, mmax: f64) -> Psp2to2 { let mut jacobian = 1.0; - let r1 = rng.gen::(); - let r2 = rng.gen::(); + let r1 = rng.r#gen::(); + let r2 = rng.r#gen::(); let tau0 = smin / smax; let tau = tau0.powf(r1); let y = tau.powf(1.0 - r2); @@ -94,7 +94,7 @@ fn hadronic_pspgen(rng: &mut impl Rng, mmin: f64, mmax: f64) -> Psp2to2 { jacobian *= tau * tau0.ln().powi(2) * r1; // theta integration (in the CMS) - let cos_theta = 2.0 * rng.gen::() - 1.0; + let cos_theta = 2.0 * rng.r#gen::() - 1.0; jacobian *= 2.0; let t = -0.5 * s * (1.0 - cos_theta); From 26fd1103fdaab57706f78c37f9570a0e0c07dceb Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:56:41 +0200 Subject: [PATCH 08/42] Actually test all crates in CI --- .github/workflows/msrv.yml | 2 +- .github/workflows/rust.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 119f222b8..8c08aa30d 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -24,4 +24,4 @@ jobs: MSRV=$(grep '^rust-version ' Cargo.toml | cut -d= -f2- | tr -d ' "') # enable the MSRV rustup default "${MSRV}" - cargo check --all-features --all-targets + cargo check --all-features --all-targets --workspace diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 2a089d1ea..3845748dc 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -36,7 +36,7 @@ jobs: RUSTFLAGS: '-Cinstrument-coverage -Clink-dead-code' run: | # we need stderr, but we can't run test twice because it'll regenerate/modify the binaries which interferes with `llvm-cov` - cargo test --features=applgrid,evolve,fastnlo,fktable --no-fail-fast 2> >(tee stderr 1>&2) + cargo test 
--features=applgrid,evolve,fastnlo,fktable --no-fail-fast --workspace 2> >(tee stderr 1>&2) # from https://stackoverflow.com/a/51141872/812178 sed -i 's/\x1B\[[0-9;]\{1,\}[A-Za-z]//g' stderr From 85dfb44d2d9ec66dd84fc6184ec7902ea6784278 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:57:13 +0200 Subject: [PATCH 09/42] Fix `pineappl_v0` doctests --- pineappl_v0/src/bin.rs | 4 ++-- pineappl_v0/src/boc.rs | 24 ++++++++++++------------ 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/pineappl_v0/src/bin.rs b/pineappl_v0/src/bin.rs index 3d45a597e..5ba5b9cea 100644 --- a/pineappl_v0/src/bin.rs +++ b/pineappl_v0/src/bin.rs @@ -659,7 +659,7 @@ impl BinLimits { /// # Examples /// /// ```rust - /// use pineappl::bin::BinLimits; + /// use pineappl_v0::bin::BinLimits; /// /// // example with equally sized bins /// let equal_bins = BinLimits::new(vec![0.25, 0.5, 0.75, 1.0]); @@ -705,7 +705,7 @@ impl BinLimits { /// # Examples /// /// ```rust - /// use pineappl::bin::BinLimits; + /// use pineappl_v0::bin::BinLimits; /// /// // example with equally sized bins /// let equal_bins = BinLimits::new(vec![0.25, 0.5, 0.75, 1.0]); diff --git a/pineappl_v0/src/boc.rs b/pineappl_v0/src/boc.rs index 62599b098..52da7f060 100644 --- a/pineappl_v0/src/boc.rs +++ b/pineappl_v0/src/boc.rs @@ -133,7 +133,7 @@ impl Order { /// - the mixed NNLO QCD—EW. 
/// /// ```rust - /// use pineappl::boc::Order; + /// use pineappl_v0::boc::Order; /// /// let orders = [ /// Order::new(0, 2, 0, 0), // LO : alpha^2 @@ -164,7 +164,7 @@ impl Order { /// `true`: /// /// ```rust - /// use pineappl::boc::Order; + /// use pineappl_v0::boc::Order; /// /// let orders = [ /// Order::new(0, 2, 0, 0), // LO : alpha^2 @@ -181,7 +181,7 @@ impl Order { /// the selection for different LOs: /// /// ```rust - /// use pineappl::boc::Order; + /// use pineappl_v0::boc::Order; /// /// let orders = [ /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 @@ -276,7 +276,7 @@ impl Channel { /// Ordering of the arguments doesn't matter: /// /// ```rust - /// use pineappl::boc::Channel; + /// use pineappl_v0::boc::Channel; /// /// let entry1 = Channel::new(vec![(2, 2, 1.0), (4, 4, 1.0)]); /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); @@ -288,7 +288,7 @@ impl Channel { /// Same arguments are merged together: /// /// ```rust - /// use pineappl::boc::Channel; + /// use pineappl_v0::boc::Channel; /// /// let entry1 = Channel::new(vec![(1, 1, 1.0), (1, 1, 3.0), (3, 3, 1.0), (1, 1, 6.0)]); /// let entry2 = Channel::new(vec![(1, 1, 10.0), (3, 3, 1.0)]); @@ -301,7 +301,7 @@ impl Channel { /// Creating an empty channel panics: /// /// ```rust,should_panic - /// use pineappl::boc::Channel; + /// use pineappl_v0::boc::Channel; /// /// let _ = Channel::new(vec![]); /// ``` @@ -336,8 +336,8 @@ impl Channel { /// # Examples /// /// ```rust - /// use pineappl::boc::Channel; - /// use pineappl::channel; + /// use pineappl_v0::boc::Channel; + /// use pineappl_v0::channel; /// /// let entry = Channel::translate(&channel![103, 11, 1.0], &|evol_id| match evol_id { /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], @@ -365,8 +365,8 @@ impl Channel { /// # Examples /// /// ```rust - /// use pineappl::channel; - /// use pineappl::boc::Channel; + /// use pineappl_v0::channel; + /// use pineappl_v0::boc::Channel; /// /// let entry = channel![4, 4, 1.0; 2, 2, 
1.0]; /// @@ -390,7 +390,7 @@ impl Channel { /// # Examples /// /// ```rust - /// use pineappl::boc::Channel; + /// use pineappl_v0::boc::Channel; /// /// let entry1 = Channel::new(vec![(2, 2, 2.0), (4, 4, 2.0)]); /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); @@ -491,7 +491,7 @@ impl FromStr for Channel { /// In the following example `entry1` and `entry2` represent the same values: /// /// ```rust -/// use pineappl::channel; +/// use pineappl_v0::channel; /// /// let entry1 = channel![2, 2, 1.0; 4, 4, 1.0]; /// let entry2 = channel![4, 4, 1.0; 2, 2, 1.0]; From 267d72be4a863d6f149f3be4465a46b2bc8c3896 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Sun, 29 Mar 2026 21:57:31 +0200 Subject: [PATCH 10/42] Remove `evolve_info` unit test --- pineappl_v0/src/grid.rs | 69 ----------------------------------------- 1 file changed, 69 deletions(-) diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index 36baf6100..386ac30f2 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -1877,7 +1877,6 @@ impl Grid { mod tests { use super::*; use crate::channel; - use std::fs::File; #[test] fn grid_with_subgrid_type() { @@ -2109,72 +2108,4 @@ mod tests { [Convolution::UnpolPDF(-2212), Convolution::UnpolPDF(-2212)] ); } - - #[test] - fn evolve_info() { - let grid = - Grid::read(File::open("../test-data/LHCB_WP_7TEV.pineappl.lz4").unwrap()).unwrap(); - let info = grid.evolve_info(&[]); - - assert_eq!(info.fac1.len(), 1); - assert_approx_eq!(f64, info.fac1[0], 6456.443904000001, ulps = 64); - - assert_eq!(info.pids1, [-3, -1, 2, 4, 21, 22]); - - assert_eq!(info.x1.len(), 50); - assert_approx_eq!(f64, info.x1[0], 1.9999999999999954e-7, ulps = 64); - - assert_approx_eq!(f64, info.x1[1], 3.034304765867952e-7, ulps = 64); - assert_approx_eq!(f64, info.x1[2], 4.6035014748963906e-7, ulps = 64); - assert_approx_eq!(f64, info.x1[3], 6.984208530700364e-7, ulps = 64); - assert_approx_eq!(f64, info.x1[4], 1.0596094959101024e-6, ulps = 64); - 
assert_approx_eq!(f64, info.x1[5], 1.607585498470808e-6, ulps = 64); - assert_approx_eq!(f64, info.x1[6], 2.438943292891682e-6, ulps = 64); - assert_approx_eq!(f64, info.x1[7], 3.7002272069854957e-6, ulps = 64); - assert_approx_eq!(f64, info.x1[8], 5.613757716930151e-6, ulps = 64); - assert_approx_eq!(f64, info.x1[9], 8.516806677573355e-6, ulps = 64); - assert_approx_eq!(f64, info.x1[10], 1.292101569074731e-5, ulps = 64); - assert_approx_eq!(f64, info.x1[11], 1.9602505002391748e-5, ulps = 64); - assert_approx_eq!(f64, info.x1[12], 2.97384953722449e-5, ulps = 64); - assert_approx_eq!(f64, info.x1[13], 4.511438394964044e-5, ulps = 64); - assert_approx_eq!(f64, info.x1[14], 6.843744918967896e-5, ulps = 64); - assert_approx_eq!(f64, info.x1[15], 0.00010381172986576898, ulps = 64); - assert_approx_eq!(f64, info.x1[16], 0.00015745605600841445, ulps = 64); - assert_approx_eq!(f64, info.x1[17], 0.00023878782918561914, ulps = 64); - assert_approx_eq!(f64, info.x1[18], 0.00036205449638139736, ulps = 64); - assert_approx_eq!(f64, info.x1[19], 0.0005487795323670796, ulps = 64); - assert_approx_eq!(f64, info.x1[20], 0.0008314068836488144, ulps = 64); - assert_approx_eq!(f64, info.x1[21], 0.0012586797144272762, ulps = 64); - assert_approx_eq!(f64, info.x1[22], 0.0019034634022867384, ulps = 64); - assert_approx_eq!(f64, info.x1[23], 0.0028738675812817515, ulps = 64); - assert_approx_eq!(f64, info.x1[24], 0.004328500638820811, ulps = 64); - assert_approx_eq!(f64, info.x1[25], 0.006496206194633799, ulps = 64); - assert_approx_eq!(f64, info.x1[26], 0.009699159574043398, ulps = 64); - assert_approx_eq!(f64, info.x1[27], 0.014375068581090129, ulps = 64); - assert_approx_eq!(f64, info.x1[28], 0.02108918668378717, ulps = 64); - assert_approx_eq!(f64, info.x1[29], 0.030521584007828916, ulps = 64); - assert_approx_eq!(f64, info.x1[30], 0.04341491741702269, ulps = 64); - assert_approx_eq!(f64, info.x1[31], 0.060480028754447364, ulps = 64); - assert_approx_eq!(f64, info.x1[32], 
0.08228122126204893, ulps = 64); - assert_approx_eq!(f64, info.x1[33], 0.10914375746330703, ulps = 64); - assert_approx_eq!(f64, info.x1[34], 0.14112080644440345, ulps = 64); - assert_approx_eq!(f64, info.x1[35], 0.17802566042569432, ulps = 64); - assert_approx_eq!(f64, info.x1[36], 0.2195041265003886, ulps = 64); - assert_approx_eq!(f64, info.x1[37], 0.2651137041582823, ulps = 64); - assert_approx_eq!(f64, info.x1[38], 0.31438740076927585, ulps = 64); - assert_approx_eq!(f64, info.x1[39], 0.3668753186482242, ulps = 64); - assert_approx_eq!(f64, info.x1[40], 0.4221667753589648, ulps = 64); - assert_approx_eq!(f64, info.x1[41], 0.4798989029610255, ulps = 64); - assert_approx_eq!(f64, info.x1[42], 0.5397572337880445, ulps = 64); - assert_approx_eq!(f64, info.x1[43], 0.601472197967335, ulps = 64); - assert_approx_eq!(f64, info.x1[44], 0.6648139482473823, ulps = 64); - assert_approx_eq!(f64, info.x1[45], 0.7295868442414312, ulps = 64); - assert_approx_eq!(f64, info.x1[46], 0.7956242522922756, ulps = 64); - assert_approx_eq!(f64, info.x1[47], 0.8627839323906108, ulps = 64); - assert_approx_eq!(f64, info.x1[48], 0.9309440808717544, ulps = 64); - assert_approx_eq!(f64, info.x1[49], 1.0, ulps = 64); - - assert_eq!(info.ren1.len(), 1); - assert_approx_eq!(f64, info.ren1[0], 6456.443904000001, ulps = 64); - } } From ba5aadb25b585f793517a5f773f65855aac1bc30 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:27:44 +0200 Subject: [PATCH 11/42] Setup Python in workflows to fix CI --- .github/workflows/msrv.yml | 5 +++++ .github/workflows/rust.yml | 5 +++++ 2 files changed, 10 insertions(+) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 8c08aa30d..3be9f6165 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -18,6 +18,11 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + - name: Run check run: | # extract the MSRV diff 
--git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 3845748dc..4bab1ce79 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -22,6 +22,11 @@ jobs: steps: - uses: actions/checkout@v4 + - name: Set up Python + uses: actions/setup-python@v5 + with: + python-version: "3.13" + - name: Get test data uses: ./.github/actions/cache-test-data From 38d2eb4a95c182686867ee22065b2688e10d16c3 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:40:21 +0200 Subject: [PATCH 12/42] Print environment variables --- .github/workflows/msrv.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 3be9f6165..84b41327c 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -25,6 +25,8 @@ jobs: - name: Run check run: | + # Print environment variables to debug Python problem + env # extract the MSRV MSRV=$(grep '^rust-version ' Cargo.toml | cut -d= -f2- | tr -d ' "') # enable the MSRV From 2bdafcb8e780694666676b1f45071c0dcb7c9b43 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:40:39 +0200 Subject: [PATCH 13/42] Disable some workflows temporarily --- .github/workflows/capi.yaml | 1 + .github/workflows/python.yml | 1 + .github/workflows/rust.yml | 1 + 3 files changed, 3 insertions(+) diff --git a/.github/workflows/capi.yaml b/.github/workflows/capi.yaml index 518c143e4..b6cd021dd 100644 --- a/.github/workflows/capi.yaml +++ b/.github/workflows/capi.yaml @@ -3,6 +3,7 @@ name: CAPI on: push: branches-ignore: + - pineappl-v0 - pycli - bump-pyo3-version - update-wheels-actions diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index 584de61df..b58b1f599 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -3,6 +3,7 @@ name: Python on: push: branches-ignore: + - pineappl-v0 - pycli - update-wheels-actions - update-wheels-actions-2 diff --git a/.github/workflows/rust.yml 
b/.github/workflows/rust.yml index 4bab1ce79..030ce1b9b 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -3,6 +3,7 @@ name: Rust on: push: branches-ignore: + - pineappl-v0 - pycli - bump-pyo3-version - update-wheels-actions From 370b06bb1942c2b124ae104db1108f4649662cbb Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:49:17 +0200 Subject: [PATCH 14/42] Revert "Actually test all crates in CI" This reverts commit 26fd1103fdaab57706f78c37f9570a0e0c07dceb. --- .github/workflows/msrv.yml | 2 +- .github/workflows/rust.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 84b41327c..78b2a420e 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -31,4 +31,4 @@ jobs: MSRV=$(grep '^rust-version ' Cargo.toml | cut -d= -f2- | tr -d ' "') # enable the MSRV rustup default "${MSRV}" - cargo check --all-features --all-targets --workspace + cargo check --all-features --all-targets diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 030ce1b9b..ce2e7dc32 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -42,7 +42,7 @@ jobs: RUSTFLAGS: '-Cinstrument-coverage -Clink-dead-code' run: | # we need stderr, but we can't run test twice because it'll regenerate/modify the binaries which interferes with `llvm-cov` - cargo test --features=applgrid,evolve,fastnlo,fktable --no-fail-fast --workspace 2> >(tee stderr 1>&2) + cargo test --features=applgrid,evolve,fastnlo,fktable --no-fail-fast 2> >(tee stderr 1>&2) # from https://stackoverflow.com/a/51141872/812178 sed -i 's/\x1B\[[0-9;]\{1,\}[A-Za-z]//g' stderr From ff422a24d8283feb89c4d0cdcfe2e302215858da Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:49:33 +0200 Subject: [PATCH 15/42] Revert "Disable some workflows temporarily" This reverts commit 2bdafcb8e780694666676b1f45071c0dcb7c9b43. 
--- .github/workflows/capi.yaml | 1 - .github/workflows/python.yml | 1 - .github/workflows/rust.yml | 1 - 3 files changed, 3 deletions(-) diff --git a/.github/workflows/capi.yaml b/.github/workflows/capi.yaml index b6cd021dd..518c143e4 100644 --- a/.github/workflows/capi.yaml +++ b/.github/workflows/capi.yaml @@ -3,7 +3,6 @@ name: CAPI on: push: branches-ignore: - - pineappl-v0 - pycli - bump-pyo3-version - update-wheels-actions diff --git a/.github/workflows/python.yml b/.github/workflows/python.yml index b58b1f599..584de61df 100644 --- a/.github/workflows/python.yml +++ b/.github/workflows/python.yml @@ -3,7 +3,6 @@ name: Python on: push: branches-ignore: - - pineappl-v0 - pycli - update-wheels-actions - update-wheels-actions-2 diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index ce2e7dc32..771bbbf26 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -3,7 +3,6 @@ name: Rust on: push: branches-ignore: - - pineappl-v0 - pycli - bump-pyo3-version - update-wheels-actions From 3d39e89da15862b0a9fbfd61dca38e3bdd01ffc6 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:49:47 +0200 Subject: [PATCH 16/42] Revert "Print environment variables" This reverts commit 38d2eb4a95c182686867ee22065b2688e10d16c3. 
--- .github/workflows/msrv.yml | 2 -- 1 file changed, 2 deletions(-) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 78b2a420e..635ce91c7 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -25,8 +25,6 @@ jobs: - name: Run check run: | - # Print environment variables to debug Python problem - env # extract the MSRV MSRV=$(grep '^rust-version ' Cargo.toml | cut -d= -f2- | tr -d ' "') # enable the MSRV From 68fa7245e061ff35d528968c2bec115b478a1a00 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 09:49:59 +0200 Subject: [PATCH 17/42] Revert "Setup Python in workflows to fix CI" This reverts commit ba5aadb25b585f793517a5f773f65855aac1bc30. --- .github/workflows/msrv.yml | 5 ----- .github/workflows/rust.yml | 5 ----- 2 files changed, 10 deletions(-) diff --git a/.github/workflows/msrv.yml b/.github/workflows/msrv.yml index 635ce91c7..119f222b8 100644 --- a/.github/workflows/msrv.yml +++ b/.github/workflows/msrv.yml @@ -18,11 +18,6 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - name: Run check run: | # extract the MSRV diff --git a/.github/workflows/rust.yml b/.github/workflows/rust.yml index 771bbbf26..2a089d1ea 100644 --- a/.github/workflows/rust.yml +++ b/.github/workflows/rust.yml @@ -22,11 +22,6 @@ jobs: steps: - uses: actions/checkout@v4 - - name: Set up Python - uses: actions/setup-python@v5 - with: - python-version: "3.13" - - name: Get test data uses: ./.github/actions/cache-test-data From fff96733d233b073590758966247c14b83da9a55 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 10:20:48 +0200 Subject: [PATCH 18/42] Strip out unused code from `pineappl_v0` --- pineappl_v0/src/convolutions.rs | 340 ------ pineappl_v0/src/evolution.rs | 801 -------------- pineappl_v0/src/fk_table.rs | 441 -------- pineappl_v0/src/grid.rs | 1696 +---------------------------- pineappl_v0/src/lib.rs | 
2 - pineappl_v0/tests/drell_yan_lo.rs | 822 -------------- 6 files changed, 15 insertions(+), 4087 deletions(-) delete mode 100644 pineappl_v0/src/evolution.rs delete mode 100644 pineappl_v0/src/fk_table.rs delete mode 100644 pineappl_v0/tests/drell_yan_lo.rs diff --git a/pineappl_v0/src/convolutions.rs b/pineappl_v0/src/convolutions.rs index ea726c440..6adf603a0 100644 --- a/pineappl_v0/src/convolutions.rs +++ b/pineappl_v0/src/convolutions.rs @@ -1,346 +1,6 @@ //! Module for everything related to luminosity functions. -use super::grid::Grid; use super::pids; -use super::subgrid::{Mu2, Subgrid}; -use rustc_hash::FxHashMap; - -enum Pdfs<'a> { - Two { - xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, - xfx1_cache: FxHashMap<(i32, usize, usize), f64>, - xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, - xfx2_cache: FxHashMap<(i32, usize, usize), f64>, - }, - One { - xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, - xfx_cache: FxHashMap<(i32, usize, usize), f64>, - }, -} - -impl<'a> Pdfs<'a> { - pub fn clear(&mut self) { - match self { - Self::One { xfx_cache, .. } => xfx_cache.clear(), - Self::Two { - xfx1_cache, - xfx2_cache, - .. - } => { - xfx1_cache.clear(); - xfx2_cache.clear(); - } - } - } -} - -/// A cache for evaluating PDFs. Methods like [`Grid::convolve`] accept instances of this `struct` -/// instead of the PDFs themselves. -pub struct LumiCache<'a> { - pdfs: Pdfs<'a>, - alphas: &'a mut dyn FnMut(f64) -> f64, - alphas_cache: Vec, - mur2_grid: Vec, - muf2_grid: Vec, - x_grid: Vec, - imur2: Vec, - imuf2: Vec, - ix1: Vec, - ix2: Vec, - pdg1: i32, - pdg2: i32, - cc1: i32, - cc2: i32, -} - -impl<'a> LumiCache<'a> { - /// Construct a luminosity cache with two PDFs, `xfx1` and `xfx2`. The types of hadrons the - /// PDFs correspond to must be given as `pdg1` and `pdg2`. The function to evaluate the - /// strong coupling must be given as `alphas`. 
The grid that the cache will be used with must - /// be given as `grid`; this parameter determines which of the initial states are hadronic, and - /// if an initial states is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some - /// of the PDFs must be charge-conjugated, this is automatically done in this function. - pub fn with_two( - pdg1: i32, - xfx1: &'a mut dyn FnMut(i32, f64, f64) -> f64, - pdg2: i32, - xfx2: &'a mut dyn FnMut(i32, f64, f64) -> f64, - alphas: &'a mut dyn FnMut(f64) -> f64, - ) -> Self { - Self { - pdfs: Pdfs::Two { - xfx1, - xfx1_cache: FxHashMap::default(), - xfx2, - xfx2_cache: FxHashMap::default(), - }, - alphas, - alphas_cache: vec![], - mur2_grid: vec![], - muf2_grid: vec![], - x_grid: vec![], - imur2: Vec::new(), - imuf2: Vec::new(), - ix1: Vec::new(), - ix2: Vec::new(), - pdg1, - pdg2, - cc1: 0, - cc2: 0, - } - } - - /// Construct a luminosity cache with a single PDF `xfx`. The type of hadron the PDF - /// corresponds to must be given as `pdg`. The function to evaluate the strong coupling must be - /// given as `alphas`. The grid that the cache should be used with must be given as `grid`; - /// this parameter determines which of the initial states are hadronic, and if an initial - /// states is not hadronic the corresponding 'PDF' is set to `xfx = x`. If some of the PDFs - /// must be charge-conjugated, this is automatically done in this function. 
- pub fn with_one( - pdg: i32, - xfx: &'a mut dyn FnMut(i32, f64, f64) -> f64, - alphas: &'a mut dyn FnMut(f64) -> f64, - ) -> Self { - Self { - pdfs: Pdfs::One { - xfx, - xfx_cache: FxHashMap::default(), - }, - alphas, - alphas_cache: vec![], - mur2_grid: vec![], - muf2_grid: vec![], - x_grid: vec![], - imur2: Vec::new(), - imuf2: Vec::new(), - ix1: Vec::new(), - ix2: Vec::new(), - pdg1: pdg, - pdg2: pdg, - cc1: 0, - cc2: 0, - } - } - - pub(crate) fn setup(&mut self, grid: &Grid, xi: &[(f64, f64)]) -> Result<(), ()> { - let convolutions = grid.convolutions(); - - // TODO: the following code only works with exactly two convolutions - assert_eq!(convolutions.len(), 2); - - // do we have to charge-conjugate the initial states? - let cc1 = if let Some(pid) = convolutions[0].pid() { - if self.pdg1 == pid { - 1 - } else if self.pdg1 == pids::charge_conjugate_pdg_pid(pid) { - -1 - } else { - // TODO: return a proper error - return Err(()); - } - } else { - 0 - }; - let cc2 = if let Some(pid) = convolutions[1].pid() { - if self.pdg2 == pid { - 1 - } else if self.pdg2 == pids::charge_conjugate_pdg_pid(pid) { - -1 - } else { - // TODO: return a proper error - return Err(()); - } - } else { - 0 - }; - - // TODO: try to avoid calling clear - self.clear(); - - let mut x_grid: Vec<_> = grid - .subgrids() - .iter() - .filter_map(|subgrid| { - if subgrid.is_empty() { - None - } else { - let mut vec = subgrid.x1_grid().into_owned(); - vec.extend_from_slice(&subgrid.x2_grid()); - Some(vec) - } - }) - .flatten() - .collect(); - x_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - x_grid.dedup(); - - let mut mur2_grid: Vec<_> = grid - .subgrids() - .iter() - .filter_map(|subgrid| { - if subgrid.is_empty() { - None - } else { - Some(subgrid.mu2_grid().into_owned()) - } - }) - .flatten() - .flat_map(|Mu2 { ren, .. 
}| { - xi.iter() - .map(|(xir, _)| xir * xir * ren) - .collect::>() - }) - .collect(); - mur2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - mur2_grid.dedup(); - - let mut muf2_grid: Vec<_> = grid - .subgrids() - .iter() - .filter_map(|subgrid| { - if subgrid.is_empty() { - None - } else { - Some(subgrid.mu2_grid().into_owned()) - } - }) - .flatten() - .flat_map(|Mu2 { fac, .. }| { - xi.iter() - .map(|(_, xif)| xif * xif * fac) - .collect::>() - }) - .collect(); - muf2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - muf2_grid.dedup(); - - self.alphas_cache = mur2_grid.iter().map(|&mur2| (self.alphas)(mur2)).collect(); - - self.mur2_grid = mur2_grid; - self.muf2_grid = muf2_grid; - self.x_grid = x_grid; - self.cc1 = cc1; - self.cc2 = cc2; - - Ok(()) - } - - /// Return the PDF (multiplied with `x`) for the first initial state. - pub fn xfx1(&mut self, pdg_id: i32, ix1: usize, imu2: usize) -> f64 { - let ix1 = self.ix1[ix1]; - let x = self.x_grid[ix1]; - if self.cc1 == 0 { - x - } else { - let imuf2 = self.imuf2[imu2]; - let muf2 = self.muf2_grid[imuf2]; - let pid = if self.cc1 == 1 { - pdg_id - } else { - pids::charge_conjugate_pdg_pid(pdg_id) - }; - let (xfx, xfx_cache) = match &mut self.pdfs { - Pdfs::One { xfx, xfx_cache, .. } => (xfx, xfx_cache), - Pdfs::Two { - xfx1, xfx1_cache, .. - } => (xfx1, xfx1_cache), - }; - *xfx_cache - .entry((pid, ix1, imuf2)) - .or_insert_with(|| xfx(pid, x, muf2)) - } - } - - /// Return the PDF (multiplied with `x`) for the second initial state. - pub fn xfx2(&mut self, pdg_id: i32, ix2: usize, imu2: usize) -> f64 { - let ix2 = self.ix2[ix2]; - let x = self.x_grid[ix2]; - if self.cc2 == 0 { - x - } else { - let imuf2 = self.imuf2[imu2]; - let muf2 = self.muf2_grid[imuf2]; - let pid = if self.cc2 == 1 { - pdg_id - } else { - pids::charge_conjugate_pdg_pid(pdg_id) - }; - let (xfx, xfx_cache) = match &mut self.pdfs { - Pdfs::One { xfx, xfx_cache, .. 
} => (xfx, xfx_cache), - Pdfs::Two { - xfx2, xfx2_cache, .. - } => (xfx2, xfx2_cache), - }; - *xfx_cache - .entry((pid, ix2, imuf2)) - .or_insert_with(|| xfx(pid, x, muf2)) - } - } - - /// Return the strong coupling for the renormalization scale set with [`LumiCache::set_grids`], - /// in the grid `mu2_grid` at the index `imu2`. - #[must_use] - pub fn alphas(&self, imu2: usize) -> f64 { - self.alphas_cache[self.imur2[imu2]] - } - - /// Clears the cache. - pub fn clear(&mut self) { - self.alphas_cache.clear(); - self.pdfs.clear(); - self.mur2_grid.clear(); - self.muf2_grid.clear(); - self.x_grid.clear(); - } - - /// Set the grids. - pub fn set_grids( - &mut self, - mu2_grid: &[Mu2], - x1_grid: &[f64], - x2_grid: &[f64], - xir: f64, - xif: f64, - ) { - self.imur2 = mu2_grid - .iter() - .map(|Mu2 { ren, .. }| { - self.mur2_grid - .iter() - .position(|&mur2| mur2 == xir * xir * ren) - .unwrap_or_else(|| unreachable!()) - }) - .collect(); - self.imuf2 = mu2_grid - .iter() - .map(|Mu2 { fac, .. }| { - self.muf2_grid - .iter() - .position(|&muf2| muf2 == xif * xif * fac) - .unwrap_or_else(|| unreachable!()) - }) - .collect(); - self.ix1 = x1_grid - .iter() - .map(|x1| { - self.x_grid - .iter() - .position(|x| x1 == x) - .unwrap_or_else(|| unreachable!()) - }) - .collect(); - - self.ix2 = x2_grid - .iter() - .map(|x2| { - self.x_grid - .iter() - .position(|x| x2 == x) - .unwrap_or_else(|| unreachable!()) - }) - .collect(); - } -} /// Data type that indentifies different types of convolutions. #[derive(Debug, Eq, PartialEq)] diff --git a/pineappl_v0/src/evolution.rs b/pineappl_v0/src/evolution.rs deleted file mode 100644 index 83e57c3d2..000000000 --- a/pineappl_v0/src/evolution.rs +++ /dev/null @@ -1,801 +0,0 @@ -//! Supporting classes and functions for [`Grid::evolve`]. 
- -use super::boc::{Channel, Order}; -use super::channel; -use super::convolutions::Convolution; -use super::grid::{Grid, GridError}; -use super::import_only_subgrid::ImportOnlySubgridV2; -use super::pids::PidBasis; -use super::sparse_array3::SparseArray3; -use super::subgrid::{Mu2, Subgrid, SubgridEnum}; -use float_cmp::approx_eq; -use itertools::izip; -use itertools::Itertools; -use ndarray::linalg; -use ndarray::{s, Array1, Array2, Array3, ArrayView1, ArrayView4, Axis}; -use std::iter; - -/// Number of ULPS used to de-duplicate grid values in [`Grid::evolve_info`]. -pub(crate) const EVOLVE_INFO_TOL_ULPS: i64 = 256; - -/// Number of ULPS used to search for grid values in this module. This value must be a large-enough -/// multiple of [`EVOLVE_INFO_TOL_ULPS`], because otherwise similar values are not found in -/// [`Grid::evolve`]. See for details. -const EVOLUTION_TOL_ULPS: i64 = 4 * EVOLVE_INFO_TOL_ULPS; - -/// This structure captures the information needed to create an evolution kernel operator (EKO) for -/// a specific [`Grid`]. -pub struct EvolveInfo { - /// Squared factorization scales of the `Grid`. - pub fac1: Vec, - /// Particle identifiers of the `Grid`. - pub pids1: Vec, - /// `x`-grid coordinates of the `Grid`. - pub x1: Vec, - /// Renormalization scales of the `Grid`. - pub ren1: Vec, -} - -/// Information about the evolution kernel operator (EKO) passed to [`Grid::evolve`] as `operator`, -/// which is used to convert a [`Grid`] into an [`FkTable`]. The dimensions of the EKO must -/// correspond to the values given in [`fac1`], [`pids0`], [`x0`], [`pids1`] and [`x1`], exactly in -/// this order. Members with a `1` are defined at the squared factorization scales given in -/// [`fac1`] (often called process scales) and are found in the [`Grid`] that [`Grid::evolve`] is -/// called with. 
Members with a `0` are defined at the squared factorization scale [`fac0`] (often -/// called fitting scale or starting scale) and are found in the [`FkTable`] resulting from -/// [`Grid::evolve`]. -/// -/// The EKO may convert a `Grid` from a basis given by the particle identifiers [`pids1`] to a -/// possibly different basis given by [`pids0`]. This basis must also be identified using -/// [`pid_basis`], which tells [`FkTable::convolve`] how to perform a convolution. The members -/// [`ren1`] and [`alphas`] must be the strong couplings given at the respective renormalization -/// scales. Finally, [`xir`] and [`xif`] can be used to vary the renormalization and factorization -/// scales, respectively, around their central values. -/// -/// [`FkTable::convolve`]: super::fk_table::FkTable::convolve -/// [`FkTable`]: super::fk_table::FkTable -/// [`alphas`]: Self::alphas -/// [`fac0`]: Self::fac0 -/// [`fac1`]: Self::fac1 -/// [`pid_basis`]: Self::pid_basis -/// [`pids0`]: Self::pids0 -/// [`pids1`]: Self::pids1 -/// [`ren1`]: Self::ren1 -/// [`x0`]: Self::x0 -/// [`x1`]: Self::x1 -/// [`xif`]: Self::xif -/// [`xir`]: Self::xir -pub struct OperatorInfo { - /// Squared factorization scale of the `FkTable`. - pub fac0: f64, - /// Particle identifiers of the `FkTable`. - pub pids0: Vec, - /// `x`-grid coordinates of the `FkTable` - pub x0: Vec, - /// Squared factorization scales of the `Grid`. - pub fac1: Vec, - /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than - /// given here, the contributions of them are silently ignored. - pub pids1: Vec, - /// `x`-grid coordinates of the `Grid`. - pub x1: Vec, - - /// Renormalization scales of the `Grid`. - pub ren1: Vec, - /// Strong couplings corresponding to the order given in [`ren1`](Self::ren1). - pub alphas: Vec, - /// Multiplicative factor for the central renormalization scale. - pub xir: f64, - /// Multiplicative factor for the central factorization scale. 
- pub xif: f64, - /// Particle ID basis for `FkTable`. - pub pid_basis: PidBasis, -} - -/// Information about the evolution kernel operator slice (EKO) passed to -/// [`Grid::evolve_with_slice_iter`](super::grid::Grid::evolve_with_slice_iter) as `operator`, -/// which is used to convert a [`Grid`] into an [`FkTable`](super::fk_table::FkTable). The -/// dimensions of the EKO must correspond to the values given in [`fac1`](Self::fac1), -/// [`pids0`](Self::pids0), [`x0`](Self::x0), [`pids1`](Self::pids1) and [`x1`](Self::x1), exactly -/// in this order. Members with a `1` are defined at the squared factorization scale given as -/// `fac1` (often called process scale) and are found in the [`Grid`] that -/// `Grid::evolve_with_slice_iter` is called with. Members with a `0` are defined at the squared -/// factorization scale [`fac0`](Self::fac0) (often called fitting scale or starting scale) and are -/// found in the `FkTable` resulting from [`Grid::evolve`]. -/// -/// The EKO slice may convert a `Grid` from a basis given by the particle identifiers `pids1` to a -/// possibly different basis given by `pids0`. This basis must also be identified using -/// [`pid_basis`](Self::pid_basis), which tells -/// [`FkTable::convolve`](super::fk_table::FkTable::convolve) how to perform a convolution. -#[derive(Clone)] -pub struct OperatorSliceInfo { - /// Squared factorization scale of the `FkTable`. - pub fac0: f64, - /// Particle identifiers of the `FkTable`. - pub pids0: Vec, - /// `x`-grid coordinates of the `FkTable` - pub x0: Vec, - /// Squared factorization scale of the slice of `Grid` that should be evolved. - pub fac1: f64, - /// Particle identifiers of the `Grid`. If the `Grid` contains more particle identifiers than - /// given here, the contributions of them are silently ignored. - pub pids1: Vec, - /// `x`-grid coordinates of the `Grid`. - pub x1: Vec, - - /// Particle ID basis for `FkTable`. 
- pub pid_basis: PidBasis, -} - -/// A mapping of squared renormalization scales in `ren1` to strong couplings in `alphas`. The -/// ordering of both members defines the mapping. -pub struct AlphasTable { - /// Renormalization scales of the `Grid`. - pub ren1: Vec, - /// Strong couplings corresponding to the order given in [`ren1`](Self::ren1). - pub alphas: Vec, -} - -impl AlphasTable { - /// Create an `AlphasTable` for `grid`, varying the renormalization scale by `xir` for the - /// strong couplings given by `alphas`. The only argument of `alphas` must be the squared - /// renormalization scale. - pub fn from_grid(grid: &Grid, xir: f64, alphas: &dyn Fn(f64) -> f64) -> Self { - let mut ren1: Vec<_> = grid - .subgrids() - .iter() - .flat_map(|subgrid| { - subgrid - .mu2_grid() - .iter() - .map(|Mu2 { ren, .. }| xir * xir * ren) - .collect::>() - }) - .collect(); - // UNWRAP: if we can't sort numbers the grid is fishy - ren1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - ren1.dedup(); - let ren1 = ren1; - let alphas: Vec<_> = ren1.iter().map(|&mur2| alphas(mur2)).collect(); - - Self { ren1, alphas } - } -} - -fn gluon_has_pid_zero(grid: &Grid) -> bool { - // if there are any PID zero particles ... 
- grid.channels() - .iter() - .any(|entry| entry.entry().iter().any(|&(a, b, _)| (a == 0) || (b == 0))) - // and if the particle IDs are encoded using PDG MC IDs - && grid.pid_basis() == PidBasis::Pdg -} - -type Pid01IndexTuples = Vec<(usize, usize)>; -type Pid01Tuples = Vec<(i32, i32)>; - -fn pid_slices( - operator: &ArrayView4, - info: &OperatorSliceInfo, - gluon_has_pid_zero: bool, - pid1_nonzero: &dyn Fn(i32) -> bool, -) -> Result<(Pid01IndexTuples, Pid01Tuples), GridError> { - // list of all non-zero PID indices - let pid_indices: Vec<_> = (0..operator.dim().2) - .cartesian_product(0..operator.dim().0) - .filter(|&(pid0_idx, pid1_idx)| { - // 1) at least one element of the operator must be non-zero, and 2) the pid must be - // contained in some channel - operator - .slice(s![pid1_idx, .., pid0_idx, ..]) - .iter() - .any(|&value| value != 0.0) - && pid1_nonzero(if gluon_has_pid_zero && info.pids1[pid1_idx] == 21 { - 0 - } else { - info.pids1[pid1_idx] - }) - }) - .collect(); - - if pid_indices.is_empty() { - return Err(GridError::EvolutionFailure( - "no non-zero operator found; result would be an empty FkTable".to_owned(), - )); - } - - // list of all non-zero (pid0, pid1) combinations - let pids = pid_indices - .iter() - .map(|&(pid0_idx, pid1_idx)| { - ( - info.pids0[pid0_idx], - if gluon_has_pid_zero && info.pids1[pid1_idx] == 21 { - 0 - } else { - info.pids1[pid1_idx] - }, - ) - }) - .collect(); - - Ok((pid_indices, pids)) -} - -fn channels0_with_one(pids: &[(i32, i32)]) -> Vec { - let mut pids0: Vec<_> = pids.iter().map(|&(pid0, _)| pid0).collect(); - pids0.sort_unstable(); - pids0.dedup(); - - pids0 -} - -fn operator_slices( - operator: &ArrayView4, - info: &OperatorSliceInfo, - pid_indices: &[(usize, usize)], - x1: &[f64], -) -> Result>, GridError> { - // permutation between the grid x values and the operator x1 values - let x1_indices: Vec<_> = x1 - .iter() - .map(|&x1p| { - info.x1 - .iter() - .position(|&x1| approx_eq!(f64, x1p, x1, ulps = 
EVOLUTION_TOL_ULPS)) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for x = {x1p} found")) - }) - }) - // TODO: use `try_collect` once stabilized - .collect::>()?; - - // create the corresponding operators accessible in the form [muf2, x0, x1] - let operators: Vec<_> = pid_indices - .iter() - .map(|&(pid0_idx, pid1_idx)| { - operator - .slice(s![pid1_idx, .., pid0_idx, ..]) - .select(Axis(0), &x1_indices) - .reversed_axes() - .as_standard_layout() - .into_owned() - }) - .collect(); - - Ok(operators) -} - -type X1aX1bOp2Tuple = (Vec>, Option>); - -fn ndarray_from_subgrid_orders_slice( - fac1: f64, - subgrids: &ArrayView1, - orders: &[Order], - order_mask: &[bool], - (xir, xif): (f64, f64), - alphas_table: &AlphasTable, -) -> Result { - // TODO: skip empty subgrids - - let mut x1_a: Vec<_> = subgrids - .iter() - .enumerate() - .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) - .flat_map(|(_, subgrid)| subgrid.x1_grid().into_owned()) - .collect(); - let mut x1_b: Vec<_> = subgrids - .iter() - .enumerate() - .filter(|(index, _)| order_mask.get(*index).copied().unwrap_or(true)) - .flat_map(|(_, subgrid)| subgrid.x2_grid().into_owned()) - .collect(); - - x1_a.sort_by(f64::total_cmp); - x1_a.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); - x1_b.sort_by(f64::total_cmp); - x1_b.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLUTION_TOL_ULPS)); - - let mut array = Array2::::zeros((x1_a.len(), x1_b.len())); - let mut zero = true; - - // add subgrids for different orders, but the same bin and lumi, using the right - // couplings - for (subgrid, order) in subgrids - .iter() - .zip(orders.iter()) - .zip(order_mask.iter().chain(iter::repeat(&true))) - .filter_map(|((subgrid, order), &enabled)| { - (enabled && !subgrid.is_empty()).then_some((subgrid, order)) - }) - { - let mut logs = 1.0; - - if order.logxir > 0 { - if approx_eq!(f64, xir, 1.0, ulps = 4) { - continue; - } - - logs *= (xir * xir).ln(); - } - - if 
order.logxif > 0 { - if approx_eq!(f64, xif, 1.0, ulps = 4) { - continue; - } - - logs *= (xif * xif).ln(); - } - - // TODO: use `try_collect` once stabilized - let xa_indices: Vec<_> = subgrid - .x1_grid() - .iter() - .map(|&xa| { - x1_a.iter() - .position(|&x1a| approx_eq!(f64, x1a, xa, ulps = EVOLUTION_TOL_ULPS)) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for x1 = {xa} found")) - }) - }) - .collect::>()?; - let xb_indices: Vec<_> = subgrid - .x2_grid() - .iter() - .map(|&xb| { - x1_b.iter() - .position(|&x1b| approx_eq!(f64, x1b, xb, ulps = EVOLUTION_TOL_ULPS)) - .ok_or_else(|| { - GridError::EvolutionFailure(format!("no operator for x1 = {xb} found")) - }) - }) - .collect::>()?; - - for ((ifac1, ix1, ix2), value) in subgrid.indexed_iter() { - let Mu2 { ren, fac } = subgrid.mu2_grid()[ifac1]; - - if !approx_eq!(f64, xif * xif * fac, fac1, ulps = EVOLUTION_TOL_ULPS) { - continue; - } - - let mur2 = xir * xir * ren; - - let als = if order.alphas == 0 { - 1.0 - } else if let Some(alphas) = alphas_table - .ren1 - .iter() - .zip(alphas_table.alphas.iter()) - .find_map(|(&ren1, &alphas)| { - approx_eq!(f64, ren1, mur2, ulps = EVOLUTION_TOL_ULPS).then(|| alphas) - }) - { - alphas.powi(order.alphas.try_into().unwrap()) - } else { - return Err(GridError::EvolutionFailure(format!( - "no alphas for mur2 = {mur2} found" - ))); - }; - - zero = false; - - array[[xa_indices[ix1], xb_indices[ix2]]] += als * logs * value; - } - } - - Ok((vec![x1_a, x1_b], (!zero).then_some(array))) -} - -pub(crate) fn evolve_slice_with_one( - grid: &Grid, - operator: &ArrayView4, - info: &OperatorSliceInfo, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { - let gluon_has_pid_zero = gluon_has_pid_zero(grid); - let has_pdf1 = grid.convolutions()[0] != Convolution::None; - - let (pid_indices, pids) = pid_slices(operator, info, gluon_has_pid_zero, &|pid| { - grid.channels() - .iter() - .flat_map(Channel::entry) 
- .any(|&(a, b, _)| if has_pdf1 { a } else { b } == pid) - })?; - - let channels0 = channels0_with_one(&pids); - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); - let new_axis = if has_pdf1 { 2 } else { 1 }; - - let mut last_x1 = Vec::new(); - let mut ops = Vec::new(); - - for subgrids_ol in grid.subgrids().axis_iter(Axis(1)) { - let mut tables = vec![Array1::zeros(info.x0.len()); channels0.len()]; - - for (subgrids_o, channel1) in subgrids_ol.axis_iter(Axis(1)).zip(grid.channels()) { - let (mut x1, array) = ndarray_from_subgrid_orders_slice( - info.fac1, - &subgrids_o, - grid.orders(), - order_mask, - xi, - alphas_table, - )?; - - // skip over zero arrays to speed up evolution and avoid problems with NaNs - let Some(array) = array else { - continue; - }; - - let x1 = if has_pdf1 { x1.remove(0) } else { x1.remove(1) }; - - if x1.is_empty() { - continue; - } - - if (last_x1.len() != x1.len()) - || last_x1 - .iter() - .zip(x1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) - { - ops = operator_slices(operator, info, &pid_indices, &x1)?; - last_x1 = x1; - } - - for (&pid1, &factor) in - channel1 - .entry() - .iter() - .map(|(a, b, f)| if has_pdf1 { (a, f) } else { (b, f) }) - { - for (fk_table, op) in - channels0 - .iter() - .zip(tables.iter_mut()) - .filter_map(|(&pid0, fk_table)| { - pids.iter() - .zip(ops.iter()) - .find_map(|(&(p0, p1), op)| { - (p0 == pid0 && p1 == pid1).then_some(op) - }) - .map(|op| (fk_table, op)) - }) - { - fk_table.scaled_add(factor, &op.dot(&array.index_axis(Axis(new_axis - 1), 0))); - } - } - } - - sub_fk_tables.extend(tables.into_iter().map(|table| { - ImportOnlySubgridV2::new( - SparseArray3::from_ndarray( - table - .insert_axis(Axis(0)) - .insert_axis(Axis(new_axis)) - .view(), - 0, - 1, - ), - vec![Mu2 { - // TODO: FK tables don't depend on the renormalization scale - //ren: -1.0, - ren: info.fac0, - fac: info.fac0, - }], - if has_pdf1 { info.x0.clone() } else { 
vec![1.0] }, - if has_pdf1 { vec![1.0] } else { info.x0.clone() }, - ) - .into() - })); - } - - let pid = if grid.convolutions()[0] == Convolution::None { - grid.channels()[0].entry()[0].0 - } else { - grid.channels()[0].entry()[0].1 - }; - - Ok(( - Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), channels0.len())) - .unwrap(), - channels0 - .iter() - .map(|&a| { - channel![ - if has_pdf1 { a } else { pid }, - if has_pdf1 { pid } else { a }, - 1.0 - ] - }) - .collect(), - )) -} - -pub(crate) fn evolve_slice_with_two( - grid: &Grid, - operator: &ArrayView4, - info: &OperatorSliceInfo, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { - let gluon_has_pid_zero = gluon_has_pid_zero(grid); - - // TODO: generalize by iterating up to `n` - let (pid_indices, pids01): (Vec<_>, Vec<_>) = (0..2) - .map(|d| { - pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { - grid.channels() - .iter() - .flat_map(Channel::entry) - .any(|tuple| match d { - // TODO: `Channel::entry` should return a tuple of a `Vec` and an `f64` - 0 => tuple.0 == pid1, - 1 => tuple.1 == pid1, - _ => unreachable!(), - }) - }) - }) - .collect::, _>>()? 
- .into_iter() - .unzip(); - - let mut channels0: Vec<_> = pids01 - .iter() - .map(|pids| pids.iter().map(|&(pid0, _)| pid0)) - .multi_cartesian_product() - .collect(); - channels0.sort_unstable(); - channels0.dedup(); - let channels0 = channels0; - - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); - - // TODO: generalize to `n` - let mut last_x1 = vec![Vec::new(); 2]; - let mut operators = vec![Vec::new(); 2]; - - for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) { - let mut tables = vec![Array2::zeros((info.x0.len(), info.x0.len())); channels0.len()]; - - for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) { - let (x1, array) = ndarray_from_subgrid_orders_slice( - info.fac1, - &subgrids_o, - grid.orders(), - order_mask, - xi, - alphas_table, - )?; - - // skip over zero arrays to speed up evolution and avoid problems with NaNs - let Some(array) = array else { - continue; - }; - - for (last_x1, x1, pid_indices, operators) in - izip!(&mut last_x1, x1, &pid_indices, &mut operators) - { - if (last_x1.len() != x1.len()) - || last_x1 - .iter() - .zip(x1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) - { - *operators = operator_slices(operator, info, pid_indices, &x1)?; - *last_x1 = x1; - } - } - - let mut tmp = Array2::zeros((last_x1[0].len(), info.x0.len())); - - for (pids1, factor) in channel1 - .entry() - .iter() - .map(|&(pida1, pidb1, factor)| ([pida1, pidb1], factor)) - { - for (fk_table, ops) in - channels0 - .iter() - .zip(tables.iter_mut()) - .filter_map(|(pids0, fk_table)| { - izip!(pids0, &pids1, &pids01, &operators) - .map(|(&pid0, &pid1, pids, operators)| { - pids.iter().zip(operators).find_map(|(&(p0, p1), op)| { - ((p0 == pid0) && (p1 == pid1)).then_some(op) - }) - }) - // TODO: avoid using `collect` - .collect::>>() - .map(|ops| (fk_table, ops)) - }) - { - linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); - 
linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, fk_table); - } - } - } - - sub_fk_tables.extend(tables.into_iter().map(|table| { - ImportOnlySubgridV2::new( - SparseArray3::from_ndarray(table.insert_axis(Axis(0)).view(), 0, 1), - vec![Mu2 { - // TODO: FK tables don't depend on the renormalization scale - //ren: -1.0, - ren: info.fac0, - fac: info.fac0, - }], - info.x0.clone(), - info.x0.clone(), - ) - .into() - })); - } - - Ok(( - Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), channels0.len())) - .unwrap(), - channels0 - .iter() - .map(|c| channel![c[0], c[1], 1.0]) - .collect(), - )) -} - -pub(crate) fn evolve_slice_with_two2( - grid: &Grid, - operators: &[ArrayView4], - infos: &[OperatorSliceInfo], - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, -) -> Result<(Array3, Vec), GridError> { - let gluon_has_pid_zero = gluon_has_pid_zero(grid); - - // TODO: implement matching of different scales for different EKOs - let mut fac1_scales: Vec<_> = infos.iter().map(|info| info.fac1).collect(); - fac1_scales.sort_by(f64::total_cmp); - assert!(fac1_scales.windows(2).all(|scales| approx_eq!( - f64, - scales[0], - scales[1], - ulps = EVOLUTION_TOL_ULPS - ))); - let fac1 = fac1_scales[0]; - - // TODO: generalize by iterating up to `n` - let (pid_indices, pids01): (Vec<_>, Vec<_>) = izip!(0..2, operators, infos) - .map(|(d, operator, info)| { - pid_slices(operator, info, gluon_has_pid_zero, &|pid1| { - grid.channels() - .iter() - .flat_map(Channel::entry) - .any(|tuple| match d { - // TODO: `Channel::entry` should return a tuple of a `Vec` and an `f64` - 0 => tuple.0 == pid1, - 1 => tuple.1 == pid1, - _ => unreachable!(), - }) - }) - }) - .collect::, _>>()? 
- .into_iter() - .unzip(); - - let mut channels0: Vec<_> = pids01 - .iter() - .map(|pids| pids.iter().map(|&(pid0, _)| pid0)) - .multi_cartesian_product() - .collect(); - channels0.sort_unstable(); - channels0.dedup(); - let channels0 = channels0; - - let mut sub_fk_tables = Vec::with_capacity(grid.bin_info().bins() * channels0.len()); - - // TODO: generalize to `n` - let mut last_x1 = vec![Vec::new(); 2]; - let mut eko_slices = vec![Vec::new(); 2]; - - for subgrids_oc in grid.subgrids().axis_iter(Axis(1)) { - assert_eq!(infos[0].x0.len(), infos[1].x0.len()); - - let mut tables = - vec![Array2::zeros((infos[0].x0.len(), infos[1].x0.len())); channels0.len()]; - - for (subgrids_o, channel1) in subgrids_oc.axis_iter(Axis(1)).zip(grid.channels()) { - let (x1, array) = ndarray_from_subgrid_orders_slice( - fac1, - &subgrids_o, - grid.orders(), - order_mask, - xi, - alphas_table, - )?; - - // skip over zero arrays to speed up evolution and avoid problems with NaNs - let Some(array) = array else { - continue; - }; - - for (last_x1, x1, pid_indices, slices, operator, info) in izip!( - &mut last_x1, - x1, - &pid_indices, - &mut eko_slices, - operators, - infos - ) { - if (last_x1.len() != x1.len()) - || last_x1 - .iter() - .zip(x1.iter()) - .any(|(&lhs, &rhs)| !approx_eq!(f64, lhs, rhs, ulps = EVOLUTION_TOL_ULPS)) - { - *slices = operator_slices(operator, info, pid_indices, &x1)?; - *last_x1 = x1; - } - } - - let mut tmp = Array2::zeros((last_x1[0].len(), infos[1].x0.len())); - - for (pids1, factor) in channel1 - .entry() - .iter() - .map(|&(pida1, pidb1, factor)| ([pida1, pidb1], factor)) - { - for (fk_table, ops) in - channels0 - .iter() - .zip(tables.iter_mut()) - .filter_map(|(pids0, fk_table)| { - izip!(pids0, &pids1, &pids01, &eko_slices) - .map(|(&pid0, &pid1, pids, slices)| { - pids.iter().zip(slices).find_map(|(&(p0, p1), op)| { - ((p0 == pid0) && (p1 == pid1)).then_some(op) - }) - }) - // TODO: avoid using `collect` - .collect::>>() - .map(|ops| (fk_table, ops)) - 
}) - { - // tmp = array * ops[1]^T - linalg::general_mat_mul(1.0, &array, &ops[1].t(), 0.0, &mut tmp); - // fk_table += factor * ops[0] * tmp - linalg::general_mat_mul(factor, ops[0], &tmp, 1.0, fk_table); - } - } - } - - sub_fk_tables.extend(tables.into_iter().map(|table| { - ImportOnlySubgridV2::new( - SparseArray3::from_ndarray(table.insert_axis(Axis(0)).view(), 0, 1), - vec![Mu2 { - // TODO: FK tables don't depend on the renormalization scale - //ren: -1.0, - ren: infos[0].fac0, - fac: infos[0].fac0, - }], - infos[0].x0.clone(), - infos[1].x0.clone(), - ) - .into() - })); - } - - Ok(( - Array1::from_iter(sub_fk_tables) - .into_shape((1, grid.bin_info().bins(), channels0.len())) - .unwrap(), - channels0 - .iter() - .map(|c| channel![c[0], c[1], 1.0]) - .collect(), - )) -} diff --git a/pineappl_v0/src/fk_table.rs b/pineappl_v0/src/fk_table.rs deleted file mode 100644 index b7d045194..000000000 --- a/pineappl_v0/src/fk_table.rs +++ /dev/null @@ -1,441 +0,0 @@ -//! Provides the [`FkTable`] type. - -use super::boc::Order; -use super::convolutions::{Convolution, LumiCache}; -use super::grid::{Grid, GridError}; -use super::subgrid::Subgrid; -use float_cmp::approx_eq; -use ndarray::Array4; -use std::collections::HashMap; -use std::fmt::{self, Display, Formatter}; -use std::io::Write; -use std::str::FromStr; -use thiserror::Error; - -/// Structure implementing FK tables. These are special [`Grid`]s, for which the following -/// additional guarantees are given: -/// -/// - all subgrids of the grid evaluate the PDFs at a single factorization scale given by -/// [`FkTable::muf2`]. -/// - all subgrids, for both hadronic initial states (if both initial states are hadronic), share -/// the same `x` grid. See [`FkTable::x_grid`]. -/// - the channel definitions are *simple*, meaning that every entry consists of a single pair of -/// partons with trivial factor `1.0`, and all tuples are distinct from each other. See -/// [`Grid::channels`]. 
-/// - the FK table's grid contains only a single [`Order`], whose exponents are all zero. -#[repr(transparent)] -pub struct FkTable { - grid: Grid, -} - -/// The error type returned when a conversion of a [`Grid`] to an [`FkTable`] fails. -#[derive(Debug, Error)] -pub enum TryFromGridError { - /// Error if the grid contains multiple scales instead of a single one. - #[error("multiple scales detected")] - MultipleScales, - /// Error if the channels are not simple. - #[error("complicated channel function detected")] - InvalidChannel, - /// Error if the order of the grid was not a single one with all zeros in the exponents. - #[error("multiple orders detected")] - NonTrivialOrder, -} - -/// The optimization assumptions for an [`FkTable`], needed for [`FkTable::optimize`]. Since FK -/// tables are typically stored at very small `Q2 = Q0`, the PDFs `f(x,Q0)` of heavy quarks are -/// typically set to zero at this scale or set to the same value as their anti-quark PDF. This is -/// used to optimize the size of FK tables. -#[derive(Debug, Clone, Copy, Eq, PartialEq)] -pub enum FkAssumptions { - /// All quark PDFs are non-zero at the FK table scale and completely independent. - Nf6Ind, - /// Like [`Nf6Ind`](Self::Nf6Ind), but the PDFs of top and anti-top quarks are the same at FK - /// table scale. - Nf6Sym, - /// Like [`Nf6Ind`](Self::Nf6Ind), but the PDFs of top and anti-top quarks are zero at FK table - /// scale. - Nf5Ind, - /// Like [`Nf5Ind`](Self::Nf5Ind), but the PDFs of bottom and anti-bottom quarks are the same - /// at FK table scale. - Nf5Sym, - /// Like [`Nf5Ind`](Self::Nf5Ind), but the PDFs of bottom and anti-bottom quarks are zero at FK - /// table scale. - Nf4Ind, - /// Like [`Nf4Ind`](Self::Nf4Ind), but the PDFs of charm and anti-charm quarks are the same at - /// FK table scale. PDF sets that make this assumption are NNPDF4.0 and NNPDF3.1 at fitting - /// scale. 
- Nf4Sym, - /// Like [`Nf4Ind`](Self::Nf4Ind), but the PDFs of charm and anti-charm quarks are zero at FK - /// table scale. PDF sets that make this assumption are MSHT20 and NNPDF3.0 at fitting scale. - Nf3Ind, - /// Like [`Nf3Ind`](Self::Nf3Ind), but the PDFs of strange and anti-strange are the same at FK - /// table scale. A PDF set that makes this assumption is CT18 at fitting scale. - Nf3Sym, -} - -/// Error type when trying to construct [`FkAssumptions`] with a string. -#[derive(Debug, Eq, Error, PartialEq)] -#[error("unknown variant for FkAssumptions: {variant}")] -pub struct UnknownFkAssumption { - variant: String, -} - -impl Display for FkAssumptions { - fn fmt(&self, f: &mut Formatter) -> fmt::Result { - write!( - f, - "{}", - match self { - Self::Nf6Ind => "Nf6Ind", - Self::Nf6Sym => "Nf6Sym", - Self::Nf5Ind => "Nf5Ind", - Self::Nf5Sym => "Nf5Sym", - Self::Nf4Ind => "Nf4Ind", - Self::Nf4Sym => "Nf4Sym", - Self::Nf3Ind => "Nf3Ind", - Self::Nf3Sym => "Nf3Sym", - } - ) - } -} - -impl FromStr for FkAssumptions { - type Err = UnknownFkAssumption; - - fn from_str(s: &str) -> Result { - Ok(match s { - "Nf6Ind" => Self::Nf6Ind, - "Nf6Sym" => Self::Nf6Sym, - "Nf5Ind" => Self::Nf5Ind, - "Nf5Sym" => Self::Nf5Sym, - "Nf4Ind" => Self::Nf4Ind, - "Nf4Sym" => Self::Nf4Sym, - "Nf3Ind" => Self::Nf3Ind, - "Nf3Sym" => Self::Nf3Sym, - _ => { - return Err(UnknownFkAssumption { - variant: s.to_owned(), - }); - } - }) - } -} - -impl FkTable { - /// Returns the [`Grid`] object for this `FkTable`. - #[must_use] - pub const fn grid(&self) -> &Grid { - &self.grid - } - - // TODO: when trying to convert the following function to `const` as per clippy's suggestion, - // the compiler errors out with: 'the destructor for this type cannot be evaluated in constant - // functions' - - /// Converts the `FkTable` back to a [`Grid`]. 
- #[must_use] - pub fn into_grid(self) -> Grid { - self.grid - } - - /// Returns the FK table represented as a four-dimensional array indexed by `bin`, `channel`, - /// `x1` and `x2`, in this order. - /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn table(&self) -> Array4 { - let has_pdf1 = self.grid.convolutions()[0] != Convolution::None; - let has_pdf2 = self.grid.convolutions()[1] != Convolution::None; - let x_grid = self.x_grid(); - - let mut result = Array4::zeros(( - self.bins(), - self.grid.channels().len(), - if has_pdf1 { x_grid.len() } else { 1 }, - if has_pdf2 { x_grid.len() } else { 1 }, - )); - - for ((_, bin, channel), subgrid) in self.grid().subgrids().indexed_iter() { - let indices1 = if has_pdf1 { - subgrid - .x1_grid() - .iter() - .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) - .collect::>() - .unwrap() - } else { - vec![0] - }; - let indices2 = if has_pdf2 { - subgrid - .x2_grid() - .iter() - .map(|&s| x_grid.iter().position(|&x| approx_eq!(f64, s, x, ulps = 2))) - .collect::>() - .unwrap() - } else { - vec![0] - }; - - for ((_, ix1, ix2), value) in subgrid.indexed_iter() { - result[[bin, channel, indices1[ix1], indices2[ix2]]] = value; - } - } - - result - } - - /// Returns the number of bins for this `FkTable`. - #[must_use] - pub fn bins(&self) -> usize { - self.grid.bin_info().bins() - } - - /// Extract the normalizations for each bin. - #[must_use] - pub fn bin_normalizations(&self) -> Vec { - self.grid.bin_info().normalizations() - } - - /// Extract the number of dimensions for bins. - #[must_use] - pub fn bin_dimensions(&self) -> usize { - self.grid.bin_info().dimensions() - } - - /// Extract the left edges of a specific bin dimension. - #[must_use] - pub fn bin_left(&self, dimension: usize) -> Vec { - self.grid.bin_info().left(dimension) - } - - /// Extract the right edges of a specific bin dimension. 
- #[must_use] - pub fn bin_right(&self, dimension: usize) -> Vec { - self.grid.bin_info().right(dimension) - } - - /// Access meta data - #[must_use] - pub const fn key_values(&self) -> Option<&HashMap> { - self.grid.key_values() - } - - /// Return the channel definition for this `FkTable`. All factors are `1.0`. - #[must_use] - pub fn channels(&self) -> Vec<(i32, i32)> { - self.grid - .channels() - .iter() - .map(|entry| (entry.entry()[0].0, entry.entry()[0].1)) - .collect() - } - - /// Returns the single `muf2` scale of this `FkTable`. - #[must_use] - pub fn muf2(&self) -> f64 { - if let &[muf2] = &self.grid.evolve_info(&[true]).fac1[..] { - muf2 - } else { - // every `FkTable` has only a single factorization scale - unreachable!() - } - } - - /// Returns the x grid that all subgrids for all hadronic initial states share. - #[must_use] - pub fn x_grid(&self) -> Vec { - self.grid.evolve_info(&[true]).x1 - } - - /// Propagate write to grid - /// - /// # Errors - /// - /// TODO - pub fn write(&self, writer: impl Write) -> Result<(), GridError> { - self.grid.write(writer) - } - - /// Propagate `write_lz4` to `Grid`. - /// - /// # Errors - /// - /// See [`Grid::write_lz4`]. - pub fn write_lz4(&self, writer: impl Write) -> Result<(), GridError> { - self.grid.write_lz4(writer) - } - - /// Convolve the FK-table. This method has fewer arguments than [`Grid::convolve`], because - /// FK-tables have all orders merged together and do not support scale variations. - pub fn convolve( - &self, - lumi_cache: &mut LumiCache, - bin_indices: &[usize], - channel_mask: &[bool], - ) -> Vec { - self.grid - .convolve(lumi_cache, &[], bin_indices, channel_mask, &[(1.0, 1.0)]) - } - - /// Set a metadata key-value pair - pub fn set_key_value(&mut self, key: &str, value: &str) { - self.grid.set_key_value(key, value); - } - - /// Optimizes the storage of FK tables based of assumptions of the PDFs at the FK table's - /// scale. 
- /// - /// # Panics - /// - /// TODO - pub fn optimize(&mut self, assumptions: FkAssumptions) { - let mut add = Vec::new(); - - match assumptions { - FkAssumptions::Nf6Ind => { - // nothing to do here - } - FkAssumptions::Nf6Sym => { - add.push((235, 200)); - } - FkAssumptions::Nf5Ind => { - add.extend_from_slice(&[(235, 200), (135, 100)]); - } - FkAssumptions::Nf5Sym => { - add.extend_from_slice(&[(235, 200), (135, 100), (224, 200)]); - } - FkAssumptions::Nf4Ind => { - add.extend_from_slice(&[(235, 200), (135, 100), (224, 200), (124, 100)]); - } - FkAssumptions::Nf4Sym => { - add.extend_from_slice(&[ - (235, 200), - (135, 100), - (224, 200), - (124, 100), - (215, 200), - ]); - } - FkAssumptions::Nf3Ind => { - add.extend_from_slice(&[ - (235, 200), - (135, 100), - (224, 200), - (124, 100), - (215, 200), - (115, 100), - ]); - } - FkAssumptions::Nf3Sym => { - add.extend_from_slice(&[ - (235, 200), - (135, 100), - (224, 200), - (124, 100), - (215, 200), - (115, 100), - (208, 200), - ]); - } - } - - self.grid.rewrite_channels(&add, &[]); - - // store the assumption so that we can check it later on - self.grid - .set_key_value("fk_assumptions", &assumptions.to_string()); - self.grid.optimize(); - } -} - -impl TryFrom for FkTable { - type Error = TryFromGridError; - - fn try_from(grid: Grid) -> Result { - let mut muf2 = -1.0; - - if grid.orders() - != [Order { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }] - { - return Err(TryFromGridError::NonTrivialOrder); - } - - for subgrid in grid.subgrids() { - if subgrid.is_empty() { - continue; - } - - let mu2_grid = subgrid.mu2_grid(); - - if mu2_grid.len() > 1 { - return Err(TryFromGridError::MultipleScales); - } - - if muf2 < 0.0 { - muf2 = mu2_grid[0].fac; - } else if muf2 != mu2_grid[0].fac { - return Err(TryFromGridError::MultipleScales); - } - } - - for channel in grid.channels() { - let entry = channel.entry(); - - if entry.len() != 1 || entry[0].2 != 1.0 { - return Err(TryFromGridError::InvalidChannel); - } - } 
- - if (1..grid.channels().len()) - .any(|i| grid.channels()[i..].contains(&grid.channels()[i - 1])) - { - return Err(TryFromGridError::InvalidChannel); - } - - Ok(Self { grid }) - } -} - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn fk_assumptions_try_from() { - assert_eq!(FkAssumptions::from_str("Nf6Ind"), Ok(FkAssumptions::Nf6Ind)); - assert_eq!(FkAssumptions::from_str("Nf6Sym"), Ok(FkAssumptions::Nf6Sym)); - assert_eq!(FkAssumptions::from_str("Nf5Ind"), Ok(FkAssumptions::Nf5Ind)); - assert_eq!(FkAssumptions::from_str("Nf5Sym"), Ok(FkAssumptions::Nf5Sym)); - assert_eq!(FkAssumptions::from_str("Nf4Ind"), Ok(FkAssumptions::Nf4Ind)); - assert_eq!(FkAssumptions::from_str("Nf4Sym"), Ok(FkAssumptions::Nf4Sym)); - assert_eq!(FkAssumptions::from_str("Nf3Ind"), Ok(FkAssumptions::Nf3Ind)); - assert_eq!(FkAssumptions::from_str("Nf3Sym"), Ok(FkAssumptions::Nf3Sym)); - assert_eq!( - FkAssumptions::from_str("XXXXXX"), - Err(UnknownFkAssumption { - variant: "XXXXXX".to_owned() - }) - ); - } - - #[test] - fn fk_assumptions_display() { - assert_eq!(format!("{}", FkAssumptions::Nf6Ind), "Nf6Ind"); - assert_eq!(format!("{}", FkAssumptions::Nf6Sym), "Nf6Sym"); - assert_eq!(format!("{}", FkAssumptions::Nf5Ind), "Nf5Ind"); - assert_eq!(format!("{}", FkAssumptions::Nf5Sym), "Nf5Sym"); - assert_eq!(format!("{}", FkAssumptions::Nf4Ind), "Nf4Ind"); - assert_eq!(format!("{}", FkAssumptions::Nf4Sym), "Nf4Sym"); - assert_eq!(format!("{}", FkAssumptions::Nf3Ind), "Nf3Ind"); - assert_eq!(format!("{}", FkAssumptions::Nf3Sym), "Nf3Sym"); - } -} diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index 386ac30f2..d7957689f 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -2,25 +2,18 @@ use super::bin::{BinInfo, BinLimits, BinRemapper}; use super::boc::{Channel, Order}; -use super::convolutions::{Convolution, LumiCache}; +use super::convolutions::Convolution; use super::empty_subgrid::EmptySubgridV1; -use super::evolution::{self, AlphasTable, 
EvolveInfo, OperatorInfo, OperatorSliceInfo}; -use super::fk_table::FkTable; -use super::import_only_subgrid::ImportOnlySubgridV2; -use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; -use super::ntuple_subgrid::NtupleSubgridV1; use super::pids::{self, PidBasis}; -use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridEnum, SubgridParams}; +use super::subgrid::{Subgrid, SubgridEnum, SubgridParams}; use bitflags::bitflags; -use float_cmp::{approx_eq, assert_approx_eq}; use git_version::git_version; -use lz4_flex::frame::{FrameDecoder, FrameEncoder}; -use ndarray::{s, Array3, ArrayView3, ArrayView5, ArrayViewMut3, Axis, CowArray, Dimension, Ix4}; +use lz4_flex::frame::FrameDecoder; +use ndarray::{Array3, ArrayView3}; use serde::{Deserialize, Serialize, Serializer}; use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; -use std::io::{self, BufRead, BufReader, BufWriter, Read, Write}; -use std::iter; +use std::io::{self, BufRead, BufReader, Read}; use std::mem; use std::ops::Range; use thiserror::Error; @@ -140,33 +133,6 @@ impl Default for Mmv2 { } } -impl Mmv3 { - fn new(subgrid_template: SubgridEnum) -> Self { - Self { - remapper: None, - key_value_db: [ - ( - "pineappl_gitversion".to_owned(), - git_version!( - args = ["--always", "--dirty", "--long", "--tags"], - cargo_prefix = "cargo:", - fallback = "unknown" - ) - .to_owned(), - ), - // by default we assume there are unpolarized protons in the initial state - // do not change these to the new metadata to not break backwards compatibility - ("initial_state_1".to_owned(), "2212".to_owned()), - ("initial_state_2".to_owned(), "2212".to_owned()), - ] - .iter() - .cloned() - .collect(), - subgrid_template, - } - } -} - // ALLOW: fixing the warning will break the file format #[allow(clippy::large_enum_variant)] #[derive(Clone, Deserialize, Serialize)] @@ -224,70 +190,6 @@ pub struct Grid { } impl Grid { - /// Constructor. 
- #[must_use] - pub fn new( - channels: Vec, - orders: Vec, - bin_limits: Vec, - subgrid_params: SubgridParams, - ) -> Self { - Self { - subgrids: Array3::from_shape_simple_fn( - (orders.len(), bin_limits.len() - 1, channels.len()), - || EmptySubgridV1.into(), - ), - orders, - channels, - bin_limits: BinLimits::new(bin_limits), - more_members: MoreMembers::V3(Mmv3::new( - LagrangeSubgridV2::new(&subgrid_params, &ExtraSubgridParams::from(&subgrid_params)) - .into(), - )), - subgrid_params, - } - } - - /// Constructor. This function can be used like `new`, but the additional parameter - /// `subgrid_type` selects the underlying `Subgrid` type. Supported values are: - /// - `LagrangeSubgrid` - /// - `LagrangeSparseSubgrid` - /// - `NtupleSubgrid` - /// - /// # Errors - /// - /// If `subgrid_type` is none of the values listed above, an error is returned. - pub fn with_subgrid_type( - channels: Vec, - orders: Vec, - bin_limits: Vec, - subgrid_params: SubgridParams, - extra: ExtraSubgridParams, - subgrid_type: &str, - ) -> Result { - let subgrid_template: SubgridEnum = match subgrid_type { - "LagrangeSubgrid" | "LagrangeSubgridV2" => { - LagrangeSubgridV2::new(&subgrid_params, &extra).into() - } - "LagrangeSubgridV1" => LagrangeSubgridV1::new(&subgrid_params).into(), - "NtupleSubgrid" => NtupleSubgridV1::new().into(), - "LagrangeSparseSubgrid" => LagrangeSparseSubgridV1::new(&subgrid_params).into(), - _ => return Err(GridError::UnknownSubgridType(subgrid_type.to_owned())), - }; - - Ok(Self { - subgrids: Array3::from_shape_simple_fn( - (orders.len(), bin_limits.len() - 1, channels.len()), - || EmptySubgridV1.into(), - ), - orders, - channels, - bin_limits: BinLimits::new(bin_limits), - subgrid_params, - more_members: MoreMembers::V3(Mmv3::new(subgrid_template)), - }) - } - /// Return by which convention the particle IDs are encoded. 
#[must_use] pub fn pid_basis(&self) -> PidBasis { @@ -305,14 +207,6 @@ impl Grid { PidBasis::Pdg } - /// Set the convention by which PIDs of channels are interpreted. - pub fn set_pid_basis(&mut self, pid_basis: PidBasis) { - match pid_basis { - PidBasis::Pdg => self.set_key_value("lumi_id_types", "pdg_mc_ids"), - PidBasis::Evol => self.set_key_value("lumi_id_types", "evol"), - } - } - fn pdg_channels(&self) -> Cow<'_, [Channel]> { match self.pid_basis() { PidBasis::Evol => self @@ -324,180 +218,6 @@ impl Grid { } } - /// Perform a convolution using the PDFs and strong coupling in `lumi_cache`, and only - /// selecting only the orders, bins and channels corresponding to `order_mask`, `bin_indices` - /// and `channel_mask`. A variation of the scales is performed using the factors in `xi`; the - /// first factor varies the renormalization scale, the second the factorization scale. Note - /// that for the variation to be trusted all non-zero log-grids must be contained. - /// - /// # Panics - /// - /// TODO - pub fn convolve( - &self, - lumi_cache: &mut LumiCache, - order_mask: &[bool], - bin_indices: &[usize], - channel_mask: &[bool], - xi: &[(f64, f64)], - ) -> Vec { - lumi_cache.setup(self, xi).unwrap(); - - let bin_indices = if bin_indices.is_empty() { - (0..self.bin_info().bins()).collect() - } else { - bin_indices.to_vec() - }; - let mut bins = vec![0.0; bin_indices.len() * xi.len()]; - let normalizations = self.bin_info().normalizations(); - let pdg_channels = self.pdg_channels(); - - for (xi_index, &(xir, xif)) in xi.iter().enumerate() { - for ((ord, bin, chan), subgrid) in self.subgrids.indexed_iter() { - let order = &self.orders[ord]; - - if ((order.logxir > 0) && (xir == 1.0)) || ((order.logxif > 0) && (xif == 1.0)) { - continue; - } - - if (!order_mask.is_empty() && !order_mask[ord]) - || (!channel_mask.is_empty() && !channel_mask[chan]) - { - continue; - } - - let Some(bin_index) = bin_indices.iter().position(|&index| index == bin) else { - continue; - }; 
- - if subgrid.is_empty() { - continue; - } - - let channel = &pdg_channels[chan]; - let mu2_grid = subgrid.mu2_grid(); - let x1_grid = subgrid.x1_grid(); - let x2_grid = subgrid.x2_grid(); - - lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); - - let mut value = - subgrid.convolve(&x1_grid, &x2_grid, &mu2_grid, &mut |ix1, ix2, imu2| { - let x1 = x1_grid[ix1]; - let x2 = x2_grid[ix2]; - let mut lumi = 0.0; - - for entry in channel.entry() { - let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); - let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); - lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); - } - - let alphas = lumi_cache.alphas(imu2); - - lumi *= alphas.powi(order.alphas.try_into().unwrap()); - lumi - }); - - if order.logxir > 0 { - value *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); - } - - if order.logxif > 0 { - value *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); - } - - bins[xi_index + xi.len() * bin_index] += value / normalizations[bin]; - } - } - - bins - } - - /// Convolutes a single subgrid `(order, bin, channel)` with the PDFs strong coupling given by - /// `xfx1`, `xfx2` and `alphas`. The convolution result is fully differentially, such that the - /// axes of the result correspond to the values given by the subgrid `q2`, `x1` and `x2` grid - /// values. 
- /// - /// # Panics - /// - /// TODO - pub fn convolve_subgrid( - &self, - lumi_cache: &mut LumiCache, - ord: usize, - bin: usize, - channel: usize, - xir: f64, - xif: f64, - ) -> Array3 { - lumi_cache.setup(self, &[(xir, xif)]).unwrap(); - - let normalizations = self.bin_info().normalizations(); - let pdg_channels = self.pdg_channels(); - - let subgrid = &self.subgrids[[ord, bin, channel]]; - let order = &self.orders[ord]; - - let channel = &pdg_channels[channel]; - let mu2_grid = subgrid.mu2_grid(); - let x1_grid = subgrid.x1_grid(); - let x2_grid = subgrid.x2_grid(); - - lumi_cache.set_grids(&mu2_grid, &x1_grid, &x2_grid, xir, xif); - - let mut array = Array3::zeros((mu2_grid.len(), x1_grid.len(), x2_grid.len())); - - for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { - let x1 = x1_grid[ix1]; - let x2 = x2_grid[ix2]; - let mut lumi = 0.0; - - for entry in channel.entry() { - let xfx1 = lumi_cache.xfx1(entry.0, ix1, imu2); - let xfx2 = lumi_cache.xfx2(entry.1, ix2, imu2); - lumi += xfx1 * xfx2 * entry.2 / (x1 * x2); - } - - let alphas = lumi_cache.alphas(imu2); - - lumi *= alphas.powi(order.alphas.try_into().unwrap()); - - array[[imu2, ix1, ix2]] = lumi * value; - } - - if order.logxir > 0 { - array *= (xir * xir).ln().powi(order.logxir.try_into().unwrap()); - } - - if order.logxif > 0 { - array *= (xif * xif).ln().powi(order.logxif.try_into().unwrap()); - } - - array /= normalizations[bin]; - array - } - - /// Fills the grid with an ntuple for the given `order`, `observable`, and `channel`. 
- /// - /// # Panics - /// - /// TODO - pub fn fill(&mut self, order: usize, observable: f64, channel: usize, ntuple: &Ntuple) { - if let Some(bin) = self.bin_limits.index(observable) { - let subgrid = &mut self.subgrids[[order, bin, channel]]; - if let SubgridEnum::EmptySubgridV1(_) = subgrid { - if let MoreMembers::V3(mmv3) = &self.more_members { - *subgrid = mmv3.subgrid_template.clone_empty(); - } else { - unreachable!(); - } - } - - subgrid.fill(ntuple); - } - } - /// Construct a `Grid` by deserializing it from `reader`. Reading is buffered. /// /// # Errors @@ -541,65 +261,6 @@ impl Grid { bincode::deserialize_from(reader).map_err(GridError::ReadFailure) } - /// Serializes `self` into `writer`. Writing is buffered. - /// - /// # Errors - /// - /// If writing fails an error is returned. - pub fn write(&self, writer: impl Write) -> Result<(), GridError> { - let mut writer = BufWriter::new(writer); - let file_header = b"PineAPPL\0\0\0\0\0\0\0\0"; - - // first write PineAPPL file header - writer.write(file_header).map_err(GridError::IoFailure)?; - - // then serialize - bincode::serialize_into(writer, self).map_err(GridError::WriteFailure) - } - - /// Serializes `self` into `writer`, using LZ4 compression. Writing is buffered. - /// - /// # Errors - /// - /// If writing or compression fails an error is returned. - /// - /// # Panics - /// - /// TODO - pub fn write_lz4(&self, writer: impl Write) -> Result<(), GridError> { - let mut encoder = FrameEncoder::new(writer); - self.write(&mut encoder)?; - // TODO: get rid of the unwrap call and return the error - encoder.try_finish().unwrap(); - - Ok(()) - } - - /// Fills the grid with events for the parton momentum fractions `x1` and `x2`, the scale `q2`, - /// and the `order` and `observable`. The events are stored in `weights` and their ordering - /// corresponds to the ordering of [`Grid::channels`]. 
- pub fn fill_all( - &mut self, - order: usize, - observable: f64, - ntuple: &Ntuple<()>, - weights: &[f64], - ) { - for (channel, weight) in weights.iter().enumerate() { - self.fill( - order, - observable, - channel, - &Ntuple { - x1: ntuple.x1, - x2: ntuple.x2, - q2: ntuple.q2, - weight: *weight, - }, - ); - } - } - /// Return the channels for this `Grid`. #[must_use] pub fn channels(&self) -> &[Channel] { @@ -658,115 +319,6 @@ impl Grid { Ok(()) } - /// Merges the non-empty `Subgrid`s contained in `other` into `self`. - /// - /// # Errors - /// - /// If the bin limits of `self` and `other` are different and if the bin limits of `other` can - /// not be merged with `self` an error is returned. - /// - /// # Panics - /// - /// TODO - pub fn merge(&mut self, mut other: Self) -> Result<(), GridError> { - let mut new_orders: Vec = Vec::new(); - let mut new_bins = 0; - let mut new_entries: Vec = Vec::new(); - - if self.bin_info() != other.bin_info() { - let lhs_bins = self.bin_info().bins(); - new_bins = other.bin_info().bins(); - - let lhs_remapper = self.remapper_mut(); - let rhs_remapper = other.remapper(); - - if let Some(lhs) = lhs_remapper { - if let Some(rhs) = rhs_remapper { - lhs.merge(rhs).map_err(GridError::MergeBinError)?; - - let a = u32::try_from(lhs_bins).unwrap_or_else(|_| unreachable!()); - let b = u32::try_from(lhs_bins + new_bins).unwrap_or_else(|_| unreachable!()); - - self.bin_limits = BinLimits::new((0..=b).map(f64::from).collect()); - other.bin_limits = BinLimits::new((a..=b).map(f64::from).collect()); - } else { - // Return an error - todo!(); - } - } else if rhs_remapper.is_none() { - self.bin_limits - .merge(&other.bin_limits) - .map_err(GridError::InvalidBinLimits)?; - } else { - // Return an error - todo!(); - } - } - - for ((i, _, k), _) in other - .subgrids - .indexed_iter_mut() - .filter(|((_, _, _), subgrid)| !subgrid.is_empty()) - { - let other_order = &other.orders[i]; - let other_entry = &other.channels[k]; - - if !self - .orders - 
.iter() - .chain(new_orders.iter()) - .any(|x| x == other_order) - { - new_orders.push(other_order.clone()); - } - - if !self - .channels() - .iter() - .chain(new_entries.iter()) - .any(|y| y == other_entry) - { - new_entries.push(other_entry.clone()); - } - } - - if !new_orders.is_empty() || !new_entries.is_empty() || (new_bins != 0) { - self.increase_shape(&(new_orders.len(), new_bins, new_entries.len())); - } - - self.orders.append(&mut new_orders); - self.channels.append(&mut new_entries); - - let bin_indices: Vec<_> = (0..other.bin_info().bins()) - .map(|bin| { - self.bin_info() - .find_bin(&other.bin_info().bin_limits(bin)) - .unwrap_or_else(|| panic!("failed for {bin}")) - }) - .collect(); - - for ((i, j, k), subgrid) in other - .subgrids - .indexed_iter_mut() - .filter(|((_, _, _), subgrid)| !subgrid.is_empty()) - { - let other_order = &other.orders[i]; - let other_entry = &other.channels[k]; - - let self_i = self.orders.iter().position(|x| x == other_order).unwrap(); - let self_j = bin_indices[j]; - let self_k = self.channels.iter().position(|y| y == other_entry).unwrap(); - - if self.subgrids[[self_i, self_j, self_k]].is_empty() { - mem::swap(&mut self.subgrids[[self_i, self_j, self_k]], subgrid); - } else { - self.subgrids[[self_i, self_j, self_k]].merge(&mut *subgrid, false); - } - } - - Ok(()) - } - /// Return a vector containing the type of convolutions performed with this grid. /// /// # Panics @@ -840,162 +392,18 @@ impl Grid { ) } - /// Set the convolution type for this grid for the corresponding `index`. 
- pub fn set_convolution(&mut self, index: usize, convolution: Convolution) { - // remove outdated metadata - self.key_values_mut() - .remove(&format!("initial_state_{}", index + 1)); - - let (type_, particle) = match convolution { - Convolution::UnpolPDF(pid) => ("UnpolPDF".to_owned(), pid.to_string()), - Convolution::PolPDF(pid) => ("PolPDF".to_owned(), pid.to_string()), - Convolution::UnpolFF(pid) => ("UnpolFF".to_owned(), pid.to_string()), - Convolution::PolFF(pid) => ("PolFF".to_owned(), pid.to_string()), - Convolution::None => ("None".to_owned(), String::new()), - }; - - self.set_key_value(&format!("convolution_type_{}", index + 1), &type_); - self.set_key_value(&format!("convolution_particle_{}", index + 1), &particle); - - // update the remaining metadata - for (index, convolution) in self.convolutions().into_iter().enumerate() { - if self - .key_values() - // UNWRAP: we set some key-values before so there must be a storage - .unwrap_or_else(|| unreachable!()) - .get(&format!("initial_state_{}", index + 1)) - .is_some() - { - self.set_convolution(index, convolution); - } - } - } - - fn increase_shape(&mut self, new_dim: &(usize, usize, usize)) { - let old_dim = self.subgrids.raw_dim().into_pattern(); - let mut new_subgrids = Array3::from_shape_simple_fn( - ( - old_dim.0 + new_dim.0, - old_dim.1 + new_dim.1, - old_dim.2 + new_dim.2, - ), - || EmptySubgridV1.into(), - ); - - for ((i, j, k), subgrid) in self.subgrids.indexed_iter_mut() { - mem::swap(&mut new_subgrids[[i, j, k]], subgrid); - } - - mem::swap(&mut self.subgrids, &mut new_subgrids); - } - - /// Scale all subgrids by `factor`. - pub fn scale(&mut self, factor: f64) { - self.subgrids - .iter_mut() - .for_each(|subgrid| subgrid.scale(factor)); - } - - /// Scales each subgrid by a factor which is the product of the given values `alphas`, `alpha`, - /// `logxir`, and `logxif`, each raised to the corresponding powers for each subgrid. 
In - /// addition, every subgrid is scaled by a factor `global` independently of its order. - /// - /// # Panics - /// - /// TODO - pub fn scale_by_order( - &mut self, - alphas: f64, - alpha: f64, - logxir: f64, - logxif: f64, - global: f64, - ) { - for ((i, _, _), subgrid) in self.subgrids.indexed_iter_mut() { - let order = &self.orders[i]; - let factor = global - * alphas.powi(order.alphas.try_into().unwrap()) - * alpha.powi(order.alpha.try_into().unwrap()) - * logxir.powi(order.logxir.try_into().unwrap()) - * logxif.powi(order.logxif.try_into().unwrap()); - - subgrid.scale(factor); - } - } - - /// Scales each subgrid by a bin-dependent factor given in `factors`. If a bin does not have a - /// corresponding entry in `factors` it is not rescaled. If `factors` has more entries than - /// there are bins the superfluous entries do not have an effect. - pub fn scale_by_bin(&mut self, factors: &[f64]) { - for ((_, bin, _), subgrid) in self.subgrids.indexed_iter_mut() { - if let Some(&factor) = factors.get(bin) { - subgrid.scale(factor); - } - } - } - /// Returns the subgrid parameters. #[must_use] pub fn orders(&self) -> &[Order] { &self.orders } - /// Return a mutable reference to the subgrid parameters. - #[must_use] - pub fn orders_mut(&mut self) -> &mut [Order] { - &mut self.orders - } - - /// Return a mutable reference to the grid's channels. - pub fn channels_mut(&mut self) -> &mut [Channel] { - &mut self.channels - } - /// Return all subgrids as an `ArrayView3`. #[must_use] pub fn subgrids(&self) -> ArrayView3<'_, SubgridEnum> { self.subgrids.view() } - /// Return all subgrids as an `ArrayViewMut3`. - #[must_use] - pub fn subgrids_mut(&mut self) -> ArrayViewMut3<'_, SubgridEnum> { - self.subgrids.view_mut() - } - - /// Sets a remapper. A remapper can change the dimensions and limits of each bin in this grid. - /// This is useful because many Monte Carlo integrators and also `PineAPPL` do not support - /// multi-dimensional bins. 
To work around the problem the multi-dimensional bins can be - /// projected to one-dimensional bins, and the remapper can be used to restore the multi - /// dimensionality. Furthermore, it allows to normalize each bin separately, and independently - /// of the bin widths. - /// - /// # Errors - /// - /// Returns an error if the number of bins in the grid and in the remapper do not agree. - /// - /// # Panics - /// - /// TODO - pub fn set_remapper(&mut self, remapper: BinRemapper) -> Result<(), GridError> { - if remapper.bins() != self.bin_info().bins() { - return Err(GridError::BinNumberMismatch { - grid_bins: self.bin_info().bins(), - remapper_bins: remapper.bins(), - }); - } - - self.more_members.upgrade(); - - match &mut self.more_members { - MoreMembers::V1(_) => unreachable!(), - MoreMembers::V2(mmv2) => mmv2.remapper = Some(remapper), - MoreMembers::V3(mmv3) => mmv3.remapper = Some(remapper), - } - - Ok(()) - } - /// Return the currently set remapper, if there is any. #[must_use] pub const fn remapper(&self) -> Option<&BinRemapper> { @@ -1020,1092 +428,18 @@ impl Grid { BinInfo::new(&self.bin_limits, self.remapper()) } - /// Calls [`Self::optimize_using`] with all possible optimization options - /// ([`GridOptFlags::all`]). - pub fn optimize(&mut self) { - self.optimize_using(GridOptFlags::all()); - } - - /// Optimizes the internal datastructures for space efficiency. The parameter `flags` - /// determines which optimizations are applied, see [`GridOptFlags`]. 
- pub fn optimize_using(&mut self, flags: GridOptFlags) { - if flags.contains(GridOptFlags::OPTIMIZE_SUBGRID_TYPE) { - let ssd = flags.contains(GridOptFlags::STATIC_SCALE_DETECTION); - self.optimize_subgrid_type(ssd); - } - if flags.contains(GridOptFlags::SYMMETRIZE_CHANNELS) { - self.symmetrize_channels(); - } - if flags.contains(GridOptFlags::STRIP_EMPTY_ORDERS) { - self.strip_empty_orders(); - } - if flags.contains(GridOptFlags::MERGE_SAME_CHANNELS) { - self.merge_same_channels(); - } - if flags.contains(GridOptFlags::STRIP_EMPTY_CHANNELS) { - self.strip_empty_channels(); - } - } - - fn optimize_subgrid_type(&mut self, static_scale_detection: bool) { - for subgrid in &mut self.subgrids { - match subgrid { - // replace empty subgrids of any type with `EmptySubgridV1` - _ if subgrid.is_empty() => { - *subgrid = EmptySubgridV1.into(); - } - // can't be optimized without losing information - SubgridEnum::NtupleSubgridV1(_) => continue, - _ => { - // TODO: this requires a `pub(crate)` in `LagrangeSubgridV2`; we should - // replace this with a method - if !static_scale_detection { - if let SubgridEnum::LagrangeSubgridV2(subgrid) = subgrid { - // disable static-scale detection - subgrid.static_q2 = -1.0; - } - } - - let mut new_subgrid = ImportOnlySubgridV2::from(&*subgrid).into(); - mem::swap(subgrid, &mut new_subgrid); - } - } - } + /// Upgrades the internal data structures to their latest versions. + pub fn upgrade(&mut self) { + self.more_members.upgrade(); } - /// Try to deduplicate channels by detecting pairs of them that contain the same subgrids. The - /// numerical equality is tested using a tolerance of `ulps`, given in [units of least - /// precision](https://docs.rs/float-cmp/latest/float_cmp/index.html#some-explanation). 
- pub fn dedup_channels(&mut self, ulps: i64) { - let mut indices: Vec = (0..self.channels.len()).collect(); - - while let Some(index) = indices.pop() { - if let Some(other_index) = indices.iter().copied().find(|&other_index| { - let (mut a, mut b) = self - .subgrids - .multi_slice_mut((s![.., .., other_index], s![.., .., index])); - - // TODO: use `Iterator::eq_by` once stablizied - for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { - let mut it_a = lhs.indexed_iter(); - let mut it_b = rhs.indexed_iter(); - - loop { - let a = it_a.next(); - let b = it_b.next(); - - match (a, b) { - (Some((tuple_a, value_a)), Some((tuple_b, value_b))) => { - if tuple_a != tuple_b { - return false; - } - - let u = ulps; - if !approx_eq!(f64, value_a, value_b, ulps = u) { - return false; - } - } - (None, None) => break, - _ => return false, - } - } - } - - true - }) { - let old_channel = self.channels.remove(index).entry().to_vec(); - let mut new_channel = self.channels[other_index].entry().to_vec(); - new_channel.extend(old_channel); - self.channels[other_index] = Channel::new(new_channel); - self.subgrids.remove_index(Axis(2), index); - } + /// Returns a map with key-value pairs, if there are any stored in this grid. 
+ #[must_use] + pub const fn key_values(&self) -> Option<&HashMap> { + match &self.more_members { + MoreMembers::V3(mmv3) => Some(&mmv3.key_value_db), + MoreMembers::V2(mmv2) => Some(&mmv2.key_value_db), + MoreMembers::V1(_) => None, } } - - fn merge_same_channels(&mut self) { - let mut indices: Vec<_> = (0..self.channels.len()).rev().collect(); - - // merge channels that are the same - while let Some(index) = indices.pop() { - if let Some((other_index, factor)) = indices.iter().find_map(|&i| { - self.channels[i] - .common_factor(&self.channels[index]) - .map(|factor| (i, factor)) - }) { - let (mut a, mut b) = self - .subgrids - .multi_slice_mut((s![.., .., other_index], s![.., .., index])); - - // check if in all cases the limits are compatible with merging - for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { - if !rhs.is_empty() { - rhs.scale(1.0 / factor); - if lhs.is_empty() { - // we can't merge into an EmptySubgridV1 - *lhs = rhs.clone_empty(); - } - lhs.merge(rhs, false); - - *rhs = EmptySubgridV1.into(); - } - } - } - } - } - - fn strip_empty_channels(&mut self) { - let mut keep_channel_indices = vec![]; - let mut new_channel_entries = vec![]; - - // only keep channels that have non-zero factors and for which at least one subgrid is - // non-empty - for (channel, entry) in self.channels.iter().enumerate() { - if !entry.entry().iter().all(|&(_, _, factor)| factor == 0.0) - && !self - .subgrids - .slice(s![.., .., channel]) - .iter() - .all(Subgrid::is_empty) - { - keep_channel_indices.push(channel); - new_channel_entries.push(entry.clone()); - } - } - - // only keep the previously selected subgrids - let new_subgrids = Array3::from_shape_fn( - ( - self.orders.len(), - self.bin_info().bins(), - keep_channel_indices.len(), - ), - |(order, bin, new_channel)| { - mem::replace( - &mut self.subgrids[[order, bin, keep_channel_indices[new_channel]]], - EmptySubgridV1.into(), - ) - }, - ); - - self.channels = new_channel_entries; - self.subgrids = new_subgrids; - } - 
- fn strip_empty_orders(&mut self) { - let mut indices: Vec<_> = (0..self.orders().len()).collect(); - - while let Some(index) = indices.pop() { - if self - .subgrids - .slice(s![index, .., ..]) - .iter() - .all(Subgrid::is_empty) - { - self.orders.remove(index); - self.subgrids.remove_index(Axis(0), index); - } - } - } - - fn symmetrize_channels(&mut self) { - let convolutions = self.convolutions(); - if convolutions[0] != convolutions[1] { - return; - } - - let mut indices: Vec = (0..self.channels.len()).rev().collect(); - - while let Some(index) = indices.pop() { - let channel_entry = &self.channels[index]; - - if *channel_entry == channel_entry.transpose() { - // check if in all cases the limits are compatible with merging - self.subgrids - .slice_mut(s![.., .., index]) - .iter_mut() - .for_each(|subgrid| { - if !subgrid.is_empty() && (subgrid.x1_grid() == subgrid.x2_grid()) { - subgrid.symmetrize(); - } - }); - } else if let Some((j, &other_index)) = indices - .iter() - .enumerate() - .find(|(_, i)| self.channels[**i] == channel_entry.transpose()) - { - indices.remove(j); - - // check if in all cases the limits are compatible with merging - let (mut a, mut b) = self - .subgrids - .multi_slice_mut((s![.., .., index], s![.., .., other_index])); - - for (lhs, rhs) in a.iter_mut().zip(b.iter_mut()) { - if !rhs.is_empty() { - if lhs.is_empty() { - // we can't merge into an EmptySubgridV1 - *lhs = rhs.clone_empty(); - } - - lhs.merge(rhs, true); - *rhs = EmptySubgridV1.into(); - } - } - } - } - } - - /// Upgrades the internal data structures to their latest versions. - pub fn upgrade(&mut self) { - self.more_members.upgrade(); - } - - /// Returns a map with key-value pairs, if there are any stored in this grid. 
- #[must_use] - pub const fn key_values(&self) -> Option<&HashMap> { - match &self.more_members { - MoreMembers::V3(mmv3) => Some(&mmv3.key_value_db), - MoreMembers::V2(mmv2) => Some(&mmv2.key_value_db), - MoreMembers::V1(_) => None, - } - } - - /// Returns a map with key-value pairs, if there are any stored in this grid. - /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn key_values_mut(&mut self) -> &mut HashMap { - self.more_members.upgrade(); - - match &mut self.more_members { - MoreMembers::V1(_) => unreachable!(), - MoreMembers::V2(mmv2) => &mut mmv2.key_value_db, - MoreMembers::V3(mmv3) => &mut mmv3.key_value_db, - } - } - - /// Sets a specific key-value pair in this grid. - /// - /// # Panics - /// - /// TODO - pub fn set_key_value(&mut self, key: &str, value: &str) { - self.key_values_mut() - .insert(key.to_owned(), value.to_owned()); - } - - /// Returns information for the generation of evolution operators that are being used in - /// [`Grid::evolve`] with the parameter `order_mask`. - #[must_use] - pub fn evolve_info(&self, order_mask: &[bool]) -> EvolveInfo { - use super::evolution::EVOLVE_INFO_TOL_ULPS; - - let has_pdf1 = self.convolutions()[0] != Convolution::None; - let has_pdf2 = self.convolutions()[1] != Convolution::None; - - let mut ren1 = Vec::new(); - let mut fac1 = Vec::new(); - let mut x1 = Vec::new(); - let mut pids1 = Vec::new(); - - for (channel, subgrid) in self - .subgrids() - .indexed_iter() - .filter_map(|(tuple, subgrid)| { - (!subgrid.is_empty() && (order_mask.is_empty() || order_mask[tuple.0])) - .then_some((tuple.2, subgrid)) - }) - { - ren1.extend(subgrid.mu2_grid().iter().map(|Mu2 { ren, .. }| *ren)); - ren1.sort_by(f64::total_cmp); - ren1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); - - fac1.extend(subgrid.mu2_grid().iter().map(|Mu2 { fac, .. 
}| *fac)); - fac1.sort_by(f64::total_cmp); - fac1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); - - if has_pdf1 { - x1.extend(subgrid.x1_grid().iter().copied()); - } - if has_pdf2 { - x1.extend(subgrid.x2_grid().iter()); - } - - x1.sort_by(f64::total_cmp); - x1.dedup_by(|a, b| approx_eq!(f64, *a, *b, ulps = EVOLVE_INFO_TOL_ULPS)); - - if has_pdf1 { - pids1.extend(self.channels()[channel].entry().iter().map(|(a, _, _)| a)); - } - if has_pdf2 { - pids1.extend(self.channels()[channel].entry().iter().map(|(_, b, _)| b)); - } - - pids1.sort_unstable(); - pids1.dedup(); - } - - EvolveInfo { - fac1, - pids1, - x1, - ren1, - } - } - - /// Converts this `Grid` into an [`FkTable`] using an evolution kernel operator (EKO) given as - /// `operator`. The dimensions and properties of this operator must be described using `info`. - /// The parameter `order_mask` can be used to include or exclude orders from this operation, - /// and must correspond to the ordering given by [`Grid::orders`]. Orders that are not given - /// are enabled, and in particular if `order_mask` is empty all orders are activated. - /// - /// # Errors - /// - /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is - /// incompatible with this `Grid`. 
- #[deprecated(since = "0.7.4", note = "use evolve_with_slice_iter instead")] - pub fn evolve( - &self, - operator: ArrayView5, - info: &OperatorInfo, - order_mask: &[bool], - ) -> Result { - self.evolve_with_slice_iter( - info.fac1 - .iter() - .zip(operator.axis_iter(Axis(0))) - .map(|(&fac1, op)| { - Ok::<_, GridError>(( - OperatorSliceInfo { - fac0: info.fac0, - pids0: info.pids0.clone(), - x0: info.x0.clone(), - fac1, - pids1: info.pids1.clone(), - x1: info.x1.clone(), - pid_basis: info.pid_basis, - }, - CowArray::from(op), - )) - }), - order_mask, - (info.xir, info.xif), - &AlphasTable { - ren1: info.ren1.clone(), - alphas: info.alphas.clone(), - }, - ) - } - - // TODO: - // - try to find a better solution than to require that E must be convertible into - // anyhow::Error - - /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] - /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter - /// `order_mask` can be used to include or exclude orders from this operation, and must - /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are - /// enabled, and in particular if `order_mask` is empty all orders are activated. - /// - /// # Errors - /// - /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is - /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` - /// return an error. 
- pub fn evolve_with_slice_iter<'a, E: Into>( - &self, - slices: impl IntoIterator), E>>, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, - ) -> Result { - use super::evolution::EVOLVE_INFO_TOL_ULPS; - - let mut lhs: Option = None; - // Q2 slices we use - let mut used_op_fac1 = Vec::new(); - // Q2 slices we encounter, but possibly don't use - let mut op_fac1 = Vec::new(); - // Q2 slices needed by the grid - let grid_fac1: Vec<_> = self - .evolve_info(order_mask) - .fac1 - .into_iter() - .map(|fac| xi.1 * xi.1 * fac) - .collect(); - - for result in slices { - let (info, operator) = result.map_err(|err| GridError::Other(err.into()))?; - - op_fac1.push(info.fac1); - - // it's possible that due to small numerical differences we get two slices which are - // almost the same. We have to skip those in order not to evolve the 'same' slice twice - if used_op_fac1 - .iter() - .any(|&fac| approx_eq!(f64, fac, info.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) - { - continue; - } - - // skip slices that the grid doesn't use - if !grid_fac1 - .iter() - .any(|&fac| approx_eq!(f64, fac, info.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) - { - continue; - } - - let op_info_dim = ( - info.pids1.len(), - info.x1.len(), - info.pids0.len(), - info.x0.len(), - ); - - if operator.dim() != op_info_dim { - return Err(GridError::EvolutionFailure(format!( - "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim, - operator.dim(), - ))); - } - - let view = operator.view(); - - let (subgrids, channels) = if self.convolutions()[0] != Convolution::None - && self.convolutions()[1] != Convolution::None - { - evolution::evolve_slice_with_two(self, &view, &info, order_mask, xi, alphas_table) - } else { - evolution::evolve_slice_with_one(self, &view, &info, order_mask, xi, alphas_table) - }?; - - let mut rhs = Self { - subgrids, - channels, - bin_limits: self.bin_limits.clone(), - orders: vec![Order::new(0, 0, 0, 0)], - subgrid_params: SubgridParams::default(), 
- more_members: self.more_members.clone(), - }; - - // TODO: use a new constructor to set this information - rhs.set_pid_basis(info.pid_basis); - - if let Some(lhs) = &mut lhs { - lhs.merge(rhs)?; - } else { - lhs = Some(rhs); - } - - used_op_fac1.push(info.fac1); - } - - // UNWRAP: if we can't compare two numbers there's a bug - op_fac1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - - // make sure we've evolved all slices - if let Some(muf2) = grid_fac1.into_iter().find(|&grid_mu2| { - !used_op_fac1 - .iter() - .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) - }) { - return Err(GridError::EvolutionFailure(format!( - "no operator for muf2 = {muf2} found in {op_fac1:?}" - ))); - } - - // TODO: convert this unwrap into error - let grid = lhs.unwrap(); - - // UNWRAP: merging evolved slices should be a proper FkTable again - Ok(FkTable::try_from(grid).unwrap_or_else(|_| unreachable!())) - } - - /// Converts this `Grid` into an [`FkTable`] using `slices` that must iterate over a [`Result`] - /// of tuples of an [`OperatorSliceInfo`] and the corresponding sliced operator. The parameter - /// `order_mask` can be used to include or exclude orders from this operation, and must - /// correspond to the ordering given by [`Grid::orders`]. Orders that are not given are - /// enabled, and in particular if `order_mask` is empty all orders are activated. - /// - /// # Errors - /// - /// Returns a [`GridError::EvolutionFailure`] if either the `operator` or its `info` is - /// incompatible with this `Grid`. Returns a [`GridError::Other`] if the iterator from `slices` - /// return an error. 
- pub fn evolve_with_slice_iter2<'a, E: Into>( - &self, - slices_a: impl IntoIterator), E>>, - slices_b: impl IntoIterator), E>>, - order_mask: &[bool], - xi: (f64, f64), - alphas_table: &AlphasTable, - ) -> Result { - use super::evolution::EVOLVE_INFO_TOL_ULPS; - use itertools::izip; - - let mut lhs: Option = None; - // Q2 slices we use - let mut used_op_fac1 = Vec::new(); - // Q2 slices we encounter, but possibly don't use - let mut op_fac1 = Vec::new(); - // Q2 slices needed by the grid - let grid_fac1: Vec<_> = self - .evolve_info(order_mask) - .fac1 - .into_iter() - .map(|fac| xi.1 * xi.1 * fac) - .collect(); - - // TODO: simplify the ugly repetition below by offloading some ops into fn - for (result_a, result_b) in izip!(slices_a, slices_b) { - // Operate on `slices_a` - let (info_a, operator_a) = result_a.map_err(|err| GridError::Other(err.into()))?; - // Operate on `slices_b` - let (info_b, operator_b) = result_b.map_err(|err| GridError::Other(err.into()))?; - - // TODO: what if the scales of the EKOs don't agree? Is there an ordering problem? - assert_approx_eq!(f64, info_a.fac1, info_b.fac1, ulps = EVOLVE_INFO_TOL_ULPS); - - // also the PID bases must be the same - assert_eq!(info_a.pid_basis, info_b.pid_basis); - - op_fac1.push(info_a.fac1); - - // it's possible that due to small numerical differences we get two slices which are - // almost the same. 
We have to skip those in order not to evolve the 'same' slice twice - if used_op_fac1 - .iter() - .any(|&fac| approx_eq!(f64, fac, info_a.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) - { - continue; - } - - // skip slices that the grid doesn't use - if !grid_fac1 - .iter() - .any(|&fac| approx_eq!(f64, fac, info_a.fac1, ulps = EVOLVE_INFO_TOL_ULPS)) - { - continue; - } - - let op_info_dim_a = ( - info_a.pids1.len(), - info_a.x1.len(), - info_a.pids0.len(), - info_a.x0.len(), - ); - - if operator_a.dim() != op_info_dim_a { - return Err(GridError::EvolutionFailure(format!( - "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim_a, - operator_a.dim(), - ))); - } - - let op_info_dim_b = ( - info_b.pids1.len(), - info_b.x1.len(), - info_b.pids0.len(), - info_b.x0.len(), - ); - - if operator_b.dim() != op_info_dim_b { - return Err(GridError::EvolutionFailure(format!( - "operator information {:?} does not match the operator's dimensions: {:?}", - op_info_dim_b, - operator_b.dim(), - ))); - } - - let views = [operator_a.view(), operator_b.view()]; - let infos = [info_a, info_b]; - - let (subgrids, channels) = if self.convolutions()[0] != Convolution::None - && self.convolutions()[1] != Convolution::None - { - evolution::evolve_slice_with_two2( - self, - &views, - &infos, - order_mask, - xi, - alphas_table, - ) - } else { - evolution::evolve_slice_with_one( - self, - &views[0], - &infos[1], - order_mask, - xi, - alphas_table, - ) - }?; - - let mut rhs = Self { - subgrids, - channels, - bin_limits: self.bin_limits.clone(), - orders: vec![Order::new(0, 0, 0, 0)], - subgrid_params: SubgridParams::default(), - more_members: self.more_members.clone(), - }; - - // TODO: use a new constructor to set this information - rhs.set_pid_basis(infos[0].pid_basis); - - if let Some(lhs) = &mut lhs { - lhs.merge(rhs)?; - } else { - lhs = Some(rhs); - } - - used_op_fac1.push(infos[0].fac1); - } - - // UNWRAP: if we can't compare two numbers there's a bug - 
op_fac1.sort_by(|a, b| a.partial_cmp(b).unwrap_or_else(|| unreachable!())); - - // make sure we've evolved all slices - if let Some(muf2) = grid_fac1.into_iter().find(|&grid_mu2| { - !used_op_fac1 - .iter() - .any(|&eko_mu2| approx_eq!(f64, grid_mu2, eko_mu2, ulps = EVOLVE_INFO_TOL_ULPS)) - }) { - return Err(GridError::EvolutionFailure(format!( - "no operator for muf2 = {muf2} found in {op_fac1:?}" - ))); - } - - // TODO: convert this unwrap into error - let grid = lhs.unwrap(); - - // UNWRAP: merging evolved slices should be a proper FkTable again - Ok(FkTable::try_from(grid).unwrap_or_else(|_| unreachable!())) - } - - /// Deletes bins with the corresponding `bin_indices`. Repeated indices and indices larger or - /// equal the bin length are ignored. - pub fn delete_bins(&mut self, bin_indices: &[usize]) { - let mut bin_indices: Vec<_> = bin_indices - .iter() - .copied() - // ignore indices corresponding to bin that don't exist - .filter(|&index| index < self.bin_info().bins()) - .collect(); - - // sort and remove repeated indices - bin_indices.sort_unstable(); - bin_indices.dedup(); - let bin_indices = bin_indices; - - let mut bin_ranges: Vec> = Vec::new(); - - // convert indices into consecutive ranges - for &bin_index in &bin_indices { - match bin_ranges.last_mut() { - Some(range) if range.end == bin_index => range.end += 1, - _ => bin_ranges.push(bin_index..(bin_index + 1)), - } - } - - let bin_ranges = bin_ranges; - let mut ranges = bin_ranges.as_slice(); - let old_limits = self.bin_limits.limits(); - - // remove the bins from the right first, so as not to invalidate any indices - if let Some((range, remainder)) = ranges.split_last() { - if range.end == self.bin_info().bins() { - self.bin_limits.delete_bins_right(range.end - range.start); - ranges = remainder; - } - } - - // indices on the left aren't affected by removal of bins to their right - if let Some((range, remainder)) = ranges.split_first() { - if range.start == 0 { - 
self.bin_limits.delete_bins_left(range.end); - ranges = remainder; - } - } - - if !ranges.is_empty() { - // if there's no remapper we need to store the bin limits in a new remapper - if self.remapper_mut().is_none() { - self.set_remapper( - BinRemapper::new( - old_limits.windows(2).map(|win| win[1] - win[0]).collect(), - old_limits.windows(2).map(|win| (win[0], win[1])).collect(), - ) - .unwrap_or_else(|_| unreachable!()), - ) - .unwrap_or_else(|_| unreachable!()); - } - - // the following should not be needed, but let's set these limits to integer values - self.bin_limits = BinLimits::new( - iter::successors(Some(0.0), |x| Some(x + 1.0)) - .take(old_limits.len() - bin_indices.len()) - .collect(), - ); - } - - if let Some(remapper) = self.remapper_mut() { - remapper.delete_bins(&bin_ranges); - } - - for &bin_index in bin_indices.iter().rev() { - self.subgrids.remove_index(Axis(1), bin_index); - } - } - - /// Change the particle ID convention. - pub fn rotate_pid_basis(&mut self, pid_basis: PidBasis) { - match (self.pid_basis(), pid_basis) { - (PidBasis::Pdg, PidBasis::Evol) => { - self.channels = self - .channels() - .iter() - .map(|channel| Channel::translate(channel, &pids::pdg_mc_pids_to_evol)) - .collect(); - - self.set_pid_basis(PidBasis::Evol); - } - (PidBasis::Evol, PidBasis::Pdg) => { - self.channels = self - .channels() - .iter() - .map(|channel| Channel::translate(channel, &pids::evol_to_pdg_mc_ids)) - .collect(); - - self.set_pid_basis(PidBasis::Pdg); - } - (PidBasis::Evol, PidBasis::Evol) | (PidBasis::Pdg, PidBasis::Pdg) => { - // here's nothing to do - } - } - } - - /// Deletes channels with the corresponding `channel_indices`. Repeated indices and indices - /// larger or equal than the number of channels are ignored. 
- pub fn delete_channels(&mut self, channel_indices: &[usize]) { - let mut channel_indices: Vec<_> = channel_indices - .iter() - .copied() - // ignore indices corresponding to bin that don't exist - .filter(|&index| index < self.channels().len()) - .collect(); - - // sort and remove repeated indices - channel_indices.sort_unstable(); - channel_indices.dedup(); - channel_indices.reverse(); - let channel_indices = channel_indices; - - for index in channel_indices { - self.channels.remove(index); - self.subgrids.remove_index(Axis(2), index); - } - } - - pub(crate) fn rewrite_channels(&mut self, add: &[(i32, i32)], del: &[i32]) { - self.channels = self - .channels() - .iter() - .map(|entry| { - Channel::new( - entry - .entry() - .iter() - .map(|(a, b, f)| { - ( - // if `a` is to be added to another pid replace it with this pid - add.iter().fold( - *a, - |id, &(source, target)| if id == source { target } else { id }, - ), - // if `b` is to be added to another pid replace it with this pid - add.iter().fold( - *b, - |id, &(source, target)| if id == source { target } else { id }, - ), - // if any of the pids `a` or `b` are to b deleted set the factor to - // zero - if del.iter().any(|id| id == a || id == b) { - 0.0 - } else { - *f - }, - ) - }) - .collect(), - ) - }) - .collect(); - } - - /// Splits the grid such that each channel contains only a single tuple of PIDs. 
- pub fn split_channels(&mut self) { - let indices: Vec<_> = self - .channels() - .iter() - .enumerate() - .flat_map(|(index, entry)| iter::repeat(index).take(entry.entry().len())) - .collect(); - - self.subgrids = self.subgrids.select(Axis(2), &indices); - self.channels = self - .channels() - .iter() - .flat_map(|entry| { - entry - .entry() - .iter() - .copied() - .map(move |entry| Channel::new(vec![entry])) - }) - .collect(); - } -} - -#[cfg(test)] -mod tests { - use super::*; - use crate::channel; - - #[test] - fn grid_with_subgrid_type() { - let subgrid_type = String::from("Idontexist"); - let result = Grid::with_subgrid_type( - vec![], - vec![], - vec![], - SubgridParams::default(), - ExtraSubgridParams::default(), - &subgrid_type, - ); - - matches!(result, Err(GridError::UnknownSubgridType(x)) if x == subgrid_type); - } - - #[test] - fn grid_merge_empty_subgrids() { - let mut grid = Grid::new( - vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], - ], - vec![Order::new(0, 2, 0, 0)], - vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.channels().len(), 2); - assert_eq!(grid.orders().len(), 1); - - let other = Grid::new( - vec![ - // differently ordered than `grid` - channel![1, 1, 1.0; 3, 3, 1.0], - channel![2, 2, 1.0; 4, 4, 1.0], - ], - vec![Order::new(1, 2, 0, 0), Order::new(1, 2, 0, 1)], - vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - // merging with empty subgrids should not change the grid - grid.merge(other).unwrap(); - - assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.channels().len(), 2); - assert_eq!(grid.orders().len(), 1); - } - - #[test] - fn grid_merge_orders() { - let mut grid = Grid::new( - vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], - ], - vec![Order::new(0, 2, 0, 0)], - vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - assert_eq!(grid.bin_info().bins(), 4); - 
assert_eq!(grid.channels().len(), 2); - assert_eq!(grid.orders().len(), 1); - - let mut other = Grid::new( - vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], - ], - vec![ - Order::new(1, 2, 0, 0), - Order::new(1, 2, 0, 1), - Order::new(0, 2, 0, 0), - ], - vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - other.fill_all( - 0, - 0.1, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: (), - }, - &[1.0, 2.0], - ); - other.fill_all( - 1, - 0.1, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: (), - }, - &[1.0, 2.0], - ); - - // merge with four non-empty subgrids - grid.merge(other).unwrap(); - - assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.channels().len(), 2); - assert_eq!(grid.orders().len(), 3); - } - - #[test] - fn grid_merge_channels_entries() { - let mut grid = Grid::new( - vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], - ], - vec![Order::new(0, 2, 0, 0)], - vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.channels().len(), 2); - assert_eq!(grid.orders().len(), 1); - - let mut other = Grid::new( - vec![channel![22, 22, 1.0], channel![2, 2, 1.0; 4, 4, 1.0]], - vec![Order::new(0, 2, 0, 0)], - vec![0.0, 0.25, 0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - // fill the photon-photon entry - other.fill( - 0, - 0.1, - 0, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 3.0, - }, - ); - - grid.merge(other).unwrap(); - - assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.channels().len(), 3); - assert_eq!(grid.orders().len(), 1); - } - - #[test] - fn grid_merge_bins() { - let mut grid = Grid::new( - vec![ - channel![2, 2, 1.0; 4, 4, 1.0], - channel![1, 1, 1.0; 3, 3, 1.0], - ], - vec![Order::new(0, 2, 0, 0)], - vec![0.0, 0.25, 0.5], - SubgridParams::default(), - ); - - assert_eq!(grid.bin_info().bins(), 2); - assert_eq!(grid.channels().len(), 2); - 
assert_eq!(grid.orders().len(), 1); - - let mut other = Grid::new( - vec![ - // channels are differently sorted - channel![1, 1, 1.0; 3, 3, 1.0], - channel![2, 2, 1.0; 4, 4, 1.0], - ], - vec![Order::new(0, 2, 0, 0)], - vec![0.5, 0.75, 1.0], - SubgridParams::default(), - ); - - other.fill_all( - 0, - 0.1, - &Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: (), - }, - &[2.0, 3.0], - ); - - grid.merge(other).unwrap(); - - assert_eq!(grid.bin_info().bins(), 4); - assert_eq!(grid.channels().len(), 2); - assert_eq!(grid.orders().len(), 1); - } - - // TODO: convolve_subgrid, merge_bins, subgrid, set_subgrid - - #[test] - fn grid_convolutions() { - let mut grid = Grid::new( - vec![channel![21, 21, 1.0]], - vec![Order { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }], - vec![0.0, 1.0], - SubgridParams::default(), - ); - - // by default we assume unpolarized proton PDFs are used - assert_eq!( - grid.convolutions(), - [Convolution::UnpolPDF(2212), Convolution::UnpolPDF(2212)] - ); - - grid.set_convolution(0, Convolution::UnpolPDF(-2212)); - grid.set_convolution(1, Convolution::UnpolPDF(-2212)); - - assert_eq!( - grid.convolutions(), - [Convolution::UnpolPDF(-2212), Convolution::UnpolPDF(-2212)] - ); - } } diff --git a/pineappl_v0/src/lib.rs b/pineappl_v0/src/lib.rs index 5a0387999..1848bf59b 100644 --- a/pineappl_v0/src/lib.rs +++ b/pineappl_v0/src/lib.rs @@ -39,8 +39,6 @@ pub mod bin; pub mod boc; pub mod convolutions; pub mod empty_subgrid; -pub mod evolution; -pub mod fk_table; pub mod grid; pub mod import_only_subgrid; pub mod lagrange_subgrid; diff --git a/pineappl_v0/tests/drell_yan_lo.rs b/pineappl_v0/tests/drell_yan_lo.rs deleted file mode 100644 index e5c99e246..000000000 --- a/pineappl_v0/tests/drell_yan_lo.rs +++ /dev/null @@ -1,822 +0,0 @@ -use anyhow::Result; -use float_cmp::assert_approx_eq; -use lhapdf::Pdf; -use num_complex::Complex; -use pineappl_v0::bin::BinRemapper; -use pineappl_v0::boc::Order; -use pineappl_v0::channel; -use 
pineappl_v0::convolutions::LumiCache; -use pineappl_v0::grid::{Grid, GridOptFlags, Ntuple}; -use pineappl_v0::subgrid::{ExtraSubgridParams, Subgrid, SubgridEnum, SubgridParams}; -use rand::Rng; -use rand_pcg::Pcg64; -use std::f64::consts::PI; -use std::io::Cursor; -use std::mem; - -// If equation numbers are given, they are from Stefan Dittmaier and Max Huber's paper: -// 'Radiative corrections to the neutral-current Drell–Yan process in the Standard Model and its -// minimal supersymmetric extension' (https://arxiv.org/abs/0911.2329) - -// Eq. (2.13) - gamma-gamma contribution to DY lepton pair production -fn int_photo(s: f64, t: f64, u: f64) -> f64 { - let alpha0: f64 = 1.0 / 137.03599911; - alpha0.powi(2) / 2.0 / s * (t / u + u / t) -} - -// Eq. (2.12) - quark-antiquark contribution to DY lepton pair production -fn int_quark(s: f64, t: f64, u: f64, qq: f64, i3_wq: f64) -> f64 { - let alphagf: f64 = 1.0 / 132.30818655547878; - let mw = 80.35198454966643; - let mz = 91.15348061918276; - let gw = 2.083799397775285; - let gz = 2.494266378772824; - - // lepton charge - let ql: f64 = -1.0; - // lepton weak isospin - let i3_wl = -0.5; - - // weak mixing angles - let cw = (Complex::new(mw * mw, -mw * gw) / Complex::new(mz * mz, -mz * gz)).sqrt(); - let sw = (Complex::new(1.0, 0.0) - cw * cw).sqrt(); - - // Eq. (2.8) - let chi_z = Complex::new(s, 0.0) / Complex::new(s - mz * mz, mz * gz); - - // Eq. 
(2.7) - let gp_qqz = -sw / cw * qq; - let gm_qqz = (i3_wq - sw * sw * qq) / (sw * cw); - let gp_llz = -sw / cw * ql; - let gm_llz = (i3_wl - sw * sw * ql) / (sw * cw); - - alphagf.powi(2) / 12.0 / s.powi(3) - * (2.0 * qq.powi(2) * ql.powi(2) * (t * t + u * u) - + 2.0 - * qq - * ql - * (((gp_qqz * gp_llz + gm_qqz * gm_llz) * u * u - + (gp_qqz * gm_llz + gm_qqz * gp_llz) * t * t) - * chi_z) - .re - + ((gp_qqz.norm_sqr() * gp_llz.norm_sqr() + gm_qqz.norm_sqr() * gm_llz.norm_sqr()) - * u - * u - + (gp_qqz.norm_sqr() * gm_llz.norm_sqr() + gm_qqz.norm_sqr() * gp_llz.norm_sqr()) - * t - * t) - * chi_z.norm_sqr()) -} - -struct Psp2to2 { - s: f64, - t: f64, - u: f64, - x1: f64, - x2: f64, - jacobian: f64, -} - -fn hadronic_pspgen(rng: &mut impl Rng, mmin: f64, mmax: f64) -> Psp2to2 { - let smin = mmin * mmin; - let smax = mmax * mmax; - - let mut jacobian = 1.0; - - let r1 = rng.r#gen::(); - let r2 = rng.r#gen::(); - let tau0 = smin / smax; - let tau = tau0.powf(r1); - let y = tau.powf(1.0 - r2); - let x1 = y; - let x2 = tau / y; - let s = tau * smax; - jacobian *= tau * tau0.ln().powi(2) * r1; - - // theta integration (in the CMS) - let cos_theta = 2.0 * rng.r#gen::() - 1.0; - jacobian *= 2.0; - - let t = -0.5 * s * (1.0 - cos_theta); - let u = -0.5 * s * (1.0 + cos_theta); - - // phi integration - jacobian *= 2.0 * PI; - - Psp2to2 { - s, - t, - u, - x1, - x2, - jacobian, - } -} - -fn fill_drell_yan_lo_grid( - rng: &mut impl Rng, - calls: u32, - subgrid_type: &str, - dynamic: bool, - reweight: bool, -) -> Result { - let channels = vec![ - // photons - channel![22, 22, 1.0], - // up-antiup - channel![2, -2, 1.0; 4, -4, 1.0], - // antiup-up - channel![-2, 2, 1.0; -4, 4, 1.0], - // down-antidown - channel![1, -1, 1.0; 3, -3, 1.0; 5, -5, 1.0], - // antidown-down - channel![-1, 1, 1.0; -3, 3, 1.0; -5, 5, 1.0], - ]; - - let orders = vec![ - // LO - Order { - alphas: 0, - alpha: 2, - logxir: 0, - logxif: 0, - }, - // NLO QCD - won't be filled - Order { - alphas: 1, - alpha: 2, - 
logxir: 0, - logxif: 0, - }, - Order { - alphas: 1, - alpha: 2, - logxir: 0, - logxif: 1, - }, - ]; - - // we bin in rapidity from 0 to 2.4 in steps of 0.1 - let bin_limits: Vec<_> = (0..=24).map(|x: u32| f64::from(x) / 10.0).collect(); - - let mut subgrid_params = SubgridParams::default(); - let mut extra = ExtraSubgridParams::default(); - - subgrid_params.set_q2_bins(30); - subgrid_params.set_q2_max(1e6); - subgrid_params.set_q2_min(1e2); - subgrid_params.set_q2_order(3); - subgrid_params.set_reweight(reweight); - subgrid_params.set_x_bins(50); - subgrid_params.set_x_max(1.0); - subgrid_params.set_x_min(2e-7); - subgrid_params.set_x_order(3); - extra.set_x2_bins(50); - extra.set_x2_max(1.0); - extra.set_x2_min(2e-7); - extra.set_x2_order(3); - extra.set_reweight2(reweight); - - // create the PineAPPL grid - let mut grid = Grid::with_subgrid_type( - channels, - orders, - bin_limits, - subgrid_params, - extra, - subgrid_type, - )?; - - // in GeV^2 pbarn - let hbarc2 = 3.893793721e8; - - for _ in 0..calls { - // generate a phase-space point - let Psp2to2 { - s, - t, - u, - x1, - x2, - mut jacobian, - } = hadronic_pspgen(rng, 10.0, 7000.0); - - let ptl = (t * u / s).sqrt(); - let mll = s.sqrt(); - let yll = 0.5 * (x1 / x2).ln(); - let ylp = (yll + (0.5 * mll / ptl).acosh()).abs(); - let ylm = (yll - (0.5 * mll / ptl).acosh()).abs(); - - jacobian *= hbarc2 / f64::from(calls); - - // cuts for LO for the invariant-mass slice containing the Z-peak from CMSDY2D11 - if (ptl < 14.0) - || (yll.abs() > 2.4) - || (ylp > 2.4) - || (ylm > 2.4) - || !(60.0..=120.0).contains(&mll) - { - continue; - } - - let q2 = if dynamic { mll * mll } else { 90.0 * 90.0 }; - - // LO photon-photon channel - let weight = jacobian * int_photo(s, t, u); - let pto = 0; - let channel = 0; - - grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); - - // LO up-antiup-type channel - let weight = jacobian * int_quark(s, t, u, 2.0 / 3.0, 0.5); - let pto = 0; - let channel = 1; - - 
grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); - - // LO antiup-up-type channel - swap (x1 <-> x2) and (t <-> u) - let weight = jacobian * int_quark(s, u, t, 2.0 / 3.0, 0.5); - let pto = 0; - let channel = 2; - - grid.fill( - pto, - yll.abs(), - channel, - &Ntuple { - x1: x2, - x2: x1, - q2, - weight, - }, - ); - - // LO down-antidown-type channel - let weight = jacobian * int_quark(s, t, u, -1.0 / 3.0, -0.5); - let pto = 0; - let channel = 3; - - grid.fill(pto, yll.abs(), channel, &Ntuple { x1, x2, q2, weight }); - - // LO antidown-down-type channel - swap (x1 <-> x2) and (t <-> u) - let weight = jacobian * int_quark(s, u, t, -1.0 / 3.0, -0.5); - let pto = 0; - let channel = 4; - - grid.fill( - pto, - yll.abs(), - channel, - &Ntuple { - x1: x2, - x2: x1, - q2, - weight, - }, - ); - } - - Ok(grid) -} - -fn perform_grid_tests( - subgrid_type: &str, - dynamic: bool, - reference: &[f64], - reference_after_ssd: &[f64], - x_grid: &[f64], - reweight: bool, -) -> Result<()> { - let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); - let mut grid = fill_drell_yan_lo_grid(&mut rng, 500_000, subgrid_type, dynamic, reweight)?; - - // TEST 1: `merge` and `scale` - grid.merge(fill_drell_yan_lo_grid( - &mut rng, - 500_000, - subgrid_type, - dynamic, - reweight, - )?)?; - grid.scale(0.5); - - // suppress LHAPDF banners - lhapdf::set_verbosity(0); - - let pdf_set = "NNPDF31_nlo_as_0118_luxqed"; - - let pdf = Pdf::with_setname_and_member(pdf_set, 0)?; - let mut xfx = |id, x, q2| pdf.xfx_q2(id, x, q2); - let mut alphas = |_| 0.0; - - // TEST 2: `read` and `write` - let mut file = Cursor::new(Vec::new()); - grid.write(&mut file)?; - file.set_position(0); - mem::drop(grid); - let grid = Grid::read(&mut file)?; - - // TEST 3: `write_lz4` - let mut file = Cursor::new(Vec::new()); - grid.write_lz4(&mut file)?; - file.set_position(0); - mem::drop(grid); - let mut grid = Grid::read(&mut file)?; - - // TEST 4: `scale_by_order` - 
grid.scale_by_order(10.0, 0.5, 10.0, 10.0, 1.0); - grid.scale_by_order(10.0, 1.0, 10.0, 10.0, 4.0); - - // TEST 5: `convolve` - let mut lumi_cache = LumiCache::with_one(2212, &mut xfx, &mut alphas); - let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); - - for (result, reference) in bins.iter().zip(reference.iter()) { - assert_approx_eq!(f64, *result, *reference, ulps = 16); - } - - // TEST 5b: `convolve` with `LumiCache::with_two` - let mut xfx1 = |id, x, q2| pdf.xfx_q2(id, x, q2); - let mut xfx2 = |id, x, q2| pdf.xfx_q2(id, x, q2); - let mut alphas2 = |_| 0.0; - let mut lumi_cache2 = LumiCache::with_two(2212, &mut xfx1, 2212, &mut xfx2, &mut alphas2); - let bins2 = grid.convolve(&mut lumi_cache2, &[], &[], &[], &[(1.0, 1.0)]); - - for (result, reference) in bins2.iter().zip(reference.iter()) { - assert_approx_eq!(f64, *result, *reference, ulps = 16); - } - - mem::drop(lumi_cache2); - mem::drop(bins2); - - // TEST 6: `convolve_subgrid` - let bins: Vec<_> = (0..grid.bin_info().bins()) - .map(|bin| { - (0..grid.channels().len()) - .map(|channel| { - grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) - .sum() - }) - .sum() - }) - .collect(); - - for (result, reference) in bins.iter().zip(reference.iter()) { - assert_approx_eq!(f64, *result, *reference, ulps = 24); - } - - // TEST 7a: `optimize_using` - tests `symmetrize` for each subgrid type - grid.optimize_using(GridOptFlags::SYMMETRIZE_CHANNELS); - - // TEST 7b: `optimize` - grid.optimize(); - - assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().as_ref(), x_grid); - assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().as_ref(), x_grid); - - // TEST 8: `convolve_subgrid` for the optimized subgrids - let bins: Vec<_> = (0..grid.bin_info().bins()) - .map(|bin| { - (0..grid.channels().len()) - .map(|channel| { - grid.convolve_subgrid(&mut lumi_cache, 0, bin, channel, 1.0, 1.0) - .sum() - }) - .sum() - }) - .collect(); - - for (result, reference_after_ssd) in 
bins.iter().zip(reference_after_ssd.iter()) { - assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); - } - - let bins = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); - - for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { - assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); - } - - // TEST 9: `set_remapper` - - // make a two-dimensional distribution out of it - grid.set_remapper(BinRemapper::new( - vec![0.1; 24], - (0..24) - .flat_map(|index| { - let index = f64::from(index); - vec![(60.0, 120.0), (index * 0.1, (index + 1.0) * 0.1)] - }) - .collect::>(), - )?)?; - - // TEST 10: `merge_bins` - - // trivial merge: first bin is merged into first bin - grid.merge_bins(0..1)?; - - for (result, reference_after_ssd) in bins.iter().zip(reference_after_ssd.iter()) { - assert_approx_eq!(f64, *result, *reference_after_ssd, ulps = 24); - } - - // merge two bins with each other - for bin in 0..12 { - grid.merge_bins(bin..bin + 2)?; - } - - let merged2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); - - for (result, reference_after_ssd) in merged2.iter().zip( - reference_after_ssd - .chunks_exact(2) - .map(|chunk| chunk.iter().sum::() / 2.0), - ) { - assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 32); - } - - // TEST 11: `delete_bins` - - // delete a few bins from the start - grid.delete_bins(&[0, 1]); - - let deleted = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); - - assert_eq!(deleted.len(), 10); - - for (result, reference_after_ssd) in deleted.iter().zip( - reference_after_ssd - .chunks_exact(2) - .map(|chunk| chunk.iter().sum::() / 2.0) - .skip(2), - ) { - assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 32); - } - - // delete a few bins from the ending - grid.delete_bins(&[8, 9]); - - let deleted2 = grid.convolve(&mut lumi_cache, &[], &[], &[], &[(1.0, 1.0)]); - - assert_eq!(deleted2.len(), 8); - - for (result, reference_after_ssd) in 
deleted2.iter().zip( - reference_after_ssd - .chunks_exact(2) - .map(|chunk| chunk.iter().sum::() / 2.0) - .skip(2) - .take(6), - ) { - assert_approx_eq!(f64, *result, reference_after_ssd, ulps = 16); - } - - Ok(()) -} - -fn generate_grid(subgrid_type: &str, dynamic: bool, reweight: bool) -> Result { - let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); - fill_drell_yan_lo_grid(&mut rng, 500_000, subgrid_type, dynamic, reweight) -} - -const STATIC_REFERENCE: [f64; 24] = [ - 269.89225458312495, - 266.2168804878282, - 290.0467478314624, - 258.0064918266305, - 239.54186548997865, - 300.17541324377703, - 258.8811221515799, - 238.4064950360576, - 242.5494601562957, - 236.34329830221077, - 230.63243720020898, - 190.03118557029666, - 213.22241277258763, - 177.75582251643334, - 168.07022695390958, - 151.59217101220256, - 143.81017491485716, - 97.09707327367487, - 91.38465432190982, - 73.94464862425771, - 63.859689262732104, - 48.595785504299926, - 27.94818010640803, - 9.343737799674852, -]; - -// numbers are slightly different from `STATIC_REFERENCE` because the static scale detection (SSD) -// removes the Q^2 interpolation error -const STATIC_REFERENCE_AFTER_SSD: [f64; 24] = [ - 269.89240546283145, - 266.2170285827742, - 290.04690782935967, - 258.0066322019259, - 239.54199362567599, - 300.17556967146095, - 258.88125430161745, - 238.40661279174125, - 242.54957458220744, - 236.34340283622035, - 230.63253265929194, - 190.03125927151245, - 213.2224910582812, - 177.7558806305883, - 168.07027678254747, - 151.59220685502618, - 143.81020355582885, - 97.09708758263099, - 91.38466242593998, - 73.94465114837278, - 63.859687905917, - 48.595781165174515, - 27.94817639459665, - 9.343735959243446, -]; - -const DYNAMIC_REFERENCE: [f64; 24] = [ - 269.9662650413552, - 266.2274509325408, - 290.039119030095, - 258.04801305108583, - 239.63561020879277, - 300.2475932636636, - 258.88126161648313, - 238.42709542929794, - 242.5724521248901, - 236.3541498865422, - 
230.64832146047578, - 189.999243811704, - 213.2896760201295, - 177.7280865940876, - 168.0886178280483, - 151.59285700593935, - 143.80051106343882, - 97.0715765765853, - 91.38479915098559, - 73.94713838892906, - 63.85622547082087, - 48.61296466751912, - 27.948404940991445, - 9.342761664545428, -]; - -const DYNAMIC_REFERENCE_NO_REWEIGHT: [f64; 24] = [ - 268.8874311488598, - 265.3130436782233, - 289.0614714145284, - 257.02578172672656, - 238.76378338813032, - 299.1756333696102, - 257.98748703027104, - 237.58099891213897, - 241.75215319366012, - 235.41757682699438, - 229.8671307486547, - 189.47964517011536, - 212.56055728623704, - 176.9591711445695, - 167.56523215346917, - 151.30532185043768, - 143.20366078799765, - 96.67453775369947, - 91.18334210163036, - 73.75879631942671, - 63.629606742074984, - 48.47126745674977, - 27.86328933386428, - 9.32654010506528, -]; - -#[test] -fn drell_yan_lagrange_static() -> Result<()> { - perform_grid_tests( - "LagrangeSubgrid", - false, - &STATIC_REFERENCE, - &STATIC_REFERENCE_AFTER_SSD, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v1_static() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV1", - false, - &STATIC_REFERENCE, - &STATIC_REFERENCE, // LagrangeSubgridV1 doesn't have static-scale detection - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v2_static() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV2", - false, - &STATIC_REFERENCE, - &STATIC_REFERENCE_AFTER_SSD, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_dynamic() -> Result<()> { - 
perform_grid_tests( - "LagrangeSubgrid", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v1_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV1", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v1_dynamic_no_reweight() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV1", - true, - &DYNAMIC_REFERENCE_NO_REWEIGHT, - &DYNAMIC_REFERENCE_NO_REWEIGHT, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - false, - ) -} - -#[test] -fn drell_yan_lagrange_v2_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV2", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn drell_yan_lagrange_v2_dynamic_no_reweight() -> Result<()> { - perform_grid_tests( - "LagrangeSubgridV2", - true, - &DYNAMIC_REFERENCE_NO_REWEIGHT, - &DYNAMIC_REFERENCE_NO_REWEIGHT, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - false, - ) -} - -#[test] -fn drell_yan_lagrange_sparse_dynamic() -> Result<()> { - perform_grid_tests( - "LagrangeSparseSubgrid", - true, - &DYNAMIC_REFERENCE, - &DYNAMIC_REFERENCE, - &[ - 0.030521584007828916, - 0.02108918668378717, - 0.014375068581090129, - 0.009699159574043398, - 0.006496206194633799, - 0.004328500638820811, - ], - true, - ) -} - -#[test] -fn 
grid_optimize() -> Result<()> { - let mut grid = generate_grid("LagrangeSubgridV2", false, false)?; - - assert_eq!(grid.orders().len(), 3); - assert_eq!(grid.channels().len(), 5); - assert!(matches!( - grid.subgrids()[[0, 0, 0]], - SubgridEnum::LagrangeSubgridV2 { .. } - )); - assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().len(), 50); - assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().len(), 50); - assert_eq!(grid.subgrids()[[0, 0, 0]].mu2_grid().len(), 30); - - let mut grid2 = grid.clone(); - grid2.optimize_using(GridOptFlags::OPTIMIZE_SUBGRID_TYPE); - - // `OPTIMIZE_SUBGRID_TYPE` changes the subgrid type ... - assert!(matches!( - grid2.subgrids()[[0, 0, 0]], - SubgridEnum::ImportOnlySubgridV2 { .. } - )); - // and the dimensions of the subgrid - assert_eq!(grid2.subgrids()[[0, 0, 0]].x1_grid().len(), 6); - assert_eq!(grid2.subgrids()[[0, 0, 0]].x2_grid().len(), 6); - assert_eq!(grid2.subgrids()[[0, 0, 0]].mu2_grid().len(), 4); - - grid.optimize_using(GridOptFlags::OPTIMIZE_SUBGRID_TYPE | GridOptFlags::STATIC_SCALE_DETECTION); - - assert!(matches!( - grid.subgrids()[[0, 0, 0]], - SubgridEnum::ImportOnlySubgridV2 { .. 
} - )); - // if `STATIC_SCALE_DETECTION` is present the `mu2_grid` dimension are better optimized - assert_eq!(grid.subgrids()[[0, 0, 0]].x1_grid().len(), 6); - assert_eq!(grid.subgrids()[[0, 0, 0]].x2_grid().len(), 6); - assert_eq!(grid.subgrids()[[0, 0, 0]].mu2_grid().len(), 1); - - // has no effect for this test - grid.optimize_using(GridOptFlags::SYMMETRIZE_CHANNELS); - - assert_eq!(grid.orders().len(), 3); - assert_eq!(grid.channels().len(), 5); - - grid.optimize_using(GridOptFlags::STRIP_EMPTY_ORDERS); - - assert_eq!(grid.orders().len(), 1); - assert_eq!(grid.channels().len(), 5); - - // has no effect for this test - grid.optimize_using(GridOptFlags::MERGE_SAME_CHANNELS); - - assert_eq!(grid.orders().len(), 1); - assert_eq!(grid.channels().len(), 5); - - grid.optimize_using(GridOptFlags::STRIP_EMPTY_CHANNELS); - - assert_eq!(grid.orders().len(), 1); - assert_eq!(grid.channels().len(), 3); - - Ok(()) -} From c38ec3e6e150e2ce40cd472e55f17463fb55ef55 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 11:17:28 +0200 Subject: [PATCH 19/42] Remove `pineappl_v0`'s unit tests --- pineappl_v0/src/bin.rs | 449 ---------------- pineappl_v0/src/boc.rs | 263 --------- pineappl_v0/src/convolutions.rs | 35 -- pineappl_v0/src/empty_subgrid.rs | 54 -- pineappl_v0/src/import_only_subgrid.rs | 351 ------------ pineappl_v0/src/lagrange_subgrid.rs | 430 --------------- pineappl_v0/src/ntuple_subgrid.rs | 103 ---- pineappl_v0/src/packed_array.rs | 415 -------------- pineappl_v0/src/pids.rs | 524 ------------------ pineappl_v0/src/sparse_array3.rs | 714 ------------------------- 10 files changed, 3338 deletions(-) diff --git a/pineappl_v0/src/bin.rs b/pineappl_v0/src/bin.rs index 5ba5b9cea..25fb5fbf0 100644 --- a/pineappl_v0/src/bin.rs +++ b/pineappl_v0/src/bin.rs @@ -786,452 +786,3 @@ impl BinLimits { *self = Self::new(limits); } } - -#[cfg(test)] -mod test { - use super::*; - use std::iter; - - #[test] - fn bin_limits_merge() { - let mut limits = 
BinLimits::new(vec![0.0, 1.0 / 3.0, 2.0 / 3.0, 1.0]); - - // right merge - limits - .merge(&BinLimits::new(vec![ - 1.0, - 1.0 + 1.0 / 3.0, - 1.0 + 2.0 / 3.0, - 2.0, - ])) - .unwrap(); - - assert_eq!(limits.left(), 0.0); - assert_eq!(limits.right(), 2.0); - assert_eq!(limits.bins(), 6); - - let non_consecutive_bins = BinLimits::new(vec![3.0, 4.0]); - - assert!(limits.merge(&non_consecutive_bins).is_err()); - - assert_eq!(limits.left(), 0.0); - assert_eq!(limits.right(), 2.0); - assert_eq!(limits.bins(), 6); - - // left merge - assert!(limits - .merge(&BinLimits::new(vec![ - -1.0, - -1.0 + 1.0 / 3.0, - -1.0 + 2.0 / 3.0, - 0.0 - ])) - .is_err()); - - assert_eq!(limits.left(), 0.0); - assert_eq!(limits.right(), 2.0); - assert_eq!(limits.bins(), 6); - } - - #[test] - fn bin_info_without_remapper() { - let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5]); - let info = BinInfo::new(&limits, None); - - assert_eq!(info.bins(), 4); - assert_eq!(info.dimensions(), 1); - assert_eq!(info.left(0), vec![0.0, 0.125, 0.25, 0.375]); - assert_eq!(info.right(0), vec![0.125, 0.25, 0.375, 0.5]); - assert_eq!(info.normalizations(), vec![0.125; 4]); - - assert_eq!(info.left(1), vec![]); - assert_eq!(info.right(1), vec![]); - - assert_eq!(info.slices(), [(0, 4)]); - } - - #[test] - fn bin_info_with_remapper() { - let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5]); - let remapper = BinRemapper::new( - vec![1.0; 4], - vec![ - (0.0, 0.5), - (0.25, 0.75), - (1.0, 2.0), - (0.5, 1.0), - (0.75, 1.0), - (2.0, 5.0), - (1.0, 2.0), - (1.75, 2.0), - (5.0, 5.5), - (2.5, 3.0), - (2.0, 2.5), - (6.0, 8.0), - ], - ) - .unwrap(); - let info = BinInfo::new(&limits, Some(&remapper)); - - assert_ne!(info, BinInfo::new(&limits, None)); - assert_eq!(info, BinInfo::new(&limits, Some(&remapper))); - - assert_eq!(info.bins(), 4); - assert_eq!(info.dimensions(), 3); - assert_eq!(info.left(0), vec![0.0, 0.5, 1.0, 2.5]); - assert_eq!(info.left(1), vec![0.25, 0.75, 1.75, 2.0]); - 
assert_eq!(info.left(2), vec![1.0, 2.0, 5.0, 6.0]); - assert_eq!(info.right(0), vec![0.5, 1.0, 2.0, 3.0]); - assert_eq!(info.right(1), vec![0.75, 1.0, 2.0, 2.5]); - assert_eq!(info.right(2), vec![2.0, 5.0, 5.5, 8.0]); - assert_eq!(info.normalizations(), vec![1.0; 4]); - - assert_eq!(info.left(3), vec![]); - assert_eq!(info.right(3), vec![]); - - assert_eq!(info.slices(), [(0, 1), (1, 2), (2, 3), (3, 4)]); - } - - #[test] - fn bin_info_slices() { - let limits = BinLimits::new( - iter::successors(Some(0.0), |n| Some(n + 1.0)) - .take(11) - .collect(), - ); - let remapper = BinRemapper::new( - vec![1.0; 10], - vec![ - (0.0, 1.0), - (0.0, 1.0), - (0.0, 1.0), - (0.0, 1.0), - (0.0, 1.0), - (1.0, 2.0), - (0.0, 1.0), - (0.0, 1.0), - (2.0, 3.0), - (0.0, 1.0), - (1.0, 2.0), - (0.0, 1.0), - (0.0, 1.0), - (1.0, 2.0), - (1.0, 2.0), - (0.0, 1.0), - (1.0, 2.0), - (2.0, 3.0), - (1.0, 2.0), - (1.0, 2.0), - (0.0, 1.0), - (1.0, 2.0), - (1.0, 2.0), - (1.0, 2.0), - (1.0, 2.0), - (1.0, 2.0), - (2.0, 3.0), - (1.0, 2.0), - (1.0, 2.0), - (3.0, 4.0), - ], - ) - .unwrap(); - let info = BinInfo::new(&limits, Some(&remapper)); - - assert_eq!(info.slices(), [(0, 3), (3, 6), (6, 10)]); - } - - #[test] - fn bin_info_trivial_slices() { - let limits = BinLimits::new( - iter::successors(Some(0.0), |x| Some(x + 1.0)) - .take(11) - .collect(), - ); - let remapper = BinRemapper::new( - vec![1.0; 10], - iter::successors(Some((0.0, 1.0)), |x| Some((x.0 + 1.0, x.1 + 1.0))) - .take(10) - .collect(), - ) - .unwrap(); - let info = BinInfo::new(&limits, Some(&remapper)); - - assert_eq!(info.slices(), [(0, 10)]); - } - - #[test] - fn bin_limits() { - // first check BinLimits with exactly representable bin sizes - let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]); - - assert_eq!(limits.bins(), 8); - assert_eq!(limits.index(-0.1), None); - assert_eq!(limits.index(0.1), Some(0)); - assert_eq!(limits.index(0.2), Some(1)); - assert_eq!(limits.index(0.3), Some(2)); - 
assert_eq!(limits.index(0.4), Some(3)); - assert_eq!(limits.index(0.55), Some(4)); - assert_eq!(limits.index(0.65), Some(5)); - assert_eq!(limits.index(0.8), Some(6)); - assert_eq!(limits.index(0.9), Some(7)); - assert_eq!(limits.index(1.1), None); - - // check bin limits that are equally sized, with values on the limits - let limits = BinLimits::new(vec![0.0, 0.125, 0.25, 0.375, 0.5, 0.625, 0.75, 0.875, 1.0]); - assert_eq!(limits.index(0.0), Some(0)); - assert_eq!(limits.index(0.125), Some(1)); - assert_eq!(limits.index(0.25), Some(2)); - assert_eq!(limits.index(0.375), Some(3)); - assert_eq!(limits.index(0.5), Some(4)); - assert_eq!(limits.index(0.625), Some(5)); - assert_eq!(limits.index(0.75), Some(6)); - assert_eq!(limits.index(0.875), Some(7)); - assert_eq!(limits.index(1.0), None); - - // now, check with bin sizes that are not exactly representable - let limits = BinLimits::new(vec![0.0, 0.1, 0.2, 0.3, 0.4, 0.5]); - - assert_eq!(limits.bins(), 5); - assert_eq!(limits.index(-1.0), None); - assert_eq!(limits.index(0.05), Some(0)); - assert_eq!(limits.index(0.15), Some(1)); - assert_eq!(limits.index(0.25), Some(2)); - assert_eq!(limits.index(0.35), Some(3)); - assert_eq!(limits.index(0.45), Some(4)); - assert_eq!(limits.index(1.1), None); - - // check the special case of one bin - let limits = BinLimits::new(vec![0.0, 1.0]); - assert_eq!(limits.bins(), 1); - assert_eq!(limits.index(-0.1), None); - assert_eq!(limits.index(0.5), Some(0)); - assert_eq!(limits.index(1.1), None); - - // check bin limits that are unequally sized, with ascending bin sizes - let limits = BinLimits::new(vec![0.0, 0.1, 0.3, 0.6, 1.0]); - assert_eq!(limits.bins(), 4); - assert_eq!(limits.index(-1.0), None); - assert_eq!(limits.index(0.05), Some(0)); - assert_eq!(limits.index(0.2), Some(1)); - assert_eq!(limits.index(0.4), Some(2)); - assert_eq!(limits.index(0.9), Some(3)); - assert_eq!(limits.index(1.3), None); - - // check bin limits that are unequally sized, with values on the limits - 
let limits = BinLimits::new(vec![0.0, 0.25, 0.75, 0.875, 1.0]); - assert_eq!(limits.index(0.0), Some(0)); - assert_eq!(limits.index(0.25), Some(1)); - assert_eq!(limits.index(0.75), Some(2)); - assert_eq!(limits.index(0.875), Some(3)); - assert_eq!(limits.index(1.0), None); - - // check bin limits that are unequally sized, with descending bin sizes - let limits = BinLimits::new(vec![0.0, 0.4, 0.7, 0.9, 1.0]); - assert_eq!(limits.bins(), 4); - assert_eq!(limits.index(-1.0), None); - assert_eq!(limits.index(0.2), Some(0)); - assert_eq!(limits.index(0.5), Some(1)); - assert_eq!(limits.index(0.8), Some(2)); - assert_eq!(limits.index(0.95), Some(3)); - assert_eq!(limits.index(1.3), None); - } - - #[test] - fn merge_bins() { - let mut limits = BinLimits::new(vec![0.0, 0.4, 0.7, 0.9, 1.0]); - limits.merge_bins(0..4).unwrap(); - - assert_eq!(limits.bins(), 1); - assert_eq!(limits.index(-1.0), None); - assert_eq!(limits.index(0.2), Some(0)); - assert_eq!(limits.index(0.5), Some(0)); - assert_eq!(limits.index(0.8), Some(0)); - assert_eq!(limits.index(0.95), Some(0)); - assert_eq!(limits.index(1.3), None); - } - - #[test] - fn merge_bins_error() { - let mut limits = BinLimits::new(vec![0.0, 0.4, 0.7, 0.9, 1.0]); - assert!(limits.merge_bins(0..5).is_err()); - } - - #[test] - fn bin_remapper() { - let remapper = BinRemapper::new( - vec![1.0; 4], - vec![ - (0.0, 0.5), - (0.25, 0.75), - (0.5, 1.0), - (0.75, 1.0), - (1.0, 2.0), - (1.75, 2.0), - (2.5, 3.0), - (2.0, 2.5), - ], - ) - .unwrap(); - - assert_ne!( - remapper, - BinRemapper::new( - vec![1.0; 4], - vec![(0.0, 1.0), (1.0, 2.0), (2.0, 3.0), (4.0, 5.0)] - ) - .unwrap() - ); - - assert!(matches!( - BinRemapper::new(vec![1.0; 8], vec![(0.0, 1.0); 2]), - Err(BinRemapperNewError::DimensionUnknown{normalizations_len, limits_len}) - if (normalizations_len == 8) && (limits_len == 2) - )); - - assert_eq!(remapper.bins(), 4); - assert_eq!(remapper.dimensions(), 2); - assert_eq!( - remapper.limits(), - &[ - (0.0, 0.5), - (0.25, 0.75), 
- (0.5, 1.0), - (0.75, 1.0), - (1.0, 2.0), - (1.75, 2.0), - (2.5, 3.0), - (2.0, 2.5) - ] - ); - assert_eq!(remapper.normalizations(), vec![1.0; 4]); - } - - #[test] - fn bin_remapper_merge_bins() { - let mut remapper = BinRemapper::new( - vec![1.0; 4], - vec![(0.0, 0.25), (0.25, 0.5), (0.5, 0.75), (0.75, 1.0)], - ) - .unwrap(); - - remapper.merge_bins(0..4).unwrap(); - assert_eq!(remapper.bins(), 1); - assert_eq!(remapper.dimensions(), 1); - assert_eq!(remapper.limits(), [(0.0, 1.0)]); - assert_eq!(remapper.normalizations(), [4.0]); - assert_eq!(remapper.slices(), [(0, 1)]); - } - - //#[test] - //#[ignore] // FIXME: there's a bug in the `slices` method - //#[should_panic] - //fn bin_remapper_merge_bins_panic() { - // let mut remapper = - // BinRemapper::new(vec![1.0; 3], vec![(0.0, 0.25), (0.5, 0.75), (0.75, 1.0)]).unwrap(); - - // //assert_eq!(remapper.slices(), [(0, 1), (1, 3)]); - // remapper.merge_bins(0..3).unwrap(); - //} - - #[test] - fn limit_parsing_failure() { - assert_eq!( - BinRemapper::from_str("0,1,2,x").unwrap_err().to_string(), - "unable to parse limit 'x': 'invalid float literal')" - ); - } - - #[test] - fn pipe_syntax_first_dimension() { - assert_eq!( - BinRemapper::from_str("|0,1,2").unwrap_err().to_string(), - "'|' syntax not meaningful for first dimension" - ); - } - - #[test] - fn pipe_syntax_first_empty() { - assert_eq!( - BinRemapper::from_str("0,1,2;0,2,4;||") - .unwrap_err() - .to_string(), - "empty repetition with '|'" - ); - } - - #[test] - fn colon_syntax_bad_string() { - assert_eq!( - BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|::") - .unwrap_err() - .to_string(), - "unable to parse 'N:M' syntax from: '::' (N: 'cannot parse integer from empty string', M: 'invalid digit found in string')" - ); - } - - #[test] - fn colon_syntax_bad_lhs() { - assert_eq!( - BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|2.5:|:3|:3") - .unwrap_err() - .to_string(), - "unable to parse 'N:M' syntax from: '2.5:' (N: 'invalid digit found in string', M: 'cannot 
parse integer from empty string')" - ); - } - - #[test] - fn colon_syntax_bad_rhs() { - assert_eq!( - BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|:2.5|:3|:3") - .unwrap_err() - .to_string(), - "unable to parse 'N:M' syntax from: ':2.5' (N: 'cannot parse integer from empty string', M: 'invalid digit found in string')" - ); - } - - #[test] - fn colon_syntax_no_limits() { - assert_eq!( - BinRemapper::from_str("0,1,2;0,2,4;1,2,3,4,5|:4|:3|:3") - .unwrap_err() - .to_string(), - "no limits due to ':' syntax" - ); - } - - #[test] - fn pipe_syntax_too_few_pipes() { - assert_eq!( - BinRemapper::from_str("0,1,2;0,2,4;1,2,3|4,5,6|7,8,9") - .unwrap_err() - .to_string(), - "missing '|' specification: number of variants too small" - ); - } - - #[test] - fn bin_remapper_new_dimension_unknown() { - assert_eq!( - BinRemapper::new( - vec![1.0, 1.0, 1.0], - vec![(1.0, 2.0), (2.0, 3.0), (3.0, 4.0), (4.0, 5.0)], - ) - .unwrap_err() - .to_string(), - "could not determine the dimensions from a normalization vector with length 3 and limits vector with length 4" - ); - } - - #[test] - fn bin_remapper_new_overlapping_bins() { - assert_eq!( - BinRemapper::new( - vec![1.0, 1.0, 1.0], - vec![(1.0, 2.0), (2.0, 3.0), (1.0, 2.0)], - ) - .unwrap_err() - .to_string(), - "the bin limits for the bins with indices 2 overlap with other bins" - ); - } -} diff --git a/pineappl_v0/src/boc.rs b/pineappl_v0/src/boc.rs index 52da7f060..7e790deaf 100644 --- a/pineappl_v0/src/boc.rs +++ b/pineappl_v0/src/boc.rs @@ -504,266 +504,3 @@ macro_rules! 
channel { $crate::boc::Channel::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) }; } - -#[cfg(test)] -mod tests { - use super::{Channel, Order, ParseOrderError}; - use crate::pids; - - #[test] - fn order_from_str() { - assert_eq!("as1".parse(), Ok(Order::new(1, 0, 0, 0))); - assert_eq!("a1".parse(), Ok(Order::new(0, 1, 0, 0))); - assert_eq!("as1lr1".parse(), Ok(Order::new(1, 0, 1, 0))); - assert_eq!("as1lf1".parse(), Ok(Order::new(1, 0, 0, 1))); - assert_eq!( - "ab12".parse::(), - Err(ParseOrderError("unknown coupling: 'ab'".to_owned())) - ); - assert_eq!( - "ab123456789000000".parse::(), - Err(ParseOrderError( - "error while parsing exponent of 'ab': number too large to fit in target type" - .to_owned() - )) - ); - } - - #[test] - fn order_cmp() { - let mut orders = [ - Order::new(1, 2, 1, 0), - Order::new(1, 2, 0, 1), - Order::new(1, 2, 0, 0), - Order::new(0, 3, 1, 0), - Order::new(0, 3, 0, 1), - Order::new(0, 3, 0, 0), - Order::new(0, 2, 0, 0), - ]; - - orders.sort(); - - assert_eq!(orders[0], Order::new(0, 2, 0, 0)); - assert_eq!(orders[1], Order::new(1, 2, 0, 0)); - assert_eq!(orders[2], Order::new(1, 2, 0, 1)); - assert_eq!(orders[3], Order::new(1, 2, 1, 0)); - assert_eq!(orders[4], Order::new(0, 3, 0, 0)); - assert_eq!(orders[5], Order::new(0, 3, 0, 1)); - assert_eq!(orders[6], Order::new(0, 3, 1, 0)); - } - - #[test] - fn order_create_mask() { - // Drell—Yan orders - let orders = [ - Order::new(0, 2, 0, 0), // LO : alpha^2 - Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 - Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 - ]; - - assert_eq!( - Order::create_mask(&orders, 0, 0, false), - [false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 1, false), - [true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 2, false), 
- [true, false, true, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 3, false), - [true, false, true, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 1, 0, false), - [true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 1, false), - [true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 2, false), - [true, false, true, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 3, false), - [true, false, true, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 2, 0, false), - [true, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 1, false), - [true, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 2, false), - [true, true, true, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 3, false), - [true, true, true, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 3, 0, false), - [true, true, false, true, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 1, false), - [true, true, false, true, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 2, false), - [true, true, true, true, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 3, false), - [true, true, true, true, true, true] - ); - - // Top-pair production orders - let orders = [ - Order::new(2, 0, 0, 0), // LO QCD : alphas^2 - Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha - Order::new(0, 2, 0, 0), // LO EW : alpha^2 - Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 - Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha - Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 - Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - Order::new(4, 0, 0, 0), // NNLO QCD : alphas^4 - Order::new(3, 1, 0, 0), // NNLO QCD—EW : alphas^3 alpha - Order::new(2, 2, 0, 0), // NNLO QCD—EW : alphas^2 alpha^2 - 
Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 - ]; - - assert_eq!( - Order::create_mask(&orders, 0, 0, false), - [false, false, false, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 1, false), - [false, false, true, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 2, false), - [false, false, true, false, false, false, true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 0, 3, false), - [false, false, true, false, false, false, true, false, false, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 1, 0, false), - [true, false, false, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 1, false), - [true, true, true, false, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 2, false), - [true, true, true, false, false, false, true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 1, 3, false), - [true, true, true, false, false, false, true, false, false, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 2, 0, false), - [true, false, false, true, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 1, false), - [true, true, true, true, false, false, false, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 2, false), - [true, true, true, true, true, true, true, false, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 2, 3, false), - [true, true, true, true, true, true, true, false, false, false, false, true] - ); - assert_eq!( - Order::create_mask(&orders, 3, 0, false), - [true, false, false, true, false, false, false, true, false, false, 
false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 1, false), - [true, true, true, true, false, false, false, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 2, false), - [true, true, true, true, true, true, true, true, false, false, false, false] - ); - assert_eq!( - Order::create_mask(&orders, 3, 3, false), - [true, true, true, true, true, true, true, true, true, true, true, true] - ); - } - - #[test] - fn channel_translate() { - let channel = Channel::translate(&channel![103, 203, 2.0], &pids::evol_to_pdg_mc_ids); - - assert_eq!( - channel, - channel![ 2, 2, 2.0; 2, -2, -2.0; 2, 1, -2.0; 2, -1, 2.0; - -2, 2, 2.0; -2, -2, -2.0; -2, 1, -2.0; -2, -1, 2.0; - 1, 2, -2.0; 1, -2, 2.0; 1, 1, 2.0; 1, -1, -2.0; - -1, 2, -2.0; -1, -2, 2.0; -1, 1, 2.0; -1, -1, -2.0] - ); - } - - #[test] - fn channel_from_str() { - assert_eq!( - str::parse::(" 1 * ( 2 , -2) + 2* (4,-4)").unwrap(), - channel![2, -2, 1.0; 4, -4, 2.0] - ); - - assert_eq!( - str::parse::("* ( 2, -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "cannot parse float from empty string" - ); - - assert_eq!( - str::parse::(" 1 ( 2 -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing '*' in ' 1 ( 2 -2) '" - ); - - assert_eq!( - str::parse::(" 1 * ( 2 -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing ',' in ' ( 2 -2) '" - ); - - assert_eq!( - str::parse::(" 1 * 2, -2) + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing '(' in ' 2, -2) '" - ); - - assert_eq!( - str::parse::(" 1 * ( 2, -2 + 2* (4,-4)") - .unwrap_err() - .to_string(), - "missing ')' in ' ( 2, -2 '" - ); - } -} diff --git a/pineappl_v0/src/convolutions.rs b/pineappl_v0/src/convolutions.rs index 6adf603a0..2424f2ba0 100644 --- a/pineappl_v0/src/convolutions.rs +++ b/pineappl_v0/src/convolutions.rs @@ -45,38 +45,3 @@ impl Convolution { } } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn convolution_charge_conjugate() { - 
assert_eq!(Convolution::None.charge_conjugate(), Convolution::None); - assert_eq!( - Convolution::UnpolPDF(2212).charge_conjugate(), - Convolution::UnpolPDF(-2212) - ); - assert_eq!( - Convolution::PolPDF(2212).charge_conjugate(), - Convolution::PolPDF(-2212) - ); - assert_eq!( - Convolution::UnpolFF(2212).charge_conjugate(), - Convolution::UnpolFF(-2212) - ); - assert_eq!( - Convolution::PolFF(2212).charge_conjugate(), - Convolution::PolFF(-2212) - ); - } - - #[test] - fn convolution_pid() { - assert_eq!(Convolution::None.pid(), None); - assert_eq!(Convolution::UnpolPDF(2212).pid(), Some(2212)); - assert_eq!(Convolution::PolPDF(2212).pid(), Some(2212)); - assert_eq!(Convolution::UnpolFF(2212).pid(), Some(2212)); - assert_eq!(Convolution::PolFF(2212).pid(), Some(2212)); - } -} diff --git a/pineappl_v0/src/empty_subgrid.rs b/pineappl_v0/src/empty_subgrid.rs index f53fe133c..4cb8cbba5 100644 --- a/pineappl_v0/src/empty_subgrid.rs +++ b/pineappl_v0/src/empty_subgrid.rs @@ -74,57 +74,3 @@ impl Subgrid for EmptySubgridV1 { None } } - -#[cfg(test)] -mod tests { - use super::*; - - #[test] - fn create_empty() { - let mut subgrid = EmptySubgridV1; - assert_eq!(subgrid.convolve(&[], &[], &[], &mut |_, _, _| 0.0), 0.0,); - assert!(subgrid.is_empty()); - subgrid.merge(&mut EmptySubgridV1.into(), false); - subgrid.scale(2.0); - subgrid.symmetrize(); - assert!(subgrid.clone_empty().is_empty()); - assert_eq!( - subgrid.stats(), - Stats { - total: 0, - allocated: 0, - zeros: 0, - overhead: 0, - bytes_per_value: 0, - } - ); - assert_eq!(subgrid.static_scale(), None); - } - - #[test] - #[should_panic(expected = "EmptySubgridV1 doesn't support the fill operation")] - fn fill() { - let mut subgrid = EmptySubgridV1; - subgrid.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 0.0, - }); - } - - #[test] - fn q2_grid() { - assert!(EmptySubgridV1.mu2_grid().is_empty()); - } - - #[test] - fn x1_grid() { - assert!(EmptySubgridV1.x1_grid().is_empty()); - } - - #[test] - fn x2_grid() 
{ - assert!(EmptySubgridV1.x2_grid().is_empty()); - } -} diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs index a4b5df098..0e85f67d1 100644 --- a/pineappl_v0/src/import_only_subgrid.rs +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -432,354 +432,3 @@ impl From<&SubgridEnum> for ImportOnlySubgridV2 { } } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::lagrange_subgrid::LagrangeSubgridV2; - use crate::subgrid::{ExtraSubgridParams, SubgridParams}; - use float_cmp::assert_approx_eq; - use rand::distributions::{Distribution, Uniform}; - use rand::Rng; - use rand_pcg::Pcg64; - - #[test] - fn test_v1() { - let x = vec![ - 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, - ]; - let mut grid1: SubgridEnum = ImportOnlySubgridV1::new( - SparseArray3::new(1, 10, 10), - vec![0.0], - x.clone(), - x.clone(), - ) - .into(); - - assert_eq!( - grid1.stats(), - Stats { - total: 100, - allocated: 0, - zeros: 0, - overhead: 2, - bytes_per_value: 8, - } - ); - - let mu2 = vec![Mu2 { ren: 0.0, fac: 0.0 }]; - - assert_eq!(grid1.mu2_grid().as_ref(), mu2); - assert_eq!(grid1.x1_grid().as_ref(), x); - assert_eq!(grid1.x2_grid(), grid1.x1_grid()); - - assert!(grid1.is_empty()); - - // only use exactly representable numbers here so that we can avoid using approx_eq - if let SubgridEnum::ImportOnlySubgridV1(ref mut x) = grid1 { - x.array_mut()[[0, 1, 2]] = 1.0; - x.array_mut()[[0, 1, 3]] = 2.0; - x.array_mut()[[0, 4, 3]] = 4.0; - x.array_mut()[[0, 7, 1]] = 8.0; - } else { - unreachable!(); - } - - assert!(!grid1.is_empty()); - - assert_eq!(grid1.indexed_iter().next(), Some(((0, 1, 2), 1.0))); - assert_eq!(grid1.indexed_iter().nth(1), Some(((0, 1, 3), 2.0))); - assert_eq!(grid1.indexed_iter().nth(2), Some(((0, 4, 3), 4.0))); - assert_eq!(grid1.indexed_iter().nth(3), Some(((0, 7, 1), 8.0))); - - // symmetric luminosity function - let lumi = - &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) 
-> f64; - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); - - // create grid with transposed entries, but different q2 - let mut grid2: SubgridEnum = ImportOnlySubgridV1::new( - SparseArray3::new(1, 10, 10), - vec![1.0], - x.clone(), - x.clone(), - ) - .into(); - if let SubgridEnum::ImportOnlySubgridV1(ref mut x) = grid2 { - x.array_mut()[[0, 2, 1]] = 1.0; - x.array_mut()[[0, 3, 1]] = 2.0; - x.array_mut()[[0, 3, 4]] = 4.0; - x.array_mut()[[0, 1, 7]] = 8.0; - } else { - unreachable!(); - } - assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); - - assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); - assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); - assert_eq!(grid2.indexed_iter().nth(2), Some(((0, 3, 1), 2.0))); - assert_eq!(grid2.indexed_iter().nth(3), Some(((0, 3, 4), 4.0))); - - grid1.merge(&mut grid2, false); - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - let mut grid1 = { - let mut g = grid1.clone_empty(); - g.merge(&mut grid1, false); - g - }; - - // the luminosity function is symmetric, so after symmetrization the result must be - // unchanged - grid1.symmetrize(); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - grid1.scale(2.0); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); - - assert_eq!( - grid1.stats(), - Stats { - total: 200, - allocated: 14, - zeros: 6, - overhead: 42, - bytes_per_value: 8, - } - ); - } - - #[test] - fn test_v2() { - let x = vec![ - 0.015625, 0.03125, 0.0625, 0.125, 0.1875, 0.25, 0.375, 0.5, 0.75, 1.0, - ]; - let mut grid1: SubgridEnum = ImportOnlySubgridV2::new( - SparseArray3::new(1, 10, 10), - vec![Mu2 { ren: 0.0, fac: 0.0 }], - x.clone(), - x.clone(), - ) - .into(); - - let mu2 = vec![Mu2 { ren: 0.0, fac: 0.0 }]; - - assert_eq!(grid1.mu2_grid().as_ref(), mu2); - assert_eq!(grid1.x1_grid().as_ref(), x); - assert_eq!(grid1.x2_grid(), grid1.x1_grid()); - - assert!(grid1.is_empty()); - - // only use exactly 
representable numbers here so that we can avoid using approx_eq - if let SubgridEnum::ImportOnlySubgridV2(ref mut x) = grid1 { - x.array_mut()[[0, 1, 2]] = 1.0; - x.array_mut()[[0, 1, 3]] = 2.0; - x.array_mut()[[0, 4, 3]] = 4.0; - x.array_mut()[[0, 7, 1]] = 8.0; - } else { - unreachable!(); - } - - assert!(!grid1.is_empty()); - - assert_eq!(grid1.indexed_iter().next(), Some(((0, 1, 2), 1.0))); - assert_eq!(grid1.indexed_iter().nth(1), Some(((0, 1, 3), 2.0))); - assert_eq!(grid1.indexed_iter().nth(2), Some(((0, 4, 3), 4.0))); - assert_eq!(grid1.indexed_iter().nth(3), Some(((0, 7, 1), 8.0))); - - // symmetric luminosity function - let lumi = - &mut (|ix1, ix2, _| x[ix1] * x[ix2]) as &mut dyn FnMut(usize, usize, usize) -> f64; - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 0.228515625); - - // create grid with transposed entries, but different q2 - let mut grid2: SubgridEnum = ImportOnlySubgridV2::new( - SparseArray3::new(1, 10, 10), - vec![Mu2 { ren: 1.0, fac: 1.0 }], - x.clone(), - x.clone(), - ) - .into(); - if let SubgridEnum::ImportOnlySubgridV2(ref mut x) = grid2 { - x.array_mut()[[0, 2, 1]] = 1.0; - x.array_mut()[[0, 3, 1]] = 2.0; - x.array_mut()[[0, 3, 4]] = 4.0; - x.array_mut()[[0, 1, 7]] = 8.0; - } else { - unreachable!(); - } - assert_eq!(grid2.convolve(&x, &x, &mu2, lumi), 0.228515625); - - assert_eq!(grid2.indexed_iter().next(), Some(((0, 1, 7), 8.0))); - assert_eq!(grid2.indexed_iter().nth(1), Some(((0, 2, 1), 1.0))); - assert_eq!(grid2.indexed_iter().nth(2), Some(((0, 3, 1), 2.0))); - assert_eq!(grid2.indexed_iter().nth(3), Some(((0, 3, 4), 4.0))); - - grid1.merge(&mut grid2, false); - - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - let mut grid1 = { - let mut g = grid1.clone_empty(); - g.merge(&mut grid1, false); - g - }; - - // the luminosity function is symmetric, so after symmetrization the result must be - // unchanged - grid1.symmetrize(); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 2.0 * 0.228515625); - - 
grid1.scale(2.0); - assert_eq!(grid1.convolve(&x, &x, &mu2, lumi), 4.0 * 0.228515625); - - assert_eq!( - grid1.stats(), - Stats { - total: 200, - allocated: 14, - zeros: 6, - overhead: 42, - bytes_per_value: 8, - } - ); - } - - #[test] - #[should_panic(expected = "ImportOnlySubgridV1 doesn't support the fill operation")] - fn fill_panic_v1() { - let mut grid = - ImportOnlySubgridV1::new(SparseArray3::new(1, 1, 1), vec![1.0], vec![1.0], vec![1.0]); - - grid.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - } - - #[test] - #[should_panic(expected = "ImportOnlySubgridV2 doesn't support the fill operation")] - fn fill_panic_v2() { - let mut grid = ImportOnlySubgridV2::new( - SparseArray3::new(1, 1, 1), - vec![Mu2 { ren: 1.0, fac: 1.0 }], - vec![1.0], - vec![1.0], - ); - - grid.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - } - - #[test] - fn from_lagrange_subgrid_v2() { - let mut lagrange = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - // by default this should have 40 grid points - assert_eq!(lagrange.mu2_grid().len(), 40); - - // only `q2` are important: they're not static and fall between two grid points - lagrange.fill(&Ntuple { - x1: 0.25, - x2: 0.5, - q2: 10000.0, - weight: 1.0, - }); - lagrange.fill(&Ntuple { - x1: 0.0625, - x2: 0.125, - q2: 10001.0, - weight: 1.0, - }); - lagrange.fill(&Ntuple { - x1: 0.5, - x2: 0.0625, - q2: 10002.0, - weight: 1.0, - }); - lagrange.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 10003.0, - weight: 1.0, - }); - - let x1 = lagrange.x1_grid().to_vec(); - let x2 = lagrange.x2_grid().to_vec(); - let mu2 = lagrange.mu2_grid().to_vec(); - - let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; - let reference = lagrange.convolve(&x1, &x2, &mu2, lumi); - - let imported = ImportOnlySubgridV2::from(&lagrange.into()); - let test = imported.convolve(&x1, &x2, &mu2, lumi); - - // make sure the conversion did not change the results - 
assert_approx_eq!(f64, reference, test, ulps = 8); - - // all unneccessary grid points should be gone; since we are inserting between two - // interpolation grid points, the imported grid should have as many interpolation grid - // points as its interpolation order - assert_eq!(imported.mu2_grid().len(), 4); - } - - #[test] - fn merge_with_different_x_grids() { - let mut params = SubgridParams::default(); - let mut grid1 = LagrangeSubgridV2::new(¶ms, &ExtraSubgridParams::default()); - - // change parameters of the second grid to force non-trivial merging - params.set_x_min(0.2); - params.set_x_max(0.5); - - let mut grid2 = LagrangeSubgridV2::new(¶ms, &ExtraSubgridParams::default()); - let mut rng = Pcg64::new(0xcafef00dd15ea5e5, 0xa02bdbf7bb3c0a7ac28fa16a64abf96); - let q2_range = Uniform::new(1e4, 1e8); - - for _ in 0..1000 { - grid1.fill(&Ntuple { - x1: rng.r#gen(), - x2: rng.r#gen(), - q2: q2_range.sample(&mut rng), - weight: 1.0, - }); - grid2.fill(&Ntuple { - x1: rng.r#gen(), - x2: rng.r#gen(), - q2: q2_range.sample(&mut rng), - weight: 1.0, - }); - } - - let lumi = &mut (|_, _, _| 1.0) as &mut dyn FnMut(usize, usize, usize) -> f64; - let result1 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - let result2 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); - - let mut grid1: SubgridEnum = ImportOnlySubgridV2::from(&grid1.into()).into(); - let mut grid2: SubgridEnum = ImportOnlySubgridV2::from(&grid2.into()).into(); - - let result3 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), &grid1.mu2_grid(), lumi); - let result4 = grid2.convolve(&grid2.x1_grid(), &grid2.x2_grid(), &grid2.mu2_grid(), lumi); - - // conversion from LangrangeSubgridV2 to ImportOnlySubgridV2 shouldn't change the results - assert!((result3 / result1 - 1.0).abs() < 1e-13); - assert!((result4 / result2 - 1.0).abs() < 1e-13); - - grid1.merge(&mut grid2, false); - - let result5 = grid1.convolve(&grid1.x1_grid(), &grid1.x2_grid(), 
&grid1.mu2_grid(), lumi); - - // merging the two grids should give the sum of the two results - assert!((result5 / (result3 + result4) - 1.0).abs() < 1e-12); - } -} diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs index 9c125fbec..f8f1bc98d 100644 --- a/pineappl_v0/src/lagrange_subgrid.rs +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -1058,433 +1058,3 @@ impl From<&LagrangeSubgridV1> for LagrangeSparseSubgridV1 { } } } - -#[cfg(test)] -mod tests { - use super::*; - use float_cmp::assert_approx_eq; - - fn test_q2_slice_methods(mut grid: G) -> G { - grid.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid.fill(&Ntuple { - x1: 0.009, - x2: 0.01, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid.fill(&Ntuple { - x1: 0.009, - x2: 0.5, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - // the grid must not be empty - assert!(!grid.is_empty()); - - let x1 = grid.x1_grid(); - let x2 = grid.x2_grid(); - let mu2 = grid.mu2_grid(); - - let reference = grid.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - let mut test = 0.0; - - // check `reference` against manually calculated result from q2 slices - for ((_, ix1, ix2), value) in grid.indexed_iter() { - test += value / (x1[ix1] * x2[ix2]); - } - - assert_approx_eq!(f64, test, reference, ulps = 8); - - grid - } - - fn test_merge_method(mut grid1: G, mut grid2: G, mut grid3: G) - where - SubgridEnum: From, - { - grid1.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid1.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid1.fill(&Ntuple { - x1: 0.009, - x2: 0.01, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid1.fill(&Ntuple { - x1: 0.009, - x2: 0.5, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - assert!(!grid1.is_empty()); - assert!(grid2.is_empty()); - - let x1 = 
grid1.x1_grid().into_owned(); - let x2 = grid1.x2_grid().into_owned(); - let mu2 = grid1.mu2_grid().into_owned(); - - let reference = - grid1.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - // merge filled grid into empty one - grid2.merge(&mut grid1.into(), false); - assert!(!grid2.is_empty()); - - let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - assert_approx_eq!(f64, reference, merged, ulps = 8); - - grid3.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid3.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid3.fill(&Ntuple { - x1: 0.009, - x2: 0.01, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - grid3.fill(&Ntuple { - x1: 0.009, - x2: 0.5, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - grid2.merge(&mut grid3.into(), false); - - let merged = grid2.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - assert_approx_eq!(f64, 2.0 * reference, merged, ulps = 8); - } - - fn test_empty_subgrid(mut grid: G) { - // this following events should be skipped - - // q2 is too large - grid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 2e+8, - weight: 1.0, - }); - // q2 is too small - grid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 5e+1, - weight: 1.0, - }); - // x1 is too large - grid.fill(&Ntuple { - x1: 1.1, - x2: 0.5, - q2: 1e+3, - weight: 1.0, - }); - // x1 is too small - grid.fill(&Ntuple { - x1: 0.5, - x2: 1e-7, - q2: 1e+3, - weight: 1.0, - }); - // x1 is too large - grid.fill(&Ntuple { - x1: 0.5, - x2: 1.1, - q2: 1e+3, - weight: 1.0, - }); - // x1 is too small - grid.fill(&Ntuple { - x1: 1e-7, - x2: 0.5, - q2: 1e+3, - weight: 1.0, - }); - - let x1 = grid.x1_grid(); - let x2 = grid.x2_grid(); - let mu2 = grid.mu2_grid(); - - let result = grid.convolve(&x1, &x2, &mu2, &mut |_, _, _| 1.0); - - assert_eq!(result, 0.0); - } - - #[test] - fn q2_slice_v1() { - let subgrid = 
test_q2_slice_methods(LagrangeSubgridV1::new(&SubgridParams::default())); - - assert_eq!( - subgrid.stats(), - Stats { - total: 10000, - allocated: 10000, - zeros: 256, - overhead: 0, - bytes_per_value: 8 - } - ); - } - - #[test] - fn q2_slice_v2() { - let subgrid = test_q2_slice_methods(LagrangeSubgridV2::new( - &SubgridParams::default(), - &ExtraSubgridParams::default(), - )); - - assert_eq!( - subgrid.stats(), - Stats { - total: 10000, - allocated: 10000, - zeros: 256, - overhead: 0, - bytes_per_value: 8 - } - ); - } - - #[test] - fn sparse_q2_slice() { - let subgrid = - test_q2_slice_methods(LagrangeSparseSubgridV1::new(&SubgridParams::default())); - - assert_eq!( - subgrid.stats(), - Stats { - total: 100000, - allocated: 432, - zeros: 176, - overhead: 402, - bytes_per_value: 8 - } - ); - } - - #[test] - fn fill_zero_v1() { - let mut subgrid = LagrangeSubgridV1::new(&SubgridParams::default()); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - assert_eq!(subgrid.indexed_iter().count(), 0); - } - - #[test] - fn fill_zero_v1_sparse() { - let mut subgrid = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - assert_eq!(subgrid.indexed_iter().count(), 0); - } - - #[test] - fn fill_zero_v2() { - let mut subgrid = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - assert_eq!(subgrid.indexed_iter().count(), 0); - } - - #[test] - fn from() { - // check conversion of empty grids - let mut dense = LagrangeSubgridV1::new(&SubgridParams::default()); - assert!(dense.is_empty()); - let sparse = LagrangeSparseSubgridV1::from(&dense); - assert!(sparse.is_empty()); - - let mu2 = dense.mu2_grid().into_owned(); - let x1 = 
dense.x1_grid().into_owned(); - let x2 = dense.x2_grid().into_owned(); - - assert_eq!(mu2, *sparse.mu2_grid()); - assert_eq!(x1, *sparse.x1_grid()); - assert_eq!(x2, *sparse.x2_grid()); - - // check conversion of a filled grid - dense.fill(&Ntuple { - x1: 0.1, - x2: 0.2, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - dense.fill(&Ntuple { - x1: 0.9, - x2: 0.1, - q2: 90.0_f64.powi(2), - weight: 1.0, - }); - - assert!(!dense.is_empty()); - - let sparse = LagrangeSparseSubgridV1::from(&dense); - assert!(!sparse.is_empty()); - - let reference = - dense.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - let converted = - sparse.convolve(&x1, &x2, &mu2, &mut |ix1, ix2, _| 1.0 / (x1[ix1] * x2[ix2])); - - assert_approx_eq!(f64, reference, converted, ulps = 8); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v1_with_sparse() { - let mut dense = LagrangeSubgridV1::new(&SubgridParams::default()); - let sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - - dense.merge(&mut sparse.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v1_with_dense_v2() { - let mut one = LagrangeSubgridV1::new(&SubgridParams::default()); - let two = LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - one.merge(&mut two.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v2_with_dense_v1() { - let mut two = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - let one = LagrangeSubgridV1::new(&SubgridParams::default()); - - two.merge(&mut one.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_dense_v2_with_sparse() { - let mut dense = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - let sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - - dense.merge(&mut 
sparse.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_sparse_with_dense_v1() { - let mut sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - let dense = LagrangeSubgridV1::new(&SubgridParams::default()); - - sparse.merge(&mut dense.into(), false); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn merge_sparse_with_dense_v2() { - let mut sparse = LagrangeSparseSubgridV1::new(&SubgridParams::default()); - let dense = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()); - - sparse.merge(&mut dense.into(), false); - } - - #[test] - fn merge_dense_v1() { - test_merge_method( - LagrangeSubgridV1::new(&SubgridParams::default()), - LagrangeSubgridV1::new(&SubgridParams::default()), - LagrangeSubgridV1::new(&SubgridParams::default()), - ); - } - - #[test] - fn merge_dense_v2() { - test_merge_method( - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()), - ); - } - - #[test] - fn merge_sparse() { - test_merge_method( - LagrangeSparseSubgridV1::new(&SubgridParams::default()), - LagrangeSparseSubgridV1::new(&SubgridParams::default()), - LagrangeSparseSubgridV1::new(&SubgridParams::default()), - ); - } - - #[test] - fn empty_v1() { - test_empty_subgrid(LagrangeSubgridV1::new(&SubgridParams::default())); - } - - #[test] - fn empty_v2() { - test_empty_subgrid(LagrangeSubgridV2::new( - &SubgridParams::default(), - &ExtraSubgridParams::default(), - )); - } - - #[test] - fn empty_sparse() { - test_empty_subgrid(LagrangeSparseSubgridV1::new(&SubgridParams::default())); - } -} diff --git a/pineappl_v0/src/ntuple_subgrid.rs b/pineappl_v0/src/ntuple_subgrid.rs index b1bb0a5b3..c47292cb7 100644 --- a/pineappl_v0/src/ntuple_subgrid.rs +++ b/pineappl_v0/src/ntuple_subgrid.rs 
@@ -93,106 +93,3 @@ impl Subgrid for NtupleSubgridV1 { todo!() } } - -#[cfg(test)] -mod tests { - use super::*; - use crate::lagrange_subgrid::LagrangeSubgridV2; - use crate::subgrid::{ExtraSubgridParams, SubgridParams}; - - #[test] - #[should_panic(expected = "NtupleSubgridV1 doesn't support the convolve operation")] - fn convolve() { - NtupleSubgridV1::new().convolve(&[], &[], &[], &mut |_, _, _| 0.0); - } - - #[test] - fn fill_zero() { - let mut subgrid = NtupleSubgridV1::new(); - - subgrid.fill(&Ntuple { - x1: 0.5, - x2: 0.5, - q2: 1000.0, - weight: 0.0, - }); - - assert!(subgrid.is_empty()); - } - - #[test] - #[should_panic(expected = "NtupleSubgridV1 doesn't support the indexed_iter operation")] - fn indexed_iter() { - // `next` isn't called because `indexed_iter` panics, but it suppresses a warning about an - // unused result - NtupleSubgridV1::new().indexed_iter().next(); - } - - #[test] - fn stats() { - let subgrid = NtupleSubgridV1::new(); - assert_eq!( - subgrid.stats(), - Stats { - total: 0, - allocated: 0, - zeros: 0, - overhead: 0, - bytes_per_value: 32, - } - ); - } - - #[test] - #[should_panic(expected = "not yet implemented")] - fn static_scale() { - let subgrid = NtupleSubgridV1::new(); - subgrid.static_scale(); - } - - #[test] - #[should_panic( - expected = "NtupleSubgridV1 doesn't support the merge operation with subgrid types other than itself" - )] - fn merge_with_lagrange_subgrid() { - let mut subgrid = NtupleSubgridV1::new(); - let mut other = - LagrangeSubgridV2::new(&SubgridParams::default(), &ExtraSubgridParams::default()) - .into(); - subgrid.merge(&mut other, false); - } - - #[test] - fn test() { - let mut subgrid1: SubgridEnum = NtupleSubgridV1::new().into(); - - assert!(subgrid1.is_empty()); - - subgrid1.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - - assert!(!subgrid1.is_empty()); - - assert_eq!(subgrid1.mu2_grid().as_ref(), []); - assert_eq!(subgrid1.x1_grid().as_ref(), []); - 
assert_eq!(subgrid1.x2_grid().as_ref(), []); - - subgrid1.symmetrize(); - subgrid1.scale(2.0); - - let mut subgrid2: SubgridEnum = subgrid1.clone_empty(); - - subgrid2.fill(&Ntuple { - x1: 0.0, - x2: 0.0, - q2: 0.0, - weight: 1.0, - }); - - subgrid2.merge(&mut subgrid1, false); - } -} diff --git a/pineappl_v0/src/packed_array.rs b/pineappl_v0/src/packed_array.rs index 41b6ab666..cc06093f8 100644 --- a/pineappl_v0/src/packed_array.rs +++ b/pineappl_v0/src/packed_array.rs @@ -291,418 +291,3 @@ impl IndexMut<[usize; D]> &mut self.entries[point_entries] } } - -#[cfg(test)] -mod tests { - use super::*; - use ndarray::Array3; - use std::mem; - - #[test] - fn unravel_index() { - assert_eq!(super::unravel_index(0, &[3, 2]), [0, 0]); - assert_eq!(super::unravel_index(1, &[3, 2]), [0, 1]); - assert_eq!(super::unravel_index(2, &[3, 2]), [1, 0]); - assert_eq!(super::unravel_index(3, &[3, 2]), [1, 1]); - assert_eq!(super::unravel_index(4, &[3, 2]), [2, 0]); - assert_eq!(super::unravel_index(5, &[3, 2]), [2, 1]); - } - - #[test] - fn ravel_multi_index() { - assert_eq!(super::ravel_multi_index(&[0, 0], &[3, 2]), 0); - assert_eq!(super::ravel_multi_index(&[0, 1], &[3, 2]), 1); - assert_eq!(super::ravel_multi_index(&[1, 0], &[3, 2]), 2); - assert_eq!(super::ravel_multi_index(&[1, 1], &[3, 2]), 3); - assert_eq!(super::ravel_multi_index(&[2, 0], &[3, 2]), 4); - assert_eq!(super::ravel_multi_index(&[2, 1], &[3, 2]), 5); - } - - #[test] - fn index() { - let mut a = PackedArray::::new([4, 2]); - - a[[0, 0]] = 1.0; - assert_eq!(a[[0, 0]], 1.0); - assert_eq!(a.entries, vec![1.0]); - assert_eq!(a.start_indices, vec![0]); - assert_eq!(a.lengths, vec![1]); - - a[[3, 0]] = 2.0; - assert_eq!(a[[0, 0]], 1.0); - assert_eq!(a[[3, 0]], 2.0); - assert_eq!(a.entries, vec![1.0, 2.0]); - assert_eq!(a.start_indices, vec![0, 6]); - assert_eq!(a.lengths, vec![1, 1]); - - a[[3, 1]] = 3.0; - assert_eq!(a[[0, 0]], 1.0); - assert_eq!(a[[3, 0]], 2.0); - assert_eq!(a[[3, 1]], 3.0); - assert_eq!(a.entries, 
vec![1.0, 2.0, 3.0]); - assert_eq!(a.start_indices, vec![0, 6]); - assert_eq!(a.lengths, vec![1, 2]); - - a[[2, 0]] = 3.5; - assert_eq!(a[[0, 0]], 1.0); - assert_eq!(a[[3, 0]], 2.0); - assert_eq!(a[[3, 1]], 3.0); - assert_eq!(a[[2, 0]], 3.5); - assert_eq!(a.entries, vec![1.0, 3.5, 0.0, 2.0, 3.0]); - assert_eq!(a.start_indices, vec![0, 4]); - assert_eq!(a.lengths, vec![1, 4]); - - a[[2, 0]] = 4.0; - assert_eq!(a[[0, 0]], 1.0); - assert_eq!(a[[3, 0]], 2.0); - assert_eq!(a[[3, 1]], 3.0); - assert_eq!(a[[2, 0]], 4.0); - assert_eq!(a.entries, vec![1.0, 4.0, 0.0, 2.0, 3.0]); - assert_eq!(a.start_indices, vec![0, 4]); - assert_eq!(a.lengths, vec![1, 4]); - - a[[1, 0]] = 5.0; - assert_eq!(a[[0, 0]], 1.0); - assert_eq!(a[[3, 0]], 2.0); - assert_eq!(a[[3, 1]], 3.0); - assert_eq!(a[[2, 0]], 4.0); - assert_eq!(a[[1, 0]], 5.0); - assert_eq!(a.entries, vec![1.0, 0.0, 5.0, 0.0, 4.0, 0.0, 2.0, 3.0]); - assert_eq!(a.start_indices, vec![0]); - assert_eq!(a.lengths, vec![8]); - } - - #[test] - fn iter() { - let mut a = PackedArray::::new([6, 5]); - a[[2, 2]] = 1; - a[[2, 4]] = 2; - a[[4, 1]] = 3; - a[[4, 4]] = 4; - a[[5, 0]] = 5; - assert_eq!( - a.indexed_iter().collect::>(), - &[ - ([2, 2], 1), - ([2, 4], 2), - ([4, 1], 3), - ([4, 4], 4), - ([5, 0], 5), - ] - ); - } - - #[test] - fn index_access() { - let mut array = PackedArray::new([40, 50, 50]); - - // after creation the array must be empty - assert_eq!(array.overhead(), 0); - assert!(array.is_empty()); - - // insert the first element - array[[5, 10, 10]] = 1.0; - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 1); - assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 2); - assert!(!array.is_empty()); - - // insert an element after the first one - array[[8, 10, 10]] = 2.0; - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 2); - assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 4); - assert!(!array.is_empty()); - 
- // insert an element before the first one - array[[1, 10, 10]] = 3.0; - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 3); - assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 6); - assert!(!array.is_empty()); - - array[[1, 10, 11]] = 4.0; - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 4); - assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 6); - assert!(!array.is_empty()); - - array[[1, 10, 9]] = 5.0; - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 5); - assert_eq!(array.explicit_zeros(), 0); - // dbg!(&array.start_indices); - // dbg!(&array.lengths); - assert_eq!(array.overhead(), 6); - assert!(!array.is_empty()); - - array[[1, 10, 0]] = 6.0; - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 6); - assert_eq!(array.explicit_zeros(), 0); - assert_eq!(array.overhead(), 8); - assert!(!array.is_empty()); - - array[[1, 10, 2]] = 7.0; - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 7); - assert_eq!(array.overhead(), 8); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array.explicit_zeros(), 1); - - array[[1, 
15, 2]] = 8.0; - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 8); - assert_eq!(array.overhead(), 10); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array.explicit_zeros(), 1); - - array[[1, 15, 4]] = 9.0; - assert_eq!(array[[1, 15, 4]], 9.0); - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 9); - assert_eq!(array.overhead(), 10); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 15, 3]], 0.0); - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array.explicit_zeros(), 2); - - array[[1, 15, 0]] = 10.0; - assert_eq!(array[[1, 15, 0]], 10.0); - assert_eq!(array[[1, 15, 4]], 9.0); - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.non_zeros(), 10); - assert_eq!(array.overhead(), 10); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 15, 1]], 0.0); - assert_eq!(array[[1, 15, 3]], 0.0); - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array.explicit_zeros(), 3); - } - - #[test] - #[should_panic(expected = "index [40, 0, 50] is out of bounds for array of shape [40, 50, 50]")] - fn index_mut_panic_dim0() { - let mut array = 
PackedArray::new([40, 50, 50]); - - array[[40, 0, 50]] = 1.0; - } - - #[test] - #[should_panic(expected = "index [0, 50, 0] is out of bounds for array of shape [40, 50, 50]")] - fn index_mut_panic_dim1() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[0, 50, 0]] = 1.0; - } - - #[test] - #[should_panic(expected = "index [0, 0, 50] is out of bounds for array of shape [40, 50, 50]")] - fn index_mut_panic_dim2() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[0, 0, 50]] = 1.0; - } - - #[test] - #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] - fn index_panic_dim0_0() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "entry at index [2, 0, 0] is implicitly set to the default value")] - fn index_panic_dim0_1() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[2, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "index [1, 50, 0] is out of bounds for array of shape [40, 50, 50]")] - fn index_panic_dim1() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[1, 50, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "entry at index [0, 0, 0] is implicitly set to the default value")] - fn index_panic_dim2_0() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[0, 0, 1]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "entry at index [0, 0, 2] is implicitly set to the default value")] - fn index_panic_dim2_1() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[0, 0, 1]] = 1.0; - - assert_eq!(array[[0, 0, 2]], 0.0); - } - - #[test] - fn indexed_iter() { - let mut array = PackedArray::new([40, 50, 50]); - - // check empty iterator - assert_eq!(array.indexed_iter().next(), None); - - // insert an element - array[[2, 3, 
4]] = 1.0; - - let mut iter = array.indexed_iter(); - - // check iterator with one element - assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); - assert_eq!(iter.next(), None); - - mem::drop(iter); - - // insert another element - array[[2, 3, 6]] = 2.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); - assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); - assert_eq!(iter.next(), None); - - mem::drop(iter); - - // insert yet another element - array[[4, 5, 7]] = 3.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); - assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); - assert_eq!(iter.next(), Some(([4, 5, 7], 3.0))); - assert_eq!(iter.next(), None); - - mem::drop(iter); - - // insert at the very first position - array[[2, 0, 0]] = 4.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(([2, 0, 0], 4.0))); - assert_eq!(iter.next(), Some(([2, 3, 4], 1.0))); - assert_eq!(iter.next(), Some(([2, 3, 6], 2.0))); - assert_eq!(iter.next(), Some(([4, 5, 7], 3.0))); - assert_eq!(iter.next(), None); - } - - #[test] - fn clear() { - let mut array = PackedArray::new([40, 50, 50]); - - array[[3, 5, 1]] = 1.0; - array[[7, 8, 9]] = 2.0; - array[[9, 1, 4]] = 3.0; - - assert!(!array.is_empty()); - assert_eq!(array.non_zeros(), 3); - assert_eq!(array.explicit_zeros(), 0); - - array.clear(); - - assert!(array.is_empty()); - assert_eq!(array.non_zeros(), 0); - assert_eq!(array.explicit_zeros(), 0); - } - - #[test] - fn from_ndarray() { - let mut ndarray = Array3::zeros((2, 50, 50)); - - ndarray[[0, 4, 3]] = 1.0; - ndarray[[0, 4, 4]] = 2.0; - ndarray[[0, 4, 6]] = 3.0; - ndarray[[0, 5, 1]] = 4.0; - ndarray[[0, 5, 7]] = 5.0; - ndarray[[1, 3, 9]] = 6.0; - - let array = PackedArray::from_ndarray(ndarray.view(), 3, 40); - - assert_eq!(array[[3, 4, 3]], 1.0); - assert_eq!(array[[3, 4, 4]], 2.0); - assert_eq!(array[[3, 4, 5]], 0.0); - assert_eq!(array[[3, 4, 6]], 3.0); - assert_eq!(array[[3, 
5, 1]], 4.0); - assert_eq!(array[[3, 5, 7]], 5.0); - assert_eq!(array[[4, 3, 9]], 6.0); - - assert_eq!(array.explicit_zeros(), 1); - } -} diff --git a/pineappl_v0/src/pids.rs b/pineappl_v0/src/pids.rs index 8e23eaa49..6064f3f8b 100644 --- a/pineappl_v0/src/pids.rs +++ b/pineappl_v0/src/pids.rs @@ -376,527 +376,3 @@ pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { None } - -#[cfg(test)] -mod tests { - use super::*; - use crate::boc::Channel; - use crate::channel; - use float_cmp::assert_approx_eq; - - #[test] - fn test() { - // check photon - assert_eq!(evol_to_pdg_mc_ids(21), [(21, 1.0)]); - - // check gluon - assert_eq!(evol_to_pdg_mc_ids(22), [(22, 1.0)]); - - // check singlet - assert_eq!( - evol_to_pdg_mc_ids(100), - [ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, 1.0), - (-4, 1.0), - (5, 1.0), - (-5, 1.0), - (6, 1.0), - (-6, 1.0), - ] - ); - - // check T3 - assert_eq!( - evol_to_pdg_mc_ids(103), - [(2, 1.0), (-2, 1.0), (1, -1.0), (-1, -1.0)] - ); - - // check T8 - assert_eq!( - evol_to_pdg_mc_ids(108), - [ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, -2.0), - (-3, -2.0), - ], - ); - - // check T15 - assert_eq!( - evol_to_pdg_mc_ids(115), - [ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, -3.0), - (-4, -3.0), - ], - ); - - // check T24 - assert_eq!( - evol_to_pdg_mc_ids(124), - [ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, 1.0), - (-4, 1.0), - (5, -4.0), - (-5, -4.0), - ], - ); - - // check T35 - assert_eq!( - evol_to_pdg_mc_ids(135), - [ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, 1.0), - (-4, 1.0), - (5, 1.0), - (-5, 1.0), - (6, -5.0), - (-6, -5.0), - ], - ); - - // check valence - assert_eq!( - evol_to_pdg_mc_ids(200), - [ - (1, 1.0), - (-1, -1.0), - (2, 1.0), - (-2, -1.0), - (3, 1.0), - (-3, -1.0), - (4, 1.0), - (-4, -1.0), - (5, 1.0), - (-5, -1.0), - (6, 1.0), - (-6, -1.0), - ], - ); - - // 
check V3 - assert_eq!( - evol_to_pdg_mc_ids(203), - [(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], - ); - - // check V8 - assert_eq!( - evol_to_pdg_mc_ids(208), - [ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, -2.0), - (-3, 2.0), - ], - ); - - // check V15 - assert_eq!( - evol_to_pdg_mc_ids(215), - [ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, 1.0), - (-3, -1.0), - (4, -3.0), - (-4, 3.0), - ], - ); - - // check V24 - assert_eq!( - evol_to_pdg_mc_ids(224), - [ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, 1.0), - (-3, -1.0), - (4, 1.0), - (-4, -1.0), - (5, -4.0), - (-5, 4.0), - ], - ); - - // check V35 - assert_eq!( - evol_to_pdg_mc_ids(235), - [ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, 1.0), - (-3, -1.0), - (4, 1.0), - (-4, -1.0), - (5, 1.0), - (-5, -1.0), - (6, -5.0), - (-6, 5.0), - ], - ); - } - - #[test] - fn test_pdg_mc_ids_to_evol() { - assert_eq!(pdg_mc_ids_to_evol(&[]), None); - - // check photon - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 1.0), - (-6, 0.0), - (-5, 0.0), - (-4, 0.0), - (-3, 0.0), - (-2, 0.0), - (-1, 0.0), - (21, 0.0), - (1, 0.0), - (2, 0.0), - (3, 0.0), - (4, 0.0), - (5, 0.0), - (6, 0.0), - ]), - Some(22) - ); - - // check gluon - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, 0.0), - (-3, 0.0), - (-2, 0.0), - (-1, 0.0), - (21, 1.0), - (1, 0.0), - (2, 0.0), - (3, 0.0), - (4, 0.0), - (5, 0.0), - (6, 0.0), - ]), - Some(21) - ); - - // check singlet - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 1.0), - (-5, 1.0), - (-4, 1.0), - (-3, 1.0), - (-2, 1.0), - (-1, 1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, 1.0), - (5, 1.0), - (6, 1.0), - ]), - Some(100) - ); - - // check T3 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, 0.0), - (-3, 0.0), - (-2, 1.0), - (-1, -1.0), - (21, 0.0), - (1, -1.0), - (2, 1.0), - (3, 0.0), - (4, 0.0), - (5, 0.0), - (6, 0.0), - ]), - Some(103) - ); - - // check T8 - 
assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, 0.0), - (-3, -2.0), - (-2, 1.0), - (-1, 1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, -2.0), - (4, 0.0), - (5, 0.0), - (6, 0.0), - ]), - Some(108) - ); - - // check T15 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, -3.0), - (-3, 1.0), - (-2, 1.0), - (-1, 1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, -3.0), - (5, 0.0), - (6, 0.0), - ]), - Some(115) - ); - - // check T24 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, -4.0), - (-4, 1.0), - (-3, 1.0), - (-2, 1.0), - (-1, 1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, 1.0), - (5, -4.0), - (6, 0.0), - ]), - Some(124) - ); - - // check T35 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, -5.0), - (-5, 1.0), - (-4, 1.0), - (-3, 1.0), - (-2, 1.0), - (-1, 1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, 1.0), - (5, 1.0), - (6, -5.0), - ]), - Some(135) - ); - - // check valence - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, -1.0), - (-5, -1.0), - (-4, -1.0), - (-3, -1.0), - (-2, -1.0), - (-1, -1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, 1.0), - (5, 1.0), - (6, 1.0), - ]), - Some(200) - ); - - // check V3 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, 0.0), - (-3, 0.0), - (-2, -1.0), - (-1, 1.0), - (21, 0.0), - (1, -1.0), - (2, 1.0), - (3, 0.0), - (4, 0.0), - (5, 0.0), - (6, 0.0), - ]), - Some(203) - ); - - // check V8 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, 0.0), - (-3, 2.0), - (-2, -1.0), - (-1, -1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, -2.0), - (4, 0.0), - (5, 0.0), - (6, 0.0), - ]), - Some(208) - ); - - // check V15 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 0.0), - (-4, 3.0), - (-3, -1.0), - (-2, -1.0), - (-1, -1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, -3.0), - (5, 
0.0), - (6, 0.0), - ]), - Some(215) - ); - - // check V24 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 0.0), - (-5, 4.0), - (-4, -1.0), - (-3, -1.0), - (-2, -1.0), - (-1, -1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, 1.0), - (5, -4.0), - (6, 0.0), - ]), - Some(224) - ); - - // check V35 - assert_eq!( - pdg_mc_ids_to_evol(&[ - (22, 0.0), - (-6, 5.0), - (-5, -1.0), - (-4, -1.0), - (-3, -1.0), - (-2, -1.0), - (-1, -1.0), - (21, 0.0), - (1, 1.0), - (2, 1.0), - (3, 1.0), - (4, 1.0), - (5, 1.0), - (6, -5.0), - ]), - Some(235) - ); - } - - #[test] - fn pid_basis_guess() { - assert_eq!( - PidBasis::guess(&[22, -6, -5, -4, -3, -2, -1, 21, 1, 2, 3, 4, 5, 6]), - PidBasis::Pdg, - ); - - assert_eq!( - PidBasis::guess(&[ - 22, 100, 200, 21, 100, 103, 108, 115, 124, 135, 203, 208, 215, 224, 235 - ]), - PidBasis::Evol, - ); - } - - #[test] - fn inverse_inverse_evol() { - for pid in [-6, -5, -4, -3, -2, -1, 1, 2, 3, 4, 5, 6] { - let result = Channel::translate( - &Channel::translate(&channel![pid, pid, 1.0], &pdg_mc_pids_to_evol), - &evol_to_pdg_mc_ids, - ); - - assert_eq!(result.entry().len(), 1); - assert_eq!(result.entry()[0].0, pid); - assert_eq!(result.entry()[0].1, pid); - assert_approx_eq!(f64, result.entry()[0].2, 1.0, ulps = 8); - } - } -} diff --git a/pineappl_v0/src/sparse_array3.rs b/pineappl_v0/src/sparse_array3.rs index 1debae7e8..f64c60826 100644 --- a/pineappl_v0/src/sparse_array3.rs +++ b/pineappl_v0/src/sparse_array3.rs @@ -419,717 +419,3 @@ impl SparseArray3 { } } } - -#[cfg(test)] -mod tests { - use super::*; - use ndarray::Array3; - - #[test] - fn index_access() { - let mut array = SparseArray3::new(40, 50, 50); - - // after creation the array must be empty - assert_eq!(array.x_range(), 0..0); - assert_eq!(array.overhead(), 2); - assert!(array.is_empty()); - - // insert the first element - array[[5, 10, 10]] = 1.0; - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 1); - assert_eq!(array.zeros(), 0); - 
assert_eq!(array.x_range(), 5..6); - assert_eq!(array.overhead(), 102); - assert!(!array.is_empty()); - - // insert an element after the first one - array[[8, 10, 10]] = 2.0; - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 2); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 5..9); - assert_eq!(array.overhead(), 402); - assert!(!array.is_empty()); - - // insert an element before the first one - array[[1, 10, 10]] = 3.0; - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 3); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - array[[1, 10, 11]] = 4.0; - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 4); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - array[[1, 10, 9]] = 5.0; - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 5); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - array[[1, 10, 0]] = 6.0; - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 6); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 
2]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 8); - - // insert where previously a zero was - array[[1, 10, 2]] = 7.0; - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 7); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 7); - - array[[1, 15, 2]] = 8.0; - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 8); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 7); - - array[[1, 15, 4]] = 9.0; - assert_eq!(array[[1, 15, 4]], 9.0); - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - 
assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 9); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 15, 3]], 0.0); - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 8); - - array[[1, 15, 0]] = 10.0; - assert_eq!(array[[1, 15, 0]], 10.0); - assert_eq!(array[[1, 15, 4]], 9.0); - assert_eq!(array[[1, 15, 2]], 8.0); - assert_eq!(array[[1, 10, 2]], 7.0); - assert_eq!(array[[1, 10, 0]], 6.0); - assert_eq!(array[[1, 10, 9]], 5.0); - assert_eq!(array[[1, 10, 11]], 4.0); - assert_eq!(array[[1, 10, 10]], 3.0); - assert_eq!(array[[8, 10, 10]], 2.0); - assert_eq!(array[[5, 10, 10]], 1.0); - assert_eq!(array.len(), 10); - assert_eq!(array.x_range(), 1..9); - assert_eq!(array.overhead(), 802); - assert!(!array.is_empty()); - - // check zeros - assert_eq!(array[[1, 15, 1]], 0.0); - assert_eq!(array[[1, 15, 3]], 0.0); - assert_eq!(array[[1, 10, 1]], 0.0); - assert_eq!(array[[1, 10, 3]], 0.0); - assert_eq!(array[[1, 10, 4]], 0.0); - assert_eq!(array[[1, 10, 5]], 0.0); - assert_eq!(array[[1, 10, 6]], 0.0); - assert_eq!(array[[1, 10, 7]], 0.0); - assert_eq!(array[[1, 10, 8]], 0.0); - assert_eq!(array.zeros(), 9); - } - - #[test] - #[should_panic(expected = "explicit panic")] - fn index_mut_panic_dim0() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[40, 0, 50]] = 1.0; - } - - #[test] - #[should_panic(expected = "assertion failed: index[1] < dim1")] - fn index_mut_panic_dim1() { - let mut array = SparseArray3::new(40, 50, 50); - - 
array[[0, 50, 0]] = 1.0; - } - - #[test] - #[should_panic(expected = "explicit panic")] - fn index_mut_panic_dim2() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 0, 50]] = 1.0; - } - - #[test] - #[should_panic(expected = "assertion failed: index[0] >= self.start")] - fn index_panic_dim0_0() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 0.0); - } - - #[test] - #[should_panic( - expected = "assertion failed: index[0] < (self.start + (self.indices.len() - 1) / dim1)" - )] - fn index_panic_dim0_1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[2, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "assertion failed: index[1] < dim1")] - fn index_panic_dim1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 0, 0]] = 1.0; - - assert_eq!(array[[1, 50, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "assertion failed: index[2] >= zeros_left")] - fn index_panic_dim2_0() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 0, 1]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 0.0); - } - - #[test] - #[should_panic(expected = "assertion failed: index[2] < (non_zeros + zeros_left)")] - fn index_panic_dim2_1() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[0, 0, 1]] = 1.0; - - assert_eq!(array[[0, 0, 2]], 0.0); - } - - #[test] - fn indexed_iter() { - let mut array = SparseArray3::new(40, 50, 50); - - // check empty iterator - assert_eq!(array.indexed_iter().next(), None); - - // insert an element - array[[2, 3, 4]] = 1.0; - - let mut iter = array.indexed_iter(); - - // check iterator with one element - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), None); - - // insert another element - array[[2, 3, 6]] = 2.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); - 
assert_eq!(iter.next(), None); - - // insert yet another element - array[[4, 5, 7]] = 3.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); - assert_eq!(iter.next(), Some(((4, 5, 7), 3.0))); - assert_eq!(iter.next(), None); - - // insert at the very first position - array[[2, 0, 0]] = 4.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((2, 0, 0), 4.0))); - assert_eq!(iter.next(), Some(((2, 3, 4), 1.0))); - assert_eq!(iter.next(), Some(((2, 3, 6), 2.0))); - assert_eq!(iter.next(), Some(((4, 5, 7), 3.0))); - assert_eq!(iter.next(), None); - } - - #[test] - fn iter_mut() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[3, 5, 1]] = 1.0; - array[[7, 8, 9]] = 2.0; - array[[7, 8, 13]] = 3.0; - array[[9, 1, 4]] = 4.0; - - let mut iter = array.iter_mut(); - - assert_eq!(iter.next(), Some(&mut 1.0)); - assert_eq!(iter.next(), Some(&mut 2.0)); - assert_eq!(iter.next(), Some(&mut 0.0)); - assert_eq!(iter.next(), Some(&mut 0.0)); - assert_eq!(iter.next(), Some(&mut 0.0)); - assert_eq!(iter.next(), Some(&mut 3.0)); - assert_eq!(iter.next(), Some(&mut 4.0)); - assert_eq!(iter.next(), None); - } - - #[test] - fn clear() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[3, 5, 1]] = 1.0; - array[[7, 8, 9]] = 2.0; - array[[9, 1, 4]] = 3.0; - - assert!(!array.is_empty()); - assert_eq!(array.len(), 3); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 3..10); - - array.clear(); - - assert!(array.is_empty()); - assert_eq!(array.len(), 0); - assert_eq!(array.zeros(), 0); - assert_eq!(array.x_range(), 0..0); - } - - #[test] - fn remove_x() { - let mut array = SparseArray3::new(40, 50, 50); - - array[[1, 5, 6]] = 1.0; - array[[1, 6, 5]] = 2.0; - array[[1, 2, 3]] = 3.0; - array[[1, 9, 3]] = 4.0; - array[[1, 8, 4]] = 5.0; - array[[2, 0, 0]] = 6.0; - array[[3, 4, 5]] = 7.0; - array[[3, 4, 6]] = 8.0; - array[[3, 4, 7]] = 9.0; - 
array[[4, 0, 2]] = 10.0; - array[[4, 0, 3]] = 11.0; - array[[5, 0, 1]] = 12.0; - array[[5, 0, 2]] = 13.0; - - assert_eq!(array.x_range(), 1..6); - assert_eq!(array.len(), 13); - assert_eq!(array.zeros(), 0); - - // remove the first five entries - array.remove_x(1); - - assert_eq!(array.x_range(), 2..6); - assert_eq!(array.len(), 8); - assert_eq!(array.zeros(), 0); - - // remove the last two entries - array.remove_x(5); - - assert_eq!(array.x_range(), 2..5); - assert_eq!(array.len(), 6); - assert_eq!(array.zeros(), 0); - - // remove the from the middle - array.remove_x(3); - - assert_eq!(array.x_range(), 2..5); - assert_eq!(array.len(), 3); - assert_eq!(array.zeros(), 0); - - // remove also the rest - array.remove_x(4); - array.remove_x(2); - - assert_eq!(array.x_range(), 0..0); - assert_eq!(array.len(), 0); - assert_eq!(array.zeros(), 0); - } - - #[test] - #[should_panic(expected = "assertion failed: (x >= self.start) && (x < self.start + nx)")] - fn remove_x_panic() { - let mut array = SparseArray3::::new(40, 50, 50); - - array.remove_x(0); - } - - #[test] - fn increase_at_x() { - let mut array = SparseArray3::new(1, 50, 50); - - array[[0, 0, 0]] = 1.0; - array[[0, 2, 3]] = 2.0; - array[[0, 2, 4]] = 3.0; - array[[0, 2, 5]] = 4.0; - array[[0, 3, 0]] = 5.0; - array[[0, 49, 49]] = 6.0; - - assert_eq!(array.dimensions(), (1, 50, 50)); - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 2, 3]], 2.0); - assert_eq!(array[[0, 2, 4]], 3.0); - assert_eq!(array[[0, 2, 5]], 4.0); - assert_eq!(array[[0, 3, 0]], 5.0); - assert_eq!(array[[0, 49, 49]], 6.0); - - // increase at the end - array.increase_x_at(1); - - assert_eq!(array.dimensions(), (2, 50, 50)); - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 2, 3]], 2.0); - assert_eq!(array[[0, 2, 4]], 3.0); - assert_eq!(array[[0, 2, 5]], 4.0); - assert_eq!(array[[0, 3, 0]], 5.0); - assert_eq!(array[[0, 49, 49]], 6.0); - - array[[1, 5, 0]] = 7.0; - array[[1, 5, 5]] = 8.0; - array[[1, 6, 3]] = 9.0; - array[[1, 6, 
0]] = 10.0; - - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 2, 3]], 2.0); - assert_eq!(array[[0, 2, 4]], 3.0); - assert_eq!(array[[0, 2, 5]], 4.0); - assert_eq!(array[[0, 3, 0]], 5.0); - assert_eq!(array[[0, 49, 49]], 6.0); - assert_eq!(array[[1, 5, 0]], 7.0); - assert_eq!(array[[1, 5, 5]], 8.0); - assert_eq!(array[[1, 6, 3]], 9.0); - assert_eq!(array[[1, 6, 0]], 10.0); - - // increase at the start - array.increase_x_at(0); - - assert_eq!(array.dimensions(), (3, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[2, 5, 0]], 7.0); - assert_eq!(array[[2, 5, 5]], 8.0); - assert_eq!(array[[2, 6, 3]], 9.0); - assert_eq!(array[[2, 6, 0]], 10.0); - - // increase at the end - array.increase_x_at(3); - - assert_eq!(array.dimensions(), (4, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[2, 5, 0]], 7.0); - assert_eq!(array[[2, 5, 5]], 8.0); - assert_eq!(array[[2, 6, 3]], 9.0); - assert_eq!(array[[2, 6, 0]], 10.0); - - // increase after the end - array.increase_x_at(5); - - assert_eq!(array.dimensions(), (6, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[2, 5, 0]], 7.0); - assert_eq!(array[[2, 5, 5]], 8.0); - assert_eq!(array[[2, 6, 3]], 9.0); - assert_eq!(array[[2, 6, 0]], 10.0); - - // increase in the middle - array.increase_x_at(2); - - assert_eq!(array.dimensions(), (7, 50, 50)); - assert_eq!(array[[1, 0, 0]], 1.0); - assert_eq!(array[[1, 2, 3]], 2.0); - 
assert_eq!(array[[1, 2, 4]], 3.0); - assert_eq!(array[[1, 2, 5]], 4.0); - assert_eq!(array[[1, 3, 0]], 5.0); - assert_eq!(array[[1, 49, 49]], 6.0); - assert_eq!(array[[3, 5, 0]], 7.0); - assert_eq!(array[[3, 5, 5]], 8.0); - assert_eq!(array[[3, 6, 3]], 9.0); - assert_eq!(array[[3, 6, 0]], 10.0); - } - - #[test] - fn from_ndarray() { - let mut ndarray = Array3::zeros((2, 50, 50)); - - ndarray[[0, 4, 3]] = 1.0; - ndarray[[0, 4, 4]] = 2.0; - ndarray[[0, 4, 6]] = 3.0; - ndarray[[0, 5, 1]] = 4.0; - ndarray[[0, 5, 7]] = 5.0; - ndarray[[1, 3, 9]] = 6.0; - - let array = SparseArray3::from_ndarray(ndarray.view(), 3, 40); - - assert_eq!(array[[3, 4, 3]], 1.0); - assert_eq!(array[[3, 4, 4]], 2.0); - assert_eq!(array[[3, 4, 5]], 0.0); - assert_eq!(array[[3, 4, 6]], 3.0); - assert_eq!(array[[3, 5, 1]], 4.0); - assert_eq!(array[[3, 5, 2]], 0.0); - assert_eq!(array[[3, 5, 3]], 0.0); - assert_eq!(array[[3, 5, 4]], 0.0); - assert_eq!(array[[3, 5, 5]], 0.0); - assert_eq!(array[[3, 5, 6]], 0.0); - assert_eq!(array[[3, 5, 7]], 5.0); - assert_eq!(array[[4, 3, 9]], 6.0); - - assert_eq!(array.len(), 6); - assert_eq!(array.zeros(), 6); - } - - #[test] - fn test_index_swap() { - let mut array = SparseArray3::new(5, 50, 2); - - array[[0, 0, 0]] = 1.0; - array[[0, 0, 1]] = 2.0; - array[[1, 2, 1]] = 3.0; - array[[1, 5, 1]] = 4.0; - array[[1, 6, 0]] = 5.0; - array[[1, 8, 0]] = 6.0; - array[[1, 9, 0]] = 7.0; - array[[2, 0, 0]] = 8.0; - array[[3, 2, 1]] = 9.0; - array[[3, 4, 0]] = 10.0; - array[[3, 4, 1]] = 11.0; - array[[4, 0, 0]] = 12.0; - array[[4, 0, 1]] = 13.0; - - assert_eq!(array[[0, 0, 0]], 1.0); - assert_eq!(array[[0, 0, 1]], 2.0); - assert_eq!(array[[1, 2, 1]], 3.0); - assert_eq!(array[[1, 5, 1]], 4.0); - assert_eq!(array[[1, 6, 0]], 5.0); - assert_eq!(array[[1, 8, 0]], 6.0); - assert_eq!(array[[1, 9, 0]], 7.0); - assert_eq!(array[[2, 0, 0]], 8.0); - assert_eq!(array[[3, 2, 1]], 9.0); - assert_eq!(array[[3, 4, 0]], 10.0); - assert_eq!(array[[3, 4, 1]], 11.0); - assert_eq!(array[[4, 0, 
0]], 12.0); - assert_eq!(array[[4, 0, 1]], 13.0); - - assert_eq!(array.x_range(), 0..5); - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 1.0))); - assert_eq!(iter.next(), Some(((0, 0, 1), 2.0))); - assert_eq!(iter.next(), Some(((1, 6, 0), 5.0))); - assert_eq!(iter.next(), Some(((1, 8, 0), 6.0))); - assert_eq!(iter.next(), Some(((1, 9, 0), 7.0))); - assert_eq!(iter.next(), Some(((1, 2, 1), 3.0))); - assert_eq!(iter.next(), Some(((1, 5, 1), 4.0))); - assert_eq!(iter.next(), Some(((2, 0, 0), 8.0))); - assert_eq!(iter.next(), Some(((3, 4, 0), 10.0))); - assert_eq!(iter.next(), Some(((3, 2, 1), 9.0))); - assert_eq!(iter.next(), Some(((3, 4, 1), 11.0))); - assert_eq!(iter.next(), Some(((4, 0, 0), 12.0))); - assert_eq!(iter.next(), Some(((4, 0, 1), 13.0))); - assert_eq!(iter.next(), None); - - let mut ndarray = Array3::zeros((5, 50, 2)); - - ndarray[[0, 0, 0]] = 1.0; - ndarray[[0, 0, 1]] = 2.0; - ndarray[[1, 2, 1]] = 3.0; - ndarray[[1, 5, 1]] = 4.0; - ndarray[[1, 6, 0]] = 5.0; - ndarray[[1, 8, 0]] = 6.0; - ndarray[[1, 9, 0]] = 7.0; - ndarray[[2, 0, 0]] = 8.0; - ndarray[[3, 2, 1]] = 9.0; - ndarray[[3, 4, 0]] = 10.0; - ndarray[[3, 4, 1]] = 11.0; - ndarray[[4, 0, 0]] = 12.0; - ndarray[[4, 0, 1]] = 13.0; - - let mut other = SparseArray3::from_ndarray(ndarray.view(), 0, 5); - - assert_eq!(other[[0, 0, 0]], 1.0); - assert_eq!(other[[0, 0, 1]], 2.0); - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - assert_eq!(other[[3, 2, 1]], 9.0); - assert_eq!(other[[3, 4, 0]], 10.0); - assert_eq!(other[[3, 4, 1]], 11.0); - assert_eq!(other[[4, 0, 0]], 12.0); - assert_eq!(other[[4, 0, 1]], 13.0); - - assert_eq!(other.x_range(), 0..5); - - other.remove_x(0); - - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - 
assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - assert_eq!(other[[3, 2, 1]], 9.0); - assert_eq!(other[[3, 4, 0]], 10.0); - assert_eq!(other[[3, 4, 1]], 11.0); - assert_eq!(other[[4, 0, 0]], 12.0); - assert_eq!(other[[4, 0, 1]], 13.0); - - other.remove_x(3); - - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - assert_eq!(other[[4, 0, 0]], 12.0); - assert_eq!(other[[4, 0, 1]], 13.0); - - other.remove_x(4); - - assert_eq!(other[[1, 2, 1]], 3.0); - assert_eq!(other[[1, 5, 1]], 4.0); - assert_eq!(other[[1, 6, 0]], 5.0); - assert_eq!(other[[1, 8, 0]], 6.0); - assert_eq!(other[[1, 9, 0]], 7.0); - assert_eq!(other[[2, 0, 0]], 8.0); - } - - // https://github.com/NNPDF/pineappl/issues/220 - #[test] - fn regression_test_220() { - let mut array = SparseArray3::new(1, 2, 4); - - array[[0, 0, 0]] = 1.0; - - assert_eq!(array[[0, 0, 0]], 1.0); - - assert_eq!(array.x_range(), 0..1); - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 1.0))); - assert_eq!(iter.next(), None); - - array.increase_x_at(0); - - array[[0, 0, 0]] = 2.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 2.0))); - assert_eq!(iter.next(), Some(((1, 0, 0), 1.0))); - assert_eq!(iter.next(), None); - - array.increase_x_at(1); - - array[[1, 0, 0]] = 3.0; - - let mut iter = array.indexed_iter(); - - assert_eq!(iter.next(), Some(((0, 0, 0), 2.0))); - assert_eq!(iter.next(), Some(((1, 0, 0), 3.0))); - assert_eq!(iter.next(), Some(((2, 0, 0), 1.0))); - assert_eq!(iter.next(), None); - } -} From 6eb58dd76544f06b914f7530e35e8748d45f8fa8 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 11:20:29 +0200 Subject: [PATCH 20/42] Strip more code from `grid.rs` --- pineappl_v0/src/grid.rs | 105 
---------------------------------------- 1 file changed, 105 deletions(-) diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index d7957689f..7eb5f5849 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -7,7 +7,6 @@ use super::empty_subgrid::EmptySubgridV1; use super::pids::{self, PidBasis}; use super::subgrid::{Subgrid, SubgridEnum, SubgridParams}; use bitflags::bitflags; -use git_version::git_version; use lz4_flex::frame::FrameDecoder; use ndarray::{Array3, ArrayView3}; use serde::{Deserialize, Serialize, Serializer}; @@ -108,31 +107,6 @@ struct Mmv3 { subgrid_template: SubgridEnum, } -impl Default for Mmv2 { - fn default() -> Self { - Self { - remapper: None, - key_value_db: [ - ( - "pineappl_gitversion".to_owned(), - git_version!( - args = ["--always", "--dirty", "--long", "--tags"], - cargo_prefix = "cargo:", - fallback = "unknown" - ) - .to_owned(), - ), - // by default we assume there are protons in the initial state - ("initial_state_1".to_owned(), "2212".to_owned()), - ("initial_state_2".to_owned(), "2212".to_owned()), - ] - .iter() - .cloned() - .collect(), - } - } -} - // ALLOW: fixing the warning will break the file format #[allow(clippy::large_enum_variant)] #[derive(Clone, Deserialize, Serialize)] @@ -142,17 +116,6 @@ enum MoreMembers { V3(Mmv3), } -impl MoreMembers { - fn upgrade(&mut self) { - match self { - Self::V1(_) => { - *self = Self::V2(Mmv2::default()); - } - Self::V2(_) | Self::V3(_) => {} - } - } -} - bitflags! { /// Bitflags for optimizing a [`Grid`]. See [`Grid::optimize_using`]. #[derive(Clone, Copy)] @@ -207,17 +170,6 @@ impl Grid { PidBasis::Pdg } - fn pdg_channels(&self) -> Cow<'_, [Channel]> { - match self.pid_basis() { - PidBasis::Evol => self - .channels - .iter() - .map(|entry| Channel::translate(entry, &pids::evol_to_pdg_mc_ids)) - .collect(), - PidBasis::Pdg => Cow::Borrowed(self.channels()), - } - } - /// Construct a `Grid` by deserializing it from `reader`. Reading is buffered. 
/// /// # Errors @@ -267,58 +219,6 @@ impl Grid { &self.channels } - /// Merges the bins for the corresponding range together in a single one. - /// - /// # Errors - /// - /// When the given bins are non-consecutive, an error is returned. - pub fn merge_bins(&mut self, bins: Range) -> Result<(), GridError> { - self.bin_limits - .merge_bins(bins.clone()) - .map_err(GridError::MergeBinError)?; - - if let Some(remapper) = self.remapper_mut() { - remapper - .merge_bins(bins.clone()) - .map_err(GridError::MergeBinError)?; - } - - let bin_count = self.bin_info().bins(); - let mut old_subgrids = mem::replace( - &mut self.subgrids, - Array3::from_shape_simple_fn( - (self.orders.len(), bin_count, self.channels.len()), - || EmptySubgridV1.into(), - ), - ); - - for ((order, bin, channel), subgrid) in old_subgrids.indexed_iter_mut() { - if subgrid.is_empty() { - continue; - } - - if bins.contains(&bin) { - let new_subgrid = &mut self.subgrids[[order, bins.start, channel]]; - - if new_subgrid.is_empty() { - mem::swap(new_subgrid, subgrid); - } else { - new_subgrid.merge(subgrid, false); - } - } else { - let new_bin = if bin > bins.start { - bin - (bins.end - bins.start) + 1 - } else { - bin - }; - - mem::swap(&mut self.subgrids[[order, new_bin, channel]], subgrid); - } - } - - Ok(()) - } - /// Return a vector containing the type of convolutions performed with this grid. /// /// # Panics @@ -428,11 +328,6 @@ impl Grid { BinInfo::new(&self.bin_limits, self.remapper()) } - /// Upgrades the internal data structures to their latest versions. - pub fn upgrade(&mut self) { - self.more_members.upgrade(); - } - /// Returns a map with key-value pairs, if there are any stored in this grid. 
#[must_use] pub const fn key_values(&self) -> Option<&HashMap> { From d36075103bd30f296cfaa3af2476780dd495bf67 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 11:51:53 +0200 Subject: [PATCH 21/42] Remove `Subgrid::{convolve,fill}` --- pineappl_v0/src/empty_subgrid.rs | 15 -- pineappl_v0/src/grid.rs | 8 +- pineappl_v0/src/import_only_subgrid.rs | 35 --- pineappl_v0/src/lagrange_subgrid.rs | 310 +------------------------ pineappl_v0/src/ntuple_subgrid.rs | 18 -- pineappl_v0/src/subgrid.rs | 16 -- 6 files changed, 3 insertions(+), 399 deletions(-) diff --git a/pineappl_v0/src/empty_subgrid.rs b/pineappl_v0/src/empty_subgrid.rs index 4cb8cbba5..c4c635fb1 100644 --- a/pineappl_v0/src/empty_subgrid.rs +++ b/pineappl_v0/src/empty_subgrid.rs @@ -1,6 +1,5 @@ //! TODO -use super::grid::Ntuple; use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; @@ -11,20 +10,6 @@ use std::iter; pub struct EmptySubgridV1; impl Subgrid for EmptySubgridV1 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - _: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - 0.0 - } - - fn fill(&mut self, _: &Ntuple) { - panic!("EmptySubgridV1 doesn't support the fill operation"); - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&[]) } diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index 7eb5f5849..388abcfec 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -3,18 +3,14 @@ use super::bin::{BinInfo, BinLimits, BinRemapper}; use super::boc::{Channel, Order}; use super::convolutions::Convolution; -use super::empty_subgrid::EmptySubgridV1; -use super::pids::{self, PidBasis}; -use super::subgrid::{Subgrid, SubgridEnum, SubgridParams}; +use super::pids::PidBasis; +use super::subgrid::{SubgridEnum, SubgridParams}; use bitflags::bitflags; use lz4_flex::frame::FrameDecoder; use ndarray::{Array3, ArrayView3}; use serde::{Deserialize, Serialize, 
Serializer}; -use std::borrow::Cow; use std::collections::{BTreeMap, HashMap}; use std::io::{self, BufRead, BufReader, Read}; -use std::mem; -use std::ops::Range; use thiserror::Error; /// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs index 0e85f67d1..81393c128 100644 --- a/pineappl_v0/src/import_only_subgrid.rs +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -1,6 +1,5 @@ //! TODO -use super::grid::Ntuple; use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; use serde::{Deserialize, Serialize}; @@ -40,23 +39,6 @@ impl ImportOnlySubgridV1 { } impl Subgrid for ImportOnlySubgridV1 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.array - .indexed_iter() - .map(|((imu2, ix1, ix2), sigma)| sigma * lumi(ix1, ix2, imu2)) - .sum() - } - - fn fill(&mut self, _: &Ntuple) { - panic!("ImportOnlySubgridV1 doesn't support the fill operation"); - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { self.q2_grid .iter() @@ -210,23 +192,6 @@ impl ImportOnlySubgridV2 { } impl Subgrid for ImportOnlySubgridV2 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.array - .indexed_iter() - .map(|((imu2, ix1, ix2), sigma)| sigma * lumi(ix1, ix2, imu2)) - .sum() - } - - fn fill(&mut self, _: &Ntuple) { - panic!("ImportOnlySubgridV2 doesn't support the fill operation"); - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&self.mu2_grid) } diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs index f8f1bc98d..ac0e286b7 100644 --- a/pineappl_v0/src/lagrange_subgrid.rs +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -1,12 +1,10 @@ //! Module containing the Lagrange-interpolation subgrid. 
-use super::convert::{f64_from_usize, usize_from_f64}; -use super::grid::Ntuple; +use super::convert::f64_from_usize; use super::sparse_array3::SparseArray3; use super::subgrid::{ ExtraSubgridParams, Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter, SubgridParams, }; -use arrayvec::ArrayVec; use ndarray::Array3; use serde::{Deserialize, Serialize}; use std::borrow::Cow; @@ -45,20 +43,6 @@ fn fq2(tau: f64) -> f64 { 0.0625 * tau.exp().exp() } -fn fi(i: usize, n: usize, u: f64) -> f64 { - let mut factorials = 1; - let mut product = 1.0; - for z in 0..i { - product *= u - f64_from_usize(z); - factorials *= i - z; - } - for z in i + 1..=n { - product *= f64_from_usize(z) - u; - factorials *= z - i; - } - product / f64_from_usize(factorials) -} - /// Subgrid which uses Lagrange-interpolation. #[derive(Clone, Deserialize, Serialize)] pub struct LagrangeSubgridV1 { @@ -129,104 +113,6 @@ impl LagrangeSubgridV1 { } impl Subgrid for LagrangeSubgridV1 { - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.grid.as_ref().map_or(0.0, |grid| { - grid.indexed_iter() - .map(|((imu2, ix1, ix2), &sigma)| { - if sigma == 0.0 { - 0.0 - } else { - let mut value = sigma * lumi(ix1, ix2, imu2 + self.itaumin); - if self.reweight { - value *= weightfun(x1[ix1]) * weightfun(x2[ix2]); - } - value - } - }) - .sum() - }) - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - let y1 = fy(ntuple.x1); - let y2 = fy(ntuple.x2); - let tau = ftau(ntuple.q2); - - if (y2 < self.ymin) - || (y2 > self.ymax) - || (y1 < self.ymin) - || (y1 > self.ymax) - || (tau < self.taumin) - || (tau > self.taumax) - { - return; - } - - let k1 = usize_from_f64((y1 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - let k2 = usize_from_f64((y2 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - - let 
u_y1 = (y1 - self.gety(k1)) / self.deltay(); - let u_y2 = (y2 - self.gety(k2)) / self.deltay(); - - let fi1: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y1)) - .collect(); - let fi2: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y2)) - .collect(); - - let k3 = usize_from_f64( - (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), - ) - .min(self.ntau - 1 - self.tauorder); - - let u_tau = (tau - self.gettau(k3)) / self.deltatau(); - - let factor = if self.reweight { - 1.0 / (weightfun(ntuple.x1) * weightfun(ntuple.x2)) - } else { - 1.0 - }; - - let size = self.tauorder + 1; - let ny = self.ny; - - if self.grid.is_none() { - self.itaumin = k3; - self.itaumax = k3 + size; - } else if k3 < self.itaumin || k3 + size > self.itaumax { - self.increase_tau(self.itaumin.min(k3), self.itaumax.max(k3 + size)); - } - - for i3 in 0..=self.tauorder { - let fi3i3 = fi(i3, self.tauorder, u_tau); - - for (i1, fi1i1) in fi1.iter().enumerate() { - for (i2, fi2i2) in fi2.iter().enumerate() { - let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; - - let grid = self - .grid - .get_or_insert_with(|| Array3::zeros((size, ny, ny))); - - grid[[k3 + i3 - self.itaumin, k1 + i1, k2 + i2]] += fillweight; - } - } - } - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { (0..self.ntau) .map(|itau| { @@ -495,121 +381,6 @@ impl LagrangeSubgridV2 { } impl Subgrid for LagrangeSubgridV2 { - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.grid.as_ref().map_or(0.0, |grid| { - grid.indexed_iter() - .map(|((imu2, ix1, ix2), &sigma)| { - if sigma == 0.0 { - 0.0 - } else { - let mut value = sigma * lumi(ix1, ix2, imu2 + self.itaumin); - if self.reweight1 { - value *= weightfun(x1[ix1]); - } - if self.reweight2 { - value *= weightfun(x2[ix2]); - } - value - } - }) - .sum() - }) - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - 
return; - } - - let y1 = fy(ntuple.x1); - let y2 = fy(ntuple.x2); - let tau = ftau(ntuple.q2); - - if self.static_q2 == 0.0 { - self.static_q2 = ntuple.q2; - } else if (self.static_q2 != -1.0) && (self.static_q2 != ntuple.q2) { - self.static_q2 = -1.0; - } - - if (y2 < self.y2min) - || (y2 > self.y2max) - || (y1 < self.y1min) - || (y1 > self.y1max) - || (tau < self.taumin) - || (tau > self.taumax) - { - return; - } - - let k1 = - usize_from_f64((y1 - self.y1min) / self.deltay1() - f64_from_usize(self.y1order / 2)) - .min(self.ny1 - 1 - self.y1order); - let k2 = - usize_from_f64((y2 - self.y2min) / self.deltay2() - f64_from_usize(self.y2order / 2)) - .min(self.ny2 - 1 - self.y2order); - - let u_y1 = (y1 - self.gety1(k1)) / self.deltay1(); - let u_y2 = (y2 - self.gety2(k2)) / self.deltay2(); - - let fi1: ArrayVec<_, 8> = (0..=self.y1order) - .map(|i| fi(i, self.y1order, u_y1)) - .collect(); - let fi2: ArrayVec<_, 8> = (0..=self.y2order) - .map(|i| fi(i, self.y2order, u_y2)) - .collect(); - - let k3 = usize_from_f64( - (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), - ) - .min(self.ntau - 1 - self.tauorder); - - let u_tau = (tau - self.gettau(k3)) / self.deltatau(); - - let factor = 1.0 - / (if self.reweight1 { - weightfun(ntuple.x1) - } else { - 1.0 - } * if self.reweight2 { - weightfun(ntuple.x2) - } else { - 1.0 - }); - - let size = self.tauorder + 1; - let ny1 = self.ny1; - let ny2 = self.ny2; - - if self.grid.is_none() { - self.itaumin = k3; - self.itaumax = k3 + size; - } else if k3 < self.itaumin || k3 + size > self.itaumax { - self.increase_tau(self.itaumin.min(k3), self.itaumax.max(k3 + size)); - } - - for i3 in 0..=self.tauorder { - let fi3i3 = fi(i3, self.tauorder, u_tau); - - for (i1, fi1i1) in fi1.iter().enumerate() { - for (i2, fi2i2) in fi2.iter().enumerate() { - let fillweight = factor * fi1i1 * fi2i2 * fi3i3 * ntuple.weight; - - let grid = self - .grid - .get_or_insert_with(|| Array3::zeros((size, ny1, ny2))); - - grid[[k3 + 
i3 - self.itaumin, k1 + i1, k2 + i2]] += fillweight; - } - } - } - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { (0..self.ntau) .map(|itau| { @@ -843,85 +614,6 @@ impl LagrangeSparseSubgridV1 { } impl Subgrid for LagrangeSparseSubgridV1 { - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - _: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - self.array - .indexed_iter() - .map(|((imu2, ix1, ix2), sigma)| { - let mut value = sigma * lumi(ix1, ix2, imu2); - if self.reweight { - value *= weightfun(x1[ix1]) * weightfun(x2[ix2]); - } - value - }) - .sum() - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - let y1 = fy(ntuple.x1); - let y2 = fy(ntuple.x2); - let tau = ftau(ntuple.q2); - - if (y2 < self.ymin) - || (y2 > self.ymax) - || (y1 < self.ymin) - || (y1 > self.ymax) - || (tau < self.taumin) - || (tau > self.taumax) - { - return; - } - - let k1 = usize_from_f64((y1 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - let k2 = usize_from_f64((y2 - self.ymin) / self.deltay() - f64_from_usize(self.yorder / 2)) - .min(self.ny - 1 - self.yorder); - - let u_y1 = (y1 - self.gety(k1)) / self.deltay(); - let u_y2 = (y2 - self.gety(k2)) / self.deltay(); - - let fi1: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y1)) - .collect(); - let fi2: ArrayVec<_, 8> = (0..=self.yorder) - .map(|i| fi(i, self.yorder, u_y2)) - .collect(); - - let k3 = usize_from_f64( - (tau - self.taumin) / self.deltatau() - f64_from_usize(self.tauorder / 2), - ) - .min(self.ntau - 1 - self.tauorder); - - let u_tau = (tau - self.gettau(k3)) / self.deltatau(); - - let factor = if self.reweight { - 1.0 / (weightfun(ntuple.x1) * weightfun(ntuple.x2)) - } else { - 1.0 - }; - - for i3 in 0..=self.tauorder { - let fi3i3 = fi(i3, self.tauorder, u_tau); - - for (i1, fi1i1) in fi1.iter().enumerate() { - for (i2, fi2i2) in fi2.iter().enumerate() { - let fillweight = factor * fi1i1 * 
fi2i2 * fi3i3 * ntuple.weight; - - self.array[[k3 + i3, k1 + i1, k2 + i2]] += fillweight; - } - } - } - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { (0..self.ntau) .map(|itau| { diff --git a/pineappl_v0/src/ntuple_subgrid.rs b/pineappl_v0/src/ntuple_subgrid.rs index c47292cb7..3bdba3614 100644 --- a/pineappl_v0/src/ntuple_subgrid.rs +++ b/pineappl_v0/src/ntuple_subgrid.rs @@ -21,24 +21,6 @@ impl NtupleSubgridV1 { } impl Subgrid for NtupleSubgridV1 { - fn convolve( - &self, - _: &[f64], - _: &[f64], - _: &[Mu2], - _: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64 { - panic!("NtupleSubgridV1 doesn't support the convolve operation"); - } - - fn fill(&mut self, ntuple: &Ntuple) { - if ntuple.weight == 0.0 { - return; - } - - self.ntuples.push(ntuple.clone()); - } - fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&[]) } diff --git a/pineappl_v0/src/subgrid.rs b/pineappl_v0/src/subgrid.rs index 1fe8e286c..99df63a60 100644 --- a/pineappl_v0/src/subgrid.rs +++ b/pineappl_v0/src/subgrid.rs @@ -1,7 +1,6 @@ //! Module containing the trait `Subgrid` and supporting structs. use super::empty_subgrid::EmptySubgridV1; -use super::grid::Ntuple; use super::import_only_subgrid::{ImportOnlySubgridV1, ImportOnlySubgridV2}; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; use super::ntuple_subgrid::NtupleSubgridV1; @@ -77,21 +76,6 @@ pub trait Subgrid { /// return an empty slice. fn x2_grid(&self) -> Cow<'_, [f64]>; - /// Convolute the subgrid with a luminosity function, which takes indices as arguments that - /// correspond to the entries given in the slices `x1`, `x2` and `mu2`. - fn convolve( - &self, - x1: &[f64], - x2: &[f64], - mu2: &[Mu2], - lumi: &mut dyn FnMut(usize, usize, usize) -> f64, - ) -> f64; - - /// Fills the subgrid with `weight` for the parton momentum fractions `x1` and `x2`, and the - /// scale `q2`. Filling is currently only support where both renormalization and factorization - /// scale have the same value. 
- fn fill(&mut self, ntuple: &Ntuple); - /// Returns true if `fill` was never called for this grid. fn is_empty(&self) -> bool; From 2a453918eaafb67928317c213ca458f0c3ff9dd1 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 11:57:17 +0200 Subject: [PATCH 22/42] Remove two bits of code --- pineappl_v0/src/grid.rs | 8 -------- pineappl_v0/src/subgrid.rs | 18 ------------------ 2 files changed, 26 deletions(-) diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index 388abcfec..e3ae2cb49 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -310,14 +310,6 @@ impl Grid { } } - fn remapper_mut(&mut self) -> Option<&mut BinRemapper> { - match &mut self.more_members { - MoreMembers::V1(_) => None, - MoreMembers::V2(mmv2) => mmv2.remapper.as_mut(), - MoreMembers::V3(mmv3) => mmv3.remapper.as_mut(), - } - } - /// Returns all information about the bins in this grid. #[must_use] pub const fn bin_info(&self) -> BinInfo<'_> { diff --git a/pineappl_v0/src/subgrid.rs b/pineappl_v0/src/subgrid.rs index 99df63a60..f1227a0db 100644 --- a/pineappl_v0/src/subgrid.rs +++ b/pineappl_v0/src/subgrid.rs @@ -5,7 +5,6 @@ use super::import_only_subgrid::{ImportOnlySubgridV1, ImportOnlySubgridV2}; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; use super::ntuple_subgrid::NtupleSubgridV1; use enum_dispatch::enum_dispatch; -use ndarray::Array3; use serde::{Deserialize, Serialize}; use std::borrow::Cow; @@ -102,23 +101,6 @@ pub trait Subgrid { fn static_scale(&self) -> Option; } -// this is needed in the Python interface -impl From<&SubgridEnum> for Array3 { - fn from(subgrid: &SubgridEnum) -> Self { - let mut result = Self::zeros(( - subgrid.mu2_grid().len(), - subgrid.x1_grid().len(), - subgrid.x2_grid().len(), - )); - - for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { - result[[imu2, ix1, ix2]] = value; - } - - result - } -} - /// Type to iterate over the non-zero contents of a subgrid. 
The tuple contains the indices of the /// `mu2_grid`, the `x1_grid` and finally the `x2_grid`. pub type SubgridIndexedIter<'a> = Box + 'a>; From 8eb980769a875fa2bfd190463b2626edbc399421 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:12:12 +0200 Subject: [PATCH 23/42] Backport fix for Issue #334 to `pineappl_v0` crate --- pineappl_v0/src/grid.rs | 29 ++++++++++++++++++----------- 1 file changed, 18 insertions(+), 11 deletions(-) diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index e3ae2cb49..4d8e6e561 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -251,18 +251,25 @@ impl Grid { .map(|s| s.parse::()) { Some(Ok(pid)) => { - let condition = !self.channels().iter().all(|entry| { - entry.entry().iter().all(|&channels| match index { - 1 => channels.0 == pid, - 2 => channels.1 == pid, - _ => unreachable!(), - }) - }); - - if condition { - Convolution::UnpolPDF(pid) + // Addresses: https://github.com/NNPDF/pineappl/issues/334 + if self.channels().is_empty() && pid == 2212 { + Convolution::UnpolPDF(2212) } else { - Convolution::None + let condition = !self.channels().iter().all(|entry| { + entry.entry().iter().all(|&(a, b, _)| + match index { + 1 => a, + 2 => b, + _ => unreachable!() + } == pid + ) + }); + + if condition { + Convolution::UnpolPDF(pid) + } else { + Convolution::None + } } } None => Convolution::UnpolPDF(2212), From 083521e80dbd08d3e3e97ebc9d7a5f2b9f7d7649 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:14:01 +0200 Subject: [PATCH 24/42] Deduplicate code --- pineappl/src/v0.rs | 84 +++++++--------------------------------------- 1 file changed, 12 insertions(+), 72 deletions(-) diff --git a/pineappl/src/v0.rs b/pineappl/src/v0.rs index 5271cbd70..e24292916 100644 --- a/pineappl/src/v0.rs +++ b/pineappl/src/v0.rs @@ -41,10 +41,21 @@ pub fn default_interps(flexible_scale: bool, convolutions: usize) -> Vec } pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result 
{ + use pineappl_v0::convolutions::Convolution; use pineappl_v0::subgrid::Subgrid as _; let grid = GridV0::read(&mut reader).map_err(|err| Error::Other(err.into()))?; - let convolutions = read_convolutions_from_metadata(&grid); + let convolutions: Vec<_> = grid + .convolutions() + .into_iter() + .map(|old_conv| match old_conv { + Convolution::UnpolPDF(pid) => Some(Conv::new(ConvType::UnpolPDF, pid)), + Convolution::PolPDF(pid) => Some(Conv::new(ConvType::PolPDF, pid)), + Convolution::UnpolFF(pid) => Some(Conv::new(ConvType::UnpolFF, pid)), + Convolution::PolFF(pid) => Some(Conv::new(ConvType::PolFF, pid)), + Convolution::None => None, + }) + .collect(); let flexible_scale_grid = grid.subgrids().iter().any(|subgrid| { subgrid @@ -261,74 +272,3 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { Ok(result) } - -fn read_convolutions_from_metadata(grid: &GridV0) -> Vec> { - grid.key_values().map_or_else( - // if there isn't any metadata, we assume two unpolarized proton-PDFs are used - || vec![Some(Conv::new(ConvType::UnpolPDF, 2212)); 2], - |kv| { - // file format v0 only supports exactly two convolutions - (1..=2) - .map(|index| { - // if there are key-value pairs `convolution_particle_1` and - // `convolution_type_1` and the same with a higher index, we convert this - // metadata into `Convolution` - match ( - kv.get(&format!("convolution_particle_{index}")) - .map(|s| s.parse::()), - kv.get(&format!("convolution_type_{index}")) - .map(String::as_str), - ) { - (_, Some("None")) => None, - (Some(Ok(pid)), Some("UnpolPDF")) => Some(Conv::new(ConvType::UnpolPDF, pid)), - (Some(Ok(pid)), Some("PolPDF")) => Some(Conv::new(ConvType::PolPDF, pid)), - (Some(Ok(pid)), Some("UnpolFF")) => Some(Conv::new(ConvType::UnpolFF, pid)), - (Some(Ok(pid)), Some("PolFF")) => Some(Conv::new(ConvType::PolFF, pid)), - (None, None) => { - // if these key-value pairs are missing use the old metadata - match kv - .get(&format!("initial_state_{index}")) - .map(|s| s.parse::()) 
- { - Some(Ok(pid)) => { - // Addresses: https://github.com/NNPDF/pineappl/issues/334 - if grid.channels().is_empty() && pid == 2212 { - Some(Conv::new(ConvType::UnpolPDF, 2212)) - } else { - let condition = !grid.channels().iter().all(|entry| { - entry.entry().iter().all(|&(a, b, _)| - match index { - 1 => a, - 2 => b, - _ => unreachable!() - } == pid - ) - }); - - condition.then_some(Conv::new(ConvType::UnpolPDF, pid)) - } - } - None => Some(Conv::new(ConvType::UnpolPDF, 2212)), - Some(Err(err)) => panic!( - "metadata 'initial_state_{index}' could not be parsed: {err}" - ), - } - } - (None, Some(_)) => { - panic!("metadata 'convolution_type_{index}' is missing") - } - (Some(_), None) => { - panic!("metadata 'convolution_particle_{index}' is missing") - } - (Some(Ok(_)), Some(type_)) => { - panic!("metadata 'convolution_type_{index} = {type_}' is unknown") - } - (Some(Err(err)), Some(_)) => { - panic!("metadata 'convolution_particle_{index}' could not be parsed: {err}") - } - } - }) - .collect() - }, - ) -} From 356b270a954e33b0d55dc03afd50ad05be0f07bf Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:17:07 +0200 Subject: [PATCH 25/42] Rename test to mark it as a regression test --- CONTRIBUTING.md | 5 +++++ pineappl_cli/tests/convolve.rs | 2 +- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 666c1ec37..f6f36d881 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -56,6 +56,11 @@ increasing the MSRV make sure to set it everywhere to the same value: data with `curl`. To make Github refresh the cached test data when running the CI, increase the integer `XX` in the line `key: test-data-vXX` by one. +### Regression tests for Github Issues + +If you're writing a regression test for a Github Issue, name the test +`issue_XXX`, where `XXX` is the Github Issue number. + ## Git - When you commit, make sure the commit message is written properly. 
This diff --git a/pineappl_cli/tests/convolve.rs b/pineappl_cli/tests/convolve.rs index e20f894bd..fd934524c 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -463,7 +463,7 @@ fn three_convolutions() { } #[test] -fn no_channels_grid() { +fn issue_334() { Command::cargo_bin("pineappl") .unwrap() .args([ From e2c4c31df1f13dbe63aed804356aa136c942f473 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:27:17 +0200 Subject: [PATCH 26/42] Reuse `Grid::pid_basis` from `pineappl_v0` --- pineappl/src/v0.rs | 16 ++++++---------- 1 file changed, 6 insertions(+), 10 deletions(-) diff --git a/pineappl/src/v0.rs b/pineappl/src/v0.rs index e24292916..6a96e214e 100644 --- a/pineappl/src/v0.rs +++ b/pineappl/src/v0.rs @@ -42,6 +42,7 @@ pub fn default_interps(flexible_scale: bool, convolutions: usize) -> Vec pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { use pineappl_v0::convolutions::Convolution; + use pineappl_v0::pids::PidBasis as PidBasisV0; use pineappl_v0::subgrid::Subgrid as _; let grid = GridV0::read(&mut reader).map_err(|err| Error::Other(err.into()))?; @@ -56,6 +57,10 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { Convolution::None => None, }) .collect(); + let pid_basis = match grid.pid_basis() { + PidBasisV0::Pdg => PidBasis::Pdg, + PidBasisV0::Evol => PidBasis::Evol, + }; let flexible_scale_grid = grid.subgrids().iter().any(|subgrid| { subgrid @@ -139,16 +144,7 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { ) }) .collect(), - grid.key_values() - .and_then(|kv| kv.get("lumi_id_types")) - // TODO: use PidBasis::from_str - .map_or(PidBasis::Pdg, |lumi_id_types| { - match lumi_id_types.as_str() { - "pdg_mc_ids" => PidBasis::Pdg, - "evol" => PidBasis::Evol, - _ => panic!("unknown PID basis '{lumi_id_types}'"), - } - }), + pid_basis, convolutions.clone().into_iter().flatten().collect(), default_interps( flexible_scale_grid, From 
76d0ba209347b7608a6ac266e4eeeafb582cf5e9 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:30:24 +0200 Subject: [PATCH 27/42] Remove unneeded dependencies from `pineappl_v0` --- Cargo.lock | 10 ---------- pineappl_v0/Cargo.toml | 14 -------------- 2 files changed, 24 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 802d83a36..0bada600c 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -795,7 +795,6 @@ checksum = "f85776816e34becd8bd9540818d7dc77bf28307f3b3dcc51cc82403c6931680c" dependencies = [ "byteorder", "ndarray", - "num-complex", "num-traits", "py_literal", "zip", @@ -1021,23 +1020,15 @@ name = "pineappl_v0" version = "1.3.3" dependencies = [ "anyhow", - "arrayvec", "bincode", "bitflags 2.4.2", "enum_dispatch", "float-cmp", - "git-version", "itertools", "lz4_flex", - "managed-lhapdf", "ndarray", - "ndarray-npy", - "num-complex", - "rand", - "rand_pcg", "rustc-hash 1.1.0", "serde", - "serde_yaml", "thiserror", ] @@ -1898,6 +1889,5 @@ checksum = "93ab48844d61251bb3835145c521d88aa4031d7139e8485990f60ca911fa0815" dependencies = [ "byteorder", "crc32fast", - "flate2", "thiserror", ] diff --git a/pineappl_v0/Cargo.toml b/pineappl_v0/Cargo.toml index 6561e1a6c..28adf80ee 100644 --- a/pineappl_v0/Cargo.toml +++ b/pineappl_v0/Cargo.toml @@ -17,27 +17,13 @@ workspace = true [dependencies] anyhow = "1.0.48" -arrayvec = "0.7.2" bincode = "1.3.3" bitflags = "2.4.2" enum_dispatch = "0.3.7" float-cmp = { default-features = false, version = "0.9.0" } -git-version = "0.3.5" itertools = "0.10.1" lz4_flex = "0.11.6" ndarray = { features = ["serde"], version = "0.15.4" } rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0.130" } thiserror = "1.0.30" - -[dev-dependencies] -anyhow = "1.0.48" -lhapdf = { package = "managed-lhapdf", version = "0.4.0" } -num-complex = "0.4.4" -rand = { default-features = false, version = "0.8.4" } -rand_pcg = { default-features = false, version = "0.3.1" } -serde_yaml = "0.9.13" -ndarray-npy = "0.8.1" - 
-[features] -static = ["lhapdf/static"] From 363cb7626931387050df3a282f5a485ee75d9925 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:42:31 +0200 Subject: [PATCH 28/42] Remove more `Subgrid` methods in `pineappl_v0` crate --- pineappl_v0/src/empty_subgrid.rs | 31 +-- pineappl_v0/src/import_only_subgrid.rs | 283 +------------------ pineappl_v0/src/lagrange_subgrid.rs | 364 +------------------------ pineappl_v0/src/ntuple_subgrid.rs | 37 +-- pineappl_v0/src/subgrid.rs | 35 --- 5 files changed, 4 insertions(+), 746 deletions(-) diff --git a/pineappl_v0/src/empty_subgrid.rs b/pineappl_v0/src/empty_subgrid.rs index c4c635fb1..f458c195d 100644 --- a/pineappl_v0/src/empty_subgrid.rs +++ b/pineappl_v0/src/empty_subgrid.rs @@ -1,6 +1,6 @@ //! TODO -use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::iter; @@ -26,36 +26,7 @@ impl Subgrid for EmptySubgridV1 { true } - fn merge(&mut self, subgrid: &mut SubgridEnum, _: bool) { - assert!( - subgrid.is_empty(), - "EmptySubgridV1 doesn't support the merge operation for non-empty subgrids" - ); - } - - fn scale(&mut self, _: f64) {} - - fn symmetrize(&mut self) {} - - fn clone_empty(&self) -> SubgridEnum { - Self.into() - } - fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(iter::empty()) } - - fn stats(&self) -> Stats { - Stats { - total: 0, - allocated: 0, - zeros: 0, - overhead: 0, - bytes_per_value: 0, - } - } - - fn static_scale(&self) -> Option { - None - } } diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs index 81393c128..3457352c1 100644 --- a/pineappl_v0/src/import_only_subgrid.rs +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -1,10 +1,9 @@ //! 
TODO use super::sparse_array3::SparseArray3; -use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; -use std::mem; /// TODO #[derive(Clone, Deserialize, Serialize)] @@ -59,104 +58,9 @@ impl Subgrid for ImportOnlySubgridV1 { self.array.is_empty() } - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - if let SubgridEnum::ImportOnlySubgridV1(other_grid) = other { - if self.array.is_empty() && !transpose { - mem::swap(&mut self.array, &mut other_grid.array); - } else { - // TODO: the general case isn't implemented - assert!(self.x1_grid() == other_grid.x1_grid()); - assert!(self.x2_grid() == other_grid.x2_grid()); - - for (other_index, mu2) in other_grid.mu2_grid().iter().enumerate() { - // the following should always be the case - assert_eq!(mu2.ren, mu2.fac); - let q2 = &mu2.ren; - - let index = match self - .q2_grid - .binary_search_by(|val| val.partial_cmp(q2).unwrap()) - { - Ok(index) => index, - Err(index) => { - self.q2_grid.insert(index, *q2); - self.array.increase_x_at(index); - index - } - }; - - for ((_, j, k), value) in other_grid - .array - .indexed_iter() - .filter(|&((i, _, _), _)| i == other_index) - { - let (j, k) = if transpose { (k, j) } else { (j, k) }; - self.array[[index, j, k]] += value; - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.array.clear(); - } else { - self.array.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - let mut new_array = - SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len()); - - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { - new_array[[i, j, k]] = sigma; - } - // do not change the diagonal entries (k==j) - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { - new_array[[i, k, j]] += 
sigma; - } - - mem::swap(&mut self.array, &mut new_array); - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - array: SparseArray3::new(self.q2_grid.len(), self.x1_grid.len(), self.x2_grid.len()), - q2_grid: self.q2_grid.clone(), - x1_grid: self.x1_grid.clone(), - x2_grid: self.x2_grid.clone(), - } - .into() - } - fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(self.array.indexed_iter()) } - - fn stats(&self) -> Stats { - Stats { - total: self.q2_grid.len() * self.x1_grid.len() * self.x2_grid.len(), - allocated: self.array.len() + self.array.zeros(), - zeros: self.array.zeros(), - overhead: self.array.overhead(), - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let &[static_scale] = self.q2_grid.as_slice() { - Some(Mu2 { - ren: static_scale, - fac: static_scale, - }) - } else { - None - } - } } /// TODO @@ -208,192 +112,7 @@ impl Subgrid for ImportOnlySubgridV2 { self.array.is_empty() } - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - if let SubgridEnum::ImportOnlySubgridV2(other_grid) = other { - if self.array.is_empty() && !transpose { - mem::swap(&mut self.array, &mut other_grid.array); - } else { - let rhs_x1 = if transpose { - other_grid.x2_grid() - } else { - other_grid.x1_grid() - }; - let rhs_x2 = if transpose { - other_grid.x1_grid() - } else { - other_grid.x2_grid() - }; - - if (self.x1_grid() != rhs_x1) || (self.x2_grid() != rhs_x2) { - let mut x1_grid = self.x1_grid.clone(); - let mut x2_grid = self.x2_grid.clone(); - - x1_grid.extend_from_slice(&rhs_x1); - x1_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - x1_grid.dedup(); - x2_grid.extend_from_slice(&rhs_x2); - x2_grid.sort_by(|a, b| a.partial_cmp(b).unwrap()); - x2_grid.dedup(); - - let mut array = - SparseArray3::new(self.array.dimensions().0, x1_grid.len(), x2_grid.len()); - - for ((i, j, k), value) in self.array.indexed_iter() { - let target_j = x1_grid - .iter() - .position(|&x| x == self.x1_grid[j]) - 
.unwrap_or_else(|| unreachable!()); - let target_k = x2_grid - .iter() - .position(|&x| x == self.x2_grid[k]) - .unwrap_or_else(|| unreachable!()); - - array[[i, target_j, target_k]] = value; - } - - self.array = array; - self.x1_grid = x1_grid; - self.x2_grid = x2_grid; - } - - for (other_index, mu2) in other_grid.mu2_grid().iter().enumerate() { - let index = match self - .mu2_grid - .binary_search_by(|val| val.partial_cmp(mu2).unwrap()) - { - Ok(index) => index, - Err(index) => { - self.mu2_grid.insert(index, mu2.clone()); - self.array.increase_x_at(index); - index - } - }; - - for ((_, j, k), value) in other_grid - .array - .indexed_iter() - .filter(|&((i, _, _), _)| i == other_index) - { - let (j, k) = if transpose { (k, j) } else { (j, k) }; - let target_j = self - .x1_grid - .iter() - .position(|&x| x == rhs_x1[j]) - .unwrap_or_else(|| unreachable!()); - let target_k = self - .x2_grid - .iter() - .position(|&x| x == rhs_x2[k]) - .unwrap_or_else(|| unreachable!()); - - self.array[[index, target_j, target_k]] += value; - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.array.clear(); - } else { - self.array.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - let mut new_array = - SparseArray3::new(self.mu2_grid.len(), self.x1_grid.len(), self.x2_grid.len()); - - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { - new_array[[i, j, k]] = sigma; - } - // do not change the diagonal entries (k==j) - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { - new_array[[i, k, j]] += sigma; - } - - mem::swap(&mut self.array, &mut new_array); - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - array: SparseArray3::new(self.mu2_grid.len(), self.x1_grid.len(), self.x2_grid.len()), - mu2_grid: self.mu2_grid.clone(), - x1_grid: self.x1_grid.clone(), - x2_grid: self.x2_grid.clone(), - } - .into() - } - fn 
indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(self.array.indexed_iter()) } - - fn stats(&self) -> Stats { - Stats { - total: self.mu2_grid.len() * self.x1_grid.len() * self.x2_grid.len(), - allocated: self.array.len() + self.array.zeros(), - zeros: self.array.zeros(), - overhead: self.array.overhead(), - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let [static_scale] = self.mu2_grid.as_slice() { - Some(static_scale.clone()) - } else { - None - } - } -} - -impl From<&SubgridEnum> for ImportOnlySubgridV2 { - fn from(subgrid: &SubgridEnum) -> Self { - // find smallest ranges - let (mu2_range, x1_range, x2_range) = subgrid.indexed_iter().fold( - ( - subgrid.mu2_grid().len()..0, - subgrid.x1_grid().len()..0, - subgrid.x2_grid().len()..0, - ), - |prev, ((imu2, ix1, ix2), _)| { - ( - prev.0.start.min(imu2)..prev.0.end.max(imu2 + 1), - prev.1.start.min(ix1)..prev.1.end.max(ix1 + 1), - prev.2.start.min(ix2)..prev.2.end.max(ix2 + 1), - ) - }, - ); - - let (mu2_grid, static_scale) = subgrid.static_scale().map_or_else( - || (subgrid.mu2_grid()[mu2_range.clone()].to_vec(), false), - |scale| (vec![scale], true), - ); - let x1_grid = subgrid.x1_grid()[x1_range.clone()].to_vec(); - let x2_grid = subgrid.x2_grid()[x2_range.clone()].to_vec(); - - let mut array = SparseArray3::new(mu2_grid.len(), x1_grid.len(), x2_grid.len()); - - for ((imu2, ix1, ix2), value) in subgrid.indexed_iter() { - // if there's a static scale we want every value to be added to same grid point - let index = if static_scale { - 0 - } else { - imu2 - mu2_range.start - }; - - array[[index, ix1 - x1_range.start, ix2 - x2_range.start]] += value; - } - - Self { - array, - mu2_grid, - x1_grid, - x2_grid, - } - } } diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs index ac0e286b7..3339307a1 100644 --- a/pineappl_v0/src/lagrange_subgrid.rs +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -2,14 +2,11 @@ use 
super::convert::f64_from_usize; use super::sparse_array3::SparseArray3; -use super::subgrid::{ - ExtraSubgridParams, Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter, SubgridParams, -}; +use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridIndexedIter, SubgridParams}; use ndarray::Array3; use serde::{Deserialize, Serialize}; use std::borrow::Cow; use std::iter; -use std::mem; fn weightfun(x: f64) -> f64 { (x.sqrt() / (1.0 - 0.99 * x)).powi(3) @@ -95,21 +92,6 @@ impl LagrangeSubgridV1 { fn gettau(&self, iy: usize) -> f64 { f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) } - - fn increase_tau(&mut self, new_itaumin: usize, new_itaumax: usize) { - let min_diff = self.itaumin - new_itaumin; - - let mut new_grid = Array3::zeros((new_itaumax - new_itaumin, self.ny, self.ny)); - - for ((i, j, k), value) in self.grid.as_ref().unwrap().indexed_iter() { - new_grid[[i + min_diff, j, k]] = *value; - } - - self.itaumin = new_itaumin; - self.itaumax = new_itaumax; - - mem::swap(&mut self.grid, &mut Some(new_grid)); - } } impl Subgrid for LagrangeSubgridV1 { @@ -134,96 +116,6 @@ impl Subgrid for LagrangeSubgridV1 { self.grid.is_none() } - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - let x1_equal = self.x1_grid() == other.x1_grid(); - let x2_equal = self.x2_grid() == other.x2_grid(); - - if let SubgridEnum::LagrangeSubgridV1(other_grid) = other { - if let Some(other_grid_grid) = &mut other_grid.grid { - if self.grid.is_some() { - // TODO: the general case isn't implemented - assert!(x1_equal); - assert!(x2_equal); - - let new_itaumin = self.itaumin.min(other_grid.itaumin); - let new_itaumax = self.itaumax.max(other_grid.itaumax); - let offset = other_grid.itaumin.saturating_sub(self.itaumin); - - // TODO: we need much more checks here if there subgrids are compatible at all - - if (self.itaumin != new_itaumin) || (self.itaumax != new_itaumax) { - self.increase_tau(new_itaumin, new_itaumax); - } - - let self_grid = 
self.grid.as_mut().unwrap(); - - if transpose { - for ((i, k, j), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } else { - for ((i, j, k), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } - } else { - self.grid = other_grid.grid.take(); - self.itaumin = other_grid.itaumin; - self.itaumax = other_grid.itaumax; - - if transpose { - if let Some(grid) = &mut self.grid { - grid.swap_axes(1, 2); - } - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.grid = None; - } else if let Some(self_grid) = &mut self.grid { - self_grid.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - if let Some(grid) = self.grid.as_mut() { - let (i_size, j_size, k_size) = grid.dim(); - - for i in 0..i_size { - for j in 0..j_size { - for k in j + 1..k_size { - grid[[i, j, k]] += grid[[i, k, j]]; - grid[[i, k, j]] = 0.0; - } - } - } - } - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - grid: None, - ntau: self.ntau, - ny: self.ny, - yorder: self.yorder, - tauorder: self.tauorder, - itaumin: 0, - itaumax: 0, - reweight: self.reweight, - ymin: self.ymin, - ymax: self.ymax, - taumin: self.taumin, - taumax: self.taumax, - } - .into() - } - fn indexed_iter(&self) -> SubgridIndexedIter<'_> { self.grid.as_ref().map_or_else( || Box::new(iter::empty()) as Box>, @@ -245,35 +137,6 @@ impl Subgrid for LagrangeSubgridV1 { }, ) } - - fn stats(&self) -> Stats { - let (non_zeros, zeros) = self.grid.as_ref().map_or((0, 0), |array| { - array.iter().fold((0, 0), |mut result, value| { - if *value == 0.0 { - result.0 += 1; - } else { - result.1 += 1; - } - result - }) - }); - - Stats { - total: non_zeros + zeros, - allocated: non_zeros + zeros, - zeros, - overhead: 0, - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let [static_scale] = self.mu2_grid().as_ref() { - Some(static_scale.clone()) - } 
else { - None - } - } } /// Subgrid which uses Lagrange-interpolation. @@ -363,21 +226,6 @@ impl LagrangeSubgridV2 { f64_from_usize(iy).mul_add(self.deltatau(), self.taumin) } } - - fn increase_tau(&mut self, new_itaumin: usize, new_itaumax: usize) { - let min_diff = self.itaumin - new_itaumin; - - let mut new_grid = Array3::zeros((new_itaumax - new_itaumin, self.ny1, self.ny2)); - - for ((i, j, k), value) in self.grid.as_ref().unwrap().indexed_iter() { - new_grid[[i + min_diff, j, k]] = *value; - } - - self.itaumin = new_itaumin; - self.itaumax = new_itaumax; - - mem::swap(&mut self.grid, &mut Some(new_grid)); - } } impl Subgrid for LagrangeSubgridV2 { @@ -402,107 +250,6 @@ impl Subgrid for LagrangeSubgridV2 { self.grid.is_none() } - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - let x1_equal = self.x1_grid() == other.x1_grid(); - let x2_equal = self.x2_grid() == other.x2_grid(); - - if let SubgridEnum::LagrangeSubgridV2(other_grid) = other { - if let Some(other_grid_grid) = &mut other_grid.grid { - if self.grid.is_some() { - // TODO: the general case isn't implemented - assert!(x1_equal); - assert!(x2_equal); - - let new_itaumin = self.itaumin.min(other_grid.itaumin); - let new_itaumax = self.itaumax.max(other_grid.itaumax); - let offset = other_grid.itaumin.saturating_sub(self.itaumin); - - // TODO: we need much more checks here if there subgrids are compatible at all - - if (self.itaumin != new_itaumin) || (self.itaumax != new_itaumax) { - self.increase_tau(new_itaumin, new_itaumax); - } - - if (other_grid.static_q2 == -1.0) || (self.static_q2 != other_grid.static_q2) { - self.static_q2 = -1.0; - } - - let self_grid = self.grid.as_mut().unwrap(); - - if transpose { - for ((i, k, j), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } else { - for ((i, j, k), value) in other_grid_grid.indexed_iter() { - self_grid[[i + offset, j, k]] += value; - } - } - } else { - self.grid = other_grid.grid.take(); - 
self.itaumin = other_grid.itaumin; - self.itaumax = other_grid.itaumax; - self.static_q2 = other_grid.static_q2; - - if transpose { - if let Some(grid) = &mut self.grid { - grid.swap_axes(1, 2); - } - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.grid = None; - } else if let Some(self_grid) = &mut self.grid { - self_grid.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - if let Some(grid) = self.grid.as_mut() { - let (i_size, j_size, k_size) = grid.dim(); - - for i in 0..i_size { - for j in 0..j_size { - for k in j + 1..k_size { - grid[[i, j, k]] += grid[[i, k, j]]; - grid[[i, k, j]] = 0.0; - } - } - } - } - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - grid: None, - ntau: self.ntau, - ny1: self.ny1, - ny2: self.ny2, - y1order: self.y1order, - y2order: self.y2order, - tauorder: self.tauorder, - itaumin: 0, - itaumax: 0, - reweight1: self.reweight1, - reweight2: self.reweight2, - y1min: self.y1min, - y1max: self.y1max, - y2min: self.y2min, - y2max: self.y2max, - taumin: self.taumin, - taumax: self.taumax, - static_q2: 0.0, - } - .into() - } - fn indexed_iter(&self) -> SubgridIndexedIter<'_> { self.grid.as_ref().map_or_else( || Box::new(iter::empty()) as Box>, @@ -528,34 +275,6 @@ impl Subgrid for LagrangeSubgridV2 { }, ) } - - fn stats(&self) -> Stats { - let (non_zeros, zeros) = self.grid.as_ref().map_or((0, 0), |array| { - array.iter().fold((0, 0), |mut result, value| { - if *value == 0.0 { - result.0 += 1; - } else { - result.1 += 1; - } - result - }) - }); - - Stats { - total: non_zeros + zeros, - allocated: non_zeros + zeros, - zeros, - overhead: 0, - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - (self.static_q2 > 0.0).then_some(Mu2 { - ren: self.static_q2, - fac: self.static_q2, - }) - } } /// Subgrid which uses Lagrange-interpolation, but also stores its contents in a space-efficient @@ -635,69 +354,6 @@ impl Subgrid for 
LagrangeSparseSubgridV1 { self.array.is_empty() } - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - if let SubgridEnum::LagrangeSparseSubgridV1(other_grid) = other { - if self.array.is_empty() && !transpose { - mem::swap(&mut self.array, &mut other_grid.array); - } else { - // TODO: the general case isn't implemented - assert!(self.x1_grid() == other_grid.x1_grid()); - assert!(self.x2_grid() == other_grid.x2_grid()); - - // TODO: we need much more checks here if there subgrids are compatible at all - - if transpose { - for ((i, k, j), value) in other_grid.array.indexed_iter() { - self.array[[i, j, k]] += value; - } - } else { - for ((i, j, k), value) in other_grid.array.indexed_iter() { - self.array[[i, j, k]] += value; - } - } - } - } else { - todo!(); - } - } - - fn scale(&mut self, factor: f64) { - if factor == 0.0 { - self.array.clear(); - } else { - self.array.iter_mut().for_each(|x| *x *= factor); - } - } - - fn symmetrize(&mut self) { - let mut new_array = SparseArray3::new(self.ntau, self.ny, self.ny); - - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k >= j) { - new_array[[i, j, k]] = sigma; - } - for ((i, j, k), sigma) in self.array.indexed_iter().filter(|((_, j, k), _)| k < j) { - new_array[[i, k, j]] += sigma; - } - - mem::swap(&mut self.array, &mut new_array); - } - - fn clone_empty(&self) -> SubgridEnum { - Self { - array: SparseArray3::new(self.ntau, self.ny, self.ny), - ntau: self.ntau, - ny: self.ny, - yorder: self.yorder, - tauorder: self.tauorder, - reweight: self.reweight, - ymin: self.ymin, - ymax: self.ymax, - taumin: self.taumin, - taumax: self.taumax, - } - .into() - } - fn indexed_iter(&self) -> SubgridIndexedIter<'_> { Box::new(self.array.indexed_iter().map(|(tuple, value)| { ( @@ -711,24 +367,6 @@ impl Subgrid for LagrangeSparseSubgridV1 { ) })) } - - fn stats(&self) -> Stats { - Stats { - total: self.ntau * self.ny * self.ny, - allocated: self.array.len() + self.array.zeros(), - zeros: 
self.array.zeros(), - overhead: self.array.overhead(), - bytes_per_value: mem::size_of::(), - } - } - - fn static_scale(&self) -> Option { - if let [static_scale] = self.mu2_grid().as_ref() { - Some(static_scale.clone()) - } else { - None - } - } } impl From<&LagrangeSubgridV1> for LagrangeSparseSubgridV1 { diff --git a/pineappl_v0/src/ntuple_subgrid.rs b/pineappl_v0/src/ntuple_subgrid.rs index 3bdba3614..3f27e2e29 100644 --- a/pineappl_v0/src/ntuple_subgrid.rs +++ b/pineappl_v0/src/ntuple_subgrid.rs @@ -1,10 +1,9 @@ //! Provides an implementation of the `Grid` trait with n-tuples. use super::grid::Ntuple; -use super::subgrid::{Mu2, Stats, Subgrid, SubgridEnum, SubgridIndexedIter}; +use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; use serde::{Deserialize, Serialize}; use std::borrow::Cow; -use std::mem; /// Structure holding a grid with an n-tuple as the storage method for weights. #[derive(Clone, Default, Deserialize, Serialize)] @@ -37,41 +36,7 @@ impl Subgrid for NtupleSubgridV1 { self.ntuples.is_empty() } - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool) { - assert!(!transpose); - - if let SubgridEnum::NtupleSubgridV1(other_grid) = other { - self.ntuples.append(&mut other_grid.ntuples); - } else { - panic!("NtupleSubgridV1 doesn't support the merge operation with subgrid types other than itself"); - } - } - - fn scale(&mut self, factor: f64) { - self.ntuples.iter_mut().for_each(|t| t.weight *= factor); - } - - fn symmetrize(&mut self) {} - - fn clone_empty(&self) -> SubgridEnum { - Self::new().into() - } - fn indexed_iter(&self) -> SubgridIndexedIter<'_> { panic!("NtupleSubgridV1 doesn't support the indexed_iter operation"); } - - fn stats(&self) -> Stats { - Stats { - total: self.ntuples.len(), - allocated: self.ntuples.len(), - zeros: 0, - overhead: 0, - bytes_per_value: mem::size_of::>(), - } - } - - fn static_scale(&self) -> Option { - todo!() - } } diff --git a/pineappl_v0/src/subgrid.rs b/pineappl_v0/src/subgrid.rs index 
f1227a0db..b541741c1 100644 --- a/pineappl_v0/src/subgrid.rs +++ b/pineappl_v0/src/subgrid.rs @@ -78,27 +78,8 @@ pub trait Subgrid { /// Returns true if `fill` was never called for this grid. fn is_empty(&self) -> bool; - /// Merges `other` into this subgrid. - fn merge(&mut self, other: &mut SubgridEnum, transpose: bool); - - /// Scale the subgrid by `factor`. - fn scale(&mut self, factor: f64); - - /// Assumes that the initial states for this grid are the same and uses this to optimize the - /// grid by getting rid of almost half of the entries. - fn symmetrize(&mut self); - - /// Returns an empty copy of the current subgrid. - fn clone_empty(&self) -> SubgridEnum; - /// Return an iterator over all non-zero elements of the subgrid. fn indexed_iter(&self) -> SubgridIndexedIter<'_>; - - /// Return statistics for this subgrid. - fn stats(&self) -> Stats; - - /// Return the static (single) scale, if this subgrid has one. - fn static_scale(&self) -> Option; } /// Type to iterate over the non-zero contents of a subgrid. The tuple contains the indices of the @@ -119,22 +100,6 @@ pub struct SubgridParams { x_order: usize, } -impl Default for SubgridParams { - fn default() -> Self { - Self { - q2_bins: 40, - q2_max: 1e8, - q2_min: 1e2, - q2_order: 3, - reweight: true, - x_bins: 50, - x_max: 1.0, - x_min: 2e-7, - x_order: 3, - } - } -} - impl SubgridParams { /// Returns the number of bins for the $Q^2$ axis. 
#[must_use] From 7365b8438f5f86b0a33ff9065bbe88da57d326cc Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 12:54:21 +0200 Subject: [PATCH 29/42] Drop compressed `Grid::read` in `pineappl_v0` crate --- Cargo.lock | 1 - pineappl/src/v0.rs | 2 +- pineappl_v0/Cargo.toml | 1 - pineappl_v0/src/grid.rs | 19 +++---------------- 4 files changed, 4 insertions(+), 19 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 0bada600c..95d4e0e5a 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1025,7 +1025,6 @@ dependencies = [ "enum_dispatch", "float-cmp", "itertools", - "lz4_flex", "ndarray", "rustc-hash 1.1.0", "serde", diff --git a/pineappl/src/v0.rs b/pineappl/src/v0.rs index 6a96e214e..4b7b0c23e 100644 --- a/pineappl/src/v0.rs +++ b/pineappl/src/v0.rs @@ -45,7 +45,7 @@ pub fn read_uncompressed_v0(mut reader: impl BufRead) -> Result { use pineappl_v0::pids::PidBasis as PidBasisV0; use pineappl_v0::subgrid::Subgrid as _; - let grid = GridV0::read(&mut reader).map_err(|err| Error::Other(err.into()))?; + let grid = GridV0::read_uncompressed(&mut reader).map_err(|err| Error::Other(err.into()))?; let convolutions: Vec<_> = grid .convolutions() .into_iter() diff --git a/pineappl_v0/Cargo.toml b/pineappl_v0/Cargo.toml index 28adf80ee..47d1cc9bf 100644 --- a/pineappl_v0/Cargo.toml +++ b/pineappl_v0/Cargo.toml @@ -22,7 +22,6 @@ bitflags = "2.4.2" enum_dispatch = "0.3.7" float-cmp = { default-features = false, version = "0.9.0" } itertools = "0.10.1" -lz4_flex = "0.11.6" ndarray = { features = ["serde"], version = "0.15.4" } rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0.130" } diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index 4d8e6e561..3c7c77d7d 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -6,11 +6,10 @@ use super::convolutions::Convolution; use super::pids::PidBasis; use super::subgrid::{SubgridEnum, SubgridParams}; use bitflags::bitflags; -use lz4_flex::frame::FrameDecoder; use ndarray::{Array3, 
ArrayView3}; use serde::{Deserialize, Serialize, Serializer}; use std::collections::{BTreeMap, HashMap}; -use std::io::{self, BufRead, BufReader, Read}; +use std::io::{self, BufRead}; use thiserror::Error; /// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a @@ -166,24 +165,12 @@ impl Grid { PidBasis::Pdg } - /// Construct a `Grid` by deserializing it from `reader`. Reading is buffered. + /// Construct a `Grid` by deserializing it from `reader`. /// /// # Errors /// /// If reading from the compressed or uncompressed stream fails an error is returned. - pub fn read(reader: impl Read) -> Result { - let mut reader = BufReader::new(reader); - let buffer = reader.fill_buf().map_err(GridError::IoFailure)?; - let magic_bytes: [u8; 4] = buffer[0..4].try_into().unwrap_or_else(|_| unreachable!()); - - if u32::from_le_bytes(magic_bytes) == 0x18_4D_22_04 { - Self::read_uncompressed(FrameDecoder::new(reader)) - } else { - Self::read_uncompressed(reader) - } - } - - fn read_uncompressed(mut reader: impl BufRead) -> Result { + pub fn read_uncompressed(mut reader: impl BufRead) -> Result { let magic_bytes: [u8; 16] = reader.fill_buf().map_err(GridError::IoFailure)?[0..16] .try_into() .unwrap_or_else(|_| unreachable!()); From c20f6d514185b137e59ba93b55be2017acf19a62 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 13:02:25 +0200 Subject: [PATCH 30/42] Remove code from `bin.rs` --- pineappl_v0/src/bin.rs | 540 +------------------------------------ pineappl_v0/src/convert.rs | 6 - 2 files changed, 1 insertion(+), 545 deletions(-) diff --git a/pineappl_v0/src/bin.rs b/pineappl_v0/src/bin.rs index 25fb5fbf0..32f7020f9 100644 --- a/pineappl_v0/src/bin.rs +++ b/pineappl_v0/src/bin.rs @@ -1,13 +1,10 @@ //! 
Module that contains helpers for binning observables -use super::convert::{f64_from_usize, usize_from_f64}; -use float_cmp::approx_eq; -use itertools::izip; +use super::convert::f64_from_usize; use itertools::Itertools; use serde::{Deserialize, Serialize}; use std::f64; use std::ops::Range; -use std::str::FromStr; use thiserror::Error; #[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] @@ -108,159 +105,6 @@ pub enum ParseBinRemapperError { }, } -impl FromStr for BinRemapper { - type Err = ParseBinRemapperError; - - fn from_str(s: &str) -> Result { - let remaps: Result>>, Self::Err> = s - .split(';') - .map(|string| { - string - .split('|') - .map(|string| { - string - .split_once(':') - .map_or(Ok(string), |(lhs, rhs)| { - match (lhs.trim().parse::(), rhs.trim().parse::()) { - (Err(lhs), Err(rhs)) => Err(ParseBinRemapperError::Error(format!( - "unable to parse 'N:M' syntax from: '{string}' (N: '{lhs}', M: '{rhs}')" - ))), - // skip :N specification - (Err(_), Ok(_)) => Ok(lhs), - // skip N: specification - (Ok(_), Err(_)) => Ok(rhs), - // skip N:M specification - (Ok(_), Ok(_)) => Ok(""), - } - })? 
- .split(',') - .filter_map(|string| { - let string = string.trim(); - if string.is_empty() { - None - } else { - Some(string.parse::().map_err(|err| { - ParseBinRemapperError::Error(format!( - "unable to parse limit '{string}': '{err}')" - )) - })) - } - }) - .collect() - }) - .collect() - }) - .collect(); - let mut remaps = remaps?; - - if let Some(first) = remaps.first() { - if first.len() != 1 { - return Err(ParseBinRemapperError::Error( - "'|' syntax not meaningful for first dimension".to_owned(), - )); - } - } - - // go over `remaps` again, and repeat previous entries as requested with the `|` syntax - for vec in &mut remaps { - for i in 1..vec.len() { - if vec[i].is_empty() { - if vec[i - 1].is_empty() { - return Err(ParseBinRemapperError::Error( - "empty repetition with '|'".to_owned(), - )); - } - - vec[i] = vec[i - 1].clone(); - } - } - } - - // go over `remaps` again, this time remove bin as requested with the `:N` or `N:` syntax - for (vec, string) in remaps.iter_mut().zip(s.split(';')) { - for (vec, string) in vec.iter_mut().zip(string.split('|')) { - let (lhs, rhs) = { - if let Some((lhs, rhs)) = string.split_once(':') { - (lhs.parse::(), rhs.parse::()) - } else { - // there's no colon - continue; - } - }; - - if let Ok(num) = rhs { - vec.truncate(vec.len() - num); - } - - if let Ok(num) = lhs { - vec.drain(0..num); - } - - if vec.len() <= 1 { - return Err(ParseBinRemapperError::Error( - "no limits due to ':' syntax".to_owned(), - )); - } - } - } - - let dimensions = remaps.len(); - let mut normalizations = Vec::new(); - let mut limits = Vec::new(); - let mut buffer = Vec::with_capacity(dimensions); - let mut pipe_indices = vec![0; dimensions]; - let mut last_indices = vec![0; dimensions]; - - 'looop: for indices in remaps - .iter() - .map(|vec| 0..vec.iter().map(|vec| vec.len() - 1).max().unwrap()) - .multi_cartesian_product() - { - // calculate `pipe_indices`, which stores the indices for the second dimension of `remaps` - for d in 0..dimensions - 1 
{ - if indices[d] > last_indices[d] { - for dp in d + 1..dimensions { - if remaps[dp].len() != 1 { - pipe_indices[dp] += 1; - } - } - } - } - - last_indices.clone_from(&indices); - - let mut normalization = 1.0; - - for (remap, &pipe_index, &i) in izip!(&remaps, &pipe_indices, &indices) { - if let Some(r) = remap.get(pipe_index) { - if r.len() <= (i + 1) { - buffer.clear(); - - // this index doesn't exist - continue 'looop; - } - - let left = r[i]; - let right = r[i + 1]; - - buffer.push((left, right)); - normalization *= right - left; - } else { - return Err(ParseBinRemapperError::Error( - "missing '|' specification: number of variants too small".to_owned(), - )); - } - } - - limits.append(&mut buffer); - normalizations.push(normalization); - } - - Self::new(normalizations, limits) - .map_err(|err| ParseBinRemapperError::BinRemapperNewError { source: err }) - } -} - impl<'a> BinInfo<'a> { /// Constructor. #[must_use] @@ -268,22 +112,6 @@ impl<'a> BinInfo<'a> { Self { limits, remapper } } - /// Return the bin limits for the bin with index `bin`. - #[must_use] - pub fn bin_limits(&self, bin: usize) -> Vec<(f64, f64)> { - // TODO: make return type a Cow - self.remapper.map_or_else( - || { - let limits = &self.limits.limits()[bin..=bin + 1]; - vec![(limits[0], limits[1])] - }, - |remapper| { - let dim = remapper.dimensions(); - remapper.limits()[bin * dim..(bin + 1) * dim].to_vec() - }, - ) - } - /// Returns the number of bins. #[must_use] pub fn bins(&self) -> usize { @@ -296,76 +124,6 @@ impl<'a> BinInfo<'a> { self.remapper.map_or(1, BinRemapper::dimensions) } - /// Return the index of the bin corresponding to `limits`. If no bin is found `None` is - /// returned. - #[must_use] - pub fn find_bin(&self, limits: &[(f64, f64)]) -> Option { - (0..self.bins()) - .map(|bin| self.bin_limits(bin)) - .position(|lim| lim == limits) - } - - /// Returns all left-limits for the specified dimension. If the dimension does not exist, an - /// empty vector is returned. 
- #[must_use] - pub fn left(&self, dimension: usize) -> Vec { - if dimension >= self.dimensions() { - vec![] - } else { - self.remapper.map_or_else( - || { - self.limits - .limits() - .iter() - .take(self.bins()) - .copied() - .collect() - }, - |remapper| { - remapper - .limits() - .iter() - .skip(dimension) - .step_by(self.dimensions()) - .take(self.bins()) - .map(|tuple| tuple.0) - .collect() - }, - ) - } - } - - /// Returns all right-limits for the specified dimension. If the dimension does not exist, an - /// empty vector is returned. - #[must_use] - pub fn right(&self, dimension: usize) -> Vec { - if dimension >= self.dimensions() { - vec![] - } else { - self.remapper.map_or_else( - || { - self.limits - .limits() - .iter() - .skip(1) - .take(self.bins()) - .copied() - .collect() - }, - |remapper| { - remapper - .limits() - .iter() - .skip(dimension) - .step_by(self.dimensions()) - .take(self.bins()) - .map(|tuple| tuple.1) - .collect() - }, - ) - } - } - /// For each bin return a vector of `(left, right)` limits for each dimension. #[must_use] pub fn limits(&self) -> Vec> { @@ -396,15 +154,6 @@ impl<'a> BinInfo<'a> { |remapper| remapper.normalizations().to_vec(), ) } - - /// Returns a vector of half-open intervals that show how multi-dimensional bins can be - /// efficiently sliced into one-dimensional histograms. - #[must_use] - pub fn slices(&self) -> Vec<(usize, usize)> { - // TODO: convert this to Vec> - self.remapper - .map_or_else(|| vec![(0, self.limits.bins())], BinRemapper::slices) - } } impl PartialEq> for BinInfo<'_> { @@ -414,56 +163,6 @@ impl PartialEq> for BinInfo<'_> { } impl BinRemapper { - /// Create a new `BinRemapper` object with the specified number of bins and dimensions and - /// limits. - /// - /// # Errors - /// - /// Returns an error if the length of `limits` is not a multiple of the length of - /// `normalizations`, or if the limits of at least two bins overlap. 
- pub fn new( - normalizations: Vec, - limits: Vec<(f64, f64)>, - ) -> Result { - if limits.len() % normalizations.len() == 0 { - let dimensions = limits.len() / normalizations.len(); - let mut overlaps = Vec::new(); - - for (i, bin_i) in limits.chunks_exact(dimensions).enumerate() { - for (j, bin_j) in limits.chunks_exact(dimensions).enumerate().skip(i + 1) { - if bin_i.iter().zip(bin_j).all(|((l1, r1), (l2, r2))| { - ((l2 >= l1) && (l2 < r1)) || ((l1 >= l2) && (l1 < r2)) - }) { - overlaps.push(j); - } - } - } - - overlaps.sort_unstable(); - overlaps.dedup(); - - if overlaps.is_empty() { - Ok(Self { - normalizations, - limits, - }) - } else { - Err(BinRemapperNewError::OverlappingBins { overlaps }) - } - } else { - Err(BinRemapperNewError::DimensionUnknown { - normalizations_len: normalizations.len(), - limits_len: limits.len(), - }) - } - } - - /// Return the number of bins. - #[must_use] - pub fn bins(&self) -> usize { - self.normalizations.len() - } - /// Return the number of dimensions. #[must_use] pub fn dimensions(&self) -> usize { @@ -476,96 +175,11 @@ impl BinRemapper { &self.limits } - /// Merges the bins for the corresponding range together in a single one. - /// - /// # Errors - /// - /// When `range` refers to non-consecutive bins, an error is returned. - pub fn merge_bins(&mut self, range: Range) -> Result<(), MergeBinError> { - if self - .slices() - .iter() - .any(|&(start, end)| (start <= range.start) && (range.end <= end)) - { - for bin in range.start + 1..range.end { - self.normalizations[range.start] += self.normalizations[bin]; - } - - let dim = self.dimensions(); - - self.normalizations.drain(range.start + 1..range.end); - self.limits[dim * (range.start + 1) - 1].1 = self.limits[dim * range.end - 1].1; - self.limits.drain(dim * (range.start + 1)..dim * range.end); - - Ok(()) - } else { - Err(MergeBinError::NonConsecutiveRange(range)) - } - } - - /// Merge the `BinRemapper` of `other` into `self` on the right-hand-side. 
- /// - /// # Errors - /// - /// If the dimensions of both remappers are not the same an error is returned. - pub fn merge(&mut self, other: &Self) -> Result<(), MergeBinError> { - let lhs_dim = self.dimensions(); - let rhs_dim = other.dimensions(); - - if lhs_dim != rhs_dim { - return Err(MergeBinError::IncompatibleDimensions { - lhs: lhs_dim, - rhs: rhs_dim, - }); - } - - // TODO: we shouldn't allow overlapping bins - self.normalizations.extend_from_slice(&other.normalizations); - self.limits.extend_from_slice(&other.limits); - - Ok(()) - } - /// Return the normalization factors for all bins. #[must_use] pub fn normalizations(&self) -> &[f64] { &self.normalizations } - - /// Returns a vector of half-open intervals that show how multi-dimensional bins can be - /// efficiently sliced into one-dimensional histograms. - #[must_use] - pub fn slices(&self) -> Vec<(usize, usize)> { - if self.dimensions() == 1 { - vec![(0, self.bins())] - } else { - self.limits() - .iter() - .enumerate() - .filter_map(|(index, x)| { - ((index % self.dimensions()) != (self.dimensions() - 1)).then_some(x) - }) - .collect::>() - .chunks_exact(self.dimensions() - 1) - .enumerate() - .dedup_by_with_count(|(_, x), (_, y)| x == y) - .map(|(count, (index, _))| (index, index + count)) - .collect() - } - } - - /// Deletes all bins whose corresponding indices are in one of the ranges of `bins`. - pub fn delete_bins(&mut self, bins: &[Range]) { - let dim = self.dimensions(); - - for range in bins.iter().cloned().rev() { - self.normalizations.drain(range); - } - - for range in bins.iter().rev() { - self.limits.drain((range.start * dim)..(range.end * dim)); - } - } } impl PartialEq for BinRemapper { @@ -575,33 +189,6 @@ impl PartialEq for BinRemapper { } impl BinLimits { - /// Constructor for `BinLimits`. 
- /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn new(mut limits: Vec) -> Self { - limits.sort_by(|left, right| left.partial_cmp(right).unwrap()); - - if limits - .iter() - .zip(limits.iter().skip(1)) - .map(|(current, next)| next - current) - .collect::>() - .windows(2) - .all(|val| approx_eq!(f64, val[0], val[1], ulps = 8)) - { - Self(Limits::Equal { - left: *limits.first().unwrap(), - right: *limits.last().unwrap(), - bins: limits.len() - 1, - }) - } else { - Self(Limits::Unequal { limits }) - } - } - /// Returns the number of bins. #[must_use] pub fn bins(&self) -> usize { @@ -611,49 +198,6 @@ impl BinLimits { } } - /// Returns the bin index for observable `value`. If the value over- or underflows, the return - /// value is `None`. - /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn index(&self, value: f64) -> Option { - match &self.0 { - Limits::Equal { left, right, bins } => { - if value < *left || value >= *right { - None - } else { - Some(usize_from_f64( - (value - left) / (right - left) * f64_from_usize(*bins), - )) - } - } - Limits::Unequal { limits } => { - match limits.binary_search_by(|left| left.partial_cmp(&value).unwrap()) { - Err(0) => None, - Err(index) if index == limits.len() => None, - Ok(index) if index == (limits.len() - 1) => None, - Ok(index) => Some(index), - Err(index) => Some(index - 1), - } - } - } - } - - /// Returns the left-most bin limit - /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn left(&self) -> f64 { - match &self.0 { - Limits::Unequal { limits } => *limits.first().unwrap(), - Limits::Equal { left, .. } => *left, - } - } - /// Returns the limits in a `Vec`. /// /// # Examples @@ -679,27 +223,6 @@ impl BinLimits { } } - /// Merges the bins for the corresponding range together in a single one. - /// - /// # Errors - /// - /// When `bins` contains any indices that do not correspond to bins this method returns an - /// error. 
- pub fn merge_bins(&mut self, range: Range) -> Result<(), MergeBinError> { - if range.end > self.bins() { - return Err(MergeBinError::InvalidRange { - range, - bins: self.bins(), - }); - } - - let mut new_limits = self.limits(); - new_limits.drain(range.start + 1..range.end); - *self = Self::new(new_limits); - - Ok(()) - } - /// Returns the size for each bin. /// /// # Examples @@ -724,65 +247,4 @@ impl BinLimits { Limits::Unequal { limits } => limits.windows(2).map(|x| x[1] - x[0]).collect(), } } - - /// Merge the limits of `other` into `self` on the right-hand-side. If both limits are - /// non-consecutive, an error is returned. - /// - /// # Errors - /// - /// If the right-most limit of `self` is different from the left-most limit of `other`, the - /// bins are non-consecutive and an error is returned. - /// - /// # Panics - /// - /// TODO - pub fn merge(&mut self, other: &Self) -> Result<(), MergeBinError> { - if !approx_eq!(f64, self.right(), other.left(), ulps = 8) { - return Err(MergeBinError::NonConsecutiveBins { - lhs: self.right(), - rhs: other.left(), - }); - } - - let mut limits = self.limits(); - let add_limits = other.limits(); - - // average over the shared limit - *limits.last_mut().unwrap() = - 0.5 * (*limits.last().unwrap() + *add_limits.first().unwrap()); - // add the new limits - limits.extend_from_slice(&add_limits[1..]); - - // use the constructor to get a valid state - *self = Self::new(limits); - - Ok(()) - } - - /// Returns the right-most bin limit - /// - /// # Panics - /// - /// TODO - #[must_use] - pub fn right(&self) -> f64 { - match &self.0 { - Limits::Unequal { limits } => *limits.last().unwrap(), - Limits::Equal { right, .. } => *right, - } - } - - /// Delete `bins` number of bins from the start. - pub fn delete_bins_left(&mut self, bins: usize) { - let mut limits = self.limits(); - limits.drain(..bins); - *self = Self::new(limits); - } - - /// Delete `bins` number of bins from the end. 
- pub fn delete_bins_right(&mut self, bins: usize) { - let mut limits = self.limits(); - limits.drain((limits.len() - bins)..); - *self = Self::new(limits); - } } diff --git a/pineappl_v0/src/convert.rs b/pineappl_v0/src/convert.rs index fe1a85186..724e13c86 100644 --- a/pineappl_v0/src/convert.rs +++ b/pineappl_v0/src/convert.rs @@ -1,9 +1,3 @@ -#[allow(clippy::cast_possible_truncation)] -#[allow(clippy::cast_sign_loss)] -pub fn usize_from_f64(x: f64) -> usize { - x.max(0.0) as usize -} - pub fn f64_from_usize(x: usize) -> f64 { f64::from(u32::try_from(x).unwrap()) } From dd82b816bf055a803da8205984ce4a37931e4308 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 13:44:10 +0200 Subject: [PATCH 31/42] Rip out a large chunk of code from `pineappl_v0` --- Cargo.lock | 5 - pineappl_v0/Cargo.toml | 5 - pineappl_v0/src/bin.rs | 98 +----- pineappl_v0/src/boc.rs | 447 +------------------------ pineappl_v0/src/convolutions.rs | 27 -- pineappl_v0/src/empty_subgrid.rs | 4 +- pineappl_v0/src/grid.rs | 106 +----- pineappl_v0/src/import_only_subgrid.rs | 52 +-- pineappl_v0/src/lagrange_subgrid.rs | 123 +------ pineappl_v0/src/lib.rs | 1 - pineappl_v0/src/ntuple_subgrid.rs | 12 +- pineappl_v0/src/packed_array.rs | 293 ---------------- pineappl_v0/src/pids.rs | 363 -------------------- pineappl_v0/src/sparse_array3.rs | 295 +--------------- pineappl_v0/src/subgrid.rs | 238 +------------ 15 files changed, 62 insertions(+), 2007 deletions(-) delete mode 100644 pineappl_v0/src/packed_array.rs diff --git a/Cargo.lock b/Cargo.lock index 95d4e0e5a..b9b51bf07 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -1019,14 +1019,9 @@ dependencies = [ name = "pineappl_v0" version = "1.3.3" dependencies = [ - "anyhow", "bincode", - "bitflags 2.4.2", "enum_dispatch", - "float-cmp", - "itertools", "ndarray", - "rustc-hash 1.1.0", "serde", "thiserror", ] diff --git a/pineappl_v0/Cargo.toml b/pineappl_v0/Cargo.toml index 47d1cc9bf..e7838b19e 100644 --- a/pineappl_v0/Cargo.toml +++ 
b/pineappl_v0/Cargo.toml @@ -16,13 +16,8 @@ version.workspace = true workspace = true [dependencies] -anyhow = "1.0.48" bincode = "1.3.3" -bitflags = "2.4.2" enum_dispatch = "0.3.7" -float-cmp = { default-features = false, version = "0.9.0" } -itertools = "0.10.1" ndarray = { features = ["serde"], version = "0.15.4" } -rustc-hash = "1.1.0" serde = { features = ["derive"], version = "1.0.130" } thiserror = "1.0.30" diff --git a/pineappl_v0/src/bin.rs b/pineappl_v0/src/bin.rs index 32f7020f9..6030b8589 100644 --- a/pineappl_v0/src/bin.rs +++ b/pineappl_v0/src/bin.rs @@ -1,110 +1,32 @@ //! Module that contains helpers for binning observables use super::convert::f64_from_usize; -use itertools::Itertools; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::f64; -use std::ops::Range; -use thiserror::Error; -#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[derive(Deserialize)] enum Limits { Equal { left: f64, right: f64, bins: usize }, Unequal { limits: Vec }, } -/// Error type which is returned when two `BinLimits` objects are merged which are not -/// connected/non-consecutive. -#[derive(Debug, Error)] -pub enum MergeBinError { - /// Returned when two `BinLimits` objects `a` and `b` were tried to be merged using - /// `a.merge(b)`, but when the right-most limit of `a` does not match the left-most limit of - /// `b`. - #[error("can not merge bins which end at {lhs} with bins that start at {rhs}")] - NonConsecutiveBins { - /// right-most limit of the `BinLimits` object that is being merged into. - lhs: f64, - /// left-most limit of the `BinLimits` object that is being merged. - rhs: f64, - }, - - /// Returned by [`BinRemapper::merge_bins`] whenever it can not merge bins. - #[error("can not merge bins with indices {0:?}")] - NonConsecutiveRange(Range), - - /// Returned by [`BinLimits::merge_bins`] whenever the range is outside the available bins. 
- #[error("tried to merge bins with indices {range:?}, but there are only {bins} bins")] - InvalidRange { - /// Range given to [`BinLimits::merge_bins`]. - range: Range, - /// Number of bins. - bins: usize, - }, - - /// Returned by [`BinRemapper::merge`] whenever the dimensions of two `BinRemapper` are not the - /// same. - #[error("tried to merge bins with different dimensions {lhs} and {rhs}")] - IncompatibleDimensions { - /// Dimension of the bins of the first `BinRemapper`. - lhs: usize, - /// Dimension of the bins of the second `BinRemapper`. - rhs: usize, - }, -} - /// Structure representing bin limits. -#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[derive(Deserialize)] pub struct BinLimits(Limits); -/// Error type that is returned by the constructor of `BinRemapper`. -#[derive(Debug, Error)] -pub enum BinRemapperNewError { - /// Returned if the lengths of the normalization and limits vectors do not allow to determine a - /// well-defined number of dimensions. - #[error("could not determine the dimensions from a normalization vector with length {normalizations_len} and limits vector with length {limits_len}")] - DimensionUnknown { - /// Length of the normalization vector. - normalizations_len: usize, - /// Length of the limits vector. - limits_len: usize, - }, - /// Returned if bins overlap. - #[error("the bin limits for the bins with indices {} overlap with other bins", overlaps.iter().map(ToString::to_string).join(","))] - OverlappingBins { - /// Indices of the bins that overlap with other bins. - overlaps: Vec, - }, -} - /// Structure for remapping bin limits. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct BinRemapper { normalizations: Vec, limits: Vec<(f64, f64)>, } /// Captures all information about the bins in a grid. 
-#[derive(Debug)] pub struct BinInfo<'a> { limits: &'a BinLimits, remapper: Option<&'a BinRemapper>, } -/// Error type returned by [`BinRemapper::from_str`] -#[derive(Debug, Error)] -pub enum ParseBinRemapperError { - /// An error that occured while parsing the string in [`BinRemapper::from_str`]. - #[error("{0}")] - Error(String), - /// An error that occured while constructing the remapper with [`BinRemapper::new`]. - #[error("{source}")] - BinRemapperNewError { - // TODO: enable #[backtrace] whenever the feature is stable - /// The error returned by [`BinRemapper::new`]. - source: BinRemapperNewError, - }, -} - impl<'a> BinInfo<'a> { /// Constructor. #[must_use] @@ -156,12 +78,6 @@ impl<'a> BinInfo<'a> { } } -impl PartialEq> for BinInfo<'_> { - fn eq(&self, other: &BinInfo) -> bool { - (self.limits() == other.limits()) && (self.normalizations() == other.normalizations()) - } -} - impl BinRemapper { /// Return the number of dimensions. #[must_use] @@ -182,12 +98,6 @@ impl BinRemapper { } } -impl PartialEq for BinRemapper { - fn eq(&self, other: &Self) -> bool { - (self.limits == other.limits) && (self.normalizations == other.normalizations) - } -} - impl BinLimits { /// Returns the number of bins. #[must_use] diff --git a/pineappl_v0/src/boc.rs b/pineappl_v0/src/boc.rs index 7e790deaf..6ed022b02 100644 --- a/pineappl_v0/src/boc.rs +++ b/pineappl_v0/src/boc.rs @@ -1,11 +1,7 @@ //! Module containing structures for the 3 dimensions of a [`Grid`]: bins, [`Order`] and channels //! (`boc`). -use float_cmp::approx_eq; -use itertools::Itertools; -use serde::{Deserialize, Serialize}; -use std::cmp::Ordering; -use std::str::FromStr; +use serde::Deserialize; use thiserror::Error; /// Error type keeping information if [`Order::from_str`] went wrong. @@ -13,10 +9,8 @@ use thiserror::Error; #[error("{0}")] pub struct ParseOrderError(String); -// TODO: when possible change the types from `u32` to `u8` to change `try_into` to `into` - /// Coupling powers for each grid. 
-#[derive(Clone, Debug, Deserialize, Eq, Hash, PartialEq, Serialize)] +#[derive(Deserialize)] pub struct Order { /// Exponent of the strong coupling. pub alphas: u32, @@ -28,74 +22,6 @@ pub struct Order { pub logxif: u32, } -impl FromStr for Order { - type Err = ParseOrderError; - - fn from_str(s: &str) -> Result { - let mut result = Self { - alphas: 0, - alpha: 0, - logxir: 0, - logxif: 0, - }; - - for tuple in s - .split(|c: char| c.is_ascii_digit()) - .filter(|s| !s.is_empty()) - .zip( - s.split(|c: char| !c.is_ascii_digit()) - .filter(|s| !s.is_empty()) - .map(str::parse), - ) - { - match tuple { - ("as", Ok(num)) => { - result.alphas = num; - } - ("a", Ok(num)) => { - result.alpha = num; - } - ("lr", Ok(num)) => { - result.logxir = num; - } - ("lf", Ok(num)) => { - result.logxif = num; - } - (label, Err(err)) => { - return Err(ParseOrderError(format!( - "error while parsing exponent of '{label}': {err}" - ))); - } - (label, Ok(_)) => { - return Err(ParseOrderError(format!("unknown coupling: '{label}'"))); - } - } - } - - Ok(result) - } -} - -impl Ord for Order { - fn cmp(&self, other: &Self) -> Ordering { - // sort leading orders before next-to-leading orders, then the lowest power in alpha, the - // rest lexicographically - (self.alphas + self.alpha) - .cmp(&(other.alphas + other.alpha)) - .then((self.alpha, self.logxir, self.logxif).cmp(&( - other.alpha, - other.logxir, - other.logxif, - ))) - } -} - -impl PartialOrd for Order { - fn partial_cmp(&self, other: &Self) -> Option { - Some(self.cmp(other)) - } -} - impl Order { /// Constructor. This function mainly exists to have a way of constructing `Order` that is less /// verbose. @@ -108,258 +34,18 @@ impl Order { logxif, } } - - /// Return a mask suitable to pass as the `order_mask` parameter of [`Grid::convolve`], - /// [`Grid::evolve`] or [`Grid::evolve_info`]. 
The selection of `orders` is controlled using - /// the `max_as` and `max_al` parameters, for instance setting `max_as = 1` and `max_al = 0` - /// selects the LO QCD only, `max_as = 2` and `max_al = 0` the NLO QCD; setting `max_as = 3` - /// and `max_al = 2` would select all NLOs, and the NNLO QCD. - /// - /// [`Grid::convolve`]: super::grid::Grid::convolve - /// [`Grid::evolve`]: super::grid::Grid::evolve - /// [`Grid::evolve_info`]: super::grid::Grid::evolve_info - /// - /// # Example - /// - /// In the case of Drell—Yan, there are the following orders: - /// - /// - exactly one leading order (LO), - /// - two next-to-leading orders (NLO), which are - /// - the NLO QCD and - /// - the NLO EW, and - /// - three next-to-next-to-leading orders (NNLO), - /// - the NNLO QCD, - /// - the NNLO EW, and finally - /// - the mixed NNLO QCD—EW. - /// - /// ```rust - /// use pineappl_v0::boc::Order; - /// - /// let orders = [ - /// Order::new(0, 2, 0, 0), // LO : alpha^2 - /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// Order::new(2, 2, 0, 0), // NNLO QCD : alphas^2 alpha^2 - /// Order::new(1, 3, 0, 0), // NNLO QCD—EW : alphas alpha^3 - /// Order::new(0, 4, 0, 0), // NNLO EW : alpha^4 - /// ]; - /// - /// // LO EW - /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [true, false, false, false, false, false]); - /// // LO QCD - /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false]); - /// // LO - /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, false, false, false, false, false]); - /// // NLO QCD - /// assert_eq!(Order::create_mask(&orders, 2, 0, false), [true, true, false, false, false, false]); - /// // NLO EW - /// assert_eq!(Order::create_mask(&orders, 0, 2, false), [true, false, true, false, false, false]); - /// // NNLO QCD - /// assert_eq!(Order::create_mask(&orders, 3, 0, false), [true, true, false, true, false, false]); - /// // NNLO EW - 
/// assert_eq!(Order::create_mask(&orders, 0, 3, false), [true, false, true, false, false, true]); - /// ``` - /// - /// Orders containing non-zero powers of logarithms can be selected as well if `logs` is set to - /// `true`: - /// - /// ```rust - /// use pineappl_v0::boc::Order; - /// - /// let orders = [ - /// Order::new(0, 2, 0, 0), // LO : alpha^2 - /// Order::new(1, 2, 0, 0), // NLO QCD : alphas alpha^2 - /// Order::new(1, 2, 1, 0), // NLO QCD : alphas alpha^2 logxif - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// Order::new(0, 3, 1, 0), // NLO EW : alpha^3 logxif - /// ]; - /// - /// assert_eq!(Order::create_mask(&orders, 0, 2, true), [true, false, false, true, true]); - /// ``` - /// - /// For the more complicated example of top-pair production one can see the difference between - /// the selection for different LOs: - /// - /// ```rust - /// use pineappl_v0::boc::Order; - /// - /// let orders = [ - /// Order::new(2, 0, 0, 0), // LO QCD : alphas^2 - /// Order::new(1, 1, 0, 0), // LO QCD—EW : alphas alpha - /// Order::new(0, 2, 0, 0), // LO EW : alpha^2 - /// Order::new(3, 0, 0, 0), // NLO QCD : alphas^3 - /// Order::new(2, 1, 0, 0), // NLO QCD—EW : alphas^2 alpha - /// Order::new(1, 2, 0, 0), // NLO QCD—EW : alphas alpha^2 - /// Order::new(0, 3, 0, 0), // NLO EW : alpha^3 - /// ]; - /// - /// // LO EW - /// assert_eq!(Order::create_mask(&orders, 0, 1, false), [false, false, true, false, false, false, false]); - /// // LO QCD - /// assert_eq!(Order::create_mask(&orders, 1, 0, false), [true, false, false, false, false, false, false]); - /// // LO - /// assert_eq!(Order::create_mask(&orders, 1, 1, false), [true, true, true, false, false, false, false]); - /// ``` - #[must_use] - pub fn create_mask(orders: &[Self], max_as: u32, max_al: u32, logs: bool) -> Vec { - // smallest sum of alphas and alpha - let lo = orders - .iter() - .map(|Self { alphas, alpha, .. 
}| alphas + alpha) - .min() - .unwrap_or_default(); - - // all leading orders, without logarithms - let leading_orders: Vec<_> = orders - .iter() - .filter(|Self { alphas, alpha, .. }| alphas + alpha == lo) - .cloned() - .collect(); - - let lo_as = leading_orders - .iter() - .map(|Self { alphas, .. }| *alphas) - .max() - .unwrap_or_default(); - let lo_al = leading_orders - .iter() - .map(|Self { alpha, .. }| *alpha) - .max() - .unwrap_or_default(); - - let max = max_as.max(max_al); - let min = max_as.min(max_al); - - orders - .iter() - .map( - |&Self { - alphas, - alpha, - logxir, - logxif, - }| { - if !logs && (logxir > 0 || logxif > 0) { - return false; - } - - let pto = alphas + alpha - lo; - - alphas + alpha < min + lo - || (alphas + alpha < max + lo - && match max_as.cmp(&max_al) { - Ordering::Greater => lo_as + pto == alphas, - Ordering::Less => lo_al + pto == alpha, - Ordering::Equal => false, - }) - }, - ) - .collect() - } } /// This structure represents a channel. Each channel consists of a tuple containing in the /// following order, the particle ID of the first incoming parton, then the particle ID of the /// second parton, and finally a numerical factor that will multiply the result for this specific /// combination. -#[derive(Clone, Debug, Deserialize, PartialEq, PartialOrd, Serialize)] +#[derive(Deserialize)] pub struct Channel { entry: Vec<(i32, i32, f64)>, } impl Channel { - /// Constructor for `Channel`. Note that `entry` must be non-empty, otherwise this function - /// panics. 
- /// - /// # Examples - /// - /// Ordering of the arguments doesn't matter: - /// - /// ```rust - /// use pineappl_v0::boc::Channel; - /// - /// let entry1 = Channel::new(vec![(2, 2, 1.0), (4, 4, 1.0)]); - /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); - /// - /// // checks that the ordering doesn't matter - /// assert_eq!(entry1, entry2); - /// ``` - /// - /// Same arguments are merged together: - /// - /// ```rust - /// use pineappl_v0::boc::Channel; - /// - /// let entry1 = Channel::new(vec![(1, 1, 1.0), (1, 1, 3.0), (3, 3, 1.0), (1, 1, 6.0)]); - /// let entry2 = Channel::new(vec![(1, 1, 10.0), (3, 3, 1.0)]); - /// - /// assert_eq!(entry1, entry2); - /// ``` - /// - /// # Panics - /// - /// Creating an empty channel panics: - /// - /// ```rust,should_panic - /// use pineappl_v0::boc::Channel; - /// - /// let _ = Channel::new(vec![]); - /// ``` - #[must_use] - pub fn new(mut entry: Vec<(i32, i32, f64)>) -> Self { - assert!(!entry.is_empty()); - - // sort `entry` because the ordering doesn't matter and because it makes it easier to - // compare `Channel` objects with each other - entry.sort_by(|x, y| (x.0, x.1).cmp(&(y.0, y.1))); - - Self { - entry: entry - .into_iter() - .coalesce(|lhs, rhs| { - // sum the factors of repeated elements - if (lhs.0, lhs.1) == (rhs.0, rhs.1) { - Ok((lhs.0, lhs.1, lhs.2 + rhs.2)) - } else { - Err((lhs, rhs)) - } - }) - // filter zeros - // TODO: find a better than to hardcode the epsilon limit - .filter(|&(_, _, f)| !approx_eq!(f64, f.abs(), 0.0, epsilon = 1e-14)) - .collect(), - } - } - - /// Translates `entry` into a different basis using `translator`. 
- /// - /// # Examples - /// - /// ```rust - /// use pineappl_v0::boc::Channel; - /// use pineappl_v0::channel; - /// - /// let entry = Channel::translate(&channel![103, 11, 1.0], &|evol_id| match evol_id { - /// 103 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], - /// _ => vec![(evol_id, 1.0)], - /// }); - /// - /// assert_eq!(entry, channel![2, 11, 1.0; -2, 11, -1.0; 1, 11, -1.0; -1, 11, 1.0]); - /// ``` - pub fn translate(entry: &Self, translator: &dyn Fn(i32) -> Vec<(i32, f64)>) -> Self { - let mut tuples = Vec::new(); - - for &(a, b, factor) in &entry.entry { - for (aid, af) in translator(a) { - for (bid, bf) in translator(b) { - tuples.push((aid, bid, factor * af * bf)); - } - } - } - - Self::new(tuples) - } - /// Returns a tuple representation of this entry. /// /// # Examples @@ -376,131 +62,4 @@ impl Channel { pub fn entry(&self) -> &[(i32, i32, f64)] { &self.entry } - - /// Creates a new object with the initial states transposed. - #[must_use] - pub fn transpose(&self) -> Self { - Self::new(self.entry.iter().map(|(a, b, c)| (*b, *a, *c)).collect()) - } - - /// If `other` is the same channel when only comparing PIDs and neglecting the factors, return - /// the number `f1 / f2`, where `f1` is the factor from `self` and `f2` is the factor from - /// `other`. 
- /// - /// # Examples - /// - /// ```rust - /// use pineappl_v0::boc::Channel; - /// - /// let entry1 = Channel::new(vec![(2, 2, 2.0), (4, 4, 2.0)]); - /// let entry2 = Channel::new(vec![(4, 4, 1.0), (2, 2, 1.0)]); - /// let entry3 = Channel::new(vec![(3, 4, 1.0), (2, 2, 1.0)]); - /// let entry4 = Channel::new(vec![(4, 3, 1.0), (2, 3, 2.0)]); - /// - /// assert_eq!(entry1.common_factor(&entry2), Some(2.0)); - /// assert_eq!(entry1.common_factor(&entry3), None); - /// assert_eq!(entry1.common_factor(&entry4), None); - /// ``` - #[must_use] - pub fn common_factor(&self, other: &Self) -> Option { - if self.entry.len() != other.entry.len() { - return None; - } - - let result: Option> = self - .entry - .iter() - .zip(&other.entry) - .map(|(a, b)| ((a.0 == b.0) && (a.1 == b.1)).then_some(a.2 / b.2)) - .collect(); - - result.and_then(|factors| { - if factors - .windows(2) - .all(|win| approx_eq!(f64, win[0], win[1], ulps = 4)) - { - factors.first().copied() - } else { - None - } - }) - } -} - -/// Error type keeping information if [`Channel::from_str`] went wrong. -#[derive(Debug, Error)] -#[error("{0}")] -pub struct ParseChannelError(String); - -impl FromStr for Channel { - type Err = ParseChannelError; - - fn from_str(s: &str) -> Result { - Ok(Self::new( - s.split('+') - .map(|sub| { - sub.split_once('*').map_or_else( - || Err(ParseChannelError(format!("missing '*' in '{sub}'"))), - |(factor, pids)| { - let tuple = pids.split_once(',').map_or_else( - || Err(ParseChannelError(format!("missing ',' in '{pids}'"))), - |(a, b)| { - Ok(( - a.trim() - .strip_prefix('(') - .ok_or_else(|| { - ParseChannelError(format!( - "missing '(' in '{pids}'" - )) - })? - .trim() - .parse::() - .map_err(|err| ParseChannelError(err.to_string()))?, - b.trim() - .strip_suffix(')') - .ok_or_else(|| { - ParseChannelError(format!( - "missing ')' in '{pids}'" - )) - })? 
- .trim() - .parse::() - .map_err(|err| ParseChannelError(err.to_string()))?, - )) - }, - )?; - - Ok(( - tuple.0, - tuple.1, - str::parse::(factor.trim()) - .map_err(|err| ParseChannelError(err.to_string()))?, - )) - }, - ) - }) - .collect::>()?, - )) - } -} - -/// Helper macro to quickly generate a `Channel` at compile time. -/// -/// # Examples -/// -/// In the following example `entry1` and `entry2` represent the same values: -/// -/// ```rust -/// use pineappl_v0::channel; -/// -/// let entry1 = channel![2, 2, 1.0; 4, 4, 1.0]; -/// let entry2 = channel![4, 4, 1.0; 2, 2, 1.0]; -/// -/// assert_eq!(entry1, entry2); -/// ``` -#[macro_export] -macro_rules! channel { - ($a:expr, $b:expr, $factor:expr $(; $c:expr, $d:expr, $fac:expr)*) => { - $crate::boc::Channel::new(vec![($a, $b, $factor), $(($c, $d, $fac)),*]) - }; } diff --git a/pineappl_v0/src/convolutions.rs b/pineappl_v0/src/convolutions.rs index 2424f2ba0..af96f94e4 100644 --- a/pineappl_v0/src/convolutions.rs +++ b/pineappl_v0/src/convolutions.rs @@ -1,7 +1,5 @@ //! Module for everything related to luminosity functions. -use super::pids; - /// Data type that indentifies different types of convolutions. #[derive(Debug, Eq, PartialEq)] pub enum Convolution { @@ -20,28 +18,3 @@ pub enum Convolution { /// Polarized fragmentation function. The integer denotes the type of hadron with a PDG MC ID. PolFF(i32), } - -impl Convolution { - /// Return the convolution if the PID is charged conjugated. - #[must_use] - pub const fn charge_conjugate(&self) -> Self { - match *self { - Self::None => Self::None, - Self::UnpolPDF(pid) => Self::UnpolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolPDF(pid) => Self::PolPDF(pids::charge_conjugate_pdg_pid(pid)), - Self::UnpolFF(pid) => Self::UnpolFF(pids::charge_conjugate_pdg_pid(pid)), - Self::PolFF(pid) => Self::PolFF(pids::charge_conjugate_pdg_pid(pid)), - } - } - - /// Return the PID of the convolution if it has any. 
- #[must_use] - pub const fn pid(&self) -> Option { - match *self { - Self::None => None, - Self::UnpolPDF(pid) | Self::PolPDF(pid) | Self::UnpolFF(pid) | Self::PolFF(pid) => { - Some(pid) - } - } - } -} diff --git a/pineappl_v0/src/empty_subgrid.rs b/pineappl_v0/src/empty_subgrid.rs index f458c195d..96d36b3c0 100644 --- a/pineappl_v0/src/empty_subgrid.rs +++ b/pineappl_v0/src/empty_subgrid.rs @@ -1,12 +1,12 @@ //! TODO use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::borrow::Cow; use std::iter; /// A subgrid type that is always empty. -#[derive(Clone, Default, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct EmptySubgridV1; impl Subgrid for EmptySubgridV1 { diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index 3c7c77d7d..b051e2628 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -5,17 +5,16 @@ use super::boc::{Channel, Order}; use super::convolutions::Convolution; use super::pids::PidBasis; use super::subgrid::{SubgridEnum, SubgridParams}; -use bitflags::bitflags; use ndarray::{Array3, ArrayView3}; -use serde::{Deserialize, Serialize, Serializer}; -use std::collections::{BTreeMap, HashMap}; +use serde::Deserialize; +use std::collections::HashMap; use std::io::{self, BufRead}; use thiserror::Error; /// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a /// corresponding `weight`. The type `W` can either be a `f64` or `()`, which is used when multiple /// weights should be signaled. -#[derive(Clone, Debug, Deserialize, PartialEq, Serialize)] +#[derive(Deserialize)] pub struct Ntuple { /// Momentum fraction of the first parton. pub x1: f64, @@ -30,120 +29,48 @@ pub struct Ntuple { /// Error returned when merging two grids fails. #[derive(Debug, Error)] pub enum GridError { - /// Returned when trying to merge two `Grid` objects with incompatible bin limits. 
- #[error(transparent)] - InvalidBinLimits(super::bin::MergeBinError), - /// Returned if the number of bins in the grid and in the remapper do not agree. - #[error("the remapper has {remapper_bins} bins, but the grid has {grid_bins}")] - BinNumberMismatch { - /// Number of bins in the grid. - grid_bins: usize, - /// Number of bins in the remapper. - remapper_bins: usize, - }, - /// Returned when it was tried to merge bins that are non-consecutive. - #[error(transparent)] - MergeBinError(super::bin::MergeBinError), - /// Returned when trying to construct a `Grid` using an unknown subgrid type. - #[error("tried constructing a Grid with unknown Subgrid type `{0}`")] - UnknownSubgridType(String), /// Returned when failed to read a Grid. #[error(transparent)] ReadFailure(bincode::Error), - /// Returned when failed to write a Grid. - #[error(transparent)] - WriteFailure(bincode::Error), /// Returned while performing IO operations. #[error(transparent)] IoFailure(io::Error), - /// Returned when trying to read a `PineAPPL` file with file format version that is not - /// supported. - #[error("the file version is {file_version}, but supported is only {supported_version}")] - FileVersionMismatch { - /// File format version of the file read. - file_version: u64, - /// Maximum supported file format version for this library. - supported_version: u64, - }, - /// Returned from [`Grid::evolve`] if the evolution failed. - #[error("failed to evolve grid: {0}")] - EvolutionFailure(String), - /// Errors that do no originate from this crate itself. 
- #[error(transparent)] - Other(#[from] anyhow::Error), } -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] struct Mmv1; -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] struct Mmv2 { remapper: Option, key_value_db: HashMap, } -fn ordered_map_serialize( - value: &HashMap, - serializer: S, -) -> Result -where - S: Serializer, -{ - let ordered: BTreeMap<_, _> = value.iter().collect(); - ordered.serialize(serializer) -} - -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] struct Mmv3 { remapper: Option, - // order the HashMap before serializing it to make the output stable - #[serde(serialize_with = "ordered_map_serialize")] key_value_db: HashMap, - subgrid_template: SubgridEnum, + _subgrid_template: SubgridEnum, } // ALLOW: fixing the warning will break the file format #[allow(clippy::large_enum_variant)] -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] enum MoreMembers { V1(Mmv1), V2(Mmv2), V3(Mmv3), } -bitflags! { - /// Bitflags for optimizing a [`Grid`]. See [`Grid::optimize_using`]. - #[derive(Clone, Copy)] - #[repr(transparent)] - pub struct GridOptFlags: u32 { - /// Change the [`Subgrid`] type to optimize storage effeciency. - const OPTIMIZE_SUBGRID_TYPE = 0b1; - /// Recognize whether a subgrid was filled with events with a static scale and if this is - /// the case, optimize it by undoing the interpolation in the scale. This flag requires - /// [`Self::OPTIMIZE_SUBGRID_TYPE`] to be active. - const STATIC_SCALE_DETECTION = 0b10; - /// If two channels differ by transposition of the two initial states and the functions - /// this grid is convolved with are the same for both initial states, this will merge one - /// channel into the other, with the correct transpositions. - const SYMMETRIZE_CHANNELS = 0b100; - /// Remove all orders ([`Grid::orders`]), which do not contain any non-zero subgrids. - const STRIP_EMPTY_ORDERS = 0b1000; - /// Merge the subgrids of channels which have the same definition. 
- const MERGE_SAME_CHANNELS = 0b10000; - /// Remove all channels ([`Grid::channels`]), which do not contain any non-zero subgrids. - const STRIP_EMPTY_CHANNELS = 0b10_0000; - } -} - /// Main data structure of `PineAPPL`. This structure contains a `Subgrid` for each `LumiEntry`, /// bin, and coupling order it was created with. -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct Grid { subgrids: Array3, channels: Vec, bin_limits: BinLimits, orders: Vec, - subgrid_params: SubgridParams, + _subgrid_params: SubgridParams, more_members: MoreMembers, } @@ -170,6 +97,10 @@ impl Grid { /// # Errors /// /// If reading from the compressed or uncompressed stream fails an error is returned. + /// + /// # Panics + /// + /// Panics if the grid version is not `0`. pub fn read_uncompressed(mut reader: impl BufRead) -> Result { let magic_bytes: [u8; 16] = reader.fill_buf().map_err(GridError::IoFailure)?[0..16] .try_into() @@ -186,12 +117,9 @@ impl Grid { 0 }; - if file_version != 0 { - return Err(GridError::FileVersionMismatch { - file_version, - supported_version: 0, - }); - } + // should be guarateed not to happen, because `pineappl::grid::Grid::read` only calls this + // method if the file version matches + assert_eq!(file_version, 0); bincode::deserialize_from(reader).map_err(GridError::ReadFailure) } diff --git a/pineappl_v0/src/import_only_subgrid.rs b/pineappl_v0/src/import_only_subgrid.rs index 3457352c1..b91e9b93f 100644 --- a/pineappl_v0/src/import_only_subgrid.rs +++ b/pineappl_v0/src/import_only_subgrid.rs @@ -2,11 +2,11 @@ use super::sparse_array3::SparseArray3; use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::borrow::Cow; /// TODO -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct ImportOnlySubgridV1 { array: SparseArray3, q2_grid: Vec, @@ -14,29 +14,6 @@ pub struct ImportOnlySubgridV1 { x2_grid: Vec, } -impl ImportOnlySubgridV1 { - /// 
Constructor. - #[must_use] - pub fn new( - array: SparseArray3, - q2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, - ) -> Self { - Self { - array, - q2_grid, - x1_grid, - x2_grid, - } - } - - /// Return the array containing the numerical values of the grid. - pub fn array_mut(&mut self) -> &mut SparseArray3 { - &mut self.array - } -} - impl Subgrid for ImportOnlySubgridV1 { fn mu2_grid(&self) -> Cow<'_, [Mu2]> { self.q2_grid @@ -64,7 +41,7 @@ impl Subgrid for ImportOnlySubgridV1 { } /// TODO -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct ImportOnlySubgridV2 { array: SparseArray3, mu2_grid: Vec, @@ -72,29 +49,6 @@ pub struct ImportOnlySubgridV2 { x2_grid: Vec, } -impl ImportOnlySubgridV2 { - /// Constructor. - #[must_use] - pub fn new( - array: SparseArray3, - mu2_grid: Vec, - x1_grid: Vec, - x2_grid: Vec, - ) -> Self { - Self { - array, - mu2_grid, - x1_grid, - x2_grid, - } - } - - /// Return the array containing the numerical values of the grid. - pub fn array_mut(&mut self) -> &mut SparseArray3 { - &mut self.array - } -} - impl Subgrid for ImportOnlySubgridV2 { fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&self.mu2_grid) diff --git a/pineappl_v0/src/lagrange_subgrid.rs b/pineappl_v0/src/lagrange_subgrid.rs index 3339307a1..23dce67c6 100644 --- a/pineappl_v0/src/lagrange_subgrid.rs +++ b/pineappl_v0/src/lagrange_subgrid.rs @@ -2,9 +2,9 @@ use super::convert::f64_from_usize; use super::sparse_array3::SparseArray3; -use super::subgrid::{ExtraSubgridParams, Mu2, Subgrid, SubgridIndexedIter, SubgridParams}; +use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; use ndarray::Array3; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::borrow::Cow; use std::iter; @@ -28,28 +28,20 @@ fn fx(y: f64) -> f64 { unreachable!(); } -fn fy(x: f64) -> f64 { - (1.0 - x).mul_add(5.0, -x.ln()) -} - -fn ftau(q2: f64) -> f64 { - (q2 / 0.0625).ln().ln() -} - fn fq2(tau: f64) -> f64 { 0.0625 * tau.exp().exp() } /// Subgrid which 
uses Lagrange-interpolation. -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct LagrangeSubgridV1 { grid: Option>, ntau: usize, ny: usize, - yorder: usize, - tauorder: usize, + _yorder: usize, + _tauorder: usize, itaumin: usize, - itaumax: usize, + _itaumax: usize, reweight: bool, ymin: f64, ymax: f64, @@ -58,25 +50,6 @@ pub struct LagrangeSubgridV1 { } impl LagrangeSubgridV1 { - /// Constructor. - #[must_use] - pub fn new(subgrid_params: &SubgridParams) -> Self { - Self { - grid: None, - ntau: subgrid_params.q2_bins(), - ny: subgrid_params.x_bins(), - yorder: subgrid_params.x_order(), - tauorder: subgrid_params.q2_order(), - itaumin: 0, - itaumax: 0, - reweight: subgrid_params.reweight(), - ymin: fy(subgrid_params.x_max()), - ymax: fy(subgrid_params.x_min()), - taumin: ftau(subgrid_params.q2_min()), - taumax: ftau(subgrid_params.q2_max()), - } - } - fn deltay(&self) -> f64 { (self.ymax - self.ymin) / f64_from_usize(self.ny - 1) } @@ -140,17 +113,17 @@ impl Subgrid for LagrangeSubgridV1 { } /// Subgrid which uses Lagrange-interpolation. -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct LagrangeSubgridV2 { grid: Option>, ntau: usize, ny1: usize, ny2: usize, - y1order: usize, - y2order: usize, - tauorder: usize, + _y1order: usize, + _y2order: usize, + _tauorder: usize, itaumin: usize, - itaumax: usize, + _itaumax: usize, reweight1: bool, reweight2: bool, y1min: f64, @@ -159,35 +132,10 @@ pub struct LagrangeSubgridV2 { y2max: f64, taumin: f64, taumax: f64, - pub(crate) static_q2: f64, + pub(crate) _static_q2: f64, } impl LagrangeSubgridV2 { - /// Constructor. 
- #[must_use] - pub fn new(subgrid_params: &SubgridParams, extra_params: &ExtraSubgridParams) -> Self { - Self { - grid: None, - ntau: subgrid_params.q2_bins(), - ny1: subgrid_params.x_bins(), - ny2: extra_params.x2_bins(), - y1order: subgrid_params.x_order(), - y2order: extra_params.x2_order(), - tauorder: subgrid_params.q2_order(), - itaumin: 0, - itaumax: 0, - reweight1: subgrid_params.reweight(), - reweight2: extra_params.reweight2(), - y1min: fy(subgrid_params.x_max()), - y1max: fy(subgrid_params.x_min()), - y2min: fy(extra_params.x2_max()), - y2max: fy(extra_params.x2_min()), - taumin: ftau(subgrid_params.q2_min()), - taumax: ftau(subgrid_params.q2_max()), - static_q2: 0.0, - } - } - fn deltay1(&self) -> f64 { (self.y1max - self.y1min) / f64_from_usize(self.ny1 - 1) } @@ -279,13 +227,13 @@ impl Subgrid for LagrangeSubgridV2 { /// Subgrid which uses Lagrange-interpolation, but also stores its contents in a space-efficient /// structure. -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct LagrangeSparseSubgridV1 { array: SparseArray3, ntau: usize, ny: usize, - yorder: usize, - tauorder: usize, + _yorder: usize, + _tauorder: usize, reweight: bool, ymin: f64, ymax: f64, @@ -294,27 +242,6 @@ pub struct LagrangeSparseSubgridV1 { } impl LagrangeSparseSubgridV1 { - /// Constructor. 
- #[must_use] - pub fn new(subgrid_params: &SubgridParams) -> Self { - Self { - array: SparseArray3::new( - subgrid_params.q2_bins(), - subgrid_params.x_bins(), - subgrid_params.x_bins(), - ), - ntau: subgrid_params.q2_bins(), - ny: subgrid_params.x_bins(), - yorder: subgrid_params.x_order(), - tauorder: subgrid_params.q2_order(), - reweight: subgrid_params.reweight(), - ymin: fy(subgrid_params.x_max()), - ymax: fy(subgrid_params.x_min()), - taumin: ftau(subgrid_params.q2_min()), - taumax: ftau(subgrid_params.q2_max()), - } - } - fn deltay(&self) -> f64 { (self.ymax - self.ymin) / f64_from_usize(self.ny - 1) } @@ -368,23 +295,3 @@ impl Subgrid for LagrangeSparseSubgridV1 { })) } } - -impl From<&LagrangeSubgridV1> for LagrangeSparseSubgridV1 { - fn from(subgrid: &LagrangeSubgridV1) -> Self { - Self { - array: subgrid.grid.as_ref().map_or_else( - || SparseArray3::new(subgrid.ntau, subgrid.ny, subgrid.ny), - |grid| SparseArray3::from_ndarray(grid.view(), subgrid.itaumin, subgrid.ntau), - ), - ntau: subgrid.ntau, - ny: subgrid.ny, - yorder: subgrid.yorder, - tauorder: subgrid.tauorder, - reweight: subgrid.reweight, - ymin: subgrid.ymin, - ymax: subgrid.ymax, - taumin: subgrid.taumin, - taumax: subgrid.taumax, - } - } -} diff --git a/pineappl_v0/src/lib.rs b/pineappl_v0/src/lib.rs index 1848bf59b..5d9db49ad 100644 --- a/pineappl_v0/src/lib.rs +++ b/pineappl_v0/src/lib.rs @@ -43,7 +43,6 @@ pub mod grid; pub mod import_only_subgrid; pub mod lagrange_subgrid; pub mod ntuple_subgrid; -pub mod packed_array; pub mod pids; pub mod sparse_array3; pub mod subgrid; diff --git a/pineappl_v0/src/ntuple_subgrid.rs b/pineappl_v0/src/ntuple_subgrid.rs index 3f27e2e29..56e8cf7dc 100644 --- a/pineappl_v0/src/ntuple_subgrid.rs +++ b/pineappl_v0/src/ntuple_subgrid.rs @@ -2,23 +2,15 @@ use super::grid::Ntuple; use super::subgrid::{Mu2, Subgrid, SubgridIndexedIter}; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::borrow::Cow; /// Structure holding a grid with an 
n-tuple as the storage method for weights. -#[derive(Clone, Default, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct NtupleSubgridV1 { ntuples: Vec>, } -impl NtupleSubgridV1 { - /// Constructor. - #[must_use] - pub const fn new() -> Self { - Self { ntuples: vec![] } - } -} - impl Subgrid for NtupleSubgridV1 { fn mu2_grid(&self) -> Cow<'_, [Mu2]> { Cow::Borrowed(&[]) diff --git a/pineappl_v0/src/packed_array.rs b/pineappl_v0/src/packed_array.rs deleted file mode 100644 index cc06093f8..000000000 --- a/pineappl_v0/src/packed_array.rs +++ /dev/null @@ -1,293 +0,0 @@ -//! Provides the [`PackedArray`] struct. - -use ndarray::ArrayView3; -use serde::{Deserialize, Serialize}; -use std::iter; -use std::mem; -use std::ops::{Index, IndexMut, MulAssign}; - -/// `D`-dimensional array similar to [`ndarray::ArrayBase`], except that `T::default()` is not -/// stored to save space. Instead, adjacent non-default elements are grouped together and the index -/// of their first element (`start_index`) and the length of the group (`lengths`) is stored. -#[derive(Clone, Deserialize, Serialize)] -pub struct PackedArray { - /// The actual values stored in the array. The length of `entries` is always the sum of the - /// elements in `lengths`. - entries: Vec, - /// The indices of the first elements in each group. `start_indices[i]` corresponds to the - /// group with index `i`. - start_indices: Vec, - /// The length of each group. `lengths[i]` corresponds to the group with index `i`. - lengths: Vec, - /// The shape (dimensions) of the array. - shape: Vec, -} - -impl PackedArray { - /// Constructs a new and empty `PackedArray` of shape `shape`. - #[must_use] - pub fn new(shape: [usize; D]) -> Self { - Self { - entries: vec![], - start_indices: vec![], - lengths: vec![], - shape: shape.to_vec(), - } - } - - /// Returns `true` if the array contains no element. - #[must_use] - pub fn is_empty(&self) -> bool { - self.entries.is_empty() - } - - /// Returns the shape of the array. 
- #[must_use] - pub fn shape(&self) -> &[usize] { - &self.shape - } - - /// Clears the contents of the array. - pub fn clear(&mut self) { - self.entries.clear(); - self.start_indices.clear(); - self.lengths.clear(); - } - - /// Returns the overhead of storing the `start_indices` and the `lengths` of the groups, in - /// units of `f64`. - #[must_use] - pub fn overhead(&self) -> usize { - ((self.start_indices.len() + self.lengths.len()) * mem::size_of::()) - / mem::size_of::() - } - - /// Returns the number of default (zero) elements that are explicitly stored in `entries`. If - /// there is one default element between adjacent groups, it is more economical to store the - /// one default element explicitly and merge the two groups, than to store the `start_indices` - /// and `lengths` of both groups. - #[must_use] - pub fn explicit_zeros(&self) -> usize { - self.entries.iter().filter(|x| **x == T::default()).count() - } - - /// Returns the number of non-default (non-zero) elements stored in the array. - #[must_use] - pub fn non_zeros(&self) -> usize { - self.entries.iter().filter(|x| **x != T::default()).count() - } - - /// Returns an `Iterator` over the non-default (non-zero) elements of this array. The type of - /// an iterator element is `([usize; D], T)` where the first element of the tuple is the index - /// and the second element is the value. - pub fn indexed_iter(&self) -> impl Iterator + '_ { - self.start_indices - .iter() - .zip(&self.lengths) - .flat_map(|(&start_index, &length)| { - (start_index..(start_index + length)).map(|i| unravel_index(i, &self.shape)) - }) - .zip(&self.entries) - .filter(|&(_, entry)| *entry != Default::default()) - .map(|(indices, entry)| (indices, *entry)) - } -} - -impl, const D: usize> MulAssign for PackedArray { - fn mul_assign(&mut self, rhs: T) { - self.entries.iter_mut().for_each(|x| *x *= rhs); - } -} - -impl PackedArray { - /// Converts `array` into a `PackedArray`. 
- #[must_use] - pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { - let shape = array.shape(); - - let mut result = Self::new([xsize, shape[1], shape[2]]); - - for ((i, j, k), &entry) in array - .indexed_iter() - .filter(|&(_, &entry)| entry != Default::default()) - { - result[[i + xstart, j, k]] = entry; - } - - result - } -} - -/// Converts a `multi_index` into a flat index. -fn ravel_multi_index(multi_index: &[usize; D], shape: &[usize]) -> usize { - assert_eq!(multi_index.len(), shape.len()); - - multi_index - .iter() - .zip(shape) - .fold(0, |acc, (i, d)| acc * d + i) -} - -/// Converts a flat `index` into a `multi_index`. -fn unravel_index(mut index: usize, shape: &[usize]) -> [usize; D] { - assert!(index < shape.iter().product()); - let mut indices = [0; D]; - for (i, d) in indices.iter_mut().zip(shape).rev() { - *i = index % d; - index /= d; - } - indices -} - -impl Index<[usize; D]> for PackedArray { - type Output = T; - - fn index(&self, index: [usize; D]) -> &Self::Output { - assert_eq!(index.len(), self.shape.len()); - assert!( - index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), - "index {:?} is out of bounds for array of shape {:?}", - index, - self.shape - ); - - let raveled_index = ravel_multi_index(&index, &self.shape); - let point = self.start_indices.partition_point(|&i| i <= raveled_index); - - assert!( - point > 0, - "entry at index {index:?} is implicitly set to the default value" - ); - - let start_index = self.start_indices[point - 1]; - let length = self.lengths[point - 1]; - - let point_entries = - self.lengths.iter().take(point - 1).sum::() + raveled_index - start_index; - - assert!( - raveled_index < (start_index + length), - "entry at index {index:?} is implicitly set to the default value" - ); - - &self.entries[point_entries] - } -} - -impl IndexMut<[usize; D]> - for PackedArray -{ - fn index_mut(&mut self, index: [usize; D]) -> &mut Self::Output { - assert_eq!(index.len(), self.shape.len()); - - // 
Panic if the index value for any dimension is greater or equal than the length of this - // dimension. - assert!( - index.iter().zip(self.shape.iter()).all(|(&i, &d)| i < d), - "index {:?} is out of bounds for array of shape {:?}", - index, - self.shape - ); - - // The insertion cases are: - // 1. this array already stores an element at `index`: - // -> we just have to update this element - // 2. this array does not store an element at `index`: - // a. the distance of the (raveled) `index` is `threshold_distance` away from the next - // or previous element that is already stored: - // -> we can merge the new element into already stored groups, potentially padding - // with `T::default()` elements - // b. the distance of the (raveled) `index` from the existing elements is greater than - // `threshold_distance`: - // -> we insert the element as a new group - - let raveled_index = ravel_multi_index(&index, &self.shape); - - // To determine which groups the new element is close to, `point` is the index of the - // start_index of the first group after the new element. `point` is 0 if no elements before - // the new element are stored, and point is `self.start_indices.len()` if no elements after - // the new element are stored. - let point = self.start_indices.partition_point(|&i| i <= raveled_index); - - // `point_entries` is the index of the first element of the next group, given in - // `self.entries`, i.e. the element at index `self.start_indices[point]`. - let point_entries = self.lengths.iter().take(point).sum::(); - - // Maximum distance for merging groups. If the new element is within `threshold_distance` - // of an existing group (i.e. there are `threshold_distance - 1` implicit elements - // between them), we merge the new element into the existing group. We choose 2 as the - // `threshold_distance` based on memory: in the case of `T` = `f64`, it is more economical - // to store one zero explicitly than to store the start_index and length of a new group. 
- let threshold_distance = 2; - - // If `point > 0`, there is at least one group preceding the new element. Thus, in the - // following we determine if we can insert the new element into this group. - if point > 0 { - // start_index and length of the group before the new element, i.e. the group - // (potentially) getting the new element - let start_index = self.start_indices[point - 1]; - let length = self.lengths[point - 1]; - - // Case 1: an element is already stored at this `index` - if raveled_index < start_index + length { - return &mut self.entries[point_entries - length + raveled_index - start_index]; - // Case 2a: the new element can be merged into the preceding group - } else if raveled_index < start_index + length + threshold_distance { - let distance = raveled_index - (start_index + length) + 1; - // Merging happens by increasing the length of the group - self.lengths[point - 1] += distance; - // and inserting the necessary number of default elements. - self.entries.splice( - point_entries..point_entries, - iter::repeat(Default::default()).take(distance), - ); - - // If the new element is within `threshold_distance` of the *next* group, we merge - // the next group into this group. - if let Some(start_index_next) = self.start_indices.get(point) { - if raveled_index + threshold_distance >= *start_index_next { - let distance_next = start_index_next - raveled_index; - - // Increase the length of this group - self.lengths[point - 1] += distance_next - 1 + self.lengths[point]; - // and remove the next group. we don't have to manipulate `self.entries`, - // since the grouping of the elements is handled only by - // `self.start_indices` and `self.lengths` - self.lengths.remove(point); - self.start_indices.remove(point); - // Insert the default elements between the groups. 
- self.entries.splice( - point_entries..point_entries, - iter::repeat(Default::default()).take(distance_next - 1), - ); - } - } - - return &mut self.entries[point_entries - 1 + distance]; - } - } - - // Case 2a: the new element can be merged into the next group. No `self.lengths.remove` and - // `self.start_indices.remove` here, since we are not merging two groups. - if let Some(start_index_next) = self.start_indices.get(point) { - if raveled_index + threshold_distance >= *start_index_next { - let distance = start_index_next - raveled_index; - - self.start_indices[point] = raveled_index; - self.lengths[point] += distance; - self.entries.splice( - point_entries..point_entries, - iter::repeat(Default::default()).take(distance), - ); - return &mut self.entries[point_entries]; - } - } - - // Case 2b: we insert a new group of length 1 - self.start_indices.insert(point, raveled_index); - self.lengths.insert(point, 1); - self.entries.insert(point_entries, Default::default()); - - &mut self.entries[point_entries] - } -} diff --git a/pineappl_v0/src/pids.rs b/pineappl_v0/src/pids.rs index 6064f3f8b..720edcca0 100644 --- a/pineappl_v0/src/pids.rs +++ b/pineappl_v0/src/pids.rs @@ -1,14 +1,8 @@ //! TODO -use std::str::FromStr; -use thiserror::Error; - -const EVOL_BASIS_IDS: [i32; 12] = [100, 103, 108, 115, 124, 135, 200, 203, 208, 215, 224, 235]; - /// Particle ID bases. In `PineAPPL` every particle is identified using a particle identifier /// (PID), which is represented as an `i32`. The values of this `enum` specify how this value is /// interpreted. -#[derive(Clone, Copy, Debug, Eq, PartialEq)] pub enum PidBasis { /// This basis uses the [particle data group](https://pdg.lbl.gov/) (PDG) PIDs. For a complete /// definition see the section 'Monte Carlo Particle Numbering Scheme' of the PDG Review, for @@ -19,360 +13,3 @@ pub enum PidBasis { /// `200`, `203`, `208`, `215`, `224`, `235`. 
Evol, } - -impl FromStr for PidBasis { - type Err = UnknownPidBasis; - - fn from_str(s: &str) -> Result { - match s { - "Pdg" | "PDG" | "pdg_mc_ids" => Ok(Self::Pdg), - "Evol" | "EVOL" | "evol" => Ok(Self::Evol), - _ => Err(UnknownPidBasis { - basis: s.to_owned(), - }), - } - } -} - -impl PidBasis { - /// Return the charge-conjugated particle ID of `pid` given in the basis of `self`. The - /// returned tuple contains a factor that possibly arises during the charge conjugation. - #[must_use] - pub const fn charge_conjugate(&self, pid: i32) -> (i32, f64) { - match (*self, pid) { - // TODO: in the general case we should allow to return a vector of tuples - (Self::Evol, 100 | 103 | 108 | 115 | 124 | 135) => (pid, 1.0), - (Self::Evol, 200 | 203 | 208 | 215 | 224 | 235) => (pid, -1.0), - (Self::Evol | Self::Pdg, _) => (charge_conjugate_pdg_pid(pid), 1.0), - } - } - - /// Given the particle IDs in `pids`, guess the [`PidBasis`]. - #[must_use] - pub fn guess(pids: &[i32]) -> Self { - // if we find more than 3 pids that are recognized to be from the evolution basis, declare - // it to be the evolution basis (that's a heuristic), otherwise PDG MC IDs - if pids - .iter() - .filter(|&pid| EVOL_BASIS_IDS.iter().any(|evol_pid| pid == evol_pid)) - .count() - > 3 - { - Self::Evol - } else { - Self::Pdg - } - } -} - -/// Error returned by [`PidBasis::from_str`] when passed with an unknown argument. -#[derive(Debug, Error)] -#[error("unknown PID basis: {basis}")] -pub struct UnknownPidBasis { - basis: String, -} - -/// Translates IDs from the evolution basis into IDs using PDG Monte Carlo IDs. 
-#[must_use] -pub fn evol_to_pdg_mc_ids(id: i32) -> Vec<(i32, f64)> { - match id { - 100 => vec![ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, 1.0), - (-4, 1.0), - (5, 1.0), - (-5, 1.0), - (6, 1.0), - (-6, 1.0), - ], - 103 => vec![(2, 1.0), (-2, 1.0), (1, -1.0), (-1, -1.0)], - 108 => vec![ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, -2.0), - (-3, -2.0), - ], - 115 => vec![ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, -3.0), - (-4, -3.0), - ], - 124 => vec![ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, 1.0), - (-4, 1.0), - (5, -4.0), - (-5, -4.0), - ], - 135 => vec![ - (2, 1.0), - (-2, 1.0), - (1, 1.0), - (-1, 1.0), - (3, 1.0), - (-3, 1.0), - (4, 1.0), - (-4, 1.0), - (5, 1.0), - (-5, 1.0), - (6, -5.0), - (-6, -5.0), - ], - 200 => vec![ - (1, 1.0), - (-1, -1.0), - (2, 1.0), - (-2, -1.0), - (3, 1.0), - (-3, -1.0), - (4, 1.0), - (-4, -1.0), - (5, 1.0), - (-5, -1.0), - (6, 1.0), - (-6, -1.0), - ], - 203 => vec![(2, 1.0), (-2, -1.0), (1, -1.0), (-1, 1.0)], - 208 => vec![ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, -2.0), - (-3, 2.0), - ], - 215 => vec![ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, 1.0), - (-3, -1.0), - (4, -3.0), - (-4, 3.0), - ], - 224 => vec![ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, 1.0), - (-3, -1.0), - (4, 1.0), - (-4, -1.0), - (5, -4.0), - (-5, 4.0), - ], - 235 => vec![ - (2, 1.0), - (-2, -1.0), - (1, 1.0), - (-1, -1.0), - (3, 1.0), - (-3, -1.0), - (4, 1.0), - (-4, -1.0), - (5, 1.0), - (-5, -1.0), - (6, -5.0), - (-6, 5.0), - ], - _ => vec![(id, 1.0)], - } -} - -/// Translates PDG Monte Carlo IDs to particle IDs from the evolution basis. 
-#[must_use] -pub fn pdg_mc_pids_to_evol(pid: i32) -> Vec<(i32, f64)> { - match pid { - -6 => vec![ - (100, 1.0 / 12.0), - (135, -1.0 / 12.0), - (200, -1.0 / 12.0), - (235, 1.0 / 12.0), - ], - -5 => vec![ - (100, 1.0 / 12.0), - (124, -1.0 / 10.0), - (135, 1.0 / 60.0), - (200, -1.0 / 12.0), - (224, 1.0 / 10.0), - (235, -1.0 / 60.0), - ], - -4 => vec![ - (100, 1.0 / 12.0), - (115, -1.0 / 8.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, -1.0 / 12.0), - (215, 1.0 / 8.0), - (224, -1.0 / 40.0), - (235, -1.0 / 60.0), - ], - -3 => vec![ - (100, 1.0 / 12.0), - (108, -1.0 / 6.0), - (115, 1.0 / 24.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, -1.0 / 12.0), - (208, 1.0 / 6.0), - (215, -1.0 / 24.0), - (224, -1.0 / 40.0), - (235, -1.0 / 60.0), - ], - -2 => vec![ - (100, 1.0 / 12.0), - (103, 1.0 / 4.0), - (108, 1.0 / 12.0), - (115, 1.0 / 24.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, -1.0 / 12.0), - (203, -1.0 / 4.0), - (208, -1.0 / 12.0), - (215, -1.0 / 24.0), - (224, -1.0 / 40.0), - (235, -1.0 / 60.0), - ], - -1 => vec![ - (100, 1.0 / 12.0), - (103, -1.0 / 4.0), - (108, 1.0 / 12.0), - (115, 1.0 / 24.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, -1.0 / 12.0), - (203, 1.0 / 4.0), - (208, -1.0 / 12.0), - (215, -1.0 / 24.0), - (224, -1.0 / 40.0), - (235, -1.0 / 60.0), - ], - 1 => vec![ - (100, 1.0 / 12.0), - (103, -1.0 / 4.0), - (108, 1.0 / 12.0), - (115, 1.0 / 24.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, 1.0 / 12.0), - (203, -1.0 / 4.0), - (208, 1.0 / 12.0), - (215, 1.0 / 24.0), - (224, 1.0 / 40.0), - (235, 1.0 / 60.0), - ], - 2 => vec![ - (100, 1.0 / 12.0), - (103, 1.0 / 4.0), - (108, 1.0 / 12.0), - (115, 1.0 / 24.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, 1.0 / 12.0), - (203, 1.0 / 4.0), - (208, 1.0 / 12.0), - (215, 1.0 / 24.0), - (224, 1.0 / 40.0), - (235, 1.0 / 60.0), - ], - 3 => vec![ - (100, 1.0 / 12.0), - (108, -1.0 / 6.0), - (115, 1.0 / 24.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, 1.0 / 12.0), - (208, -1.0 
/ 6.0), - (215, 1.0 / 24.0), - (224, 1.0 / 40.0), - (235, 1.0 / 60.0), - ], - 4 => vec![ - (100, 1.0 / 12.0), - (115, -1.0 / 8.0), - (124, 1.0 / 40.0), - (135, 1.0 / 60.0), - (200, 1.0 / 12.0), - (215, -1.0 / 8.0), - (224, 1.0 / 40.0), - (235, 1.0 / 60.0), - ], - 5 => vec![ - (100, 1.0 / 12.0), - (124, -1.0 / 10.0), - (135, 1.0 / 60.0), - (200, 1.0 / 12.0), - (224, -1.0 / 10.0), - (235, 1.0 / 60.0), - ], - 6 => vec![ - (100, 1.0 / 12.0), - (135, -1.0 / 12.0), - (200, 1.0 / 12.0), - (235, -1.0 / 12.0), - ], - _ => vec![(pid, 1.0)], - } -} - -/// Return the charge-conjugated PDG ID of `pid`. -#[must_use] -pub const fn charge_conjugate_pdg_pid(pid: i32) -> i32 { - match pid { - 21 | 22 => pid, - _ => -pid, - } -} - -/// Given `tuples` represting a linear combination of PDG MC IDs, return a PID for the `evol` -/// basis. The order of each tuple in `tuples` is not relevant. This function inverts -/// [`evol_to_pdg_mc_ids`]. If the inversion is not possible, `None` is returned. -#[must_use] -pub fn pdg_mc_ids_to_evol(tuples: &[(i32, f64)]) -> Option { - let mut tuples = tuples.to_vec(); - tuples.retain(|&(_, factor)| factor != 0.0); - tuples.sort_by_key(|&(id, _)| id); - let tuples = tuples; - - for &evol_pid in &EVOL_BASIS_IDS { - let mut evol_vec = evol_to_pdg_mc_ids(evol_pid); - evol_vec.sort_by_key(|&(id, _)| id); - let evol_vec = evol_vec; - - if evol_vec == tuples { - return Some(evol_pid); - } - } - - let non_zero: Vec<_> = tuples - .into_iter() - .filter(|&(_, factor)| factor != 0.0) - .collect(); - - if let &[(pid, factor)] = non_zero.as_slice() { - if factor == 1.0 { - return Some(pid); - } - } - - None -} diff --git a/pineappl_v0/src/sparse_array3.rs b/pineappl_v0/src/sparse_array3.rs index f64c60826..fd8e3a3d9 100644 --- a/pineappl_v0/src/sparse_array3.rs +++ b/pineappl_v0/src/sparse_array3.rs @@ -1,15 +1,11 @@ //! Module containing the `SparseArray3` struct. 
-use ndarray::{ArrayView3, Axis}; -use serde::{Deserialize, Serialize}; -use std::iter; -use std::mem; -use std::ops::{Index, IndexMut, Range}; -use std::slice::{Iter, IterMut}; +use serde::Deserialize; +use std::slice::Iter; /// Struct for a sparse three-dimensional array, which is optimized for the sparsity of /// interpolation grids. -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct SparseArray3 { entries: Vec, indices: Vec<(usize, usize)>, @@ -17,121 +13,6 @@ pub struct SparseArray3 { dimensions: (usize, usize, usize), } -// TODO: write panic messages - -impl Index<[usize; 3]> for SparseArray3 { - type Output = T; - - fn index(&self, mut index: [usize; 3]) -> &Self::Output { - // index too small - assert!(index[0] >= self.start); - - let dim1 = if self.dimensions.1 > self.dimensions.2 { - index.swap(1, 2); - self.dimensions.2 - } else { - self.dimensions.1 - }; - - // index too large - assert!(index[0] < (self.start + (self.indices.len() - 1) / dim1)); - - // index too large - assert!(index[1] < dim1); - - let forward = dim1 * (index[0] - self.start) + index[1]; - let indices_a = &self.indices[forward]; - let indices_b = &self.indices[forward + 1]; - - let zeros_left = indices_a.0; - let offset = indices_a.1; - let non_zeros = indices_b.1 - offset; - - // index too small - assert!(index[2] >= zeros_left); - - // index too large - assert!(index[2] < (non_zeros + zeros_left)); - - &self.entries[offset + (index[2] - zeros_left)] - } -} - -impl IndexMut<[usize; 3]> for SparseArray3 { - fn index_mut(&mut self, mut index: [usize; 3]) -> &mut Self::Output { - let dim1 = if self.dimensions.1 > self.dimensions.2 { - index.swap(1, 2); - self.dimensions.2 - } else { - self.dimensions.1 - }; - - let max_index0 = self.start + (self.indices.len() - 1) / dim1; - - if index[0] < self.start { - let elements = self.start - index[0]; - self.start = index[0]; - self.indices - .splice(0..0, iter::repeat((0, 0)).take(elements * dim1)); - } else if index[0] 
>= self.dimensions.0 { - panic!(); - } else if self.entries.is_empty() || (index[0] >= max_index0) { - let elements = if self.entries.is_empty() { - self.start = index[0]; - 1 - } else { - index[0] - max_index0 + 1 - }; - - let insert = self.indices.len() - 1; - self.indices.splice( - insert..insert, - iter::repeat((0, self.indices.last().unwrap().1)).take(elements * dim1), - ); - } - - // index too large - assert!(index[1] < dim1); - - let forward = dim1 * (index[0] - self.start) + index[1]; - let indices_a = &self.indices[forward]; - let indices_b = &self.indices[forward + 1]; - - let zeros_left = indices_a.0; - let offset = indices_a.1; - let non_zeros = indices_b.1 - offset; - - let elements; - let insert; - - if index[2] < zeros_left { - elements = zeros_left - index[2]; - insert = offset; - self.indices[forward].0 -= elements; - } else if index[2] >= self.dimensions.2.max(self.dimensions.1) { - panic!(); - } else if non_zeros == 0 { - elements = 1; - insert = offset; - self.indices[forward].0 = index[2]; - } else if index[2] >= (zeros_left + non_zeros) { - elements = index[2] - (zeros_left + non_zeros) + 1; - insert = offset + non_zeros; - } else { - return &mut self.entries[offset + (index[2] - zeros_left)]; - } - - self.entries - .splice(insert..insert, iter::repeat(T::default()).take(elements)); - self.indices - .iter_mut() - .skip(forward + 1) - .for_each(|ix| ix.1 += elements); - - &mut self.entries[offset + (index[2] - self.indices[forward].0)] - } -} - /// Immutable iterator over the elements of a `SparseArray3`. pub struct IndexedIter<'a, T> { entry_iter: Iter<'a, T>, @@ -224,105 +105,6 @@ impl<'a, T: Copy + Default + PartialEq> Iterator for IndexedIter<'a, T> { } impl SparseArray3 { - /// Constructs a new and empty `SparseArray3` with the specified dimensions `nx`, `ny` and - /// `nz`. 
- #[must_use] - pub fn new(nx: usize, ny: usize, nz: usize) -> Self { - Self { - entries: vec![], - indices: vec![(0, 0)], - start: 0, - dimensions: (nx, ny, nz), - } - } - - /// Converts `array` into a `SparseArray3`. - #[must_use] - pub fn from_ndarray(array: ArrayView3, xstart: usize, xsize: usize) -> Self { - let (_, ny, nz) = array.dim(); - let array = if ny > nz { - let mut array = array; - array.swap_axes(1, 2); - array - } else { - array - }; - - let dimensions = (xsize, ny, nz); - let mut entries = vec![]; - let mut indices = vec![]; - - let mut offset = 0; - - for array2 in array.axis_iter(Axis(0)) { - for array1 in array2.axis_iter(Axis(0)) { - let start = array1.iter().position(|x| *x != T::default()); - - if let Some(start) = start { - let end = array1.iter().enumerate().skip(start).fold( - start, - |last_non_zero, (index, x)| { - if *x == T::default() { - last_non_zero - } else { - index - } - }, - ) + 1; - indices.push((start, offset)); - offset += end - start; - entries.splice( - entries.len()..entries.len(), - array1.iter().skip(start).take(end - start).cloned(), - ); - } else { - indices.push((0, offset)); - } - } - } - - indices.push((0, offset)); - - Self { - entries, - indices, - start: xstart, - dimensions, - } - } - - /// Clear the contents of the array. - pub fn clear(&mut self) { - self.entries.clear(); - self.indices.clear(); - self.indices.push((0, 0)); - self.start = 0; - } - - /// Returns the dimensions of this array. - #[must_use] - pub const fn dimensions(&self) -> (usize, usize, usize) { - self.dimensions - } - - /// Returns the overhead for storing the explicitly zero and non-zero elements. - #[must_use] - pub fn overhead(&self) -> usize { - (2 * self.indices.len() * mem::size_of::()) / mem::size_of::() - } - - /// Returns the number of default (zero) elements in this array. 
- #[must_use] - pub fn zeros(&self) -> usize { - self.entries.iter().filter(|x| **x == T::default()).count() - } - - /// Returns the number of non-default (non-zero) elements in this array. - #[must_use] - pub fn len(&self) -> usize { - self.entries.iter().filter(|x| **x != T::default()).count() - } - /// Returns `true` if the array contains no element. #[must_use] pub fn is_empty(&self) -> bool { @@ -347,75 +129,4 @@ impl SparseArray3 { result } - - /// Return an iterator over the elements, including zero elements. - pub fn iter_mut(&mut self) -> IterMut<'_, T> { - self.entries.iter_mut() - } - - /// Return a half-open interval of indices that are filled for the first dimension. - #[must_use] - pub fn x_range(&self) -> Range { - self.start - ..(self.start + (self.indices.len() - 1) / self.dimensions.1.min(self.dimensions.2)) - } - - /// Increase the number of entries of the x-axis by one by inserting zeros at `x`. - pub fn increase_x_at(&mut self, x: usize) { - let dim1 = self.dimensions.1.min(self.dimensions.2); - let nx = (self.indices.len() - 1) / dim1; - - if x <= self.start { - self.start += 1; - } else if x < self.start + nx { - let at = (x - self.start) * dim1; - let offset = self.indices[at].1; - self.indices - .splice(at..at, iter::repeat((0, offset)).take(dim1)); - } else if x <= self.dimensions.0 { - // nothing to do here - } else { - self.dimensions.0 = x; - } - - self.dimensions.0 += 1; - } - - /// Removes all elements with the specified x coordinate. 
- /// - /// # Panics - /// - /// TODO - pub fn remove_x(&mut self, x: usize) { - let dim1 = self.dimensions.1.min(self.dimensions.2); - let nx = (self.indices.len() - 1) / dim1; - - assert!((x >= self.start) && (x < self.start + nx)); - - let index_a = (x - self.start) * dim1; - let index_b = (x - self.start + 1) * dim1; - let offset_a = self.indices[index_a].1; - let offset_b = self.indices[index_b].1; - - self.entries.drain(offset_a..offset_b); - self.indices - .iter_mut() - .skip(index_b) - .for_each(|o| o.1 -= offset_b - offset_a); - - if (x != self.start) && (x != (self.start + nx - 1)) { - self.indices - .splice(index_a..index_b, iter::repeat((0, offset_a)).take(dim1)); - } else { - if x == self.start { - self.start += 1; - } - - self.indices.drain(index_a..index_b); - } - - if self.indices.last().unwrap().1 == 0 { - self.clear(); - } - } } diff --git a/pineappl_v0/src/subgrid.rs b/pineappl_v0/src/subgrid.rs index b541741c1..a98194364 100644 --- a/pineappl_v0/src/subgrid.rs +++ b/pineappl_v0/src/subgrid.rs @@ -5,12 +5,12 @@ use super::import_only_subgrid::{ImportOnlySubgridV1, ImportOnlySubgridV2}; use super::lagrange_subgrid::{LagrangeSparseSubgridV1, LagrangeSubgridV1, LagrangeSubgridV2}; use super::ntuple_subgrid::NtupleSubgridV1; use enum_dispatch::enum_dispatch; -use serde::{Deserialize, Serialize}; +use serde::Deserialize; use std::borrow::Cow; /// Enum which lists all possible `Subgrid` variants possible. #[enum_dispatch(Subgrid)] -#[derive(Clone, Deserialize, Serialize)] +#[derive(Deserialize)] pub enum SubgridEnum { // WARNING: never change the order or content of this enum, only add to the end of it /// Lagrange-interpolation subgrid. @@ -31,7 +31,7 @@ pub enum SubgridEnum { } /// Structure denoting renormalization and factorization scale values. -#[derive(Debug, Deserialize, Clone, PartialEq, PartialOrd, Serialize)] +#[derive(Deserialize, Clone)] pub struct Mu2 { /// The (squared) renormalization scale value. 
pub ren: f64, @@ -39,26 +39,6 @@ pub struct Mu2 { pub fac: f64, } -/// Size-related statistics for a subgrid. -#[derive(Debug, Eq, PartialEq)] -pub struct Stats { - /// Number of possible total entries for a subgrid. This number is the product of the lengths - /// of the slices returned by [`Subgrid::mu2_grid`], [`Subgrid::x1_grid`] and - /// [`Subgrid::x2_grid`]. - pub total: usize, - /// Number of allocated entries for a subgrid. This number is always smaller or equal than - /// [`Self::total`]. - pub allocated: usize, - /// Number of allocated zero entries for a subgrid. This number is always smaller or equal than - /// [`Self::allocated`] and contributes to [`Self::overhead`]. - pub zeros: usize, - /// The overhead of a [`Subgrid`] is the size of internal data not used to store grid values. - pub overhead: usize, - /// This value multiplied with any other member of this struct gives an approximate size in - /// bytes. - pub bytes_per_value: usize, -} - /// Trait each subgrid must implement. #[enum_dispatch] pub trait Subgrid { @@ -87,207 +67,15 @@ pub trait Subgrid { pub type SubgridIndexedIter<'a> = Box + 'a>; /// Subgrid creation parameters for subgrids that perform interpolation. -#[derive(Clone, Debug, Deserialize, Serialize)] +#[derive(Deserialize)] pub struct SubgridParams { - q2_bins: usize, - q2_max: f64, - q2_min: f64, - q2_order: usize, - reweight: bool, - x_bins: usize, - x_max: f64, - x_min: f64, - x_order: usize, -} - -impl SubgridParams { - /// Returns the number of bins for the $Q^2$ axis. - #[must_use] - pub const fn q2_bins(&self) -> usize { - self.q2_bins - } - - /// Returns the upper limit of the $Q^2$ axis. - #[must_use] - pub const fn q2_max(&self) -> f64 { - self.q2_max - } - - /// Returns the lower limit of the $Q^2$ axis. - #[must_use] - pub const fn q2_min(&self) -> f64 { - self.q2_min - } - - /// Returns the interpolation order for the $Q^2$ axis. 
- #[must_use] - pub const fn q2_order(&self) -> usize { - self.q2_order - } - - /// Returns whether reweighting is enabled or not. - #[must_use] - pub const fn reweight(&self) -> bool { - self.reweight - } - - /// Sets the number of bins for the $Q^2$ axis. - pub fn set_q2_bins(&mut self, q2_bins: usize) { - self.q2_bins = q2_bins; - } - - /// Sets the upper limit of the $Q^2$ axis. - pub fn set_q2_max(&mut self, q2_max: f64) { - self.q2_max = q2_max; - } - - /// Sets the lower limit of the $Q^2$ axis. - pub fn set_q2_min(&mut self, q2_min: f64) { - self.q2_min = q2_min; - } - - /// Sets the interpolation order for the $Q^2$ axis. - pub fn set_q2_order(&mut self, q2_order: usize) { - self.q2_order = q2_order; - } - - /// Sets the reweighting parameter. - pub fn set_reweight(&mut self, reweight: bool) { - self.reweight = reweight; - } - - /// Sets the number of bins for the $x$ axes. - pub fn set_x_bins(&mut self, x_bins: usize) { - self.x_bins = x_bins; - } - - /// Sets the upper limit of the $x$ axes. - pub fn set_x_max(&mut self, x_max: f64) { - self.x_max = x_max; - } - - /// Sets the lower limit of the $x$ axes. - pub fn set_x_min(&mut self, x_min: f64) { - self.x_min = x_min; - } - - /// Sets the interpolation order for the $x$ axes. - pub fn set_x_order(&mut self, x_order: usize) { - self.x_order = x_order; - } - - /// Returns the number of bins for the $x$ axes. - #[must_use] - pub const fn x_bins(&self) -> usize { - self.x_bins - } - - /// Returns the upper limit of the $x$ axes. - #[must_use] - pub const fn x_max(&self) -> f64 { - self.x_max - } - - /// Returns the lower limit of the $x$ axes. - #[must_use] - pub const fn x_min(&self) -> f64 { - self.x_min - } - - /// Returns the interpolation order for the $x$ axes. - #[must_use] - pub const fn x_order(&self) -> usize { - self.x_order - } -} - -/// Extra grid creation parameters when the limits for `x1` and `x2` are different. 
-#[derive(Deserialize, Serialize)] -pub struct ExtraSubgridParams { - reweight2: bool, - x2_bins: usize, - x2_max: f64, - x2_min: f64, - x2_order: usize, -} - -impl Default for ExtraSubgridParams { - fn default() -> Self { - Self { - reweight2: true, - x2_bins: 50, - x2_max: 1.0, - x2_min: 2e-7, - x2_order: 3, - } - } -} - -impl From<&SubgridParams> for ExtraSubgridParams { - fn from(subgrid_params: &SubgridParams) -> Self { - Self { - reweight2: subgrid_params.reweight(), - x2_bins: subgrid_params.x_bins(), - x2_max: subgrid_params.x_max(), - x2_min: subgrid_params.x_min(), - x2_order: subgrid_params.x_order(), - } - } -} - -impl ExtraSubgridParams { - /// Returns whether reweighting is enabled for the `x2` axis or not. - #[must_use] - pub const fn reweight2(&self) -> bool { - self.reweight2 - } - - /// Sets the reweighting parameter for the `x2` axis. - pub fn set_reweight2(&mut self, reweight2: bool) { - self.reweight2 = reweight2; - } - - /// Sets the number of bins for the `x2` axes. - pub fn set_x2_bins(&mut self, x_bins: usize) { - self.x2_bins = x_bins; - } - - /// Sets the upper limit of the `x2` axes. - pub fn set_x2_max(&mut self, x_max: f64) { - self.x2_max = x_max; - } - - /// Sets the lower limit of the `x2` axes. - pub fn set_x2_min(&mut self, x_min: f64) { - self.x2_min = x_min; - } - - /// Sets the interpolation order for the `x2` axes. - pub fn set_x2_order(&mut self, x_order: usize) { - self.x2_order = x_order; - } - - /// Returns the number of bins for the `x2` axes. - #[must_use] - pub const fn x2_bins(&self) -> usize { - self.x2_bins - } - - /// Returns the upper limit of the `x2` axes. - #[must_use] - pub const fn x2_max(&self) -> f64 { - self.x2_max - } - - /// Returns the lower limit of the `x2` axes. - #[must_use] - pub const fn x2_min(&self) -> f64 { - self.x2_min - } - - /// Returns the interpolation order for the `x2` axes. 
- #[must_use] - pub const fn x2_order(&self) -> usize { - self.x2_order - } + _q2_bins: usize, + _q2_max: f64, + _q2_min: f64, + _q2_order: usize, + _reweight: bool, + _x_bins: usize, + _x_max: f64, + _x_min: f64, + _x_order: usize, } From 667d88b7a9f6f53280853f8c99df6cc5387c47cd Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 14:01:07 +0200 Subject: [PATCH 32/42] Remove some final bits of code in `pineappl_v0` --- pineappl_v0/src/boc.rs | 20 -------------------- pineappl_v0/src/convolutions.rs | 1 - 2 files changed, 21 deletions(-) diff --git a/pineappl_v0/src/boc.rs b/pineappl_v0/src/boc.rs index 6ed022b02..7c811dbb6 100644 --- a/pineappl_v0/src/boc.rs +++ b/pineappl_v0/src/boc.rs @@ -2,12 +2,6 @@ //! (`boc`). use serde::Deserialize; -use thiserror::Error; - -/// Error type keeping information if [`Order::from_str`] went wrong. -#[derive(Debug, Error, Eq, PartialEq)] -#[error("{0}")] -pub struct ParseOrderError(String); /// Coupling powers for each grid. #[derive(Deserialize)] @@ -22,20 +16,6 @@ pub struct Order { pub logxif: u32, } -impl Order { - /// Constructor. This function mainly exists to have a way of constructing `Order` that is less - /// verbose. - #[must_use] - pub const fn new(alphas: u32, alpha: u32, logxir: u32, logxif: u32) -> Self { - Self { - alphas, - alpha, - logxir, - logxif, - } - } -} - /// This structure represents a channel. Each channel consists of a tuple containing in the /// following order, the particle ID of the first incoming parton, then the particle ID of the /// second parton, and finally a numerical factor that will multiply the result for this specific diff --git a/pineappl_v0/src/convolutions.rs b/pineappl_v0/src/convolutions.rs index af96f94e4..4fd82a348 100644 --- a/pineappl_v0/src/convolutions.rs +++ b/pineappl_v0/src/convolutions.rs @@ -1,7 +1,6 @@ //! Module for everything related to luminosity functions. /// Data type that indentifies different types of convolutions. 
-#[derive(Debug, Eq, PartialEq)] pub enum Convolution { // TODO: eventually get rid of this value /// No convolution. From 71711267f00a3f20d58594a45cd970b2eca7d0bb Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 14:18:25 +0200 Subject: [PATCH 33/42] Fix some clippy warnings --- pineappl_v0/src/bin.rs | 6 +++--- pineappl_v0/src/grid.rs | 14 +++++++------- pineappl_v0/src/sparse_array3.rs | 4 ++-- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/pineappl_v0/src/bin.rs b/pineappl_v0/src/bin.rs index 6030b8589..6f764e9cc 100644 --- a/pineappl_v0/src/bin.rs +++ b/pineappl_v0/src/bin.rs @@ -36,7 +36,7 @@ impl<'a> BinInfo<'a> { /// Returns the number of bins. #[must_use] - pub fn bins(&self) -> usize { + pub const fn bins(&self) -> usize { self.limits.bins() } @@ -81,7 +81,7 @@ impl<'a> BinInfo<'a> { impl BinRemapper { /// Return the number of dimensions. #[must_use] - pub fn dimensions(&self) -> usize { + pub const fn dimensions(&self) -> usize { self.limits.len() / self.normalizations.len() } @@ -101,7 +101,7 @@ impl BinRemapper { impl BinLimits { /// Returns the number of bins. #[must_use] - pub fn bins(&self) -> usize { + pub const fn bins(&self) -> usize { match &self.0 { Limits::Equal { bins, .. } => *bins, Limits::Unequal { limits } => limits.len() - 1, diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index b051e2628..f36db3727 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -78,13 +78,13 @@ impl Grid { /// Return by which convention the particle IDs are encoded. 
#[must_use] pub fn pid_basis(&self) -> PidBasis { - if let Some(key_values) = self.key_values() { - if let Some(lumi_id_types) = key_values.get("lumi_id_types") { - match lumi_id_types.as_str() { - "pdg_mc_ids" => return PidBasis::Pdg, - "evol" => return PidBasis::Evol, - _ => unimplemented!("unknown particle ID convention {lumi_id_types}"), - } + if let Some(key_values) = self.key_values() + && let Some(lumi_id_types) = key_values.get("lumi_id_types") + { + match lumi_id_types.as_str() { + "pdg_mc_ids" => return PidBasis::Pdg, + "evol" => return PidBasis::Evol, + _ => unimplemented!("unknown particle ID convention {lumi_id_types}"), } } diff --git a/pineappl_v0/src/sparse_array3.rs b/pineappl_v0/src/sparse_array3.rs index fd8e3a3d9..77f99a15c 100644 --- a/pineappl_v0/src/sparse_array3.rs +++ b/pineappl_v0/src/sparse_array3.rs @@ -23,7 +23,7 @@ pub struct IndexedIter<'a, T> { dimensions: (usize, usize, usize), } -impl<'a, T: Copy + Default + PartialEq> Iterator for IndexedIter<'a, T> { +impl Iterator for IndexedIter<'_, T> { type Item = ((usize, usize, usize), T); fn next(&mut self) -> Option { @@ -107,7 +107,7 @@ impl<'a, T: Copy + Default + PartialEq> Iterator for IndexedIter<'a, T> { impl SparseArray3 { /// Returns `true` if the array contains no element. 
#[must_use] - pub fn is_empty(&self) -> bool { + pub const fn is_empty(&self) -> bool { self.entries.is_empty() } From 47b49dbff8f419ad0b6083bcda45dace5fc8c6c0 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 14:18:50 +0200 Subject: [PATCH 34/42] Remove obsolete comment --- pineappl/Cargo.toml | 1 - 1 file changed, 1 deletion(-) diff --git a/pineappl/Cargo.toml b/pineappl/Cargo.toml index cf042b605..bcf9be3ca 100644 --- a/pineappl/Cargo.toml +++ b/pineappl/Cargo.toml @@ -26,7 +26,6 @@ git-version = "0.3.5" itertools = "0.10.1" lz4_flex = "0.11.6" ndarray = { features = ["serde"], version = "0.15.4" } -# TODO: opt out of default features in this crate to match line above pineappl_v0 = { path = "../pineappl_v0", version = "=1.3.3" } rayon = "1.5.1" rustc-hash = "1.1.0" From 1ef612ab3f82d02e2991fc2ead792947e2454928 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 15:25:12 +0200 Subject: [PATCH 35/42] Increase test coverage by testing `LagrangeSubgrid*` types --- .github/actions/cache-test-data/action.yml | 2 +- maintainer/download-test-data.sh | 3 + pineappl_cli/tests/convolve.rs | 71 ++++++++++++++++++++++ 3 files changed, 75 insertions(+), 1 deletion(-) diff --git a/.github/actions/cache-test-data/action.yml b/.github/actions/cache-test-data/action.yml index f0629ee67..7f1b59074 100644 --- a/.github/actions/cache-test-data/action.yml +++ b/.github/actions/cache-test-data/action.yml @@ -10,7 +10,7 @@ runs: uses: actions/cache@v4 with: path: test-data - key: test-data-v24 + key: test-data-v25 - name: Download test data if cache miss if: steps.cache.outputs.cache-hit != 'true' run: | diff --git a/maintainer/download-test-data.sh b/maintainer/download-test-data.sh index 926abfede..887ff59ad 100755 --- a/maintainer/download-test-data.sh +++ b/maintainer/download-test-data.sh @@ -21,6 +21,9 @@ files=( 'https://data.nnpdf.science/dy_high_mass/NNPDF_DY_14TEV_BSM_AFB.pineappl.lz4' 
'https://data.nnpdf.science/pineappl/test-data/ATLASWPT11-Wplus_tot.appl' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT-opt.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSparseSubgrid.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV1.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV2.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT.tar' 'https://data.nnpdf.science/pineappl/test-data/E906nlo_bin_00.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/E906nlo_bin_00.tar' diff --git a/pineappl_cli/tests/convolve.rs b/pineappl_cli/tests/convolve.rs index fd934524c..7b2f0d562 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -224,6 +224,35 @@ const NO_CHANNELS_GRID_STR: &str = 19 5 5 0.9 0.9 0.0000000e0 "; +const DYAA_SUBGRID_TEST: &str = "b x1 diff + [] [] +--+---+---+------------ + 0 0 0.1 5.4419693e-1 + 1 0.1 0.2 5.2673830e-1 + 2 0.2 0.3 5.5475101e-1 + 3 0.3 0.4 4.9474835e-1 + 4 0.4 0.5 4.6130464e-1 + 5 0.5 0.6 4.8110532e-1 + 6 0.6 0.7 4.7523978e-1 + 7 0.7 0.8 4.4445878e-1 + 8 0.8 0.9 4.2463353e-1 + 9 0.9 1 3.6203960e-1 +10 1 1.1 3.3418502e-1 +11 1.1 1.2 3.1297586e-1 +12 1.2 1.3 2.5858171e-1 +13 1.3 1.4 2.3601039e-1 +14 1.4 1.5 2.1117966e-1 +15 1.5 1.6 1.8150553e-1 +16 1.6 1.7 1.4598356e-1 +17 1.7 1.8 1.1183116e-1 +18 1.8 1.9 8.9304076e-2 +19 1.9 2 6.8197346e-2 +20 2 2.1 5.0815965e-2 +21 2.1 2.2 3.4465399e-2 +22 2.2 2.3 1.9973225e-2 +23 2.3 2.4 8.0989654e-3 +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -475,3 +504,45 @@ fn issue_334() { .success() .stdout(NO_CHANNELS_GRID_STR); } + +#[test] +fn lagrange_subgrid_v1() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + "../test-data/DYAA_0.4.1_LagrangeSubgridV1.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + 
.stdout(DYAA_SUBGRID_TEST); +} + +#[test] +fn lagrange_subgrid_v2() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + "../test-data/DYAA_0.4.1_LagrangeSubgridV2.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DYAA_SUBGRID_TEST); +} + +#[test] +fn lagrange_sparse_subgrid_v1() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + "../test-data/DYAA_0.4.1_LagrangeSparseSubgridV1.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DYAA_SUBGRID_TEST); +} From ae22797c37d61bffb39442127da338c6c9f22e7b Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 15:30:41 +0200 Subject: [PATCH 36/42] Fix file name of the new test data --- maintainer/download-test-data.sh | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/maintainer/download-test-data.sh b/maintainer/download-test-data.sh index 887ff59ad..55ba40971 100755 --- a/maintainer/download-test-data.sh +++ b/maintainer/download-test-data.sh @@ -21,7 +21,7 @@ files=( 'https://data.nnpdf.science/dy_high_mass/NNPDF_DY_14TEV_BSM_AFB.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/ATLASWPT11-Wplus_tot.appl' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT-opt.pineappl.lz4' - 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSparseSubgrid.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSparseSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV2.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT.tar' From 3ed35007263bf2a99bf35ead5d4a1a59c5faf166 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 15:37:29 +0200 Subject: [PATCH 37/42] Increase cache number --- .github/actions/cache-test-data/action.yml | 3 
++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/actions/cache-test-data/action.yml b/.github/actions/cache-test-data/action.yml index 7f1b59074..990c794a7 100644 --- a/.github/actions/cache-test-data/action.yml +++ b/.github/actions/cache-test-data/action.yml @@ -10,10 +10,11 @@ runs: uses: actions/cache@v4 with: path: test-data - key: test-data-v25 + key: test-data-v26 - name: Download test data if cache miss if: steps.cache.outputs.cache-hit != 'true' run: | cd maintainer + # TODO: if this script fails to download some files, the cache is saved nevertheless ./download-test-data.sh shell: bash From 5f801b08a45f0255f341b04deb4c3ae7b0e547f6 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 16:02:20 +0200 Subject: [PATCH 38/42] Increase cache version to download fixed grid --- .github/actions/cache-test-data/action.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/actions/cache-test-data/action.yml b/.github/actions/cache-test-data/action.yml index 990c794a7..c228f8574 100644 --- a/.github/actions/cache-test-data/action.yml +++ b/.github/actions/cache-test-data/action.yml @@ -10,7 +10,7 @@ runs: uses: actions/cache@v4 with: path: test-data - key: test-data-v26 + key: test-data-v27 - name: Download test data if cache miss if: steps.cache.outputs.cache-hit != 'true' run: | From 4a8814f0ce3c2c653ddd5c0f2dbea421be8da3ac Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 16:19:57 +0200 Subject: [PATCH 39/42] Fix test output --- pineappl_cli/tests/convolve.rs | 33 ++++++++++++++++++++++++++++++++- 1 file changed, 32 insertions(+), 1 deletion(-) diff --git a/pineappl_cli/tests/convolve.rs b/pineappl_cli/tests/convolve.rs index 7b2f0d562..6007e8360 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -253,6 +253,37 @@ const DYAA_SUBGRID_TEST: &str = "b x1 diff 23 2.3 2.4 8.0989654e-3 "; +// TODO: why is this not the same as `DYAA_SUBGRID_V1_TEST`? 
Probably a bug in pineappl-0.4.1, +// which was used to generate the grid +const DYAA_SUBGRID_V1_TEST: &str = "b x1 diff + [] [] +--+---+---+------------ + 0 0 0.1 4.9421101e-1 + 1 0.1 0.2 5.3414733e-1 + 2 0.2 0.3 5.6047777e-1 + 3 0.3 0.4 4.9305119e-1 + 4 0.4 0.5 5.0444226e-1 + 5 0.5 0.6 4.8764547e-1 + 6 0.6 0.7 4.8822586e-1 + 7 0.7 0.8 4.3253468e-1 + 8 0.8 0.9 4.5631914e-1 + 9 0.9 1 4.4101667e-1 +10 1 1.1 3.7012152e-1 +11 1.1 1.2 3.2686684e-1 +12 1.2 1.3 2.8788858e-1 +13 1.3 1.4 2.5158100e-1 +14 1.4 1.5 1.9924476e-1 +15 1.5 1.6 1.6493339e-1 +16 1.6 1.7 1.5437771e-1 +17 1.7 1.8 1.2129587e-1 +18 1.8 1.9 9.4075605e-2 +19 1.9 2 6.2396273e-2 +20 2 2.1 4.9049203e-2 +21 2.1 2.2 3.5005153e-2 +22 2.2 2.3 2.0221966e-2 +23 2.3 2.4 5.9587519e-3 +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -516,7 +547,7 @@ fn lagrange_subgrid_v1() { ]) .assert() .success() - .stdout(DYAA_SUBGRID_TEST); + .stdout(DYAA_SUBGRID_V1_TEST); } #[test] From 923c5a617c52c2e4d82fb875ee535a1e31f0d62b Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 16:51:01 +0200 Subject: [PATCH 40/42] Increase test coverage --- .github/actions/cache-test-data/action.yml | 2 +- maintainer/download-test-data.sh | 2 + pineappl_cli/tests/convolve.rs | 57 ++++++++++++++++++++++ 3 files changed, 60 insertions(+), 1 deletion(-) diff --git a/.github/actions/cache-test-data/action.yml b/.github/actions/cache-test-data/action.yml index c228f8574..66d311897 100644 --- a/.github/actions/cache-test-data/action.yml +++ b/.github/actions/cache-test-data/action.yml @@ -10,7 +10,7 @@ runs: uses: actions/cache@v4 with: path: test-data - key: test-data-v27 + key: test-data-v28 - name: Download test data if cache miss if: steps.cache.outputs.cache-hit != 'true' run: | diff --git a/maintainer/download-test-data.sh b/maintainer/download-test-data.sh index 55ba40971..54eda42a2 100755 --- a/maintainer/download-test-data.sh +++ b/maintainer/download-test-data.sh @@ -21,9 +21,11 @@ files=( 
'https://data.nnpdf.science/dy_high_mass/NNPDF_DY_14TEV_BSM_AFB.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/ATLASWPT11-Wplus_tot.appl' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT-opt.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.3.0_LagrangeSubgrid.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSparseSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV2.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.8.7_ImportOnlySubgridV2.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT.tar' 'https://data.nnpdf.science/pineappl/test-data/E906nlo_bin_00.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/E906nlo_bin_00.tar' diff --git a/pineappl_cli/tests/convolve.rs b/pineappl_cli/tests/convolve.rs index 6007e8360..52464ed48 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -284,6 +284,35 @@ const DYAA_SUBGRID_V1_TEST: &str = "b x1 diff 23 2.3 2.4 5.9587519e-3 "; +const DYAA_IMPORT_ONLY_SUBGRID_V2_TEST: &str = "b x1 diff + [] [] +--+---+---+----------- + 0 0 0.1 2.6135143e2 + 1 0.1 0.2 2.6183087e2 + 2 0.2 0.3 2.6047594e2 + 3 0.3 0.4 2.6508580e2 + 4 0.4 0.5 2.5405348e2 + 5 0.5 0.6 2.4452454e2 + 6 0.6 0.7 2.4744331e2 + 7 0.7 0.8 2.4703713e2 + 8 0.8 0.9 2.3439723e2 + 9 0.9 1 2.6294172e2 +10 1 1.1 2.5651959e2 +11 1.1 1.2 1.9493319e2 +12 1.2 1.3 2.0817685e2 +13 1.3 1.4 1.8362573e2 +14 1.4 1.5 1.8185239e2 +15 1.5 1.6 1.5607187e2 +16 1.6 1.7 1.4492097e2 +17 1.7 1.8 9.8421742e1 +18 1.8 1.9 1.0014939e2 +19 1.9 2 7.8997062e1 +20 2 2.1 6.5452743e1 +21 2.1 2.2 5.2407224e1 +22 2.2 2.3 2.4198310e1 +23 2.3 2.4 8.6036652e0 +"; + #[test] fn help() { Command::cargo_bin("pineappl") @@ -577,3 +606,31 @@ fn lagrange_sparse_subgrid_v1() { .success() 
.stdout(DYAA_SUBGRID_TEST); } + +#[test] +fn import_only_subgrid_v2() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + "../test-data/DYAA_0.8.7_ImportOnlySubgridV2.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DYAA_IMPORT_ONLY_SUBGRID_V2_TEST); +} + +#[test] +fn lagrange_subgrid_v1_mmv1() { + Command::cargo_bin("pineappl") + .unwrap() + .args([ + "convolve", + "../test-data/DYAA_0.3.0_LagrangeSubgridV1.pineappl.lz4", + "NNPDF31_nlo_as_0118_luxqed", + ]) + .assert() + .success() + .stdout(DYAA_SUBGRID_TEST); +} From 4b50f64b65c6040e1f72d18de9ff4f4a2ec35e5d Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 17:03:06 +0200 Subject: [PATCH 41/42] Fix test-data link --- .github/actions/cache-test-data/action.yml | 2 +- maintainer/download-test-data.sh | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/actions/cache-test-data/action.yml b/.github/actions/cache-test-data/action.yml index 66d311897..da409ce7b 100644 --- a/.github/actions/cache-test-data/action.yml +++ b/.github/actions/cache-test-data/action.yml @@ -10,7 +10,7 @@ runs: uses: actions/cache@v4 with: path: test-data - key: test-data-v28 + key: test-data-v29 - name: Download test data if cache miss if: steps.cache.outputs.cache-hit != 'true' run: | diff --git a/maintainer/download-test-data.sh b/maintainer/download-test-data.sh index 54eda42a2..dd9535ef2 100755 --- a/maintainer/download-test-data.sh +++ b/maintainer/download-test-data.sh @@ -21,7 +21,7 @@ files=( 'https://data.nnpdf.science/dy_high_mass/NNPDF_DY_14TEV_BSM_AFB.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/ATLASWPT11-Wplus_tot.appl' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT-opt.pineappl.lz4' - 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.3.0_LagrangeSubgrid.pineappl.lz4' + 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.3.0_LagrangeSubgridV1.pineappl.lz4' 
'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSparseSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV2.pineappl.lz4' From 4ea366d849c637892419f82cdc13e6842930eb94 Mon Sep 17 00:00:00 2001 From: Christopher Schwan Date: Wed, 1 Apr 2026 19:07:12 +0200 Subject: [PATCH 42/42] Increase test coverage --- .github/actions/cache-test-data/action.yml | 2 +- maintainer/download-test-data.sh | 1 - pineappl_cli/tests/convolve.rs | 14 ------------ pineappl_v0/src/grid.rs | 26 ++-------------------- 4 files changed, 3 insertions(+), 40 deletions(-) diff --git a/.github/actions/cache-test-data/action.yml b/.github/actions/cache-test-data/action.yml index da409ce7b..855d67045 100644 --- a/.github/actions/cache-test-data/action.yml +++ b/.github/actions/cache-test-data/action.yml @@ -10,7 +10,7 @@ runs: uses: actions/cache@v4 with: path: test-data - key: test-data-v29 + key: test-data-v30 - name: Download test data if cache miss if: steps.cache.outputs.cache-hit != 'true' run: | diff --git a/maintainer/download-test-data.sh b/maintainer/download-test-data.sh index dd9535ef2..63cab140a 100755 --- a/maintainer/download-test-data.sh +++ b/maintainer/download-test-data.sh @@ -25,7 +25,6 @@ files=( 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSparseSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV1.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.4.1_LagrangeSubgridV2.pineappl.lz4' - 'https://data.nnpdf.science/pineappl/test-data/DYAA_0.8.7_ImportOnlySubgridV2.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/CMS_TTB_8TEV_2D_TTM_TRAP_TOT.tar' 'https://data.nnpdf.science/pineappl/test-data/E906nlo_bin_00.pineappl.lz4' 'https://data.nnpdf.science/pineappl/test-data/E906nlo_bin_00.tar' diff --git a/pineappl_cli/tests/convolve.rs 
b/pineappl_cli/tests/convolve.rs index 52464ed48..f9904feae 100644 --- a/pineappl_cli/tests/convolve.rs +++ b/pineappl_cli/tests/convolve.rs @@ -607,20 +607,6 @@ fn lagrange_sparse_subgrid_v1() { .stdout(DYAA_SUBGRID_TEST); } -#[test] -fn import_only_subgrid_v2() { - Command::cargo_bin("pineappl") - .unwrap() - .args([ - "convolve", - "../test-data/DYAA_0.8.7_ImportOnlySubgridV2.pineappl.lz4", - "NNPDF31_nlo_as_0118_luxqed", - ]) - .assert() - .success() - .stdout(DYAA_IMPORT_ONLY_SUBGRID_V2_TEST); -} - #[test] fn lagrange_subgrid_v1_mmv1() { Command::cargo_bin("pineappl") diff --git a/pineappl_v0/src/grid.rs b/pineappl_v0/src/grid.rs index f36db3727..02c1808c7 100644 --- a/pineappl_v0/src/grid.rs +++ b/pineappl_v0/src/grid.rs @@ -8,7 +8,7 @@ use super::subgrid::{SubgridEnum, SubgridParams}; use ndarray::{Array3, ArrayView3}; use serde::Deserialize; use std::collections::HashMap; -use std::io::{self, BufRead}; +use std::io::BufRead; use thiserror::Error; /// This structure represents a position (`x1`, `x2`, `q2`) in a `Subgrid` together with a @@ -32,9 +32,6 @@ pub enum GridError { /// Returned when failed to read a Grid. #[error(transparent)] ReadFailure(bincode::Error), - /// Returned while performing IO operations. - #[error(transparent)] - IoFailure(io::Error), } #[derive(Deserialize)] @@ -101,26 +98,7 @@ impl Grid { /// # Panics /// /// Panics if the grid version is not `0`. 
- pub fn read_uncompressed(mut reader: impl BufRead) -> Result { - let magic_bytes: [u8; 16] = reader.fill_buf().map_err(GridError::IoFailure)?[0..16] - .try_into() - .unwrap_or_else(|_| unreachable!()); - - let file_version = if &magic_bytes[0..8] == b"PineAPPL" { - reader.consume(16); - u64::from_le_bytes( - magic_bytes[8..16] - .try_into() - .unwrap_or_else(|_| unreachable!()), - ) - } else { - 0 - }; - - // should be guarateed not to happen, because `pineappl::grid::Grid::read` only calls this - // method if the file version matches - assert_eq!(file_version, 0); - + pub fn read_uncompressed(reader: impl BufRead) -> Result { bincode::deserialize_from(reader).map_err(GridError::ReadFailure) }