// Copyright 2015-2018 Hans Dembinski
//
// Distributed under the Boost Software License, Version 1.0.
// (See accompanying file LICENSE_1_0.txt
// or copy at http://www.boost.org/LICENSE_1_0.txt)

#ifndef BOOST_HISTOGRAM_DETAIL_AXES_HPP
#define BOOST_HISTOGRAM_DETAIL_AXES_HPP

#include <algorithm>
#include <boost/core/nvp.hpp>
#include <boost/histogram/axis/traits.hpp>
#include <boost/histogram/axis/variant.hpp>
#include <boost/histogram/detail/detect.hpp>
#include <boost/histogram/detail/make_default.hpp>
#include <boost/histogram/detail/nonmember_container_access.hpp>
#include <boost/histogram/detail/optional_index.hpp>
#include <boost/histogram/detail/relaxed_equal.hpp>
#include <boost/histogram/detail/relaxed_tuple_size.hpp>
#include <boost/histogram/detail/sub_array.hpp>
#include <boost/histogram/fwd.hpp>
#include <boost/mp11/algorithm.hpp>
#include <boost/mp11/integer_sequence.hpp>
#include <boost/mp11/list.hpp>
#include <boost/mp11/mp_with_index.hpp>
#include <boost/mp11/tuple.hpp>
#include <boost/mp11/utility.hpp>
#include <boost/throw_exception.hpp>
#include <initializer_list>
#include <iterator>
#include <stdexcept>
#include <string>
#include <tuple>
#include <type_traits>
#include <vector>

namespace boost {
namespace histogram {
namespace detail {

template <class T, class Unary>
void for_each_axis_impl(dynamic_size, T& t, Unary& p) {
  for (auto& a : t) axis::visit(p, a);
}

template <class N, class T, class Unary>
void for_each_axis_impl(N, T& t, Unary& p) {
  mp11::tuple_for_each(t, p);
}

// also matches const T and const Unary
template <class T, class Unary>
void for_each_axis(T&& t, Unary&& p) {
  for_each_axis_impl(relaxed_tuple_size(t), t, p);
}

// merge if a and b are discrete and growing
struct axis_merger {
  template <class T, class U>
  T operator()(const T& a, const U& u) {
    const T* bp = ptr_cast<T>(&u);
    if (!bp) BOOST_THROW_EXCEPTION(std::invalid_argument("axes not mergable"));
    using O = axis::traits::get_options<T>;
    constexpr bool discrete_and_growing =
        axis::traits::is_continuous<T>::value == false && O::test(axis::option::growth);
    return impl(mp11::mp_bool<discrete_and_growing>{}, a, *bp);
  }

  template <class T>
  T impl(std::false_type, const T& a, const T& b) {
    if (!relaxed_equal{}(a, b))
      BOOST_THROW_EXCEPTION(std::invalid_argument("axes not mergable"));
    return a;
  }

  template <class T>
  T impl(std::true_type, const T& a, const T& b) {
    if (relaxed_equal{}(axis::traits::metadata(a), axis::traits::metadata(b))) {
      auto r = a;
      if (axis::traits::is_ordered<T>::value) {
        r.update(b.value(0));
        r.update(b.value(b.size() - 1));
      } else
        for (auto&& v : b) r.update(v);
      return r;
    }
    return impl(std::false_type{}, a, b);
  }
};

// create empty dynamic axis which can store any axes types from the argument
template <class T>
auto make_empty_dynamic_axes(const T& axes) {
  return make_default(axes);
}

template <class... Ts>
auto make_empty_dynamic_axes(const std::tuple<Ts...>&) {
  using namespace ::boost::mp11;
  using L = mp_unique<axis::variant<Ts...>>;
  // return std::vector<axis::variant<Axis0, Axis1, ...>>, or std::vector<Axis0> if there
  // is only one unique axis type
  return std::vector<mp_if_c<(mp_size<L>::value == 1), mp_first<L>, L>>{};
}

template <class T, class Functor, std::size_t... Is>
auto axes_transform_impl(const T& t, Functor&& f, mp11::index_sequence<Is...>) {
  return std::make_tuple(f(Is, std::get<Is>(t))...);
}

// warning: sequential order of functor execution is platform-dependent!
template <class... Ts, class Functor>
auto axes_transform(const std::tuple<Ts...>& old_axes, Functor&& f) {
  return axes_transform_impl(old_axes, std::forward<Functor>(f),
                             mp11::make_index_sequence<sizeof...(Ts)>{});
}

// changing axes type is not supported
template <class T, class Functor>
T axes_transform(const T& old_axes, Functor&& f) {
  T axes = make_default(old_axes);
  axes.reserve(old_axes.size());
  for_each_axis(old_axes, [&](const auto& a) { axes.emplace_back(f(axes.size(), a)); });
  return axes;
}

template <class... Ts, class Binary, std::size_t... Is>
std::tuple<Ts...> axes_transform_impl(const std::tuple<Ts...>& lhs,
                                      const std::tuple<Ts...>& rhs, Binary&& bin,
                                      mp11::index_sequence<Is...>) {
  return std::make_tuple(bin(std::get<Is>(lhs), std::get<Is>(rhs))...);
}

template <class... Ts, class Binary>
std::tuple<Ts...> axes_transform(const std::tuple<Ts...>& lhs,
                                 const std::tuple<Ts...>& rhs, Binary&& bin) {
  return axes_transform_impl(lhs, rhs, bin, mp11::make_index_sequence<sizeof...(Ts)>{});
}

template <class T, class Binary>
T axes_transform(const T& lhs, const T& rhs, Binary&& bin) {
  T ax = make_default(lhs);
  ax.reserve(lhs.size());
  using std::begin;
  auto ir = begin(rhs);
  for (auto&& li : lhs) {
    axis::visit(
        [&](const auto& li) {
          axis::visit([&](const auto& ri) { ax.emplace_back(bin(li, ri)); }, *ir);
        },
        li);
    ++ir;
  }
  return ax;
}

template <class T>
unsigned axes_rank(const T& axes) {
  using std::begin;
  using std::end;
  return static_cast<unsigned>(std::distance(begin(axes), end(axes)));
}

template <class... Ts>
constexpr unsigned axes_rank(const std::tuple<Ts...>&) {
  return static_cast<unsigned>(sizeof...(Ts));
}

template <class T>
void throw_if_axes_is_too_large(const T& axes) {
  if (axes_rank(axes) > BOOST_HISTOGRAM_DETAIL_AXES_LIMIT)
    BOOST_THROW_EXCEPTION(
        std::invalid_argument("length of axis vector exceeds internal buffers, "
                              "recompile with "
                              "-DBOOST_HISTOGRAM_DETAIL_AXES_LIMIT=<new max size> "
                              "to increase internal buffers"));
}

// tuple is never too large because internal buffers adapt to size of tuple
template <class... Ts>
void throw_if_axes_is_too_large(const std::tuple<Ts...>&) {}

template <unsigned N, class... Ts>
decltype(auto) axis_get(std::tuple<Ts...>& axes) {
  return std::get<N>(axes);
}

template <unsigned N, class... Ts>
decltype(auto) axis_get(const std::tuple<Ts...>& axes) {
  return std::get<N>(axes);
}

template <unsigned N, class T>
decltype(auto) axis_get(T& axes) {
  return axes[N];
}

template <unsigned N, class T>
decltype(auto) axis_get(const T& axes) {
  return axes[N];
}

template <class... Ts>
auto axis_get(std::tuple<Ts...>& axes, const unsigned i) {
  constexpr auto S = sizeof...(Ts);
  using V = mp11::mp_unique<axis::variant<Ts*...>>;
  return mp11::mp_with_index<S>(i, [&axes](auto i) { return V(&std::get<i>(axes)); });
}

template <class... Ts>
auto axis_get(const std::tuple<Ts...>& axes, const unsigned i) {
  constexpr auto S = sizeof...(Ts);
  using V = mp11::mp_unique<axis::variant<const Ts*...>>;
  return mp11::mp_with_index<S>(i, [&axes](auto i) { return V(&std::get<i>(axes)); });
}

template <class T>
decltype(auto) axis_get(T& axes, const unsigned i) {
  return axes[i];
}

template <class T>
decltype(auto) axis_get(const T& axes, const unsigned i) {
  return axes[i];
}

template <class T, class U, std::size_t... Is>
bool axes_equal_impl(const T& t, const U& u, mp11::index_sequence<Is...>) noexcept {
  bool result = true;
  // operator folding emulation
  (void)std::initializer_list<bool>{
      (result &= relaxed_equal{}(std::get<Is>(t), std::get<Is>(u)))...};
  return result;
}

template <class... Ts, class... Us>
bool axes_equal_impl(const std::tuple<Ts...>& t, const std::tuple<Us...>& u) noexcept {
  return axes_equal_impl(
      t, u, mp11::make_index_sequence<(std::min)(sizeof...(Ts), sizeof...(Us))>{});
}

template <class... Ts, class U>
bool axes_equal_impl(const std::tuple<Ts...>& t, const U& u) noexcept {
  using std::begin;
  auto iu = begin(u);
  bool result = true;
  mp11::tuple_for_each(t, [&](const auto& ti) {
    axis::visit([&](const auto& ui) { result &= relaxed_equal{}(ti, ui); }, *iu);
    ++iu;
  });
  return result;
}

template <class T, class... Us>
bool axes_equal_impl(const T& t, const std::tuple<Us...>& u) noexcept {
  return axes_equal_impl(u, t);
}

template <class T, class U>
bool axes_equal_impl(const T& t, const U& u) noexcept {
  using std::begin;
  auto iu = begin(u);
  bool result = true;
  for (auto&& ti : t) {
    axis::visit(
        [&](const auto& ti) {
          axis::visit([&](const auto& ui) { result &= relaxed_equal{}(ti, ui); }, *iu);
        },
        ti);
    ++iu;
  }
  return result;
}

template <class T, class U>
bool axes_equal(const T& t, const U& u) noexcept {
  return axes_rank(t) == axes_rank(u) && axes_equal_impl(t, u);
}

// enable_if_t needed by msvc :(
template <class... Ts, class... Us>
std::enable_if_t<!(std::is_same<std::tuple<Ts...>, std::tuple<Us...>>::value)>
axes_assign(std::tuple<Ts...>&, const std::tuple<Us...>&) {
  BOOST_THROW_EXCEPTION(std::invalid_argument("cannot assign axes, types do not match"));
}

template <class... Ts>
void axes_assign(std::tuple<Ts...>& t, const std::tuple<Ts...>& u) {
  t = u;
}

template <class... Ts, class U>
void axes_assign(std::tuple<Ts...>& t, const U& u) {
  if (sizeof...(Ts) == detail::size(u)) {
    using std::begin;
    auto iu = begin(u);
    mp11::tuple_for_each(t, [&](auto& ti) {
      using T = std::decay_t<decltype(ti)>;
      ti = axis::get<T>(*iu);
      ++iu;
    });
    return;
  }
  BOOST_THROW_EXCEPTION(std::invalid_argument("cannot assign axes, sizes do not match"));
}

template <class T, class... Us>
void axes_assign(T& t, const std::tuple<Us...>& u) {
  // resize instead of reserve, because t may not be empty and we want exact capacity
  t.resize(sizeof...(Us));
  using std::begin;
  auto it = begin(t);
  mp11::tuple_for_each(u, [&](const auto& ui) { *it++ = ui; });
}

template <class T, class U>
void axes_assign(T& t, const U& u) {
  t.assign(u.begin(), u.end());
}

template <class Archive, class T>
void axes_serialize(Archive& ar, T& axes) {
  ar& make_nvp("axes", axes);
}

template <class Archive, class... Ts>
void axes_serialize(Archive& ar, std::tuple<Ts...>& axes) {
  // needed to keep serialization format backward compatible
  struct proxy {
    std::tuple<Ts...>& t;
    void serialize(Archive& ar, unsigned /* version */) {
      mp11::tuple_for_each(t, [&ar](auto& x) { ar& make_nvp("item", x); });
    }
  };
  proxy p{axes};
  ar& make_nvp("axes", p);
}

// total number of bins including *flow bins
template <class T>
std::size_t bincount(const T& axes) {
  std::size_t n = 1;
  for_each_axis(axes, [&n](const auto& a) {
    const auto old = n;
    const auto s = axis::traits::extent(a);
    n *= s;
    if (s > 0 && n < old) BOOST_THROW_EXCEPTION(std::overflow_error("bincount overflow"));
  });
  return n;
}

/** Initial offset for the linear index.

  This precomputes an offset for the global index so that axis index = -1 addresses the
  first entry in the storage. Example: a one-dim. histogram with an underflow bin. The
  offset is 1, the stride is 1, and global_index = offset + axis_index * stride == 0
  addresses the first element of the storage.

  Using the offset makes the hot inner loop that computes the global_index simpler and
  thus faster, because we do not have to branch for each axis to check whether it has an
  underflow bin.

  The offset is set to an invalid value when the histogram contains at least one growing
  axis, because this optimization cannot be used then; see detail/linearize.hpp, where
  linearize_growth is called in that case.
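
  A worked example may help (an illustrative sketch based on the code below, assuming
  both axes keep their default underflow and overflow bins): a two-dim. histogram with
  axes of 3 and 4 bins has extents 5 and 6. The loop adds stride 1 for the first axis
  and stride 5 for the second, so offset == 6. The linear index
  global_index = 6 + i0 * 1 + i1 * 5 then maps the pair of underflow indices
  (i0, i1) == (-1, -1) to storage cell 0 without any per-axis branching.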
*/
template <class T>
std::size_t offset(const T& axes) {
  std::size_t n = 0;
  auto stride = static_cast<std::size_t>(1);
  for_each_axis(axes, [&](const auto& a) {
    if (axis::traits::options(a) & axis::option::growth)
      n = invalid_index;
    else if (n != invalid_index && axis::traits::options(a) & axis::option::underflow)
      n += stride;
    stride *= axis::traits::extent(a);
  });
  return n;
}

// make default-constructed buffer (no initialization for POD types)
template <class T, class A>
auto make_stack_buffer(const A& a) {
  return sub_array<T, buffer_size<A>::value>(axes_rank(a));
}

// make buffer with elements initialized to t
template <class T, class A>
auto make_stack_buffer(const A& a, const T& t) {
  return sub_array<T, buffer_size<A>::value>(axes_rank(a), t);
}

template <class T>
using has_underflow =
    decltype(axis::traits::get_options<T>::test(axis::option::underflow));

template <class T>
using is_growing = decltype(axis::traits::get_options<T>::test(axis::option::growth));

template <class T>
using is_not_inclusive = mp11::mp_not<axis::traits::is_inclusive<T>>;

// for vector<T>
template <class T>
struct axis_types_impl {
  using type = mp11::mp_list<std::decay_t<T>>;
};

// for vector<variant<Ts...>>
template <class... Ts>
struct axis_types_impl<axis::variant<Ts...>> {
  using type = mp11::mp_list<std::decay_t<Ts>...>;
};

// for tuple<Ts...>
template <class... Ts>
struct axis_types_impl<std::tuple<Ts...>> {
  using type = mp11::mp_list<std::decay_t<Ts>...>;
};

template <class T>
using axis_types =
    typename axis_types_impl<mp11::mp_if<is_vector_like<T>, mp11::mp_first<T>, T>>::type;

template