Commit c2a20833 authored by Edwin Carlinet's avatar Edwin Carlinet
Browse files

Clean legacy maxtree

parent 14046c08
#pragma once
#include <mln/core/assert.hpp>
#include <array>
#include <memory>
#include <numeric>
#include <vector>
namespace mln
{
/// \brief Bounded hierarchical queue (or stack) over a fixed number of levels.
///
/// All levels share one contiguous buffer; the capacity of each level is
/// given by a histogram passed to init(). This primary template keeps the
/// per-level head/tail pointers in std::array; the partial specialization
/// below (enabled for nLevel > 16) switches to std::vector instead.
///
/// \tparam T         Element type.
/// \tparam nLevel    Number of levels.
/// \tparam Allocator Allocator for the underlying buffer.
/// \tparam queue     If true, pop/top act FIFO (queue); otherwise LIFO (stack).
template <typename T, std::size_t nLevel, typename Allocator = std::allocator<T>, bool queue = false,
typename Enable = void>
struct bounded_hqueue
{
bounded_hqueue();
explicit bounded_hqueue(const size_t* histo); // histo: nLevel per-level capacities
~bounded_hqueue();
void init(const size_t* histo); // allocate storage; call once before any push
bool empty(unsigned l) const;
void push_at_level(const T& x, unsigned l);
T top_at_level(unsigned l) const;
T pop_at_level(unsigned l);
private:
std::array<T*, nLevel> m_head; // first live element of each level
std::array<T*, nLevel> m_tail; // one-past-last live element of each level
Allocator m_allocator;
T* m_q; // owned contiguous buffer (null until init)
T* m_end; // one-past-end of the buffer
};
/// Partial specialization for many levels (nLevel > 16): the per-level
/// head/tail pointers are stored in std::vector rather than std::array to
/// avoid a large inline footprint. Same interface and semantics as the
/// primary template.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
struct bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>
{
bounded_hqueue();
explicit bounded_hqueue(const size_t* histo); // histo: nLevel per-level capacities
~bounded_hqueue();
void init(const size_t* histo); // allocate storage; call once before any push
bool empty(unsigned l) const;
void push_at_level(const T& x, unsigned l);
T top_at_level(unsigned l) const;
T pop_at_level(unsigned l);
private:
std::vector<T*> m_head; // first live element of each level (size nLevel)
std::vector<T*> m_tail; // one-past-last live element of each level (size nLevel)
Allocator m_allocator;
T* m_q; // owned contiguous buffer (null until init)
T* m_end; // one-past-end of the buffer
};
/********************/
/** Implementation **/
/********************/
/// Default constructor: no storage is allocated; init() must be called
/// before any other member function is used.
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline bounded_hqueue<T, nLevel, Allocator, queue, Enable>::bounded_hqueue()
  : m_head{{
        nullptr,
    }}
  , m_tail{{
        nullptr,
    }}
  , m_q(nullptr)
  , m_end(nullptr)
{
}
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline bounded_hqueue<T, nLevel, Allocator, queue, Enable>::bounded_hqueue(const size_t* histo)
{
init(histo);
}
/// Destroy every element still alive in each level, then release the buffer.
/// std::allocator_traits is used because the Allocator::destroy/deallocate
/// member forms were deprecated in C++17 and removed in C++20.
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline bounded_hqueue<T, nLevel, Allocator, queue, Enable>::~bounded_hqueue()
{
  if (m_q != nullptr)
  {
    for (std::size_t i = 0; i < nLevel; ++i)
      for (T* ptr = m_head[i]; ptr != m_tail[i]; ++ptr)
        std::allocator_traits<Allocator>::destroy(m_allocator, ptr);
    std::allocator_traits<Allocator>::deallocate(m_allocator, m_q, m_end - m_q);
  }
}
/// Allocate one contiguous buffer sized by the histogram and carve it into
/// per-level slices (head == tail for each level: all levels start empty).
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline void bounded_hqueue<T, nLevel, Allocator, queue, Enable>::init(const size_t* histo)
{
  mln_precondition(m_q == nullptr);
  // Accumulate in size_t (the histogram's own type): the old `0u` seed made
  // the sum unsigned and could overflow on large inputs. This also matches
  // the nLevel > 16 specialization.
  std::size_t nelements = std::accumulate(histo, histo + nLevel, std::size_t(0));
  m_q = std::allocator_traits<Allocator>::allocate(m_allocator, nelements);
  m_end = m_q + nelements;
  std::size_t n = 0;
  for (std::size_t i = 0; i < nLevel; ++i)
  {
    m_head[i] = m_tail[i] = m_q + n;
    n += histo[i];
  }
}
/// A level is empty when its head has caught up with its tail.
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline bool bounded_hqueue<T, nLevel, Allocator, queue, Enable>::empty(unsigned level) const
{
  mln_precondition(level < nLevel);
  const T* first = m_head[level];
  const T* last = m_tail[level];
  return first == last;
}
/// Copy-construct \p x at the tail of \p level.
/// std::allocator_traits replaces the Allocator::construct member form,
/// deprecated in C++17 and removed in C++20.
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline void bounded_hqueue<T, nLevel, Allocator, queue, Enable>::push_at_level(const T& x, unsigned level)
{
  mln_precondition(level < nLevel);
  // The level must not overflow its slice (bounded by the next level's head).
  mln_precondition(m_tail[level] < m_end and (level == nLevel - 1 or m_tail[level] < m_head[level + 1]));
  std::allocator_traits<Allocator>::construct(m_allocator, m_tail[level]++, x);
}
/// Remove and return an element of \p level: from the front in queue (FIFO)
/// mode, from the back in stack (LIFO) mode. `queue` is a template constant,
/// so the compiler keeps only one branch.
/// std::allocator_traits replaces the deprecated Allocator::destroy member.
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline T bounded_hqueue<T, nLevel, Allocator, queue, Enable>::pop_at_level(unsigned level)
{
  mln_precondition(level < nLevel);
  mln_precondition(!empty(level));
  if (queue)
  {
    T x = std::move(*(m_head[level]));
    std::allocator_traits<Allocator>::destroy(m_allocator, m_head[level]++);
    return x;
  }
  else
  {
    T x = std::move(*(--m_tail[level]));
    std::allocator_traits<Allocator>::destroy(m_allocator, m_tail[level]);
    return x;
  }
}
/// Return (a copy of) the next element of \p level without removing it:
/// the front in queue (FIFO) mode, the back in stack (LIFO) mode.
template <typename T, std::size_t nLevel, typename Allocator, bool queue, typename Enable>
inline T bounded_hqueue<T, nLevel, Allocator, queue, Enable>::top_at_level(unsigned level) const
{
  mln_precondition(level < nLevel);
  mln_precondition(!empty(level));
  return queue ? *(m_head[level]) : *(m_tail[level] - 1);
}
/*********************************/
/* Implementation specialization */
/*********************************/
/// Default constructor: no storage is allocated; init() must be called first.
/// The head/tail vectors are built directly with nLevel null entries instead
/// of default-construct + resize.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::bounded_hqueue()
  : m_head(nLevel, nullptr)
  , m_tail(nLevel, nullptr)
  , m_q(nullptr)
  , m_end(nullptr)
{
}
/// Construct and allocate immediately from a per-level histogram.
/// The original body called init() with m_head/m_tail still EMPTY vectors,
/// so init()'s `m_head[i] = ...` indexed out of bounds (undefined behavior),
/// and m_q was read uninitialized by init()'s precondition. Size the vectors
/// and null the pointers before delegating.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::bounded_hqueue(
    const size_t* histo)
  : m_head(nLevel, nullptr)
  , m_tail(nLevel, nullptr)
  , m_q(nullptr)
  , m_end(nullptr)
{
  init(histo);
}
/// Destroy every element still alive in each level, then release the buffer.
/// std::allocator_traits is used because the Allocator::destroy/deallocate
/// member forms were deprecated in C++17 and removed in C++20.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::~bounded_hqueue()
{
  if (m_q != nullptr)
  {
    for (std::size_t i = 0; i < nLevel; ++i)
      for (T* ptr = m_head[i]; ptr != m_tail[i]; ++ptr)
        std::allocator_traits<Allocator>::destroy(m_allocator, ptr);
    std::allocator_traits<Allocator>::deallocate(m_allocator, m_q, m_end - m_q);
  }
}
/// Allocate one contiguous buffer sized by the histogram and carve it into
/// per-level slices (head == tail for each level: all levels start empty).
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline void bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::init(
    const size_t* histo)
{
  mln_precondition(m_q == nullptr);
  size_t nelements = std::accumulate(histo, histo + nLevel, size_t(0));
  m_q = std::allocator_traits<Allocator>::allocate(m_allocator, nelements);
  m_end = m_q + nelements;
  // Keep the running offset in size_t: the old `unsigned n` combined with
  // static_cast<unsigned>(histo[i]) silently truncated large histograms.
  size_t n = 0;
  for (std::size_t i = 0; i < nLevel; ++i)
  {
    m_head[i] = m_tail[i] = m_q + n;
    n += histo[i];
  }
}
/// A level is empty when its head has caught up with its tail.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline bool bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::empty(
    unsigned level) const
{
  mln_precondition(level < nLevel);
  const T* first = m_head[level];
  const T* last = m_tail[level];
  return first == last;
}
/// Copy-construct \p x at the tail of \p level.
/// std::allocator_traits replaces the Allocator::construct member form,
/// deprecated in C++17 and removed in C++20.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline void bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::push_at_level(
    const T& x, unsigned level)
{
  mln_precondition(level < nLevel);
  // The level must not overflow its slice (bounded by the next level's head).
  mln_precondition(m_tail[level] < m_end and (level == nLevel - 1 or m_tail[level] < m_head[level + 1]));
  std::allocator_traits<Allocator>::construct(m_allocator, m_tail[level]++, x);
}
/// Remove and return an element of \p level: from the front in queue (FIFO)
/// mode, from the back in stack (LIFO) mode. `queue` is a template constant,
/// so the compiler keeps only one branch.
/// std::allocator_traits replaces the deprecated Allocator::destroy member.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline T bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::pop_at_level(
    unsigned level)
{
  mln_precondition(level < nLevel);
  mln_precondition(!empty(level));
  if (queue)
  {
    T x = std::move(*(m_head[level]));
    std::allocator_traits<Allocator>::destroy(m_allocator, m_head[level]++);
    return x;
  }
  else
  {
    T x = std::move(*(--m_tail[level]));
    std::allocator_traits<Allocator>::destroy(m_allocator, m_tail[level]);
    return x;
  }
}
/// Return (a copy of) the next element of \p level without removing it:
/// the front in queue (FIFO) mode, the back in stack (LIFO) mode.
template <typename T, std::size_t nLevel, typename Allocator, bool queue>
inline T bounded_hqueue<T, nLevel, Allocator, queue, typename std::enable_if<(nLevel > 16)>::type>::top_at_level(
    unsigned level) const
{
  mln_precondition(level < nLevel);
  mln_precondition(!empty(level));
  return queue ? *(m_head[level]) : *(m_tail[level] - 1);
}
} // namespace mln
#pragma once
#include <mln/io/imread.hpp>
#include <mln/io/imsave.hpp>
#include <mln/morpho/component_tree/component_tree.hpp>
#include <fstream>
#include <iosfwd>
namespace mln
{
namespace morpho
{
/// \brief Serialize \p tree to the stream \p os (text header + raw arrays).
template <class P, class AssociativeMap>
void save(const component_tree<P, AssociativeMap>& tree, std::ostream& os);
/// \brief Deserialize into \p tree from a stream produced by save().
template <class P, class AssociativeMap>
void load(std::istream& is, component_tree<P, AssociativeMap>& tree);
/// \brief Deserialize into \p tree from the file at path \p s.
template <class P, class AssociativeMap>
void load(const std::string& s, component_tree<P, AssociativeMap>& tree);
/*************************************/
/***** Implementation *****/
/*************************************/
/*
// Do not store extra information (version) for component tree nodes.
// This is redundant with the tree class.
namespace boost {
namespace serialization {
template <class P, class AssociativeMap>
struct implementation_level<const internal::component_tree_node<P, AssociativeMap> >
{
typedef mpl::integral_c_tag tag;
typedef mpl::int_<boost::serialization::object_serializable> type;
BOOST_STATIC_CONSTANT(int, value = type::value);
};
template <class P, class AssociativeMap>
struct tracking_level<const internal::component_tree_node<P, AssociativeMap> >
{
typedef mpl::integral_c_tag tag;
typedef mpl::int_<track_never> type;
BOOST_STATIC_CONSTANT(int, value = type::value);
};
template <class P, class AssociativeMap>
struct is_bitwise_serializable<const internal::component_tree_node<P, AssociativeMap> >
: std::true_type
{
};
}
}
*/
/// Serialize \p tree to \p s.
/// Format: four text header fields (node count, point count, ordered flag,
/// root id), one per line, followed by the raw node array, the raw S array
/// and the pixel map. The reader (load) relies on this exact layout.
template <class P, class AssociativeMap>
void save(const component_tree<P, AssociativeMap>& tree, std::ostream& s)
{
  const internal::component_tree_data<P, AssociativeMap>* data = tree._get_data();
  s << static_cast<unsigned>(data->m_nodes.size()) << std::endl;
  s << static_cast<unsigned>(data->m_S.size()) << std::endl;
  s << static_cast<int>(data->m_pset_ordered) << std::endl;
  s << static_cast<unsigned>(tree.get_root_id()) << std::endl;
  // Write the node array. vector::data() replaces &v[0], which is undefined
  // behavior when the vector is empty; the emptiness guards keep write()
  // from ever seeing a null pointer.
  if (!data->m_nodes.empty())
    s.write(reinterpret_cast<const char*>(data->m_nodes.data()),
            data->m_nodes.size() * sizeof(internal::component_tree_node));
  // Write the S array.
  if (!data->m_S.empty())
    s.write(reinterpret_cast<const char*>(data->m_S.data()), data->m_S.size() * sizeof(P));
  // Write the point -> node map image.
  io::imsave(data->m_pmap, s);
  s.flush();
}
/// Deserialize \p tree from \p s (inverse of save(): text header, then raw
/// node array, raw S array and pixel map).
template <class P, class AssociativeMap>
void load(std::istream& s, component_tree<P, AssociativeMap>& tree)
{
  internal::component_tree_data<P, AssociativeMap>* data = tree._get_data();
  unsigned nnodes;
  unsigned npoints;
  int ordered;
  unsigned root;
  s >> nnodes >> npoints >> ordered >> root;
  s.ignore(1); // skip the newline that terminates the text header
  data->m_nodes.resize(nnodes);
  data->m_S.resize(npoints);
  data->m_pset_ordered = ordered;
  // Read the raw arrays. vector::data() replaces &v[0], which is undefined
  // behavior on an empty vector; the size guards keep read() away from a
  // null pointer when a count is zero.
  if (nnodes > 0)
    s.read(reinterpret_cast<char*>(data->m_nodes.data()), nnodes * sizeof(internal::component_tree_node));
  if (npoints > 0)
    s.read(reinterpret_cast<char*>(data->m_S.data()), npoints * sizeof(P));
  // Read the point -> node map image.
  io::imread(s, data->m_pmap);
  // Re-root the tree on the stored root id.
  tree = tree.get_subtree(root);
}
/// Load a tree from the file at path \p s.
/// The stream is opened in binary mode: the payload contains raw struct
/// bytes that must not undergo newline translation (text mode would corrupt
/// them on Windows).
template <class P, class AssociativeMap>
void load(const std::string& s, component_tree<P, AssociativeMap>& tree)
{
  std::ifstream f(s, std::ios::binary);
  load(f, tree);
}
template <class P, class AssociativeMap>
void save(component_tree<P, AssociativeMap>& tree, const std::string& s)
{
std::ofstream f(s);
save(tree, f);
}
} // namespace morpho
} // namespace mln
#pragma once
#include <mln/core/image/image.hpp>
#include <mln/core/neighborhood/neighborhood.hpp>
#include <mln/core/trace.hpp>
#include <mln/morpho/maxtree/maxtree_queue.hpp>
namespace mln
{
namespace morpho
{
///
/// \brief Compute a maxtree as a component tree where nodes
/// contain indexes instead of points.
///
/// \param ima The image
/// \param nbh The neighborhood
/// \param cmp A strict weak order on values
/// \return The maxtree of \p ima, indexed by pixel indexes.
template <typename I, typename N, typename StrictWeakOrdering = std::less<mln_value(I)>>
component_tree<typename I::size_type, mln_ch_value(I, unsigned)>
maxtree_indexes(const Image<I>& ima, const Neighborhood<N>& nbh, StrictWeakOrdering cmp = StrictWeakOrdering());
///
/// \brief Compute a mintree as a component tree where nodes
/// contain indexes instead of points.
///
/// \param ima The image
/// \param nbh The neighborhood
/// \return The mintree of \p ima (the maxtree for the reversed value order).
template <typename I, typename N>
component_tree<typename I::size_type, mln_ch_value(I, unsigned)> mintree_indexes(const Image<I>& ima,
const Neighborhood<N>& nbh);
/*****************************/
/** Implementation **/
/*****************************/
/// Dispatch to the queue-based maxtree implementation, bracketed by the
/// library's trace scope (mln_entering / mln_exiting).
template <typename I, typename N, typename StrictWeakOrdering>
component_tree<typename I::size_type, mln_ch_value(I, unsigned)>
maxtree_indexes(const Image<I>& ima, const Neighborhood<N>& nbh, StrictWeakOrdering cmp)
{
  mln_entering("mln::morpho::maxtree_indexes");
  auto tree = impl::maxtree_queue_indexes(exact(ima), exact(nbh), cmp);
  mln_exiting();
  return tree;
}
/// A mintree is simply the maxtree computed with the reversed value order
/// (std::greater instead of the default std::less).
template <typename I, typename N>
component_tree<typename I::size_type, mln_ch_value(I, unsigned)> mintree_indexes(const Image<I>& ima,
                                                                                 const Neighborhood<N>& nbh)
{
  mln_entering("mln::morpho::mintree_indexes");
  std::greater<mln_value(I)> reversed_order;
  auto tree = impl::maxtree_queue_indexes(exact(ima), exact(nbh), reversed_order);
  mln_exiting();
  return tree;
}
} // namespace morpho
} // namespace mln
#pragma once
#include <mln/core/extension/fill.hpp>
#include <mln/core/image/image.hpp>
#include <mln/core/value/value_traits.hpp>
#include <mln/core/wrt_offset.hpp>
#include <mln/morpho/datastruct/component_tree.hpp>
#include <mln/morpho/pqueue_fast.hpp>
#include <queue>
#include <stack>
#include <vector>
namespace mln
{
namespace morpho
{
namespace impl
{
template <typename I, typename Neighborhood, typename StrictWeakOrdering>
component_tree<typename I::size_type, mln_ch_value(I, unsigned)>
maxtree_queue_indexes(const I& ima, const Neighborhood& nbh, StrictWeakOrdering cmp)
{
typedef typename I::size_type size_type;
typedef mln_value(I) V;
// 1. Create the component tree
// Allocate enough memory to prevent reallocation
// {
typedef mln_ch_value(I, unsigned) map_t;
typedef morpho::internal::component_tree_node node_t;
typedef component_tree<size_type, map_t> tree_t;
tree_t ctree;
auto& nodes = ctree._get_data()->m_nodes;
auto& S = ctree._get_data()->m_S;
auto& pmap = ctree._get_data()->m_pmap;
size_type sz = ima.domain().size();
nodes.resize(sz + 1); // grow from the back
S.resize(sz); //
pmap = imchvalue<size_type>(ima); //
size_type spos = sz;
// }
// 1.b some methods to handle the nodes array like a stack
// {
size_type stack_top_position = 0;
size_type npos = 1; // The current nodes vector size
auto stack_empty = [&stack_top_position]() { return stack_top_position == 0; };
auto stack_top = [&nodes, &stack_top_position]() -> node_t& {
mln_precondition(stack_top_position != 0);
return nodes[stack_top_position];
};
auto stack_push = [&nodes, &stack_top_position, &npos](const node_t& x) {
mln_assertion(stack_top_position < npos);
nodes[npos] = x;
nodes[npos].m_parent = stack_top_position;
stack_top_position = npos++;
};
auto stack_pop = [&nodes, &stack_top_position]() {
mln_precondition(stack_top_position != 0);
stack_top_position = nodes[stack_top_position].m_parent;
};
// }
// 2. Create auxiliary structure for the computation
// {
mln_ch_value(I, bool) deja_vu = imchvalue<bool>(ima).adjust(nbh).set_init_value(false);
priority_queue_ima<I, StrictWeakOrdering> pqueue(ima, cmp);
extension::fill(deja_vu, true);
// }
// 3. Initialization
{
size_type pmin = ima.index_of_point(ima.domain().pmin);
pqueue.push(pmin);
stack_push(node_t{0, 0, 0, npos, pmin});
deja_vu[pmin] = true;
}
// 4. algo
// next_sibling is actually the last node in the substree
// it will be corrected afterward
// Fixme non-generic -> only dynamic neighborhood
auto nbh_delta_indexes = wrt_delta_index(ima, Neighborhood::dpoints);
while (!pqueue.empty())
{
flood:
size_type p = pqueue.top();
// size_type repr = stack.top();
// mln_assertion(not cmp(ima[p], ima[repr]) and not cmp(ima[repr], ima[p]));
V clevel = ima[p];
mln_foreach (auto k, nbh_delta_indexes)
{
auto q = p + k;
bool processed = deja_vu[q];
if (!processed)
{
pqueue.push(q);
deja_vu[q] = true;
if (cmp(clevel, ima[q]))
{
stack_push(node_t{0, 0, 0, npos, q});
goto flood;
}
}
}
// p done
// insert p in S and set him as representative
// for the current node if it is note yet defined
pqueue.pop();
S[--spos] = p;