%% ----------------------- %%
%% The LRDE Bibliography. %%
%% ----------------------- %%
%% This document lists all the publications by members of the LRDE.
%%
%% See https://www.lrde.epita.fr/dload/guide/guide.html#sec44
%% for instructions for adding/modifying entries in this file.
%%
%% See
%%
%% - http://www.lrde.epita.fr
%% for more information about the LRDE
%%
%% - http://publis.lrde.epita.fr
%% for more information about these publications
%%
%% - http://www.lrde.epita.fr/dload/papers/lrde.bib
%% for the most recent version of this BibTeX file
%%
%% Some of our fields are:
%%
%% lrdenewsdate = {YYYY-MM-DD} (To be filled by the authors)
%% Generates the news on our web site. The date is used to
%% sort news entries on the wiki, it usually corresponds to the
%% day the announcement was sent to annonce at lrde.epita.fr.
%%
@inproceedings{angelidis.01.wscg,
  author       = {Alexis Angelidis and Geoffroy Fouquier},
  title        = {Visualization issues in virtual environments: from
                  computer graphics techniques to intentional
                  visualization},
  booktitle    = {Proceedings of the 9th International Conference in
                  Central Europe on Computer Graphics, Visualization
                  and Computer Vision (WSCG)},
  year         = {2001},
  month        = feb,
  editor       = {V. Skala},
  volume       = {3},
  pages        = {90--98},
  address      = {Plzen, Czech Republic},
  isbn         = {80-7082-713-0},
  abstract     = {Rendering efficiently large virtual environment
                  scenes composed of many elements, dynamic objects,
                  and a highly moving viewpoint is a major issue. This
                  paper focuses on the first of the two viewing stage
                  operations: required elements determination, the
                  second being shading/filtering. We propose a
                  classification, extending the existing computer
                  graphic techniques toward display scalability
                  requirements, that distinguishes two key points:
                  keeping only required elements (culling), and
                  keeping only required details (which includes
                  traditional LODs). The mechanisms needed for display
                  scalability are presented.},
  lrdeprojects = {URBI}
}
@inproceedings{atlan.20.spie,
  author       = {Michael Atlan and Julie Rivet and Antoine Taliercio
                  and Nicolas Boutry and Guillaume Tochon and
                  Jean-Pierre Huignard},
  title        = {Experimental digital Gabor hologram rendering of
                  {C.} elegans worms by a model-trained convolutional
                  neural network (Conference Presentation)},
  booktitle    = {Label-free Biomedical Imaging and Sensing (LBIS)
                  2020},
  year         = {2020},
  volume       = {11251},
  organization = {International Society for Optics and Photonics},
  doi          = {10.1117/12.2545514}
}
@inproceedings{baarir.14.forte,
  author       = {Souheib Baarir and Alexandre Duret-Lutz},
  title        = {Mechanizing the Minimization of Deterministic
                  Generalized {B\"u}chi Automata},
  booktitle    = {Proceedings of the 34th IFIP International
                  Conference on Formal Techniques for Distributed
                  Objects, Components and Systems (FORTE'14)},
  year         = {2014},
  month        = jun,
  volume       = {8461},
  series       = {Lecture Notes in Computer Science},
  pages        = {266--283},
  publisher    = {Springer},
  doi          = {10.1007/978-3-662-43613-4_17},
  abstract     = {Deterministic B{\"u}chi automata (DBA) are useful to
                  (probabilistic) model checking and synthesis. We
                  survey techniques used to obtain and minimize DBAs
                  for different classes of properties. We extend these
                  techniques to support DBA that have generalized and
                  transition-based acceptance (DTGBA) as they can be
                  even smaller. Our minimization technique---a
                  reduction to a SAT problem---synthesizes a DTGBA
                  equivalent to the input DTGBA for any given number
                  of states and number of acceptance sets (assuming
                  such automaton exists). We present benchmarks using
                  a framework that implements all these techniques.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/baarir.14.forte.pdf},
  lrdenewsdate = {2014-03-21}
}
@inproceedings{baarir.15.lpar,
  author       = {Souheib Baarir and Alexandre Duret-Lutz},
  title        = {{SAT}-based Minimization of Deterministic
                  $\omega$-Automata},
  booktitle    = {Proceedings of the 20th International Conference on
                  Logic for Programming, Artificial Intelligence, and
                  Reasoning (LPAR'15)},
  year         = {2015},
  month        = nov,
  volume       = {9450},
  series       = {Lecture Notes in Computer Science},
  pages        = {79--87},
  publisher    = {Springer},
  doi          = {10.1007/978-3-662-48899-7_6},
  abstract     = {We describe a tool that inputs a deterministic
                  $\omega$-automaton with any acceptance condition,
                  and synthesizes an equivalent $\omega$-automaton
                  with another arbitrary acceptance condition and a
                  given number of states, if such an automaton exists.
                  This tool, that relies on a SAT-based encoding of
                  the problem, can be used to provide minimal
                  $\omega$-automata equivalent to given properties,
                  for different acceptance conditions.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/baarir.15.lpar.pdf},
  lrdenewsdate = {2015-09-01}
}
@inproceedings{babiak.13.spin,
  author       = {Tom{\'a}{\v{s}} Babiak and Thomas Badie and
                  Alexandre Duret-Lutz and Mojm{\'i}r
                  K{\v{r}}et{\'i}nsk{\'y} and Jan Strej{\v{c}}ek},
  title        = {Compositional Approach to Suspension and Other
                  Improvements to {LTL} Translation},
  booktitle    = {Proceedings of the 20th International SPIN Symposium
                  on Model Checking of Software (SPIN'13)},
  year         = {2013},
  month        = jul,
  volume       = {7976},
  series       = {Lecture Notes in Computer Science},
  pages        = {81--98},
  publisher    = {Springer},
  doi          = {10.1007/978-3-642-39176-7_6},
  abstract     = {Recently, there was defined a fragment of LTL
                  (containing fairness properties among other
                  interesting formulae) whose validity over a given
                  infinite word depends only on an arbitrary suffix of
                  the word. Building upon an existing translation from
                  LTL to B{\"u}chi automata, we introduce a
                  compositional approach where subformulae of this
                  fragment are translated separately from the rest of
                  an input formula and the produced automata are
                  composed in a way that the subformulae are checked
                  only in relevant accepting strongly connected
                  components of the final automaton. Further, we
                  suggest improvements over some procedures commonly
                  applied to generalized B{\"u}chi automata, namely
                  over generalized acceptance simplification and over
                  degeneralization. Finally we show how existing
                  simulation-based reductions can be implemented in a
                  signature-based framework in a way that improves the
                  determinism of the automaton.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/babiak.13.spin.pdf},
  lrdenewsdate = {2013-04-28}
}
@InProceedings{ babiak.15.cav,
  author       = {Tom{\'a}{\v{s}} Babiak and Franti{\v{s}}ek
                  Blahoudek and Alexandre Duret-Lutz and Joachim Klein
                  and Jan K{\v{r}}et{\'i}nsk{\'y} and David M{\"u}ller
                  and David Parker and Jan Strej{\v{c}}ek},
  title        = {The {H}anoi {O}mega-{A}utomata Format},
  booktitle    = {Proceedings of the 27th International Conference on
                  Computer Aided Verification (CAV'15)},
  year         = 2015,
  volume       = {9206},
  series       = {Lecture Notes in Computer Science},
  pages        = {479--486},
  month        = jul,
  publisher    = {Springer},
  abstract     = {We propose a flexible exchange format for
                  $\omega$-automata, as typically used in formal
                  verification, and implement support for it in a
                  range of established tools. Our aim is to simplify
                  the interaction of tools, helping the research
                  community to build upon other people's work. A key
                  feature of the format is the use of very generic
                  acceptance conditions, specified by Boolean
                  combinations of acceptance primitives, rather than
                  being limited to common cases such as B\"uchi,
                  Streett, or Rabin. Such flexibility in the choice of
                  acceptance conditions can be exploited in
                  applications, for example in probabilistic model
                  checking, and furthermore encourages the development
                  of acceptance-agnostic tools for automata
                  manipulations. The format allows acceptance
                  conditions that are either state-based or
                  transition-based, and also supports alternating
                  automata.},
  lrdeprojects = {Spot},
  lrdenewsdate = {2015-04-27},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/babiak.15.cav.pdf},
  lrdeposter   = {http://www.lrde.epita.fr/dload/papers/babiak.15.cav.poster.pdf},
  doi          = {10.1007/978-3-319-21690-4_31}
}
@InProceedings{ baier.19.atva,
  author       = {Christel Baier and Franti\v{s}ek Blahoudek and
                  Alexandre Duret-Lutz and Joachim Klein and David
                  M\"uller and Jan Strej\v{c}ek},
  title        = {Generic Emptiness Check for Fun and Profit},
  booktitle    = {Proceedings of the 17th International Symposium on
                  Automated Technology for Verification and Analysis
                  (ATVA'19)},
  year         = {2019},
  volume       = {11781},
  series       = {Lecture Notes in Computer Science},
  pages        = {445--461},
  month        = oct,
  publisher    = {Springer},
  abstract     = {We present a new algorithm for checking the
                  emptiness of $\omega$-automata with an Emerson-Lei
                  acceptance condition (i.e., a positive Boolean
                  formula over sets of states or transitions that must
                  be visited infinitely or finitely often). The
                  algorithm can also solve the model checking problem
                  of probabilistic positiveness of MDP under a
                  property given as a deterministic Emerson-Lei
                  automaton. Although both these problems are known to
                  be NP-complete and our algorithm is exponential in
                  general, it runs in polynomial time for simpler
                  acceptance conditions like generalized Rabin,
                  Streett, or parity. In fact, the algorithm provides
                  a unifying view on emptiness checks for these
                  simpler automata classes. We have implemented the
                  algorithm in Spot and PRISM and our experiments show
                  improved performance over previous solutions.},
  lrdeprojects = {Spot},
  lrdenewsdate = {2019-07-29},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/baier.19.atva.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/baier.19.atva.slides.mefosyloma.pdf},
  doi          = {10.1007/978-3-030-31784-3_26}
}
@InProceedings{ baillard.05.adass,
  author       = {Anthony Baillard and Emmanuel Bertin and Yannick
                  Mellier and Henry Joy {McCracken} and Thierry
                  G\'eraud and Roser Pell\'o and Jean-Fran\c{c}ois
                  {LeBorgne} and Pascal Fouqu\'e},
  title        = {Project {EFIGI}: Automatic classification of
                  galaxies},
  year         = 2005,
  booktitle    = {Astronomical Data Analysis Software and Systems XV},
  volume       = 351,
  pages        = {236--239},
  publisher    = {Astronomical Society of the Pacific},
  series       = {Conference},
  url          = {http://www.aspbooks.org/custom/publications/paper/index.phtml?paper_id=3398},
  editor       = {Carlos Gabriel and Christophe Arviset and Daniel
                  Ponz and Enrique Solano},
  isbn         = {1-58381-219-9},
  abstract     = {We propose an automatic system to classify images of
                  galaxies with varying resolution. Morphologically
                  typing galaxies is a difficult task in particular
                  for distant galaxies convolved by a point-spread
                  function and suffering from a poor signal-to-noise
                  ratio. In the context of the first phase of the
                  project EFIGI (extraction of the idealized shapes of
                  galaxies in imagery), we present the three steps of
                  our software: cleaning, dimensionality reduction and
                  supervised learning. We present preliminary results
                  derived from a subset of 774 galaxies from the
                  Principal Galaxies Catalog and compare them to human
                  classifications made by astronomers. We use g-band
                  images from the Sloan Digital Sky Survey. Finally,
                  we discuss future improvements which we intend to
                  implement before releasing our tool to the
                  community.},
  lrdekeywords = {Image},
  lrdenewsdate = {2006-09-20}
}
@inproceedings{baillard.07.gretsi,
  author       = {Anthony Baillard and Christophe Berger and Emmanuel
                  Bertin and Thierry G\'eraud and Roland Levillain and
                  Nicolas Widynski},
  title        = {Algorithme de calcul de l'arbre des composantes avec
                  applications \`a la reconnaissance des formes en
                  imagerie satellitaire},
  booktitle    = {Proceedings of the 21st Symposium on Signal and
                  Image Processing (GRETSI)},
  year         = {2007},
  month        = sep,
  address      = {Troyes, France},
  category     = {national},
  abstract     = {In this paper a new algorithm to compute the
                  component tree is presented. As compared to the
                  state-of-the-art, this algorithm does not use
                  excessive memory and is able to work efficiently on
                  images whose values are highly quantized or even
                  with images having floating values. We also describe
                  how it can be applied to astronomical data to
                  identify relevant objects.},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/baillard.07.gretsi.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/baillard.07.gretsi.slides.pdf},
  lrdenewsdate = {2007-05-11}
}
@InCollection{ barnat.18.hpcr,
  author       = {Ji{\v{r}}{\'i} Barnat and Vincent Bloemen and
                  Alexandre Duret-Lutz and Alfons Laarman and Laure
                  Petrucci and Jaco van de Pol and Etienne Renault},
  editor       = {Youssef Hamadi and Lakhdar Sais},
  title        = {Parallel Model Checking Algorithms for Linear-Time
                  Temporal Logic},
  booktitle    = {Handbook of Parallel Constraint Reasoning},
  year         = {2018},
  publisher    = {Springer International Publishing},
  address      = {Cham},
  chapter      = 12,
  pages        = {457--507},
  abstract     = {Model checking is a fully automated, formal method
                  for demonstrating absence of bugs in reactive
                  systems. Here, bugs are violations of properties in
                  Linear-time Temporal Logic (LTL). A fundamental
                  challenge to its application is the exponential
                  explosion in the number of system states. The
                  current chapter discusses the use of parallelism in
                  order to overcome this challenge. We reiterate the
                  textbook automata-theoretic approach, which reduces
                  the model checking problem to the graph problem of
                  finding cycles. We discuss several parallel
                  algorithms that attack this problem in various ways,
                  each with different characteristics: Depth-first
                  search (DFS) based algorithms rely on heuristics for
                  good parallelization, but exhibit a low complexity
                  and good on-the-fly behavior. Breadth-first search
                  (BFS) based approaches, on the other hand, offer
                  good parallel scalability and support distributed
                  parallelism. In addition, we present various simpler
                  model checking tasks, which still solve a large and
                  important subset of the LTL model checking problem,
                  and show how these can be exploited to yield more
                  efficient algorithms. In particular, we provide
                  simplified DFS-based search algorithms and show that
                  the BFS-based algorithms exhibit optimal runtimes in
                  certain cases.},
  isbn         = {978-3-319-63516-3},
  doi          = {10.1007/978-3-319-63516-3_12},
  lrdekeywords = {Verification},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/barnat.18.hpcr.pdf},
  lrdenewsdate = {2018-04-10}
}
@InProceedings{ bensalem.11.sumo,
  author       = {Ala Eddine Ben{ S}alem and Alexandre Duret-Lutz and
                  Fabrice Kordon},
  title        = {Generalized {B\"u}chi Automata versus Testing
                  Automata for Model Checking},
  booktitle    = {Proceedings of the second International Workshop on
                  Scalable and Usable Model Checking for Petri Net and
                  other models of Concurrency (SUMO'11)},
  address      = {Newcastle, UK},
  series       = {Workshop Proceedings},
  year         = 2011,
  month        = jun,
  volume       = 726,
  publisher    = {CEUR},
  url          = {http://ftp.informatik.rwth-aachen.de/Publications/CEUR-WS/Vol-726/},
  abstract     = {Geldenhuys and Hansen have shown that a kind of
                  $\omega$-automa\-ton known as \emph{testing
                  automata} can outperform the B{\"u}chi automata
                  traditionally used in the automata-theoretic
                  approach to model
                  checking~\cite{geldenhuys.06.spin}. This work
                  completes their experiments by including a
                  comparison with generalized B{\"u}chi automata; by
                  using larger state spaces derived from Petri nets;
                  and by distinguishing violated formul\ae{} (for
                  which testing automata fare better) from verified
                  formul\ae{} (where testing automata are hindered by
                  their two-pass emptiness check).},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.11.sumo.pdf},
  lrdeprojects = {Spot},
  lrdenewsdate = {2011-05-25}
}
@Article{ bensalem.12.topnoc,
  author       = {Ala Eddine Ben{ S}alem and Alexandre Duret-Lutz and
                  Fabrice Kordon},
  title        = {Model Checking using Generalized Testing Automata},
  journal      = {Transactions on Petri Nets and Other Models of
                  Concurrency (ToPNoC VI)},
  year         = 2012,
  volume       = 7400,
  series       = {Lecture Notes in Computer Science},
  isbn         = {978-3-642-35178-5},
  publisher    = {Springer-Verlag},
  doi          = {10.1007/978-3-642-35179-2_5},
  pages        = {94--112},
  abstract     = {Geldenhuys and Hansen showed that a kind of
                  $\omega$-automa\-ta known as \emph{Testing Automata}
                  (TA) can, in the case of stuttering-insensitive
                  properties, outperform the B{\"u}chi automata
                  traditionally used in the automata-theoretic
                  approach to model
                  checking~\cite{geldenhuys.06.spin}. In previous
                  work~\cite{bensalem.11.sumo}, we compared TA against
                  \emph{Transition-based Generalized B{\"u}chi
                  Automata} (TGBA), and concluded that TA were more
                  interesting when counterexamples were expected,
                  otherwise TGBA were more efficient. In this work we
                  introduce a new kind of automata, dubbed
                  \emph{Transition-based Generalized Testing Automata}
                  (TGTA), that combine ideas from TA and TGBA.
                  Implementation and experimentation of TGTA show that
                  they outperform other approaches in most of the
                  cases.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.12.topnoc.pdf},
  lrdeprojects = {Spot}
}
@PhDThesis{ bensalem.14.phd,
  author       = {Ala Eddine Ben{ S}alem},
  title        = {Improving the Model Checking of Stutter-Invariant
                  {LTL} Properties},
  school       = {{Universit{\'e} Pierre et Marie Curie - Paris VI}},
  year         = 2014,
  address      = {Paris, France},
  month        = sep,
  abstract     = {Software systems have become ubiquitous in our
                  everyday life. They replace humans for critical
                  tasks that involve high costs and even human lives.
                  The serious consequences caused by the failure of
                  such systems make crucial the use of rigorous
                  methods for system validation. One of the
                  widely-used formal verification methods is the
                  automata-theoretic approach to model checking. It
                  takes as input a model of the system and a property,
                  and answers if the model satisfies or not the
                  property. To achieve this goal, it translates the
                  negation of the property in an automaton and checks
                  whether the product of the model and this automaton
                  is empty. Although it is automatic, this approach
                  suffers from the combinatorial explosion of the
                  resulting product. To tackle this problem,
                  especially when checking stutter-invariant LTL
                  properties, we firstly improve the two-pass
                  verification algorithm of Testing automata (TA),
                  then we propose a transformation of TA into a normal
                  form (STA) that only requires a single-pass
                  verification algorithm. We also propose a new type
                  of automata: the TGTA. These automata also enable a
                  check in a single-pass and without adding artificial
                  states: it combines the benefits of TA and
                  generalized B\"uchi automata (TGBA). TGTA improve
                  the explicit and symbolic model checking approaches.
                  In particular, by combining TGTA with the saturation
                  technique, the performances of the symbolic approach
                  has been improved by an order of magnitude compared
                  to TGBA. Used in hybrid approaches TGTA prove
                  complementary to TGBA. All the contributions of this
                  work have been implemented in SPOT and LTS-ITS,
                  respectively an explicit and a symbolic open source
                  model-checking library.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.14.phd.pdf},
  lrdenewsdate = {2014-09-25},
  lrdeprojects = {Spot}
}
@inproceedings{bensalem.14.tacas,
  author       = {Ala Eddine Ben{ S}alem and Alexandre Duret-Lutz and
                  Fabrice Kordon and Yann Thierry-Mieg},
  title        = {Symbolic Model Checking of Stutter Invariant
                  Properties Using Generalized Testing Automata},
  booktitle    = {Proceedings of the 20th International Conference on
                  Tools and Algorithms for the Construction and
                  Analysis of Systems (TACAS'14)},
  year         = {2014},
  month        = apr,
  volume       = {8413},
  series       = {Lecture Notes in Computer Science},
  pages        = {440--454},
  publisher    = {Springer},
  address      = {Grenoble, France},
  doi          = {10.1007/978-3-642-54862-8_38},
  abstract     = {In a previous work, we showed that a kind of
                  $\omega$-automata known as \emph{Tran\-sition-based
                  Generalized Testing Automata} (TGTA) can outperform
                  the B\"uchi automata traditionally used for
                  \textit{explicit} model checking when verifying
                  stutter-invariant properties. In this work, we
                  investigate the use of these generalized testing
                  automata to improve \textit{symbolic} model checking
                  of stutter-invariant LTL properties. We propose an
                  efficient symbolic encoding of stuttering
                  transitions in the product between a model and a
                  TGTA. Saturation techniques available for decision
                  diagrams then benefit from the presence of
                  stuttering self-loops on all states of TGTA.
                  Experimentation of this approach confirms that it
                  outperforms the symbolic approach based on
                  (transition-based) Generalized B\"uchi Automata.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.14.tacas.pdf}
}
@inproceedings{bensalem.15.acsd,
  author       = {Ala Eddine Ben{ S}alem and Mohamed Graiet},
  title        = {Combining Explicit and Symbolic {LTL} Model Checking
                  Using Generalized Testing Automata},
  booktitle    = {Proceedings of the 15th International Conference on
                  Application of Concurrency to System Design
                  (ACSD'15)},
  year         = {2015},
  month        = jun,
  address      = {Brussels, Belgium},
  publisher    = {IEEE Computer Society},
  abstract     = {In automata-theoretic model checking, there are
                  mainly two approaches: \emph{explicit} and
                  \emph{symbolic}. In the explicit approach, the
                  state-space is constructed explicitly and lazily
                  during exploration (i.e., on-the-fly). The symbolic
                  approach tries to overcome the state-space explosion
                  obstacle by symbolically encoding the state-space in
                  a concise way using decision diagrams. However, this
                  symbolic construction is not performed on-the-fly as
                  in the explicit approach. In order to take advantage
                  of the best of both worlds, \emph{hybrid approaches}
                  are proposed as combinations of explicit and
                  symbolic approaches. A hybrid approach is usually
                  based on an on-the-fly construction of an explicit
                  graph of symbolic nodes, where each symbolic node
                  encodes a subset of states by means of binary
                  decision diagrams. An alternative to the standard
                  {B\"u}chi automata, called Testing automata have
                  never been used before for hybrid model checking. In
                  addition, in previous work, we have shown that
                  \emph{Generalized Testing Automata} (TGTA) can
                  outperform the {B\"u}chi automata for explicit and
                  symbolic model checking of stutter-invariant LTL
                  properties. In this work, we investigate the use of
                  these TGTA to improve hybrid model checking. We show
                  how traditional hybrid approaches based on
                  Generalized {B\"u}chi Automata (TGBA) can be adapted
                  to obtain TGTA-based hybrid approaches. Then, each
                  original approach is experimentally compared against
                  its TGTA variant. The results show that these new
                  variants are statistically more efficient.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.15.acsd.pdf},
  lrdenewsdate = {2015-05-19}
}
@InProceedings{ bensalem.15.forte,
  author       = {Ala Eddine Ben{ S}alem},
  title        = {Extending Testing Automata to All {LTL}},
  booktitle    = {Proceedings of the 35th IFIP International
                  Conference on Formal Techniques for Distributed
                  Objects, Components and Systems (FORTE'15)},
  year         = 2015,
  address      = {Grenoble, France},
  month        = jun,
  series       = {Lecture Notes in Computer Science},
  volume       = 9039,
  publisher    = {Springer},
  lrdeprojects = {Spot},
  lrdenewsdate = {2015-05-19},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.15.forte.pdf},
  abstract     = {An alternative to the traditional {B\"u}chi Automata
                  (BA), called Testing Automata (TA) was proposed by
                  Hansen et al. to improve the automata theoretic
                  approach to LTL model checking. In previous work, we
                  proposed an improvement of this alternative approach
                  called TGTA (Generalized Testing Automata). TGTA
                  mixes features from both TA and TGBA (Generalized
                  {B\"u}chi Automata), without the disadvantage of TA,
                  which is the second pass of the emptiness check
                  algorithm. We have shown that TGTA outperform TA, BA
                  and TGBA for explicit and symbolic LTL model
                  checking. However, TA and TGTA are less expressive
                  than {B\"u}chi Automata since they are able to
                  represent only stutter-invariant LTL properties
                  (LTL$\setminus$X). In this paper, we show how to
                  extend Generalized Testing Automata (TGTA) to
                  represent any LTL property. This allows to extend
                  the model checking approach based on this new form
                  of testing automata to check other kinds of
                  properties and also other kinds of models (such as
                  Timed models). Implementation and experimentation of
                  this extended TGTA approach show that it is
                  statistically more efficient than the {B\"u}chi
                  Automata approaches (BA and TGBA), for the explicit
                  model checking of LTL properties.}
}
@InProceedings{ bensalem.15.lata,
  author       = {Ala Eddine Ben{ S}alem},
  title        = {Single-pass Testing Automata for {LTL} Model
                  Checking},
  booktitle    = {Proceedings of the 9th International Conference on
                  Language and Automata Theory and Applications
                  (LATA'15)},
  year         = 2015,
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  address      = {Nice, France},
  month        = mar,
  volume       = 8977,
  pages        = {563--576},
  abstract     = {Testing Automaton (TA) is a new kind of
                  $\omega$-automaton introduced by Hansen et al. as an
                  alternative to the standard B\"uchi Automata (BA)
                  for the verification of stutter-invariant LTL
                  properties. Geldenhuys and Hansen showed later how
                  to use TA in the automata-theoretic approach to LTL
                  model checking. They propose a TA-based approach
                  using a verification algorithm that requires two
                  searches (two passes) and compare its performance
                  against the BA approach. This paper improves their
                  work by proposing a transformation of TA into a
                  normal form (STA) that only requires a single
                  one-pass verification algorithm. The resulting
                  automaton is called Single-pass Testing Automaton
                  (STA). We have implemented the STA approach in Spot
                  model checking library. We are thus able to compare
                  it with the ``traditional'' BA and TA approaches.
                  These experiments show that STA compete well on our
                  examples.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/bensalem.15.lata.pdf},
  lrdeprojects = {Spot}
}
@inproceedings{berger.07.icip,
  author       = {Christophe Berger and Thierry G\'eraud and Roland
                  Levillain and Nicolas Widynski and Anthony Baillard
                  and Emmanuel Bertin},
  title        = {Effective Component Tree Computation with
                  Application to Pattern Recognition in Astronomical
                  Imaging},
  booktitle    = {Proceedings of the IEEE International Conference on
                  Image Processing (ICIP)},
  year         = {2007},
  month        = sep,
  volume       = {4},
  pages        = {41--44},
  address      = {San Antonio, TX, USA},
  abstract     = {In this paper a new algorithm to compute the
                  component tree is presented. As compared to the
                  state of the art, this algorithm does not use
                  excessive memory and is able to work efficiently on
                  images whose values are highly quantized or even
                  with images having floating values. We also describe
                  how it can be applied to astronomical data to
                  identify relevant objects.},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/berger.07.icip.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/berger.07.icip.slides.pdf},
  lrdenewsdate = {2007-05-03}
}
@InProceedings{ blahoudek.14.spin,
  author       = {Franti\v{s}ek Blahoudek and Alexandre Duret-Lutz
                  and Mojm\'{i}r K\v{r}et\'{i}nsk\'{y} and Jan
                  Strej\v{c}ek},
  title        = {Is There a Best {B\"u}chi Automaton for Explicit
                  Model Checking?},
  booktitle    = {Proceedings of the 21st International SPIN Symposium
                  on Model Checking of Software (SPIN'14)},
  year         = 2014,
  pages        = {68--76},
  publisher    = {ACM},
  abstract     = {LTL to B\"uchi automata (BA) translators are
                  traditionally optimized to produce automata with a
                  small number of states or a small number of
                  non-deterministic states. In this paper, we search
                  for properties of B\"uchi automata that really
                  influence the performance of explicit model
                  checkers. We do that by manual analysis of several
                  automata and by experiments with common LTL-to-BA
                  translators and realistic verification tasks. As a
                  result of these experiences, we gain a better
                  insight into the characteristics of automata that
                  work well with Spin.},
  lrdeprojects = {Spot},
  lrdenewsdate = {2014-06-16},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/blahoudek.14.spin.pdf},
  doi          = {10.1145/2632362.2632377}
}
@InProceedings{ blahoudek.15.spin,
  author       = {Franti\v{s}ek Blahoudek and Alexandre Duret-Lutz
                  and Vojt\v{e}ch Rujbr and Jan Strej\v{c}ek},
  title        = {On Refinement of {B\"u}chi Automata for Explicit
                  Model Checking},
  booktitle    = {Proceedings of the 22nd International SPIN Symposium
                  on Model Checking of Software (SPIN'15)},
  year         = 2015,
  month        = aug,
  pages        = {66--83},
  publisher    = {Springer},
  volume       = 9232,
  series       = {Lecture Notes in Computer Science},
  abstract     = {In explicit model checking, systems are typically
                  described in an implicit and compact way. Some valid
                  information about the system can be easily derived
                  directly from this description, for example that
                  some atomic propositions cannot be valid at the same
                  time. The paper shows several ways to apply this
                  information to improve the B{\"u}chi automaton built
                  from an LTL specification. As a result, we get
                  smaller automata with shorter edge labels that are
                  easier to understand and, more importantly, for
                  which the explicit model checking process performs
                  better.},
  lrdeprojects = {Spot},
  lrdenewsdate = {2015-06-15},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/blahoudek.15.spin.pdf},
  doi          = {10.1007/978-3-319-23404-5_6}
}
@InProceedings{ blahoudek.17.lpar,
  author       = {Franti\v{s}ek Blahoudek and Alexandre Duret-Lutz
                  and Mikul\'{a}\v{s} Kloko\v{c}ka and Mojm{\'i}r
                  K{\v{r}}et{\'i}nsk{\'y} and Jan Strej{\v{c}}ek},
  title        = {Seminator: A Tool for Semi-Determinization of
                  Omega-Automata},
  booktitle    = {Proceedings of the 21st International Conference on
                  Logic for Programming, Artificial Intelligence, and
                  Reasoning (LPAR-21)},
  year         = {2017},
  editor       = {Thomas Eiter and David Sands and Geoff Sutcliffe},
  volume       = {46},
  series       = {EPiC Series in Computing},
  pages        = {356--367},
  month        = may,
  publisher    = {EasyChair Publications},
  lrdeprojects = {Spot},
  lrdenewsdate = {2017-04-03},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/blahoudek.17.lpar.pdf},
  abstract     = {We present a tool that transforms nondeterministic
                  $\omega$-automata to semi-deterministic
                  $\omega$-automata. The tool Seminator accepts
                  transition-based generalized B\"uchi automata (TGBA)
                  as an input and produces automata with two kinds of
                  semi-determinism. The implemented procedure performs
                  degeneralization and semi-determinization
                  simultaneously and employs several other
                  optimizations. We experimentally evaluate Seminator
                  in the context of LTL to semi-deterministic automata
                  translation.},
  doi          = {10.29007/k5nl}
}
@inproceedings{blahoudek.20.cav,
  author       = {Franti\v{s}ek Blahoudek and Alexandre Duret-Lutz
                  and Jan Strej\v{c}ek},
  title        = {{S}eminator~2 Can Complement Generalized {B\"u}chi
                  Automata via Improved Semi-Determinization},
  booktitle    = {Proceedings of the 32nd International Conference on
                  Computer-Aided Verification (CAV'20)},
  year         = {2020},
  month        = jul,
  volume       = {12225},
  series       = {Lecture Notes in Computer Science},
  pages        = {15--27},
  publisher    = {Springer},
  doi          = {10.1007/978-3-030-53291-8_2},
  abstract     = {We present the second generation of the tool
                  Seminator that transforms transition-based
                  generalized B{\"u}chi automata (TGBAs) into
                  equivalent semi-deterministic automata. The tool has
                  been extended with numerous optimizations and
                  produces considerably smaller automata than its
                  first version. In connection with the
                  state-of-the-art LTL to TGBAs translator Spot,
                  Seminator~2 produces smaller (on average)
                  semi-deterministic automata than the direct LTL to
                  semi-deterministic automata translator
                  \texttt{ltl2ldgba} of the Owl library. Further,
                  Seminator~2 has been extended with an improved NCSB
                  complementation procedure for semi-deterministic
                  automata, providing a new way to complement automata
                  that is competitive with state-of-the-art
                  complementation tools.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/blahoudek.20.cav.pdf},
  lrdenewsdate = {2020-05-14}
}
@Article{ bloch.03.ai,
author = {Isabelle Bloch and Thierry G\'eraud and Henri Ma{\^\i}tre},
title = {Representation and fusion of heterogeneous fuzzy
information in the {3D} space for model-based structural
recognition---application to {3D} brain imaging},
journal = {Artificial Intelligence},
month = aug,
year = 2003,
volume = 148,
number = {1--2},
pages = {141--175},
doi = {10.1016/S0004-3702(03)00018-3},
abstract = {We present a novel approach of model-based pattern
recognition where structural information and spatial
relationships have a most important role. It is illustrated
in the domain of 3D brain structure recognition using an
anatomical atlas. Our approach performs simultaneously
segmentation and recognition of the scene and the solution
of the recognition task is progressive, processing
successively different objects, using different pieces of
knowledge about the object and about relationships between
objects. Therefore the core of the approach is the
representation part, and constitutes the main contribution
of this paper. We make use of a spatial representation of
each piece of information, as a spatial set representing a
constraint to be satisfied by the searched object, thanks
in particular to fuzzy mathematical operations. Fusion of
these constraints allows us to segment and recognize the
desired object.},
lrdekeywords = {Image}
}
@Article{ bloch.05.prl,
author = {Isabelle Bloch and Olivier Colliot and Oscar Camara and
Thierry G\'eraud},
title = {Fusion of spatial relationships for guiding recognition,
example of brain structure recognition in {3D} {MRI}},
journal = {Pattern Recognition Letters},
year = 2005,
volume = 26,
number = 4,
month = mar,
pages = {449--457},
doi = {10.1016/j.patrec.2004.08.009},
abstract = {Spatial relations play an important role in recognition of
structures embedded in a complex environment and for
reasoning under imprecision. Several types of relationships
can be modeled in a unified way using fuzzy mathematical
morphology. Their combination benefits from the powerful
framework of fuzzy set theory for fusion tasks and decision
making. This paper presents several methods of fusion of
information about spatial relationships and illustrates
them on the example of model-based recognition of brain
structures in 3D magnetic resonance imaging.},
lrdekeywords = {Image},
lrdenewsdate = {2004-07-09}
}
@InProceedings{ bloch.21.dgmm,
doi = {10.1007/978-3-030-76657-3_33},
author = {Isabelle Bloch and Samy Blusseau and Ram\'on {Pino
P\'erez} and \'Elodie Puybareau and Guillaume Tochon},
editor = {Lindblad, Joakim and Malmberg, Filip and Sladoje,
Nata{\v{s}}a},
title = {On Some Associations Between Mathematical Morphology and
Artificial Intelligence},
booktitle = {Proceedings of the IAPR International Conference on
Discrete Geometry and Mathematical Morphology (DGMM)},
year = {2021},
address = {Uppsala, Sweden},
series = {Lecture Notes in Computer Science},
volume = {12708},
publisher = {Springer},
pages = {457--469},
month = may,
abstract = {This paper aims at providing an overview of the use of
mathematical morphology, in its algebraic setting, in
several fields of artificial intelligence (AI). Three
domains of AI will be covered. In the first domain,
mathematical morphology operators will be expressed in some
logics (propositional, modal, description logics) to answer
typical questions in knowledge representation and
reasoning, such as revision, fusion, explanatory relations,
satisfying usual postulates. In the second domain, spatial
reasoning will benefit from spatial relations modeled using
fuzzy sets and morphological operators, with applications
in model-based image understanding. In the third domain,
interactions between mathematical morphology and deep
learning will be detailed. Morphological neural networks
were introduced as an alternative to classical
architectures, yielding a new geometry in decision
surfaces. Deep networks were also trained to learn
morphological operators and pipelines, and morphological
algorithms were used as companion tools to machine
learning, for pre/post processing or even regularization
purposes. These ideas have known a large resurgence in the
last few years and new ones are emerging.},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2021-02-16}
}
@InProceedings{ bloemen.17.spin,
author = {Vincent Bloemen and Alexandre Duret-Lutz and Jaco van de
Pol},
title = {Explicit State Model Checking with Generalized B{\"u}chi
and Rabin Automata},
booktitle = {Proceedings of the 24th International SPIN Symposium on
Model Checking of Software (SPIN'17)},
pages = {50--59},
year = {2017},
publisher = {ACM},
month = jul,
doi = {10.1145/3092282.3092288},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/bloemen.17.spin.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2017-05-22},
abstract = {In the automata theoretic approach to explicit state LTL
model checking, the synchronized product of the model and
an automaton that represents the negated formula is checked
for emptiness. In practice, a (transition-based
generalized) B\"uchi automaton (TGBA) is used for this
procedure.
This paper investigates whether using a more general form
of acceptance, namely transition-based generalized Rabin
automata (TGRAs), improves the model checking procedure.
TGRAs can have significantly fewer states than TGBAs,
however the corresponding emptiness checking procedure is
more involved. With recent advances in probabilistic model
checking and LTL to TGRA translators, it is only natural to
ask whether checking a TGRA directly is more advantageous
in practice.
We designed a multi-core TGRA checking algorithm and
performed experiments on a subset of the models and
formulas from the 2015 Model Checking Contest. We observed
that our algorithm can be used to replace a TGBA checking
algorithm without losing performance. In general, we found
little to no improvement by checking TGRAs directly.}
}
@Article{ bloemen.19.sttt,
author = {Vincent Bloemen and Alexandre Duret-Lutz and Jaco van de
Pol},
title = {Model checking with generalized {R}abin and {F}in-less
automata},
journal = {International Journal on Software Tools for Technology
Transfer},
publisher = {Springer},
month = jun,
volume = 21,
number = 3,
pages = {307--324},
year = {2019},
doi = {10.1007/s10009-019-00508-4},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/bloemen.19.sttt.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2019-04-01},
abstract = { In the automata theoretic approach to explicit state LTL
model checking, the synchronized product of the model and
an automaton that represents the negated formula is checked
for emptiness. In practice, a (transition-based
generalized) B\"uchi automaton (TGBA) is used for this
procedure.
This paper investigates whether using a more general form
of acceptance, namely a transition-based generalized Rabin
automaton (TGRA), improves the model checking procedure.
TGRAs can have significantly fewer states than TGBAs,
however the corresponding emptiness checking procedure is
more involved. With recent advances in probabilistic model
checking and LTL to TGRA translators, it is only natural to
ask whether checking a TGRA directly is more advantageous
in practice.
We designed a multi-core TGRA checking algorithm and
performed experiments on a subset of the models and
formulas from the 2015 Model Checking Contest and generated
LTL formulas for models from the BEEM database. While we
found little to no improvement by checking TGRAs directly,
we show how various aspects of a TGRA's structure
influences the model checking performance.
In this paper, we also introduce a Fin-less acceptance
condition, which is a disjunction of TGBAs. We show how to
convert TGRAs into automata with Fin-less acceptance and
show how a TGBA emptiness procedure can be extended to
check Fin-less automata.}
}
@InProceedings{ boldo.18.arith,
title = {A Formally-Proved Algorithm to Compute the Correct Average
of Decimal Floating-Point Numbers},
author = {Boldo, Sylvie and Faissole, Florian and Tourneur,
Vincent},
booktitle = {25th IEEE Symposium on Computer Arithmetic},
address = {Amherst, MA, United States},
year = {2018},
month = jun,
pdf = {https://hal.inria.fr/hal-01772272/file/article-HAL.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boldo.18.arith.pdf},
lrdedate = {2017-05-22},
abstract = {Some modern processors include decimal floating-point
units, with a conforming implementation of the IEEE-754
2008 standard. Unfortunately, many algorithms from the
computer arithmetic literature are not correct anymore when
computations are done in radix 10. This is in particular
the case for the computation of the average of two
floating-point numbers. Several radix-2 algorithms are
available, including one that provides the correct
rounding, but none hold in radix 10. This paper presents a
new radix-10 algorithm that computes the correctly-rounded
average. To guarantee a higher level of confidence, we also
provide a Coq formal proof of our theorems, that takes
gradual underflow into account. Note that our formal proof
was generalized to ensure this algorithm is correct when
computations are done with any even radix.}
}
@Article{ borghi.06.crossroads,
author = {Alexandre Borghi and Valentin David and Akim Demaille},
title = {{C}-{T}ransformers --- {A} Framework to Write {C} Program
Transformations},
journal = {ACM Crossroads},
year = 2006,
volume = 12,
number = 3,
month = {Spring},
lrdeprojects = {Transformers},
note = {\url{http://www.acm.org/crossroads/xrds12-3/contractc.html}},
abstract = {Program transformation techniques have reached a maturity
level that allows processing high-level language sources in
new ways. Not only do they revolutionize the implementation
of compilers and interpreters, but with modularity as a
design philosophy, they also permit the seamless extension
of the syntax and semantics of existing programming
languages. The C-Transformers project provides a
transformation environment for C, a language that proves to
be hard to transform. We demonstrate the effectiveness of
C-Transformers by extending C's instructions and control
flow to support Design by Contract. C-Transformers is
developed by members of the LRDE: EPITA undergraduate
students.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/borghi.06.crossroads.pdf},
lrdenewsdate = {2005-10-16}
}
@InProceedings{ borgstrom.12.nistsre,
author = {Jonas Borgstrom and William Campbell and Najim Dehak and
R{\'e}da Dehak and Daniel Garcia-Romero and Kara
Greenfield and Alan McCree and Doug Reynolds and Fred
Richardson and Elliot Singer and Douglas Sturim and Pedro
A. Torres-Carrasquillo},
title = {{MITLL} 2012 Speaker Recognition Evaluation System
Description},
booktitle = {NIST Speaker Recognition Evaluation},
year = 2012,
address = {Orlando},
month = dec
}
@InProceedings{ boutry.14.dgci,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {On Making {$n$D} Images Well-Composed by a Self-Dual Local
Interpolation},
booktitle = {Proceedings of the 18th International Conference on
Discrete Geometry for Computer Imagery (DGCI)},
year = 2014,
month = sep,
pages = {320--331},
address = {Siena, Italy},
series = {Lecture Notes in Computer Science},
volume = {8668},
publisher = {Springer},
editor = {E. Barcucci and A. Frosini and S. Rinaldi},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.14.dgci.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2014-05-28},
doi = {10.1007/978-3-319-09955-2_27},
abstract = {Natural and synthetic discrete images are generally not
well-composed, leading to many topological issues:
connectivities in binary images are not equivalent, the
Jordan Separation theorem is not true anymore, and so on.
Conversely, making images well-composed solves those
problems and then gives access to many powerful tools
already known in mathematical morphology as the Tree of
Shapes which is of our principal interest. In this paper,
we present two main results: a characterization of 3D
well-composed gray-valued images; and a counter-example
showing that no local self-dual interpolation with a
classical set of properties makes well-composed images with
one subdivision in 3D, as soon as we choose the mean
operator to interpolate in 1D. Then, we briefly discuss
various constraints that could be interesting to change to
make the problem solvable in nD.}
}
@Misc{ boutry.14.geodis,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {Une g\'en\'eralisation du {\it bien-compos\'e} \`a la
dimension $n$},
howpublished = {Communication at Journ\'ee du Groupe de Travail de
G\'eometrie Discr\`ete (GT GeoDis, Reims Image 2014)},
month = nov,
year = {2014},
note = {In French},
abstract = {La notion de bien-compos\'e a \'et\'e introduite par
Latecki en 1995 pour les ensembles et les images 2D et pour
les ensembles 3D en 1997. Les images binaires
bien-compos\'ees disposent d'importantes propri\'et\'es
topologiques. De plus, de nombreux algorithmes peuvent
tirer avantage de ces propri\'et\'es topologiques.
Jusqu'\`a maintenant, la notion de bien-compos\'e n'a pas
\'et\'e \'etudi\'ee en dimension $n$, avec $n > 3$. Dans le
travail pr\'esent\'e ici, nous d\'emontrons le th\'eor\`eme
fondamental de l'\'equivalence des connexit\'es pour un
ensemble bien-compos\'e, puis nous g\'en\'eralisons la
caract\'erisation des ensembles et des images bien-compos\'es \`a la dimension $n$. },
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2014-11-17},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.14.geodis.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/boutry.14.geodis.poster.pdf}
}
@InProceedings{ boutry.15.icip,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {How to Make {$n$D} Images Well-Composed Without
Interpolation},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
year = {2015},
month = sep,
address = {Qu\'ebec City, Canada},
pages = {2149--2153},
lrdeprojects = {Olena},
lrdenewsdate = {2015-05-14},
doi = {10.1109/ICIP.2015.7351181},
abstract = {Latecki et al. have introduced the notion of well-composed
images, i.e., a class of images free from the
connectivities paradox of discrete topology. Unfortunately
natural and synthetic images are not a priori
well-composed, usually leading to topological issues.
Making any $n$D image well-composed is interesting because,
afterwards, the classical connectivities of components are
equivalent, the component boundaries satisfy the Jordan
separation theorem, and so on. In this paper, we propose an
algorithm able to make $n$D images well-composed without
any interpolation. We illustrate on text detection the
benefits of having strong topological properties.},
lrdeinc = {Publications/boutry.15.icip.inc},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.15.icip.pdf}
}
@InProceedings{ boutry.15.ismm,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {How to Make {$n$D} Functions Digitally Well-Composed in a
Self-Dual Way},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 12th International
Symposium on Mathematical Morphology (ISMM)},
year = {2015},
series = {Lecture Notes in Computer Science},
volume = {9082},
address = {Reykjavik, Iceland},
publisher = {Springer},
editor = {J.A. Benediktsson and J. Chanussot and L. Najman and H.
Talbot},
pages = {561--572},
lrdeprojects = {Olena},
doi = {10.1007/978-3-319-18720-4_47},
abstract = {Latecki {\it et al.} introduced the notion of 2D and 3D
well-composed images, {\it i.e.}, a class of images free
from the ``connectivities paradox'' of digital topology.
Unfortunately natural and synthetic images are not {\it a
priori} well-composed. In this paper we extend the notion
of ``digital well-composedness'' to $n$D sets,
integer-valued functions (gray-level images), and
interval-valued maps. We also prove that the digital
well-composedness implies the equivalence of connectivities
of the level set components in $n$D. Contrasting with a
previous result stating that it is not possible to obtain a
discrete $n$D self-dual digitally well-composed function
with a local interpolation, we then propose and prove a
self-dual discrete (non-local) interpolation method whose
result is always a digitally well-composed function. This
method is based on a sub-part of a quasi-linear algorithm
that computes the morphological tree of shapes.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.15.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2015-04-07}
}
@PhDThesis{ boutry.16.phd,
author = {Nicolas Boutry},
title = {A Study of Well-Composedness in $n$-D},
school = {Universit\'e Paris-Est},
year = 2016,
address = {Noisy-Le-Grand, France},
month = dec,
abstract = {Digitization of the real world using real sensors has many
drawbacks; in particular, we lose ``well-composedness'' in
the sense that two digitized objects can be connected or
not depending on the connectivity we choose in the digital
image, leading then to ambiguities. Furthermore, digitized
images are arrays of numerical values, and then do not own
any topology by nature, contrary to our usual modeling of
the real world in mathematics and in physics. Losing all
these properties makes difficult the development of
algorithms which are ``topologically correct'' in image
processing: e.g., the computation of the tree of shapes
needs the representation of a given image to be continuous
and well-composed; in the contrary case, we can obtain
abnormalities in the final result. Some well-composed
continuous representations already exist, but they are not
at the same time $n$-dimensional and self-dual. In fact,
$n$-dimensionality is crucial since usual signals are more
and more 3-dimensional (like 2D videos) or 4-dimensional
(like 4D Computerized Tomography-scans), and self-duality
is necessary when a same image can contain different
objects with different contrasts. We developed then a new
way to make images well-composed by interpolation in a
self-dual way and in $n$-D; followed with a span-based
immersion, this interpolation becomes a self-dual
continuous well-composed representation of the initial
$n$-D signal. This representation benefits from many strong
topological properties: it verifies the intermediate value
theorem, the boundaries of any threshold set of the
representation are disjoint union of discrete surfaces, and
so on.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.2016.phd.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/boutry.2016.phd.slides.pdf},
lrdekeywords = {Image},
lrdeprojects = {Olena}
}
@InProceedings{ boutry.17.dgci,
author = {Nicolas Boutry and Laurent Najman and Thierry G\'eraud},
title = {Well-Composedness in {A}lexandrov spaces implies Digital
Well-Composedness in $Z^n$},
booktitle = {Discrete Geometry for Computer Imagery -- Proceedings of
the 20th IAPR International Conference on Discrete Geometry
for Computer Imagery (DGCI)},
year = {2017},
series = {Lecture Notes in Computer Science},
volume = {10502},
publisher = {Springer},
editor = {W.G. Kropatsch and N.M. Artner and I. Janusch},
pages = {225--237},
month = sep,
address = {Vienna, Austria},
doi = {10.1007/978-3-319-66272-5_19},
abstract = {In digital topology, it is well-known that, in 2D and in
3D, a digital set $X \subseteq Z^n$ is \emph{digitally
well-composed (DWC)}, {\it i.e.}, does not contain any
critical configuration, if its immersion in the Khalimsky
grids $H^n$ is \emph{well-composed in the sense of
Alexandrov (AWC)}, {\it i.e.}, its boundary is a disjoint
union of discrete $(n-1)$-surfaces. We show that this is
still true in $n$-D, $n \geq 2$, which is of prime
importance since today 4D signals are more and more frequent.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.17.dgci.pdf},
lrdekeywords = {Image},
lrdeprojects = {Olena},
lrdenewsdate = {2017-06-01}
}
@Article{ boutry.17.jmiv,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {A Tutorial on Well-Composedness},
journal = {Journal of Mathematical Imaging and Vision},
volume = {60},
number = {3},
pages = {443--478},
month = mar,
year = {2018},
doi = {10.1007/s10851-017-0769-6},
lrdeprojects = {Olena},
abstract = {Due to digitization, usual discrete signals generally
present topological paradoxes, such as the connectivity
paradoxes of Rosenfeld. To get rid of those paradoxes, and
to restore some topological properties to the objects
contained in the image, like manifoldness, Latecki proposed
a new class of images, called well-composed images, with no
topological issues. Furthermore, well-composed images have
some other interesting properties: for example, the Euler
number is locally computable, boundaries of objects
separate background from foreground, the tree of shapes is
well-defined, and so on. Last, but not the least, some
recent works in mathematical morphology have shown that
very nice practical results can be obtained thanks to
well-composed images. Believing in its prime importance in
digital topology, we then propose this state-of-the-art of
well-composedness, summarizing its different flavours, the
different methods existing to produce well-composed
signals, and the various topics that are related to
well-composedness.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.17.jmiv.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2017-10-12}
}
@Article{ boutry.18.is,
author = {Nicolas Boutry and Rocio Gonzalez-Diaz and Maria-Jose
Jimenez},
title = {Weakly Well-Composed Cell Complexes over {$n$D} Pictures},
journal = {Information Sciences},
volume = {499},
pages = {62--83},
month = oct,
year = {2019},
lrdeprojects = {Olena},
doi = {10.1016/j.ins.2018.06.005},
abstract = {In previous work we proposed a combinatorial algorithm to
``locally repair'' the cubical complex $Q(I)$ that is
canonically associated with a given 3D picture I. The
algorithm constructs a 3D polyhedral complex $P(I)$ which
is homotopy equivalent to $Q(I)$ and whose boundary surface
is a 2D manifold. A polyhedral complex satisfying these
properties is called {\it well-composed}. In the present
paper we extend these results to higher dimensions. We
prove that for a given $n$-dimensional picture the obtained
cell complex is well-composed in a weaker sense but is
still homotopy equivalent to the initial cubical complex.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.18.is.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2018-07-04}
}
@InProceedings{ boutry.19.dgci,
author = {Nicolas Boutry and Rocio Gonzalez-Diaz and Maria-Jose
Jimenez},
title = {One More Step Towards Well-Composedness of Cell Complexes
over {$n$-D} Pictures},
booktitle = {Proceedings of the 21st International Conference on
Discrete Geometry for Computer Imagery (DGCI)},
year = 2019,
month = mar,
pages = {101--114},
address = {Marne-la-Vall{\'e}e, France},
series = {Lecture Notes in Computer Science},
volume = {11414},
publisher = {Springer},
editor = {Michel Couprie and Jean Cousty and Yukiko Kenmochi and
Nabil Mustafa},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.19.dgci.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2019-06-18},
doi = {10.1007/978-3-030-14085-4_9},
abstract = {An {$n$-D} pure regular cell complex $K$ is weakly
well-composed (wWC) if, for each vertex $v$ of $K$, the set
of $n$-cells incident to $v$ is face-connected. In previous
work we proved that if an {$n$-D} picture $I$ is digitally
well composed (DWC) then the cubical complex $Q(I)$
associated to $I$ is wWC. If $I$ is not DWC, we proposed a
combinatorial algorithm to locally repair $Q(I)$ obtaining
an {$n$-D} pure simplicial complex $P_S(I)$ homotopy
equivalent to $Q(I)$ which is always wWC. In this paper we
give a combinatorial procedure to compute a simplicial
complex $P_S(\bar{I})$ which decomposes the complement
space of $|P_S(I)|$ and prove that $P_S(\bar{I})$ is also
wWC. This paper means one more step on the way to our
ultimate goal: to prove that the {$n$-D} repaired complex
is continuously well-composed (CWC), that is, the boundary
of its continuous analog is an $(n-1)$-manifold. }
}
@InProceedings{ boutry.19.ismm,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {An Equivalence Relation between Morphological Dynamics and
Persistent Homology in {1D}},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 14th International
Symposium on Mathematical Morphology (ISMM)},
year = 2019,
series = {Lecture Notes in Computer Science},
address = {Saarbr\"ucken, Germany},
publisher = {Springer},
pages = {1--12},
month = jul,
doi = {10.1007/978-3-030-20867-7_5},
lrdeprojects = {Olena},
abstract = {We state in this paper a strong relation existing between
Mathematical Morphology and Discrete Morse Theory when we
work with 1D Morse functions. Specifically, in Mathematical
Morphology, a classic way to extract robust markers for
segmentation purposes, is to use the dynamics. On the other
hand, in Discrete Morse Theory, a well-known tool to
simplify the Morse-Smale complexes representing the
topological information of a Morse function is the
persistence. We show that pairing by persistence is
equivalent to pairing by dynamics. Furthermore,
self-duality and injectivity of these pairings are
proved.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.19.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2019-03-13}
}
@Article{ boutry.19.jmiv,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {How to Make {$n$-D} Plain Maps {A}lexandrov-Well-Composed
in a Self-dual Way},
journal = {Journal of Mathematical Imaging and Vision},
volume = {61},
number = {6},
pages = {849--873},
year = {2019},
month = jul,
doi = {10.1007/s10851-019-00873-4},
lrdeprojects = {Olena},
abstract = {In 2013, Najman and G\'eraud proved that by working on a
well-composed discrete representation of a gray-level
image, we can compute what is called its tree of shapes, a
hierarchical representation of the shapes in this image.
This way, we can proceed to morphological filtering and to
image segmentation. However, the authors did not provide
such a representation for the non-cubical case. We propose
in this paper a way to compute a well-composed
representation of any gray-level image defined on a
discrete surface, which is a more general framework than
the usual cubical grid. Furthermore, the proposed
representation is self-dual in the sense that it treats
bright and dark components in the image the same way. This
paper can be seen as an extension to gray-level images of
the works of Daragon et al. on discrete surfaces.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.19.jmiv.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2019-02-04}
}
@InProceedings{ boutry.20.brainles,
author = {Nicolas Boutry and Joseph Chazalon and \'Elodie Puybareau
and Guillaume Tochon and Hugues Talbot and Thierry G\'eraud},
title = {Using Separated Inputs for Multimodal Brain Tumor
Segmentation with {3D} {U-Net}-like Architectures},
booktitle = {Proceedings of the 4th International Workshop, BrainLes
2019, Held in Conjunction with MICCAI 2019},
year = 2019,
editor = {A. Crimi and S. Bakas},
volume = {11992},
series = {Lecture Notes in Computer Science},
pages = {187--199},
publisher = {Springer},
doi = {10.1007/978-3-030-46640-4_18},
abstract = {The work presented in this paper addresses the MICCAI
BraTS 2019 challenge devoted to brain tumor segmentation
using magnetic resonance images. For each task of the
challenge, we proposed and submitted for evaluation an
original method. For the tumor segmentation task (Task 1),
our convolutional neural network is based on a variant of
the U-Net architecture of Ronneberger et al. with two
modifications: first, we separate the four convolution
parts to decorrelate the weights corresponding to each
modality, and second, we provide volumes of size 240 * 240
* 3 as inputs in these convolution parts. This way, we
profit of the 3D aspect of the input signal, and we do not
use the same weights for separate inputs. For the overall
survival task (Task 2), we compute explainable features and
use a kernel PCA embedding followed by a Random Forest
classifier to build a predictor with very few training
samples. For the uncertainty estimation task (Task 3), we
introduce and compare lightweight methods based on simple
principles which can be applied to any segmentation
approach. The overall performance of each of our
contribution is honorable given the low computational
requirements they have both for training and testing.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.20.brainles.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2020-06-01}
}
@InProceedings{ boutry.20.iwcia1,
author = {Nicolas Boutry and Rocio Gonzalez-Diaz and Maria-Jose
Jimenez and Eduardo Paluzo-Hidalgo},
title = {Euler Well-Composedness},
booktitle = {Combinatorial Image Analysis: Proceedings of the 20th
International Workshop, IWCIA 2020, Novi Sad, Serbia, July
16--18, 2020},
year = 2020,
editor = {T. Lukic and R. P. Barneva and V. Brimkov and L. Comic and
N. Sladoje},
volume = {12148},
series = {Lecture Notes in Computer Science},
pages = {3--19},
publisher = {Springer},
doi = {10.1007/978-3-030-51002-2_1},
abstract = {In this paper, we define a new flavour of
well-composedness, called Euler well-composedness, in the
general setting of regular cell complexes: A regular cell
complex is Euler well-composed if the Euler characteristic
of the link of each boundary vertex is $1$. A cell
decomposition of a picture $I$ is a pair of regular cell
complexes $\big(K(I),K(\bar{I})\big)$ such that $K(I)$
(resp. $K(\bar{I})$) is a topological and geometrical model
representing $I$ (resp. its complementary, $\bar{I}$).
Then, a cell decomposition of a picture $I$ is self-dual
Euler well-composed if both $K(I)$ and $K(\bar{I})$ are
Euler well-composed. We prove in this paper that, first,
self-dual Euler well-composedness is equivalent to digital
well-composedness in dimension 2 and 3, and second, in
dimension 4, self-dual Euler well-composedness implies
digital well-composedness, though the converse is not true.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.20.iwcia1.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2020-07-21}
}
@InProceedings{ boutry.20.iwcia2,
author = {Nicolas Boutry and Rocio Gonzalez-Diaz and Laurent Najman
and Thierry G\'eraud},
title = {A {4D} Counter-Example Showing that {DWCness} Does Not
Imply {CWCness} in $n$-{D}},
booktitle = {Combinatorial Image Analysis: Proceedings of the 20th
International Workshop, IWCIA 2020, Novi Sad, Serbia, July
16--18, 2020},
year = 2020,
editor = {T. Lukic and R. P. Barneva and V. Brimkov and L. Comic and
N. Sladoje},
volume = {12148},
series = {Lecture Notes in Computer Science},
pages = {73--87},
publisher = {Springer},
doi = {10.1007/978-3-030-51002-2_6},
abstract = {In this paper, we prove that the two flavours of
well-composedness called Continuous Well-Composedness
(shortly CWCness), stating that the boundary of the
continuous analog of a discrete set is a manifold, and
Digital Well-Composedness (shortly DWCness), stating that a
discrete set does not contain any critical configuration,
are not equivalent in dimension 4. To prove this, we
exhibit the example of a configuration of 8 tesseracts (4D
cubes) sharing a common corner (vertex), which is DWC but
not CWC. This result is surprising since we know that
CWCness and DWCness are equivalent in 2D and 3D. To reach
our goal, we use local homology.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.20.iwcia2.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2020-07-21}
}
@Article{ boutry.20.jmiv.1,
author = {Nicolas Boutry and Laurent Najman and Thierry G\'eraud},
title = {Topological Properties of the First Non-Local Digitally
Well-Composed Interpolation on {$n$-D} Cubical Grids},
journal = {Journal of Mathematical Imaging and Vision},
volume = {62},
pages = {1256--1284},
month = sep,
year = {2020},
doi = {10.1007/s10851-020-00989-y},
lrdeprojects = {Olena},
abstract = {In discrete topology, we like digitally well-composed
(shortly DWC) interpolations because they remove pinches in
cubical images. Usual well-composed interpolations are
local and sometimes self-dual (they treat in a same way
dark and bright components in the image). In our case, we
are particularly interested in $n$-D self-dual DWC
interpolations to obtain a purely self-dual tree of shapes.
However, it has been proved that we cannot have an $n$-D
interpolation which is at the same time local, self-dual,
and well-composed. By removing the locality constraint, we
have obtained an $n$-D interpolation with many properties
in practice: it is self-dual, DWC, and in-between (this
last property means that it preserves the contours). Since
we did not publish the proofs of these results before, we
propose to provide in a first time the proofs of the two
last properties here (DWCness and in-betweeness) and a
sketch of the proof of self-duality (the complete proof of
self-duality requires more material and will come later).
Some theoretical and practical results are given. },
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.20.jmiv.1.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2020-09-03}
}
@Article{ boutry.20.jmiv.2,
author = {Nicolas Boutry and Laurent Najman and Thierry G\'eraud},
title = {Equivalence between Digital Well-Composedness and
Well-Composedness in the Sense of {A}lexandrov on {$n$-D}
Cubical Grids},
journal = {Journal of Mathematical Imaging and Vision},
volume = {62},
pages = {1285--1333},
month = sep,
year = {2020},
doi = {10.1007/s10851-020-00988-z},
lrdeprojects = {Olena},
abstract = {Among the different flavors of well-composednesses on
cubical grids, two of them, called respectively Digital
Well-Composedness (DWCness) and Well-Composedness in the
sense of Alexandrov (AWCness), are known to be equivalent in
2D and in 3D. The former means that a cubical set does not
contain critical configurations when the latter means that
the boundary of a cubical set is made of a disjoint union
of discrete surfaces. In this paper, we prove that this
equivalence holds in $n$-D, which is of interest because
today images are not only 2D or 3D but also 4D and beyond.
The main benefit of this proof is that the topological
properties available for AWC sets, mainly their separation
properties, are also true for DWC sets, and the properties
of DWC sets are also true for AWC sets: an Euler number
locally computable, equivalent connectivities from a local
or global point of view... This result is also true for
gray-level images thanks to cross-section topology, which
means that the sets of shapes of DWC gray-level images make
a tree like the ones of AWC gray-level images. },
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.20.jmiv.2.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2020-09-03}
}
@InProceedings{ boutry.21.dgmm.1,
author = {Nicolas Boutry and Thierry G\'eraud and Laurent Najman},
title = {An Equivalence Relation between Morphological Dynamics and
Persistent Homology in {$n$-D}},
booktitle = {Proceedings of the IAPR International Conference on
Discrete Geometry and Mathematical Morphology (DGMM)},
year = 2021,
month = may,
address = {Uppsala, Sweden},
series = {Lecture Notes in Computer Science},
volume = {12708},
publisher = {Springer},
pages = {525--537},
abstract = {In Mathematical Morphology (MM), dynamics are used to
compute markers to proceed for example to watershed-based
image decomposition. At the same time, persistence is a
concept coming from Persistent Homology (PH) and Morse
Theory (MT) and represents the stability of the extrema of
a Morse function. Since these concepts are similar on Morse
functions, we studied their relationship and we found, and
proved, that they are equal on 1D Morse functions. Here, we
propose to extend this proof to $n$-D, $n \geq 2$, showing
that this equality can be applied to $n$-D images and not
only to 1D functions. This is a step further to show how
much MM and MT are related.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.21.dgmm.1.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2021-03-02},
doi = {10.1007/978-3-030-76657-3_38}
}
@InProceedings{ boutry.21.dgmm.2,
author = {Nicolas Boutry and Thierry G\'eraud},
title = {A New Matching Algorithm between Trees of Shapes and its
Application to Brain Tumor Segmentation},
booktitle = {Proceedings of the IAPR International Conference on
Discrete Geometry and Mathematical Morphology (DGMM)},
year = 2021,
month = may,
pages = {67--78},
address = {Uppsala, Sweden},
series = {Lecture Notes in Computer Science},
volume = {12708},
publisher = {Springer},
abstract = {Many approaches exist to compute the distance between two
trees in pattern recognition. These trees can be structures
with or without values on their nodes or edges. However,
none of these distances take into account the shapes
possibly associated to the nodes of the tree. For this
reason, we propose in this paper a new distance between two
trees of shapes based on the Hausdorff distance. This
distance allows us to make inexact tree matching and to
compute what we call residual trees, representing where two
trees differ. We will also see that thanks to these
residual trees, we can obtain good results in matter of
brain tumor segmentation. This segmentation does not
provide only a segmentation but also the tree of shapes
corresponding to the segmentation and its depth map.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.21.dgmm.2.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2021-03-02},
doi = {10.1007/978-3-030-76657-3_4}
}
@InProceedings{ boutry.21.dgmm.3,
author = {Nicolas Boutry and Guillaume Tochon},
title = {Stability of the Tree of Shapes to Additive Noise},
booktitle = {Proceedings of the IAPR International Conference on
Discrete Geometry and Mathematical Morphology (DGMM)},
year = 2021,
month = may,
address = {Uppsala, Sweden},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {12708},
pages = {365--377},
abstract = {The tree of shapes (ToS) is a famous self-dual
hierarchical structure in mathematical morphology, which
represents the inclusion relationship of the shapes
(\textit{i.e.} the interior of the level lines with holes
filled) in a grayscale image. The ToS has already found
numerous applications in image processing tasks, such as
grain filtering, contour extraction, image simplification,
and so on. Its structure consistency is bound to the
cleanliness of the level lines, which are themselves deeply
affected by the presence of noise within the image.
However, according to our knowledge, no one has measured
before how resistant to (additive) noise this hierarchical
structure is. In this paper, we propose and compare several
measures to evaluate the stability of the ToS structure to
noise.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.21.dgmm.3.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2021-03-02},
doi = {10.1007/978-3-030-76657-3_26}
}
@Article{ boutry.21.media,
author = {Sharib Ali and Mariia Dmitrieva and Noha Ghatwary and
Sophia Bano and Gorkem Polat and Alptekin Temizel and
Adrian Krenzer and Amar Hekalo and Yun Bo Guo and Bogdan
Matuszewski and Mourad Gridach and Irina Voiculescu and
Vishnusai Yoganand and Arnav Chavan and Aryan Raj and Nhan
T. Nguyen and Dat Q. Tran and Le Duy Huynh and Nicolas
Boutry and Shahadate Rezvy and Haijian Chen and Yoon Ho
Choi and Anand Subramanian and Velmurugan Balasubramanian
and Xiaohong W. Gao and Hongyu Hu and Yusheng Liao and
Danail Stoyanov and Christian Daul and Stefano Realdon and
Renato Cannizzaro and Dominique Lamarque and Terry
Tran-Nguyen and Adam Bailey and Barbara Braden and James
East and Jens Rittscher},
title = {Deep Learning for Detection and Segmentation of Artefact
and Disease Instances in Gastrointestinal Endoscopy},
journal = {Medical Image Analysis},
volume       = {70},
pages        = {102002},
year = {2021},
month = may,
doi = {10.1016/j.media.2021.102002},
abstract = {The Endoscopy Computer Vision Challenge (EndoCV) is a
crowd-sourcing initiative to address eminent problems in
developing reliable computer aided detection and diagnosis
endoscopy systems and suggest a pathway for clinical
translation of technologies. Whilst endoscopy is a widely
used diagnostic and treatment tool for hollow-organs, there
are several core challenges often faced by endoscopists,
mainly: 1) presence of multi-class artefacts that hinder
their visual interpretation, and 2) difficulty in
identifying subtle precancerous precursors and cancer
abnormalities. Artefacts often affect the robustness of
deep learning methods applied to the gastrointestinal tract
organs as they can be confused with tissue of interest.
EndoCV2020 challenges are designed to address research
questions in these remits. In this paper, we present a
summary of methods developed by the top 17 teams and
provide an objective comparison of state-of-the-art methods
and methods designed by the participants for two
sub-challenges: i) artefact detection and segmentation
(EAD2020), and ii) disease detection and segmentation
(EDD2020). Multi-center, multi-organ, multi-class, and
multi-modal clinical endoscopy datasets were compiled for
both EAD2020 and EDD2020 sub-challenges. The out-of-sample
generalization ability of detection algorithms was also
evaluated. Whilst most teams focused on accuracy
improvements, only a few methods hold credibility for
clinical usability. The best performing teams provided
solutions to tackle class imbalance, and variabilities in
size, origin, modality and occurrences by exploring data
augmentation, data fusion, and optimal class thresholding
techniques.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/boutry.21.media.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2021-02-24}
}
@InProceedings{ buatois.19.brainles,
title = {Two Stages {CNN}-Based Segmentation of Gliomas,
Uncertainty Quantification and Prediction of Overall
Patient Survival},
author = {Thibault Buatois and \'Elodie Puybareau and Guillaume
Tochon and Joseph Chazalon},
booktitle = {International MICCAI Brainlesion Workshop},
year = {2019},
editor = {A. Crimi and S. Bakas},
volume = {11992},
series = {Lecture Notes in Computer Science},
pages = {167--178},
publisher = {Springer},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/buatois.19.brainles.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2020-09-03},
doi = {10.1007/978-3-030-46643-5_16}
}
@InProceedings{ burrus.03.mpool,
author = {Nicolas Burrus and Alexandre Duret-Lutz and Thierry
G\'eraud and David Lesage and Rapha\"el Poss},
title = {A static {C++} object-oriented programming ({SCOOP})
paradigm mixing benefits of traditional {OOP} and generic
programming},
booktitle = {Proceedings of the Workshop on Multiple Paradigm with
Object-Oriented Languages (MPOOL)},
year = 2003,
address = {Anaheim, CA, USA},
month = oct,
abstract = {Object-oriented and generic programming are both supported
in C++. OOP provides high expressiveness whereas GP leads
to more efficient programs by avoiding dynamic typing. This
paper presents SCOOP, a new paradigm which enables both
classical OO design and high performance in C++ by mixing
OOP and GP. We show how classical and advanced OO features
such as virtual methods, multiple inheritance, argument
covariance, virtual types and multimethods can be
implemented in a fully statically typed model, hence
without run-time overhead.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/mpool03-abstract.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2003-10-29}
}
@InProceedings{ cadilhac.06.avocs,
author = {Micha\"el Cadilhac and Thomas H\'erault and Richard
Lassaigne and Sylvain Peyronnet and S\'ebastien Tixeuil},
title = {Evaluating complex {MAC} protocols for sensor networks
with {APMC}},
booktitle = {Proceedings of the 6th International Workshop on Automated
Verification of Critical Systems (AVoCS)},
year = 2006,
series = {Electronic Notes in Theoretical Computer Science Series},
pages = {33--46},
volume = 185,
abstract = {In this paper we present an analysis of a MAC (Medium
Access Control) protocol for wireless sensor networks. The
purpose of this protocol is to manage wireless media access
by constructing a Time Division Media Access (TDMA)
schedule. APMC (Approximate Probabilistic Model Checker) is
a tool that uses approximation-based verification
techniques in order to analyse the behavior of complex
probabilistic systems. Using APMC, we approximately
computed the probabilities of several properties of the MAC
protocol being studied, thus giving some insights about its
performance.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/cadilhac.06.avocs.pdf},
lrdeprojects = {APMC},
lrdenewsdate = {2006-07-27}
}
@InProceedings{ calarasanu.15.icdar,
author = {Stefania Calarasanu and Jonathan Fabrizio and S\'everine
Dubuisson},
title = {Using histogram representation and Earth Mover's Distance
as an evaluation tool for text detection},
booktitle = {Proceedings of the 13th IAPR International Conference on
Document Analysis and Recognition (ICDAR)},
address = {Nancy, France},
month = aug,
year = 2015,
pages = {221--225},
lrdeprojects = {Olena},
abstract = { In the context of text detection evaluation, it is
essential to use protocols that are capable of describing
both the quality and the quantity aspects of detection
results. In this paper we propose a novel visual
representation and evaluation tool that captures the whole
nature of a detector by using histograms. First, two
histograms (coverage and accuracy) are generated to
visualize the different characteristics of a detector.
Secondly, we compare these two histograms to a so called
optimal one to compute representative and comparable
scores. To do so, we introduce the usage of the Earth
Mover's Distance as a reliable evaluation tool to estimate
recall and precision scores. Results obtained on the ICDAR
2013 dataset show that this method intuitively
characterizes the accuracy of a text detector and gives at
a glance various useful characteristics of the analyzed
algorithm.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/calarasanu.15.icdar.pdf},
doi = {10.1109/ICDAR.2015.7333756}
}
@PhDThesis{ calarasanu.15.phd,
author = {Stefania Calarasanu},
title = {Improvement of a text detection chain and the proposition
of a new evaluation protocol for text detection
algorithms},
school = {Universit\'e Pierre et Marie Curie - Paris 6},
year = 2015,
address = {Paris, France},
month = dec,
abstract = {The objective of this thesis is twofold. On one hand it
targets the proposition of a more accurate evaluation
protocol designed for text detection systems that solves
some of the existing problems in this area. On the other
hand, it focuses on the design of a text rectification
procedure used for the correction of highly deformed texts.
Text detection systems have gained a significant importance
during the last years. The growing number of approaches
proposed in the literature requires a rigorous performance
evaluation and ranking. In the context of text detection,
an evaluation protocol relies on three elements: a reliable
text reference, a matching set of rules deciding the
relationship between the ground truth and the detections
and finally a set of metrics that produce intuitive scores.
The few existing evaluation protocols often lack accuracy
either due to inconsistent matching procedures that provide
unfair scores or due to unrepresentative metrics. Despite
these issues, until today, researchers continue to use
these protocols to evaluate their work. In this Ph.D thesis
we propose a new evaluation protocol for text detection
algorithms that tackles most of the drawbacks faced by
currently used evaluation methods. This work is focused on
three main contributions: firstly, we introduce a complex
text reference representation that does not constrain text
detectors to adopt a specific detection granularity level
or annotation representation; secondly, we propose a set of
matching rules capable of evaluating any type of scenario
that can occur between a text reference and a detection;
and finally we show how we can analyze a set of detection
results, not only through a set of metrics, but also
through an intuitive visual representation. We use this
protocol to evaluate different text detectors and then
compare the results with those provided by alternative
evaluation methods. A frequent challenge for many Text
Understanding Systems is to tackle the variety of text
characteristics in born-digital and natural scene images to
which current OCRs are not well adapted. For example, texts
in perspective are frequently present in real-word images
because the camera capture angle is not normal to the plane
containing text regions. Despite the ability of some
detectors to accurately localize such text objects, the
recognition stage fails most of the time. Indeed, most OCRs
are not designed to handle text strings in perspective but
rather expect horizontal texts in a parallel-frontal plane
to provide a correct transcription. All these aspects,
together with the proposition of a very challenging
dataset, motivated us to propose a rectification procedure
capable of correcting highly distorted texts.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/calarasanu.15.phd.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/calarasanu.15.phd_slides.pdf},
lrdeprojects = {Olena}
}
@Article{ calarasanu.16.ivc,
author = {Stefania Calarasanu and Jonathan Fabrizio and S\'everine
Dubuisson},
title = {What is a good evaluation protocol for text localization
systems? Concerns, arguments, comparisons and solutions},
journal = {Image and Vision Computing},
year = 2016,
volume = 46,
month = feb,
pages = {1--17},
lrdeprojects = {Olena},
abstract = {A trustworthy protocol is essential to evaluate a text
detection algorithm in order to, first measure its
efficiency and adjust its parameters and, second to compare
its performances with those of other algorithms. However,
current protocols do not give precise enough evaluations
because they use coarse evaluation metrics, and deal with
inconsistent matchings between the output of detection
algorithms and the ground truth, both often limited to
rectangular shapes. In this paper, we propose a new
evaluation protocol, named EvaLTex, that solves some of the
current problems associated with classical metrics and
matching strategies. Our system deals with different kinds
of annotations and detection shapes. It also considers
different kinds of granularity between detections and
ground truth objects and hence provides more realistic and
accurate evaluation measures. We use this protocol to
evaluate text detection algorithms and highlight some key
examples that show that the provided scores are more
relevant than those of currently used evaluation protocols.
},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/calarasanu.16.ivc.pdf},
doi = {10.1016/j.imavis.2015.12.001}
}
@InProceedings{ calarasanu.16.iwrr,
author = {Stefania Calarasanu and Jonathan Fabrizio and S\'everine
Dubuisson},
title = {From text detection to text segmentation: a unified
evaluation scheme},
booktitle = {Proceedings of the 2nd International Workshop on Robust
Reading Conference (IWRR-ECCV)},
address = {Amsterdam, The Netherlands},
month = oct,
year = 2016,
lrdeprojects = {Olena},
abstract = {Current text segmentation evaluation protocols are often
incapable of properly handling different scenarios
(broken/merged/partial characters). This leads to scores
that incorrectly reflect the segmentation accuracy. In this
article we propose a new evaluation scheme that overcomes
most of the existent drawbacks by extending the EvaLTex
protocol (initially designed to evaluate text detection at
region level). This new unified platform has numerous
advantages: it is able to evaluate a text understanding
system at every detection stage and granularity level
(paragraph/line/word and now character) by using the same
metrics and matching rules; it is robust to all
segmentation scenarios; it provides a qualitative and
quantitative evaluation and a visual score representation
that captures the whole behavior of a segmentation
algorithm. Experimental results on nine segmentation
algorithms using different evaluation frameworks are also
provided to emphasize the interest of our method.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/calarasanu.16.iwrr.pdf}
}
@InProceedings{ calarasanu.16.visapp,
author = {Stefania Calarasanu and S\'everine Dubuisson and Jonathan
Fabrizio },
title = {Towards the rectification of highly distorted texts},
booktitle = {Proceedings of the 11th International Conference on
Computer Vision Theory and Applications (VISAPP)},
address      = {Rome, Italy},
month = feb,
year = 2016,
lrdeprojects = {Olena},
abstract = {A frequent challenge for many Text Understanding Systems
is to tackle the variety of text characteristics in
born-digital and natural scene images to which current OCRs
are not well adapted. For example, texts in perspective are
frequently present in real-word images, but despite the
ability of some detectors to accurately localize such text
objects, the recognition stage fails most of the time.
Indeed, most OCRs are not designed to handle text strings
in perspective but rather expect horizontal texts in a
parallel-frontal plane to provide a correct transcription.
In this paper, we propose a rectification procedure that
can correct highly distorted texts, subject to rotation,
shearing and perspective deformations. The method is based
on an accurate estimation of the quadrangle bounding the
deformed text in order to compute a homography to transform
this quadrangle (and its content) into a horizontal
rectangle. The rectification is validated on the dataset
proposed during the ICDAR 2015 Competition on Scene Text
Rectification. },
lrdepaper = {http://www.lrde.epita.fr/dload/papers/calarasanu.16.visapp.pdf},
doi = {10.5220/0005772602410248}
}
@InProceedings{ carlier.02.itrs,
author = {S\'ebastien Carlier},
title = {Polar type inference with intersection types and $\omega$},
booktitle = {Proceedings of the 2nd Workshop on Intersection Types and
Related Systems (ITRS), published in: Electronic Notes in
Theoretical Computer Science},
volume = 70,
number       = 1,
publisher = {Elsevier},
year = 2002,
address = {Copenhagen, Denmark},
month = jul,
lrdeprojects = {Software},
abstract = {We present a type system featuring intersection types and
omega, a type constant which is assigned to unused terms.
We exploit and extend the technology of expansion variables
from the recently developed System I, with which we believe
our system shares many interesting properties, such as
strong normalization, principal typings, and compositional
analysis. Our presentation emphasizes a polarity discipline
and shows its benefits. We syntactically distinguish
positive and negative types, and give them different
interpretations. We take the point of view that the
interpretation of a type is intrinsic to it, and should not
change implicitly when it appears at the opposite polarity.
Our system is the result of a process which started with an
extension of Trevor Jim's Polar Type System.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/carlier.02.itrs.pdf},
lrdekeywords = {Software engineering}
}
@InProceedings{ carlinet.13.ismm,
author = {Edwin Carlinet and Thierry G\'eraud},
title = {A comparison of many max-tree computation algorithms},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 11th International
Symposium on Mathematical Morphology (ISMM)},
year = 2013,
editor = {C.L. Luengo Hendriks and G. Borgefors and R. Strand},
volume = 7883,
series       = {Lecture Notes in Computer Science},
address = {Uppsala, Sweden},
publisher = {Springer},
pages = {73--85},
lrdeprojects = {Olena},
abstract = {With the development of connected filters in the last
decade, many algorithms have been proposed to compute the
max-tree. Max-tree allows computation of the most advanced
connected operators in a simple way. However, no exhaustive
comparison of these algorithms has been proposed so far and
the choice of an algorithm over another depends on many
parameters. Since the need for fast algorithms is obvious
for production code, we present an in depth comparison of
five algorithms and some variations of them in a unique
framework. Finally, a decision tree will be proposed to
help the user choose the most appropriate algorithm
according to their requirements.},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/carlinet.13.ismm-poster.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/carlinet.13.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2013-03-14}
}
@Misc{ carlinet.14.geodis,
author = {Edwin Carlinet and Thierry G\'eraud},
title = {Traitement d'images multivari\'ees avec l'arbre des
formes},
howpublished = {Communication at Journ\'ee du Groupe de Travail de
G\'eometrie Discr\`ete (GT GeoDis, Reims Image 2014)},
month = nov,
year = {2014},
note = {In French},
abstract = {L'Arbre des Formes (ToS) est un arbre morphologique qui
fournit une repr\'esentation hi\'erarchique de l'image
auto-duale et invariante par changement de contraste. De ce
fait, il est adapt\'e \`a de nombreuses applications de
traitement d'images. N\'eanmoins, on se heurte \`a des
probl\`emes avec l'Arbre des Formes lorsqu'on doit traiter
des images couleurs car sa d\'efinition tient uniquement en
niveaux de gris. Les solutions les plus courantes sont
alors d'effectuer un traitement composante par composante
(marginal) ou d'imposer un ordre total. Ces solutions ne
sont g\'en\'eralement pas satisfaisantes et font survenir
des probl\`emes (des artefacts de couleur, des pertes de
propri\'et\'es...) Dans cet article, nous insistons sur la
n\'ecessit\'e d'une repr\'esentation \`a la fois auto-duale
et invariante par changement de contraste et nous proposons
une m\'ethode qui construit un Arbre des Formes unique en
fusionnant des formes issues des composantes marginales
tout en pr\'eservant les propri\'et\'es intrins\`eques de
l'arbre. Cette m\'ethode s'affranchit de toute relation
d'ordre total en utilisant uniquement la relation
d'inclusion entre les formes et en effectuant une fusion
dans l'espace des formes. Finalement, nous montrerons la
pertinence de notre m\'ethode et de la structure en les
illustrant sur de la simplification d'images et de la segmentation interactive.},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2014-11-17},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/carlinet.14.geodis.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/carlinet.14.geodis.poster.pdf}
}
@InProceedings{ carlinet.14.icip,
author = {Edwin Carlinet and Thierry G\'eraud},
title = {Getting a morphological Tree of Shapes for Multivariate
Images: Paths, Traps and Pitfalls},
booktitle = {Proceedings of the 21st International Conference on Image
Processing (ICIP)},
year = 2014,
address = {Paris, France},
pages = {615--619},
lrdeprojects = {Olena},
abstract = {The Tree of Shapes is a morphological tree that provides
a high-level hierarchical representation of the image
suitable for many image processing tasks. This structure
has the desirable properties to be self-dual and
contrast-invariant and describes the organization of the
objects through level lines inclusion. Yet it is defined on
gray-level while many images have multivariate data (color
images, multispectral images\ldots) where information are
split across channels. In this paper, we propose some leads
to extend the tree of shapes on colors with classical
approaches based on total orders, more recent approaches
based on graphs and also a new distance-based method.
Eventually, we compare these approaches through denoising
to highlight their strengths and weaknesses and show the
strong potential of the new methods compared to classical
ones.},
lrdeinc = {Publications/carlinet.14.icip.inc},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/carlinet.14.icip.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/carlinet.14.icip.poster.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2014-05-26},
doi = {10.1109/ICIP.2014.7025123}
}
@InProceedings{ carlinet.14.icpr,
author = {Edwin Carlinet and Thierry G\'eraud},
title = {A Morphological Tree of Shapes for Color Images},
booktitle = {Proceedings of the 22nd International Conference on
Pattern Recognition (ICPR)},
year = 2014,
month = aug,
address = {Stockholm, Sweden},
pages = {1133--1137},
lrdeprojects = {Olena},
lrdeinc = {Publications/carlinet.14.icpr.inc},
lrdekeywords = {Image},
lrdenewsdate = {2014-04-02},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/carlinet.14.icpr.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/carlinet.14.icpr.poster.pdf},
doi = {10.1109/ICPR.2014.204}
}
@Article{ carlinet.14.itip,
author = {Edwin Carlinet and Thierry G\'eraud},
title = {A Comparative Review of Component Tree Computation
Algorithms},
journal = {IEEE Transactions on Image Processing},
year = 2014,
volume = {23},
number = {9},
month = sep,
pages = {3885--3895},
lrdeprojects = {Olena},
abstract = {Connected operators are morphological tools that have the
property of filtering images without creating new contours
and without moving the contours that are preserved. Those
operators are related to the max-tree and min-tree
representations of images, and many algorithms have been
proposed to compute those trees. However, no exhaustive
comparison of these algorithms has been proposed so far,
and the choice of an algorithm over another depends on many
parameters. Since the need for fast algorithms is obvious
for production code, we present an in-depth comparison of
the existing algorithms in a unique framework, as well as
variations of some of them that improve their efficiency.
This comparison involves both sequential and parallel
algorithms, and execution times are given with respect to
the number of threads, the input image size, and the pixel
value quantization. Eventually, a decision tree is given to
help the user choose the most appropriate algorithm with
respect to the user requirements. To favor reproducible
research, an online demo allows the user to upload an image
and bench the different algorithms, and the source code of
every algorithm has been made available.},
lrdeinc = {Olena/MaxtreeReview},
lrdekeywords = {Image},
lrdenewsdate = {2014-06-16},
lrdepaper = {https://www.lrde.epita.fr/dload/papers/carlinet.14.itip.pdf},
doi          = {10.1109/TIP.2014.2336551}
}
@InProceedings{ carlinet.15.gretsi,
author = {Edwin Carlinet and Thierry G\'eraud},
title = {Une approche morphologique de segmentation interactive
avec l'arbre des formes couleur},
booktitle = {Actes du 15e Colloque GRETSI},
year = {2015},
address = {Lyon, France},
category = {national},
month = sep,
lrdeprojects = {Olena},
abstract = {L'arbre des formes est un arbre morphologique \`a la fois
auto-dual et invariant par changement de contraste. Il
fournit une repr\'esentation haut-niveau de l'image,
int\'eressante pour de nombreuses t\^aches de traitement
d'images. Malgr\'e son potentiel et sa simplicit\'e, il
reste largement sous-utilis\'e en reconnaissance des formes
et vision par ordinateur. Dans cet article, nous
pr\'esentons une m\'ethode de segmentation interactive qui
s'effectue simplement en manipulant cet arbre. Pour cela,
nous nous appuierons sur une repr\'esentation r\'ecemment
d\'efinie~: \og l'Arbre des Formes Couleur \fg. La
m\'ethode de segmentation interactive que nous proposons ne
requiert aucun apprentissage statistique~; n\'eanmoins elle
obtient des r\'esultats qui rivalisent avec ceux de
l'\'etat de l'art. Bien que pr\'eliminaires, les
r\'esultats obtenus mettent en avant le potentiel et
l'int\'er\^et des m\'ethodes travaillant dans l'espace des formes.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/carlinet.15.gretsi.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2015-06-16}
}
@inproceedings{carlinet.15.ipta,
  author       = {Edwin Carlinet and Thierry G\'eraud},
  title        = {Morphological Object Picking Based on the Color Tree of
                  Shapes},
  booktitle    = {Proceedings of 5th International Conference on Image
                  Processing Theory, Tools and Applications (IPTA'15)},
  address      = {Orl{\'e}ans, France},
  month        = nov,
  year         = {2015},
  pages        = {125--130},
  doi          = {10.1109/IPTA.2015.7367111},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.15.ipta.pdf},
  lrdenewsdate = {2015-06-29},
  abstract     = {The Tree of Shapes is a self-dual and contrast invariant
                  morphological tree that provides a high-level hierarchical
                  representation of images, suitable for many image
                  processing tasks. Despite its powerfulness and its
                  simplicity, it is still under-exploited in pattern
                  recognition and computer vision. In this paper, we show
                  that both interactive and automatic image segmentation can
                  be achieved with some simple tree processings. To that aim,
                  we rely on the ``Color Tree of Shapes'', recently defined.
                  We propose a method for interactive segmentation that does
                  not involve any statistical learning, yet yielding results
                  that compete with state-of-the-art approaches. We further
                  extend this algorithm to unsupervised segmentation and give
                  some results. Although they are preliminary, they highlight
                  the potential of such an approach that works in the shape
                  space.}
}
@inproceedings{carlinet.15.ismm,
  author       = {Edwin Carlinet and Thierry G\'eraud},
  title        = {A Color Tree of Shapes with Illustrations on Filtering,
                  Simplification, and Segmentation},
  booktitle    = {Mathematical Morphology and Its Application to Signal and
                  Image Processing -- Proceedings of the 12th International
                  Symposium on Mathematical Morphology (ISMM)},
  editor       = {J.A. Benediktsson and J. Chanussot and L. Najman and H.
                  Talbot},
  series       = {Lecture Notes in Computer Science Series},
  volume       = {9082},
  publisher    = {Springer},
  address      = {Reykjavik, Iceland},
  year         = {2015},
  pages        = {363--374},
  doi          = {10.1007/978-3-319-18720-4_31},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.15.ismm.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/carlinet.15.ismm.slides.pdf},
  lrdenewsdate = {2015-04-07},
  abstract     = {The Tree of Shapes is a morphological tree that provides a
                  high-level, hierarchical, self-dual, and contrast invariant
                  representation of images, suitable for many image
                  processing tasks. When dealing with color images, one
                  cannot use the Tree of Shapes because its definition is
                  ill-formed on multivariate data. Common workarounds such as
                  marginal processing, or imposing a total order on data are
                  not satisfactory and yield many problems (color artifacts,
                  loss of invariances, etc.) In this paper, we highlight the
                  need for a self-dual and contrast invariant representation
                  of color images and we provide a method that builds a
                  single Tree of Shapes by merging the shapes computed
                  marginally, while guarantying the most important properties
                  of the ToS. This method does not try to impose an arbitrary
                  total ordering on values but uses only the inclusion
                  relationship between shapes. Eventually, we show the
                  relevance of our method and our structure through some
                  illustrations on filtering, image simplification, and
                  interactive segmentation.}
}
@Article{ carlinet.15.itip,
  author       = {Edwin Carlinet and Thierry G\'eraud},
  title        = {{MToS}: A Tree of Shapes for Multivariate Images},
  journal      = {IEEE Transactions on Image Processing},
  year         = 2015,
  volume       = {24},
  number       = {12},
  pages        = {5330--5342},
  month        = dec,
  lrdeprojects = {Olena},
  abstract     = {The Tree of Shapes (ToS) is a morphological tree that
                  provides an high-level hierarchical representation of the
                  image suitable for many image processing tasks. When
                  dealing with color images, one cannot use the ToS because
                  its definition is ill-formed on multivariate data. Common
                  workarounds such as marginal processing, or imposing a
                  total order on data are not satisfactory and yield many
                  problems (color artifacts, loss of invariances...) In this
                  paper, we highlight the need for a self-dual and contrast
                  invariant representation of the image and provide a method
                  that builds a single ToS by merging the shapes computed
                  marginally and preserving the most important properties of
                  the ToS. This method does not try to impose an arbitrary
                  total ordering on values but uses only the inclusion
                  relationship between shapes and the merging strategy works
                  in a shape space. Eventually, we show the relevance of our
                  method and our structure through several applications
                  involving color and multispectral image analysis.},
  lrdeinc      = {Publications/carlinet.15.itip.inc},
  lrdekeywords = {Image},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/carlinet.15.itip.pdf},
  lrdenewsdate = {2015-10-26},
  doi          = {10.1109/TIP.2015.2480599}
}
@PhDThesis{ carlinet.15.phd,
  author       = {Edwin Carlinet},
  title        = {A Tree of Shapes for Multivariate Images},
  school       = {Universit\'e Paris Est},
  year         = 2015,
  address      = {Paris, France},
  month        = nov,
  abstract     = {Nowadays, the demand for multi-scale and region-based
                  analysis in many computer vision and pattern recognition
                  applications is obvious. No one would consider a
                  pixel-based approach as a good candidate to solve such
                  problems. To meet this need, the Mathematical Morphology
                  (MM) framework has supplied region-based hierarchical
                  representations of images such as the Tree of Shapes
                  (ToS). The ToS represents the image in terms of a tree of
                  the inclusion of its level-lines. The ToS is thus
                  self-dual and contrast-change invariant which make it
                  well-adapted for high-level image processing. Yet, it is
                  only defined on grayscale images and most attempts to
                  extend it on multivariate images - e.g. by imposing an
                  ``arbitrary'' total ordering - are not satisfactory. In
                  this dissertation, we present the Multivariate Tree of
                  Shapes (MToS) as a novel approach to extend the grayscale
                  ToS on multivariate images. This representation is a mix
                  of the ToS's computed marginally on each channel of the
                  image; it aims at merging the marginal shapes in a
                  ``sensible'' way by preserving the maximum number of
                  inclusion. The method proposed has theoretical foundations
                  expressing the ToS in terms of a topographic map of the
                  curvilinear total variation computed from the image
                  border; which has allowed its extension on multivariate
                  data. In addition, the MToS features similar properties as
                  the grayscale ToS, the most important one being its
                  invariance to any marginal change of contrast and any
                  marginal inversion of contrast (a somewhat
                  ``self-duality'' in the multidimensional case). As the
                  need for efficient image processing techniques is obvious
                  regarding the larger and larger amount of data to process,
                  we propose an efficient algorithm that can build the MToS
                  in quasi-linear time w.r.t. the number of pixels and
                  quadratic w.r.t. the number of channels. We also propose
                  tree-based processing algorithms to demonstrate in
                  practice, that the MToS is a versatile, easy-to-use, and
                  efficient structure. Eventually, to validate the soundness
                  of our approach, we propose some experiments testing the
                  robustness of the structure to non-relevant components
                  (e.g. with noise or with low dynamics) and we show that
                  such defaults do not affect the overall structure of the
                  MToS. In addition, we propose many real-case applications
                  using the MToS. Many of them are just a slight
                  modification of methods employing the ``regular'' ToS and
                  adapted to our new structure. For example, we successfully
                  use the MToS for image filtering, image simplification,
                  image segmentation, image classification and object
                  detection. From these applications, we show that the MToS
                  generally outperforms its ToS-based counterpart,
                  demonstrating the potential of our approach.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.15.phd.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/carlinet.15.phd_slides.pdf},
  lrdeprojects = {Olena}
}
@inproceedings{carlinet.17.orasis,
  author       = {Edwin Carlinet and Yongchao Xu and Nicolas Boutry and
                  Thierry G\'eraud},
  title        = {La pseudo-distance du dahu},
  booktitle    = {Actes d'ORASIS},
  address      = {Colleville-sur-Mer, France},
  month        = jun,
  year         = {2017},
  category     = {national},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.17.orasis.pdf},
  lrdenewsdate = {2017-03-21},
  abstract     = {La distance de la barri\`ere minimum est d\'efinie comme
                  le plus petit intervalle de l'ensemble des niveaux de gris
                  le long d'un chemin entre deux points dans une image. Pour
                  cela, on consid\`ere que l'image est un graphe \`a valeurs
                  sur les sommets. Cependant, cette d\'efinition ne
                  correspond pas \`a l'interpr\'etation d'une image comme
                  \'etant une carte d'\'el\'evation, c'est-\`a-dire, un
                  paysage continu d'une mani\`ere ou d'une autre. En se
                  pla\c{c}ant dans le cadre des fonctions multivoques, nous
                  pr\'esentons une nouvelle d\'efinition pour cette distance.
                  Cette d\'efinition, compatible avec l'interpr\'etation
                  paysag\`ere, est d\'enu\'ee de probl\`emes topologiques
                  bien qu'en restant dans un monde discret. Nous montrons que
                  la distance propos\'ee est reli\'ee \`a la structure
                  morphologique d'arbre des formes, qui permet de surcro\^it
                  un calcul rapide et exact de cette distance. Cela se
                  d\'emarque de sa d\'efinition classique, pour laquelle le
                  seul calcul rapide n'est qu'approximatif.}
}
@inproceedings{carlinet.18.icip,
  author       = {Edwin Carlinet and Thierry G\'eraud and S\'ebastien Crozet},
  title        = {The Tree of Shapes Turned into a Max-Tree: {A} Simple and
                  Efficient Linear Algorithm},
  booktitle    = {Proceedings of the 24th IEEE International Conference on
                  Image Processing (ICIP)},
  address      = {Athens, Greece},
  month        = oct,
  year         = {2018},
  pages        = {1488--1492},
  doi          = {10.1109/ICIP.2018.8451180},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.18.icip.pdf},
  lrdeinc      = {Publications/carlinet.18.rfiap.inc},
  lrdekeywords = {Image},
  lrdeprojects = {Olena},
  lrdenewsdate = {2018-05-10},
  abstract     = {The Tree of Shapes (ToS) is a morphological tree-based
                  representation of an image translating the inclusion of its
                  level lines. It features many invariances to image changes,
                  which makes it well-suited for a lot of applications in
                  image processing and pattern recognition. In this paper, we
                  propose a way of turning this algorithm into a Max-Tree
                  computation. The latter has been widely studied, and many
                  efficient algorithms (including parallel ones) have been
                  developed. Furthermore, we develop a specific optimization
                  to speed-up the common 2D case. It follows a simple and
                  efficient algorithm, running in linear time with a low
                  memory footprint, that outperforms other current
                  algorithms. For Reproducible Research purpose, we
                  distribute our code as free software.}
}
@InProceedings{ carlinet.18.rfiap,
  author       = {Edwin Carlinet and S\'ebastien Crozet and Thierry G\'eraud},
  title        = {Un algorithme de complexit\'e lin\'eaire pour le calcul de
                  l'arbre des formes},
  booktitle    = {Actes du congr\`es Reconnaissance des Formes, Image,
                  Apprentissage et Perception (RFIAP)},
  year         = {2018},
  month        = jun,
  address      = {Marne-la-Vall\'ee, France},
  category     = {national},
  abstract     = {L'arbre des formes (AdF) est une repr\'esentation
                  morphologique hi\'erarchique de l'image qui traduit
                  l'inclusion de ses lignes de niveaux. Il se caract\'erise
                  par son invariance \`a certains changements de l'image, ce
                  qui fait de lui un outil id\'eal pour le d\'eveloppement
                  d'applications de reconnaissance des formes. Dans cet
                  article, nous proposons une m\'ethode pour transformer sa
                  construction en un calcul de Max-tree. Ce dernier a \'et\'e
                  largement \'etudi\'e au cours des derni\`eres ann\'ees et
                  des algorithmes efficaces (dont certains parall\`eles)
                  existent d\'ej\`a. Nous proposons \'egalement une
                  optimisation qui permet d'acc\'el\'erer son calcul dans le
                  cas classique des images 2D. Il en d\'ecoule un algorithme
                  simple, efficace, s'ex\'ecutant lin\'eairement en fonction
                  du nombre de pixels, avec une faible empreinte m\'emoire,
                  et qui surpasse les algorithmes \`a l'\'etat de l'art.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.18.rfiap.pdf},
  lrdeinc      = {Publications/carlinet.18.rfiap.inc},
  lrdekeywords = {Image},
  lrdeprojects = {Olena},
  lrdenewsdate = {2018-05-04}
}
@InProceedings{ carlinet.19.csi,
  author       = {Edwin Carlinet and Thierry G\'eraud},
  title        = {Intervertebral Disc Segmentation Using Mathematical
                  Morphology---{A} {CNN}-Free Approach},
  booktitle    = {Proceedings of the 5th MICCAI Workshop \& Challenge on
                  Computational Methods and Clinical Applications for Spine
                  Imaging (CSI)},
  year         = {2019},
  publisher    = {Springer},
  series       = {Lecture Notes in Computer Science},
  volume       = {11384},
  pages        = {105--118},
  doi          = {10.1007/978-3-030-13736-6_9},
  abstract     = {In the context of the challenge of ``automatic
                  InterVertebral Disc (IVD) localization and segmentation
                  from 3D multi-modality MR images'' that took place at
                  MICCAI 2018, we have proposed a segmentation method based
                  on simple image processing operators. Most of these
                  operators come from the mathematical morphology framework.
                  Driven by some prior knowledge on IVDs (basic information
                  about their shape and the distance between them), and on
                  their contrast in the different modalities, we were able to
                  segment correctly almost every IVD. The most interesting
                  feature of our method is to rely on the morphological
                  structure called the Tree of Shapes, which is another way
                  to represent the image contents. This structure arranges
                  all the connected components of an image obtained by
                  thresholding into a tree, where each node represents a
                  particular region. Such structure is actually powerful and
                  versatile for pattern recognition tasks in medical
                  imaging.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.19.csi.pdf},
  lrdeinc      = {Publications/carlinet.19.csi.inc},
  lrdekeywords = {Image},
  lrdeprojects = {Olena},
  lrdenewsdate = {2018-11-26}
}
@InProceedings{ carlinet.19.gretsi,
  author       = {Edwin Carlinet and Thierry G\'eraud},
  title        = {Filtres connexes multivari\'es par fusion d'arbres de
                  composantes},
  booktitle    = {Proceedings of the 27th Symposium on Signal and Image
                  Processing (GRETSI)},
  category     = {national},
  year         = 2019,
  address      = {Lille, France},
  month        = aug,
  abstract     = {Les arbres de composantes fournissent une repr\'esentation
                  d'images de haut niveau, hi\'erarchis\'ee et invariante par
                  contraste, adapt\'ee \`a de nombreuses t\^aches de
                  traitement d'image. Pourtant, ils sont mal d\'efinis sur
                  des donn\'ees multivari\'ees, telle que celles des images
                  couleur, des images multimodalit\'es, des images
                  multibande, etc. Les solutions courantes, telles que le
                  traitement marginal, ou l'imposition d'un ordre total sur
                  les donn\'ees, ne sont pas satisfaisantes et g\'en\`erent
                  de nombreux probl\`emes, tels que des artefacts visuels, la
                  perte d'invariances, etc. Dans cet article, inspir\'e par
                  la mani\`ere dont l'arbre des formes multivari\'es (MToS) a
                  \'et\'e d\'efini, nous proposons une d\'efinition pour un
                  Min-Tree ou un Max-Tree multivari\'e. Nous n'imposons pas
                  un ordre total arbitraire aux valeurs; nous utilisons
                  uniquement la relation d'inclusion entre les composantes.
                  En cons\'equence, nous introduisons une nouvelle classe
                  d'ouvertures et de fermetures connect\'ees multivari\'ees.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.19.gretsi.pdf},
  lrdeposter   = {http://www.lrde.epita.fr/dload/papers/carlinet.19.gretsi.poster.pdf},
  lrdekeywords = {Image},
  lrdenewsdate = {2019-06-14}
}
@inproceedings{carlinet.19.ismm,
  author       = {Edwin Carlinet and Thierry G\'eraud},
  title        = {Introducing Multivariate Connected Openings and Closings},
  booktitle    = {Mathematical Morphology and Its Application to Signal and
                  Image Processing -- Proceedings of the 14th International
                  Symposium on Mathematical Morphology (ISMM)},
  series       = {Lecture Notes in Computer Science Series},
  publisher    = {Springer},
  address      = {Saarbr\"ucken, Germany},
  month        = jul,
  year         = 2019,
  pages        = {1--12},
  doi          = {10.1007/978-3-030-20867-7_17},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/carlinet.19.ismm.pdf},
  lrdeposter   = {http://www.lrde.epita.fr/dload/papers/carlinet.19.ismm.poster.pdf},
  lrdeinc      = {Publications/carlinet.19.ismm.inc},
  lrdenewsdate = {2019-03-13},
  abstract     = {The component trees provide a high-level, hierarchical,
                  and contrast invariant representations of images, suitable
                  for many image processing tasks. Yet their definition is
                  ill-formed on multivariate data, e.g., color images,
                  multi-modality images, multi-band images, and so on. Common
                  workarounds such as marginal processing, or imposing a
                  total order on data are not satisfactory and yield many
                  problems, such as artifacts, loss of invariances, etc. In
                  this paper, inspired by the way the Multivariate Tree of
                  Shapes (MToS) has been defined, we propose a definition for
                  a Multivariate min-tree or max-tree. We do not impose an
                  arbitrary total ordering on values; we use only the
                  inclusion relationship between components. As a
                  straightforward consequence, we thus have a new class of
                  multivariate connected openings and closings.}
}
@inproceedings{cavallaro.16.igarss,
  author       = {Gabriele Cavallaro and Mauro {Dalla Mura} and Edwin
                  Carlinet and Thierry G\'eraud and Nicola Falco and J\'on
                  Atli Benediktsson},
  title        = {Region-Based Classification of Remote Sensing Images with
                  the Morphological Tree of Shapes},
  booktitle    = {Proceedings of the IEEE International Geoscience and
                  Remote Sensing Symposium (IGARSS)},
  address      = {Beijing, China},
  month        = jul,
  year         = {2016},
  pages        = {5087--5090},
  doi          = {10.1109/IGARSS.2016.7730326},
  lrdekeywords = {Image},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/cavallaro.16.igarss.pdf},
  lrdenewsdate = {2016-04-12},
  abstract     = {Satellite image classification is a key task used in
                  remote sensing for the automatic interpretation of a large
                  amount of information. Today there exist many types of
                  classification algorithms using advanced image processing
                  methods enhancing the classification accuracy rate. One of
                  the best state-of-the-art methods which improves
                  significantly the classification of complex scenes relies
                  on Self-Dual Attribute Profiles (SDAPs). In this approach,
                  the underlying representation of an image is the Tree of
                  Shapes, which encodes the inclusion of connected components
                  of the image. The SDAP computes for each pixel a vector of
                  attributes providing a local multiscale representation of
                  the information and hence leading to a fine description of
                  the local structures of the image. Instead of performing a
                  pixel-wise classification on features extracted from the
                  Tree of Shapes, it is proposed to directly classify its
                  nodes. Extending a specific interactive segmentation
                  algorithm enables it to deal with the multi-class
                  classification problem. The method does not involve any
                  statistical learning and it is based entirely on
                  morphological information related to the tree.
                  Consequently, a very simple and effective region-based
                  classifier relying on basic attributes is presented.}
}
@InProceedings{ chazalon.17.icdar-ost,
  title        = {{SmartDoc} 2017 Video Capture: {M}obile Document
                  Acquisition in Video Mode},
  author       = {J. Chazalon and P. Gomez-Kr{\"a}mer and J.-C. Burie and M.
                  Coustaty and S. Eskenazi and M. Luqman and N. Nayef and M.
                  Rusi{\~n}ol and N. Sid{\`e}re and J.M. Ogier},
  booktitle    = {Proceedings of the 1st International Workshop on Open
                  Services and Tools for Document Analysis (ICDAR-OST)},
  year         = {2017},
  month        = nov,
  pages        = {11--16},
  address      = {Kyoto, Japan},
  abstract     = {As mobile document acquisition using smartphones is
                  getting more and more common, along with the continuous
                  improvement of mobile devices (both in terms of computing
                  power and image quality), we can wonder to which extent
                  mobile phones can replace desktop scanners. Modern
                  applications can cope with perspective distortion and
                  normalize the contrast of a document page captured with a
                  smartphone, and in some cases like bottle labels or
                  posters, smartphones even have the advantage of allowing
                  the acquisition of non-flat or large documents. However,
                  several cases remain hard to handle, such as reflective
                  documents (identity cards, badges, glossy magazine cover,
                  etc.) or large documents for which some regions require an
                  important amount of detail. This paper introduces the
                  SmartDoc 2017 benchmark (named ``SmartDoc Video Capture''),
                  which aims at assessing whether capturing documents using
                  the video mode of a smartphone could solve those issues.
                  The task under evaluation is both a stitching and a
                  reconstruction problem, as the user can move the device
                  over different parts of the document to capture details or
                  try to erase highlights. The material released consists of
                  a dataset, an evaluation method and the associated tool, a
                  sample method, and the tools required to extend the
                  dataset. All the components are released publicly under
                  very permissive licenses, and we particularly cared about
                  maximizing the ease of understanding, usage and
                  improvement.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/chazalon.17.icdar-ost.pdf},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdenewsdate = {2017-07-21},
  doi          = {10.1109/ICDAR.2017.306}
}
@InProceedings{ chazalon.21.icdar.1,
  title        = {Revisiting the {Coco} Panoptic Metric to Enable Visual and
                  Qualitative Analysis of Historical Map Instance
                  Segmentation},
  author       = {Joseph Chazalon and Edwin Carlinet},
  booktitle    = {Proceedings of the 16th International Conference on
                  Document Analysis and Recognition (ICDAR'21)},
  year         = {2021},
  month        = sep,
  series       = {Lecture Notes in Computer Science},
  publisher    = {Springer, Cham},
  volume       = {12824},
  pages        = {367--382},
  address      = {Lausanne, Switzerland},
  abstract     = {Segmentation is an important task. It is so important that
                  there exist tens of metrics trying to score and rank
                  segmentation systems. It is so important that each topic
                  has its own metric because their problem is too specific.
                  Does it? What are the fundamental differences with the
                  ZoneMap metric used for page segmentation, the COCO
                  Panoptic metric used in computer vision and metrics used to
                  rank hierarchical segmentations? In this paper, while
                  assessing segmentation accuracy for historical maps, we
                  explain, compare and demystify some of the most used
                  segmentation evaluation protocols. In particular, we focus
                  on an alternative view of the COCO Panoptic metric as a
                  classification evaluation; we show its soundness and
                  propose extensions with more ``shape-oriented'' metrics.
                  Beyond a quantitative metric, this paper aims also at
                  providing qualitative measures through
                  \emph{precision-recall maps} that enable visualizing the
                  success and the failures of a segmentation method.},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/chazalon.21.icdar.1.pdf},
  lrdeposter   = {https://www.lrde.epita.fr/dload/papers/chazalon.21.icdar.1.poster.pdf},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdenewsdate = {2021-05-17},
  doi          = {10.1007/978-3-030-86337-1_25}
}
@inproceedings{chazalon.21.icdar.2,
  author       = {Joseph Chazalon and Edwin Carlinet and Yizi Chen and
                  Julien Perret and Bertrand Dum\'enieu and Cl\'ement Mallet
                  and Thierry G\'eraud and Vincent Nguyen and Nam Nguyen and
                  Josef Baloun and Ladislav Lenc and Pavel Kr\'al},
  title        = {{ICDAR} 2021 Competition on Historical Map Segmentation},
  booktitle    = {Proceedings of the 16th International Conference on
                  Document Analysis and Recognition (ICDAR'21)},
  series       = {Lecture Notes in Computer Science},
  volume       = {12824},
  publisher    = {Springer, Cham},
  address      = {Lausanne, Switzerland},
  month        = sep,
  year         = {2021},
  pages        = {693--707},
  doi          = {10.1007/978-3-030-86337-1_46},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/chazalon.21.icdar.2.pdf},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdenewsdate = {2021-05-17},
  abstract     = {This paper presents the final results of the ICDAR 2021
                  Competition on Historical Map Segmentation (MapSeg),
                  encouraging research on a series of historical atlases of
                  Paris, France, drawn at 1/5000 scale between 1894 and 1937.
                  The competition featured three tasks, awarded separately.
                  Task~1 consists in detecting building blocks and was won by
                  the L3IRIS team using a DenseNet-121 network trained in a
                  weakly supervised fashion. This task is evaluated on 3
                  large images containing hundreds of shapes to detect.
                  Task~2 consists in segmenting map content from the larger
                  map sheet, and was won by the UWB team using a U-Net-like
                  FCN combined with a binarization method to increase
                  detection edge accuracy. Task~3 consists in locating
                  intersection points of geo-referencing lines, and was also
                  won by the UWB team who used a dedicated pipeline combining
                  binarization, line detection with Hough transform,
                  candidate filtering, and template matching for intersection
                  refinement. Tasks~2 and~3 are evaluated on 95 map sheets
                  with complex content. Dataset, evaluation tools and results
                  are available under permissive licensing at
                  \url{https://icdar21-mapseg.github.io/}.}
}
@TechReport{ chedeau.12.tr,
  author       = {Christopher Chedeau and Didier Verna},
  title        = {{JSPP}: Morphing {C++} into {JavaScript}},
  institution  = {EPITA Research and Development Laboratory},
  year         = 2012,
  number       = {201201-TR},
  month        = jan,
  abstract     = {In a time where the differences between static and dynamic
                  languages are starting to fade away, this paper brings one
                  more element to the ``convergence'' picture by showing that
                  thanks to the novelties from the recent C++0x standard, it
                  is relatively easy to implement a JavaScript layer on top
                  of C++. By that, we not only mean to implement the language
                  features, but also to preserve as much of its original
                  notation as possible. In doing so, we provide the
                  programmer with a means to freely incorporate highly
                  dynamic JavaScript-like code into a regular C++ program.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/chedeau.12.tr.pdf},
  lrdekeywords = {Software engineering}
}
@inproceedings{chekroun.06.iciar,
  author       = {Mickael Chekroun and J\'er\^ome Darbon and Igor Ciril},
  title        = {On a Polynomial Vector Field Model for Shape
                  Representation},
  booktitle    = {Proceedings of the International Conference on Image
                  Analysis and Recognition (ICIAR)},
  publisher    = {Springer-Verlag},
  address      = {Povoa de Varzim, Portugal},
  month        = sep,
  year         = 2006,
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdenewsdate = {2006-04-28},
  abstract     = {In this paper we propose an efficient algorithm to perform
                  a polynomial approximation of the vector field derived from
                  the usual distance mapping method. The main ingredients
                  consist of minimizing a quadratic functional and
                  transforming this problem in an appropriate setting for
                  implementation. With this approach, we reduce the problem
                  of obtaining an approximating polynomial vector field to
                  the resolution of a not expansive linear algebraic system.
                  By this procedure, we obtain an analytical shape
                  representation that relies only on some coefficients.
                  Fidelity and numerical efficiency of our approach are
                  presented on illustrative examples.}
}
@InProceedings{ chen.21.dgmm,
  author       = {Yizi Chen and Edwin Carlinet and Joseph Chazalon and
                  Cl\'ement Mallet and Bertrand Dum\'enieu and Julien Perret},
  title        = {Combining Deep Learning and Mathematical Morphology for
                  Historical Map Segmentation},
  booktitle    = {Proceedings of the IAPR International Conference on
                  Discrete Geometry and Mathematical Morphology (DGMM)},
  year         = {2021},
  series       = {Lecture Notes in Computer Science},
  volume       = {12708},
  month        = may,
  address      = {Uppsala, Sweden},
  publisher    = {Springer},
  pages        = {79--92},
  abstract     = {The digitization of historical maps enables the study of
                  ancient, fragile, unique, and hardly accessible information
                  sources. Main map features can be retrieved and tracked
                  through the time for subsequent thematic analysis. The goal
                  of this work is the vectorization step, i.e., the
                  extraction of vector shapes of the objects of interest from
                  raster images of maps. We are particularly interested in
                  closed shape detection such as buildings, building blocks,
                  gardens, rivers, etc. in order to monitor their temporal
                  evolution. Historical map images present significant
                  pattern recognition challenges. The extraction of closed
                  shapes by using traditional Mathematical Morphology (MM) is
                  highly challenging due to the overlapping of multiple map
                  features and texts. Moreover, state-of-the-art
                  Convolutional Neural Networks (CNN) are perfectly designed
                  for content image filtering but provide no guarantee about
                  closed shape detection. Also, the lack of textural and
                  color information of historical maps makes it hard for CNN
                  to detect shapes that are represented by only their
                  boundaries. Our contribution is a pipeline that combines
                  the strengths of CNN (efficient edge detection and
                  filtering) and MM (guaranteed extraction of closed shapes)
                  in order to achieve such a task. The evaluation of our
                  approach on a public dataset shows its effectiveness for
                  extracting the closed boundaries of objects in historical
                  maps.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/chen.2021.dgmm.pdf},
  lrdekeywords = {Image},
  lrdenewsdate = {2021-02-16},
  doi          = {10.1007/978-3-030-76657-3_5}
}
@inproceedings{chen.21.icdar,
  author       = {Yizi Chen and Edwin Carlinet and Joseph Chazalon and
                  Cl\'ement Mallet and Bertrand Dum\'enieu and Julien Perret},
  title        = {Vectorization of Historical Maps Using Deep Edge Filtering
                  and Closed Shape Extraction},
  booktitle    = {Proceedings of the 16th International Conference on
                  Document Analysis and Recognition (ICDAR'21)},
  series       = {Lecture Notes in Computer Science},
  volume       = {12824},
  publisher    = {Springer, Cham},
  address      = {Lausanne, Switzerland},
  month        = sep,
  year         = {2021},
  pages        = {510--525},
  doi          = {10.1007/978-3-030-86337-1_34},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/chen.21.icdar.pdf},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdenewsdate = {2021-05-17},
  abstract     = {Maps have been a unique source of knowledge for centuries.
                  Such historical documents provide invaluable information
                  for analyzing the complex spatial transformation of
                  landscapes over important time frames. This is particularly
                  true for urban areas that encompass multiple interleaved
                  research domains (social sciences, economy, etc.). The
                  large amount and significant diversity of map sources call
                  for automatic image processing techniques in order to
                  extract the relevant objects under a vectorial shape. The
                  complexity of maps (text, noise, digitization artifacts,
                  etc.) has hindered the capacity of proposing a versatile
                  and efficient raster-to-vector approaches for decades. We
                  propose a learnable, reproducible, and reusable solution
                  for the automatic transformation of raster maps into vector
                  objects (building blocks, streets, rivers). It is built
                  upon the complementary strength of mathematical morphology
                  and convolutional neural networks through efficient edge
                  filtering. Evenmore, we modify ConnNet and combine with
                  deep edge filtering architecture to make use of pixel
                  connectivity information and built an end-to-end system
                  without requiring any post-processing techniques. In this
                  paper, we focus on the comprehensive benchmark on various
                  architectures on multiple datasets coupled with a novel
                  vectorization step. Our experimental results on a new
                  public dataset using COCO Panoptic metric exhibit very
                  encouraging results confirmed by a qualitative analysis of
                  the success and failure cases of our approach. Code,
                  dataset, results and extra illustrations are freely
                  available at
                  \url{https://github.com/soduco/ICDAR-2021-Vectorization}. }
}
@InProceedings{ claveirole.05.ciaa,
author = {Thomas Claveirole and Sylvain Lombardy and Sarah O'Connor
and Louis-No\"el Pouchet and Jacques Sakarovitch},
title = {Inside {V}aucanson},
booktitle = {Proceedings of Implementation and Application of Automata,
10th International Conference (CIAA)},
year = 2005,
pages = {117--128},
publisher = {Springer-Verlag},
volume = 3845,
series = {Lecture Notes in Computer Science},
address = {Sophia Antipolis, France},
month = jun,
abstract = {This paper presents some features of the Vaucanson
platform. We describe some original algorithms on weighted
automata and transducers (computation of the quotient,
conversion of a regular expression into a weighted
automaton, and composition). We explain how complex
declarations due to the generic programming are masked from
the user and finally we present a proposal for an XML
format that allows implicit descriptions for simple types
of automata.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/ins-vauc.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/ciaa-slides.05.pdf},
lrdeprojects = {Vaucanson},
lrdenewsdate = {2005-05-25}
}
@TechReport{ clouard.99.tr,
author = {R\'egis Clouard and Abderrahim Elmoataz and Fran\c{c}ois
Angot and Olivier Lezoray and Alexandre Duret-Lutz},
title = {Une biblioth\`eque et un environnement de programmation
d'op\'erateurs de traitement d'images},
institution = {GREYC-ISMRA},
year = 1999,
number = 99008,
address = {Caen, France},
month = nov,
url = {http://www.greyc.ismra.fr/~regis/Pandore/},
lrdeprojects = {Olena}
}
@InProceedings{ crozet.14.icip,
author = {S\'ebastien Crozet and Thierry G\'eraud},
title = {A First Parallel Algorithm to Compute the Morphological
Tree of Shapes of {$n$D} Images},
booktitle = {Proceedings of the 21st International Conference on Image
Processing (ICIP)},
year = 2014,
address = {Paris, France},
pages = {2933--2937},
lrdeprojects = {Olena},
abstract = {The tree of shapes is a self-dual tree-based image
representation belonging to the field of mathematical
morphology. This representation is highly interesting since
it is invariant to contrast changes and inversion, and
allows for numerous and powerful applications. A new
algorithm to compute the tree of shapes has been recently
presented: it has a quasi-linear complexity; it is the only
known algorithm that is also effective for nD images with n
> 2; yet it is sequential. With the increasing size of data
to process, the need of a parallel algorithm to compute
that tree is of prime importance; in this paper, we present
such an algorithm. We also give some benchmarks that show
that the parallel version is computationally effective. As
a consequence, that makes possible to process 3D images
with some powerful self-dual morphological tools.},
lrdeinc = {Publications/crozet.14.icip.inc},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/crozet.14.icip.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/crozet.14.icip.poster.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2014-05-26},
doi = {10.1109/ICIP.2014.7025593}
}
@InProceedings{ dangla.18.das,
author = {Aliona Dangla and \'Elodie Puybareau and Guillaume Tochon
and Jonathan Fabrizio},
title = {A first step toward a fair comparison of evaluation
protocols for text detection algorithms},
booktitle = {Proceedings of the IAPR International Workshop on Document
Analysis Systems (DAS)},
year = {2018},
month = apr,
address = {Vienna, Austria},
abstract = {Text detection is an important topic in pattern
recognition, but evaluating the reliability of such
detection algorithms is challenging. While many evaluation
protocols have been developed for that purpose, they often
show dissimilar behaviors when applied in the same context.
As a consequence, their usage may lead to
misinterpretations, potentially yielding erroneous
comparisons between detection algorithms or their incorrect
parameters tuning. This paper is a first attempt to derive
a methodology to perform the comparison of evaluation
protocols. We then apply it on five state-of-the-art
protocols, and exhibit that there indeed exist
inconsistencies among their evaluation criteria. Our aim
here is not to rank the investigated evaluation protocols,
but rather raising awareness in the community that we
should carefully reconsider them in order to converge to
their optimal usage.},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/dangla.18.das.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/dangla.18.das.poster.pdf},
lrdenewsdate = {2018-02-02},
doi = {10.1109/DAS.2018.55}
}
@InProceedings{ darbon.01.ei,
author = {J\'er\^ome Darbon and Bulent Sankur and Henri Ma{\^\i}tre},
title = {Error correcting code performance for watermark
protection},
booktitle = {Proceedings of the 13th Symposium SPIE on Electronic
Imaging---Security and Watermarking of Multimedia Contents
III (EI27)},
year = 2001,
address = {San Jose, CA, USA},
month = jan,
volume = 4314,
editor = {Wong, P. W. and Delp, III, E. J.},
pages = {663--672},
lrdeprojects = {Olena},
abstract = {The watermark signals are weakly inserted in images due to
imperceptibility constraints which makes them prone to
errors in the extraction stage. Although the error
correcting codes can potentially improve their performance
one must pay attention to the fact that the watermarking
channel is in general very noisy. We have considered the
trade-off of the BCH codes and repetition codes in various
concatenation modes. At the higher rates that can be
encountered in watermarking channels such as due to
low-quality JPEG compression, codes like the BCH codes
cease being useful. Repetition coding seems to be the last
resort at these error rates of 25\% and beyond. It has been
observed that there is a zone of bit error rate where their
concatenation turns out to be more useful. In fact the
concatenation of repetition and BCH codes judiciously
dimensioned, given the available number of insertion sites
and the payload size, achieves a higher reliability level.},
lrdekeywords = {Image}
}
@InProceedings{ darbon.02.ismm,
author = {J\'er\^ome Darbon and Thierry G\'eraud and Alexandre
Duret-Lutz},
title = {Generic implementation of morphological image operators},
booktitle = {Mathematical Morphology, Proceedings of the 6th
International Symposium (ISMM)},
pages = {175--184},
year = 2002,
address = {Sydney, Australia},
month = apr,
publisher = {CSIRO Publishing},
abstract = {Several libraries dedicated to mathematical morphology
exist. But they lack genericity, that is to say, the
ability for operators to accept input of different natures
---2D binary images, graphs enclosing floating values, etc.
We describe solutions which are integrated in Olena, a
library providing morphological operators. We demonstrate
with some examples that translating mathematical formulas
and algorithms into source code is made easy and safe with
Olena. Moreover, experimental results show that no extra
costs at run-time are induced.},
lrdeprojects = {Olena}
}
@InProceedings{ darbon.04.ecoopphd,
author = {J\'er\^ome Darbon and Thierry G\'eraud and Patrick Bellot},
title = {Generic algorithmic blocks dedicated to image processing},
booktitle = {Proceedings of the ECOOP Workshop for PhD Students},
year = 2004,
address = {Oslo, Norway},
month = jun,
abstract = {This paper deals with the implementation of algorithms in
the specific domain of image processing. Although many
image processing libraries are available, they generally
lack genericity and flexibility. Many image processing
algorithms can be expressed as compositions of elementary
algorithmic operations referred to as blocks. Implementing
these compositions is achieved using generic programming.
Our solution is compared to previous ones and we
demonstrate it on a class image processing algorithms.},
lrdeprojects = {Olena},
lrdenewsdate = {2004-03-10}
}
@InProceedings{ darbon.04.iwcia,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Exact optimization of discrete constrained total variation
minimization problems},
booktitle = {Proceedings of the 10th International Workshop on
Combinatorial Image Analysis (IWCIA)},
year = 2004,
address = {Auckland, New Zealand},
month = dec,
pages = {548--557},
editor = {R. Klette and J. Zunic},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
volume = 3322,
lrdeprojects = {Olena},
abstract = {This paper deals with the total variation minimization
problem when the fidelity is either the $L^2$-norm or the
$L^1$-norm. We propose an algorithm which computes the
exact solution of these two problems after discretization.
Our method relies on the decomposition of an image into its
level sets. It maps the original problems into independent
binary Markov Random Field optimization problems associated
with each level set. Exact solutions of these binary
problems are found thanks to minimum-cut techniques. We
prove that these binary solutions are increasing and thus
allow to reconstruct the solution of the original
problems.},
lrdekeywords = {Image},
lrdenewsdate = {2004-09-01}
}
@TechReport{ darbon.04.tr,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Exact optimization of discrete constrained total variation
minimization problems},
institution = {ENST},
year = 2004,
number = {2004C004},
address = {Paris, France},
month = oct,
lrdeprojects = {Olena},
annote = {This technical report corresponds to the publication
darbon.04.iwcia. ; 200412-IWCIA},
abstract = {This paper deals with the total variation minimization
problem when the fidelity is either the $L^2$-norm or the
$L^1$-norm. We propose an algorithm which computes the
exact solution of these two problems after discretization.
Our method relies on the decomposition of an image into its
level sets. It maps the original problems into independent
binary Markov Random Field optimization problems associated
with each level set. Exact solutions of these binary
problems are found thanks to minimum-cut techniques. We
prove that these binary solutions are increasing and thus
allow to reconstruct the solution of the original
problems.},
lrdekeywords = {Image}
}
@InProceedings{ darbon.05.eusipco,
author = {J\'er\^ome Darbon and Ceyhun Burak Akg\"ul},
title = {An efficient algorithm for attribute openings and
closings},
booktitle = {Proceedings of the 13th European Signal Processing
Conference (EUSIPCO)},
year = 2005,
address = {Antalya, Turkey},
month = sep,
lrdeprojects = {Olena},
abstract = {In this paper, we present fast algorithms for area opening
and closing on grayscale images. Salembier's max-tree based
algorithm is one of the well known methods to perform area
opening. It makes use of a special representation where
each node in the tree stands for a flat region and the tree
itself is oriented towards the maxima of the grayscale
image. Pruning the tree with respect to some attribute,
e.g., the area, boils down to attribute opening. Following
the same approach, we propose an algorithm for area opening
(closing) without building the max-tree (min-tree). Our
algorithm exhibit considerable performance compared to the
state-of-the art in this domain.},
lrdekeywords = {Image},
lrdenewsdate = {2005-04-14}
}
@InProceedings{ darbon.05.ibpria,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {A Fast and Exact Algorithm for Total Variation
Minimization},
booktitle = {Proceedings of the 2nd Iberian Conference on Pattern
Recognition and Image Analysis (IbPRIA)},
publisher = {Springer-Verlag},
volume = 3522,
pages = {351--359},
year = 2005,
address = {Estoril, Portugal},
month = jun,
lrdeprojects = {Olena},
abstract = {This paper deals with the minimization of the total
variation under a convex data fidelity term. We propose an
algorithm which computes an exact minimizer of this
problem. The method relies on the decomposition of an image
into its level sets. Using these level sets, we map the
problem into optimizations of independent binary Markov
Random Fields. Binary solutions are found thanks to
graph-cut techniques and we show how to derive a fast
algorithm. We also study the special case when the fidelity
term is the $L^1$-norm. Finally we provide some
experiments.},
lrdekeywords = {Image},
lrdenewsdate = {2005-01-18}
}
@InProceedings{ darbon.05.ispa,
author = {J\'er\^ome Darbon},
title = {Total Variation Minimization with {$L^1$} Data Fidelity as a
Contrast Invariant Filter},
booktitle = {Proceedings of the 4th International Symposium on Image
and Signal Processing and Analysis (ISPA 2005)},
year = 2005,
address = {Zagreb, Croatia},
month = sep,
pages = {221--226},
lrdeprojects = {Olena},
abstract = {This paper sheds new light on minimization of the total
variation under the $L^1$-norm as data fidelity term
($L^1+TV$) and its link with mathematical morphology. It is
well known that morphological filters enjoy the property of
being invariant with respect to any change of contrast.
First, we show that minimization of $L^1+TV$ yields a
self-dual and contrast invariant filter. Then, we further
constrain the minimization process by only optimizing the
grey levels of level sets of the image while keeping their
boundaries fixed. This new constraint is maintained thanks
to the Fast Level Set Transform which yields a complete
representation of the image as a tree. We show that this
filter can be expressed as a Markov Random Field on this
tree. Finally, we present some results which demonstrate
that these new filters can be particularly useful as a
preprocessing stage before segmentation.},
lrdekeywords = {Image},
lrdenewsdate = {2005-04-27}
}
@InProceedings{ darbon.05.isvc,
author = {J\'er\^ome Darbon and Sylvain Peyronnet},
title = {A Vectorial Self-Dual Morphological Filter based on Total
Variation Minimization},
booktitle = {Proceedings of the First International Conference on
Visual Computing},
year = 2005,
address = {Lake Tahoe, Nevada, USA},
month = dec,
lrdeprojects = {Olena},
pages = {388--395},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
volume = 3804,
abstract = {We present a vectorial self dual morphological filter.
Contrary to many methods, our approach does not require the
use of an ordering on vectors. It relies on the
minimization of the total variation with $L^1$ norm as data
fidelity on each channel. We further constraint this
minimization in order not to create new values. It is shown
that this minimization yields a self-dual and contrast
invariant filter. Although the above minimization is not a
convex problem, we propose an algorithm which computes a
global minimizer. This algorithm relies on minimum cost
cut-based optimizations.},
lrdekeywords = {Image},
lrdenewsdate = {2005-08-20}
}
@PhDThesis{ darbon.05.phd,
author = {J\'er\^ome Darbon},
title = {Composants logiciels et algorithmes de minimisation exacte
d'\'energies d\'edi\'ees au traitement d'images},
school = {\'Ecole Nationale Sup\'erieure des T\'el\'ecommunications
de Paris (ENST)},
address = {Paris, France},
year = 2005,
month = oct,
note = {In French},
abstract = {Dans cette th\`ese nous \'etudions la minimisation
d'\'energies markoviennes rencontr\'ees dans les domaines
du traitement des images et de la vision par ordinateur.
Nous proposons des algorithmes de minimisation exacte pour
diff\'erents types d'\'energies. Ces algorithmes ont
l'int\'er\^et de fournir un minimum global quand bien
m\^eme l'\'energie n'est pas convexe. Enfin, nous mettons
en \'evidence quelques liens entre les champs de Markov
binaires et la morphologie math\'ematique. La version
finale de ce manuscrit suit les recommandations des rapporteurs.},
lrdekeywords = {Image}
}
@TechReport{ darbon.05.tr,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {A fast and exact algorithm for total variation
minimization},
institution = {ENST},
year = 2005,
number = {2005D002},
address = {Paris, France},
month = jan,
lrdeprojects = {Olena},
annote = {This technical report corresponds to the publication
darbon.05.ibpria.},
abstract = {This paper deals with the minimization of the total
variation under a convex data fidelity term. We propose an
algorithm which computes an exact minimizer of this
problem. The method relies on the decomposition of an image
into its level sets. Using these level sets, we map the
problem into optimizations of independent binary Markov
Random Fields. Binary solutions are found thanks to
graph-cut techniques and we show how to derive a fast
algorithm. We also study the special case when the fidelity
term is the $L^1$-norm. Finally we provide some
experiments.},
lrdekeywords = {Image}
}
@InProceedings{ darbon.06.iccp,
author = {J\'er\^ome Darbon and Richard Lassaigne and Sylvain
Peyronnet},
title = {Approximate Probabilistic Model Checking for Programs},
booktitle = {Proceedings of the {IEEE} 2nd International Conference on
Intelligent Computer Communication and Processing
({ICCP'06})},
year = 2006,
address = {Technical University of Cluj-Napoca, Romania},
month = sep,
abstract = {In this paper we deal with the problem of applying model
checking to real programs. We verify a program without
constructing the whole transition system using a technique
based on Monte-Carlo sampling, also called ``approximate
model checking''. This technique combines model checking
and randomized approximation. Thus, it avoids the so called
state space explosion phenomenon. We propose a prototype
implementation that works directly on C source code. It
means that, contrary to others approaches, we do not need
to use a specific language nor specific data structures in
order to describe the system we wish to verify. Finally, we
present experimental results that show the effectiveness of
the approach applied to finding bugs in real programs.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/darbon.06.iccp.pdf},
lrdenewsdate = {2006-07-27}
}
@Article{ darbon.06.jmiv,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Image restoration with discrete constrained {T}otal
{Variation}---Part~{I}: Fast and exact optimization},
journal = {Journal of Mathematical Imaging and Vision},
year = 2006,
volume = 26,
number = 3,
month = dec,
pages = {261--276},
lrdeprojects = {Olena},
abstract = {This paper deals with the total variation minimization
problem in image restoration for convex data fidelity
functionals. We propose a new and fast algorithm which
computes an exact solution in the discrete framework. Our
method relies on the decomposition of an image into its
level sets. It maps the original problems into independent
binary Markov Random Field optimization problems at each
level. Exact solutions of these binary problems are found
thanks to minimum cost cut techniques in graphs. These
binary solutions are proved to be monotone increasing with
levels and yield thus an exact solution of the discrete
original problem. Furthermore we show that minimization of
total variation under $L^1$ data fidelity term yields a
self-dual contrast invariant filter. Finally we present
some results.},
lrdekeywords = {Image},
lrdenewsdate = {2006-03-24}
}
@Article{ darbon.06.jmivb,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Image restoration with discrete constrained {T}otal
{Variation}---Part~{II}: Levelable functions, convex priors
and non-convex case},
journal = {Journal of Mathematical Imaging and Vision},
year = 2006,
volume = 26,
number = 3,
month = dec,
pages = {277--291},
lrdeprojects = {Olena},
abstract = {In Part II of this paper we extend the results obtained in
Part I for total variation minimization in image
restoration towards the following directions: first we
investigate the decomposability property of energies on
levels, which leads us to introduce the concept of
levelable regularization functions (which TV is the
paradigm of). We show that convex levelable posterior
energies can be minimized exactly using the
level-independant cut optimization scheme seen in part I.
Next we extend this graph cut scheme optimization scheme to
the case of non-convex levelable energies. We present
convincing restoration results for images corrupted with
impulsive noise. We also provide a minimum-cost based
algorithm which computes a global minimizer for Markov
Random Field with convex priors. Last we show that
non-levelable models with convex local conditional
posterior energies such as the class of generalized
gaussian models can be exactly minimized with a generalized
coupled Simulated Annealing.},
lrdekeywords = {Image},
lrdenewsdate = {2006-03-24}
}
@InProceedings{ darbon.06.siam,
author = {J\'er\^ome Darbon and Marc Sigelle},
title = {Fast and Exact Discrete Image Restoration Based on Total
Variation and on Its Extensions to Levelable Potentials},
booktitle = {SIAM Conference on Imaging Sciences},
year = 2006,
address = {Minneapolis, USA},
month = may,
lrdeprojects = {Olena},
abstract = {We investigate the decomposition property of posterior
restoration energies on level sets in a discrete Markov
Random Field framework. This leads us to the concept of
'levelable' potentials (which TV is shown to be the
paradigm of). We prove that convex levelable posterior
energies can be minimized exactly with level-independant
binary graph cuts. We extend this scheme to the case of
non-convex levelable energies, and present convincing
restoration results for images degraded by impulsive
noise.},
lrdekeywords = {Image},
lrdenewsdate = {2006-02-22}
}
@TechReport{ darbon.06.tr,
author = {J\'er\^ome Darbon and Marc Sigelle and Florence Tupin},
title = {A note on nice-levelable {MRFs} for {SAR} image denoising
with contrast preservation},
institution = {Signal and Image Processing Group, Ecole Nationale
Sup\'erieure des T\'el\'ecommunications},
year = 2006,
number = {2006D006},
address = {Paris, France},
month = sep,
lrdeprojects = {Olena},
annote = {On this technical report is based the publication
darbon.07.ei ; 200701-SPIE}
}
@InProceedings{ darbon.07.ei,
author = {J\'er\^ome Darbon and Marc Sigelle and Florence Tupin},
title = {The use of levelable regularization functions for {MRF}
restoration of {SAR} images},
booktitle = {Proceedings of the 19th Symposium SPIE on Electronic
Imaging},
year = 2007,
address = {San Jose, CA, USA},
month = jan,
lrdeprojects = {Olena},
abstract = {It is well-known that Total Variation (TV) minimization
with L2 data fidelity terms (which corresponds to white
Gaussian additive noise) yields a restored image which
presents some loss of contrast. The same behavior occurs
for TV models with non-convex data fidelity terms that
represent speckle noise. In this note we propose a new
approach to cope with the restoration of Synthetic Aperture
Radar images while preserving the contrast.},
lrdekeywords = {Image},
lrdenewsdate = {2006-09-30}
}
@InProceedings{ darbon.07.mirage,
author = {J\'er\^ome Darbon},
title = {A Note on the Discrete Binary {Mumford-Shah} Model},
booktitle = {Proceedings of the international Computer Vision /
Computer Graphics Collaboration Techniques and Applications
(MIRAGE 2007)},
year = 2007,
address = {Paris, France},
month = mar,
lrdeprojects = {Olena},
abstract = {This paper is concerned itself with the analysis of the
two-phase Mumford-Shah model also known as the active
contour without edges model introduced by Chan and Vese. It
consists of approximating an observed image by a piecewise
constant image which can take only two values. First we
show that this model with the $L^1$-norm as data fidelity
yields a contrast invariant filter which is a well known
property of morphological filters. Then we consider a
discrete version of the original problem. We show that an
inclusion property holds for the minimizers. The latter is
used to design an efficient graph-cut based algorithm which
computes an exact minimizer. Some preliminary results are
presented.},
lrdekeywords = {Image},
lrdenewsdate = {2006-12-29}
}
@InProceedings{ darbon.08.iwcia,
author = {J\'er\^ome Darbon},
title = {Global Optimization for First Order {Markov} Random Fields
with Submodular Priors},
booktitle = {Proceedings of the twelfth International Workshop on
Combinatorial Image Analysis (IWCIA'08)},
year = 2008,
address = {Buffalo, New York, USA},
month = apr,
lrdeprojects = {Olena},
abstract = {This paper copes with the optimization of Markov Random
Fields with pairwise interactions defined on arbitrary
graphs. The set of labels is assumed to be linearly ordered
and the priors are supposed to be submodular. Under these
assumptions we propose an algorithm which computes an exact
minimizer of the Markovian energy. Our approach relies on
mapping the original into a combinatorial one which
involves only binary variables. The latter is shown to be
exactly solvable via computing a maximum flow. The
restatement into a binary combinatorial problem is done by
considering the level-sets of the labels instead of the
label values themselves. The submodularity of the priors is
shown to be a necessary and sufficient condition for the
applicability of the proposed approach.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/darbon.08.iwcia.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2008-01-03}
}
@Misc{ david.05.sud,
author = {Valentin David and Akim Demaille and Renaud Durlin and
Olivier Gournet},
title = {{C}/{C++} Disambiguation Using Attribute Grammars},
year = 2005,
note = {Communication to Stratego Users Day 2005},
address = {Utrecht {U}niversity, {N}etherlands},
month = may,
lrdeprojects = {Transformers},
abstract = {We propose a novel approach to semantics driven
disambiguation based on Attribute Grammars (AGs). AGs share
the same modularity model as its host grammar language,
here Syntax Definition Formalism (SDF), what makes them
particularly attractive for working on unstable grammars,
or grammar extensions. The framework we propose is
effective, since a full ISO-C99 disambiguation chain
already works, and the core of the hardest ambiguities of
C++ is solved. This requires specific techniques, and some
extensions to the stock AG model.},
lrdepaper = {http://www.lrde.epita.fr/dload/200505-SUD/disamb/article-200505-SUD-disamb.pdf}
}
@InProceedings{ david.06.iccp,
author = {Valentin David and Akim Demaille and Olivier Gournet},
title = {Attribute Grammars for Modular Disambiguation},
booktitle = {Proceedings of the {IEEE} 2nd International Conference on
Intelligent Computer Communication and Processing
({ICCP'06})},
year = 2006,
address = {Technical University of Cluj-Napoca, Romania},
month = sep,
abstract = {To face the challenges to tomorrow's software engineering
tools, powerful language-generic program-transformation
components are needed. We propose the use of attribute
grammars (AGs) to generate language specific disambiguation
filters. In this paper, a complete implementation of a
language-independent AGs system is presented. As a full
scale experiment, we present an implementation of a
flexible C front-end. Its specifications are concise,
modular, and the result is efficient. On top of it,
transformations such as software renovation, code metrics,
domain specific language embedding can be implemented.},
lrdeprojects = {Transformers},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/david.06.iccp.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/david.06.iccp.pdf},
lrdenewsdate = {2006-07-12}
}
@Article{ dehak.05.pami,
author = {R\'eda Dehak and Isabelle Bloch and Henri Ma{\^\i}tre},
title = {Spatial reasoning with incomplete information on
relative positioning},
journal = {IEEE Transactions on Pattern Analysis and Machine
Intelligence},
year = 2005,
pages = {1473--1484},
volume = 27,
month = sep,
number = 9,
lrdeprojects = {Olena},
abstract = {This paper describes a probabilistic method of inferring
the position of a point with respect to a reference point
knowing their relative spatial position to a third point.
We address this problem in the case of incomplete
information where only the angular spatial relationships
are known. The use of probabilistic representations allows
us to model prior knowledge. We derive exact formulae
expressing the conditional probability of the position
given the two known angles, in typical cases: uniform or
Gaussian random prior distributions within rectangular or
circular regions. This result is illustrated with respect
to two different simulations: The first is devoted to the
localization of a mobile phone using only angular
relationships, the second, to geopositioning within a city.
This last example uses angular relationships and some
additional knowledge about the position.},
lrdekeywords = {Image}
}
@InProceedings{ dehak.06.nist,
author = {R\'eda Dehak and Charles-Alban Deledalle and Najim Dehak},
title = {{LRDE} System description},
booktitle = {NIST SRE'06 Workshop: speaker recognition evaluation
campaign},
year = 2006,
address = {San Juan, Puerto Rico},
month = jun
}
@InProceedings{ dehak.07.interspeech,
author = {R\'eda Dehak and Najim Dehak and Patrick Kenny and Pierre
Dumouchel},
title = {Linear and Non Linear Kernel {GMM} SuperVector Machines
for Speaker Verification},
booktitle = {Proceedings of the European Conference on Speech
Communication and Technologies (Interspeech'07)},
year = 2007,
address = {Antwerp, Belgium},
month = aug,
abstract = {This paper presents a comparison between Support Vector
Machines (SVM) speaker verification systems based on linear
and non linear kernels defined in GMM supervector space. We
describe how these kernel functions are related and we show
how the nuisance attribute projection (NAP) technique can
be used with both of these kernels to deal with the session
variability problem. We demonstrate the importance of GMM
model normalization (M-Norm) especially for the non linear
kernel. All our experiments were performed on the core
condition of NIST 2006 speaker recognition evaluation (all
trials). Our best results (an equal error rate of 6.3\%)
were obtained using NAP and GMM model normalization with
the non linear kernel.},
lrdenewsdate = {2007-08-27}
}
@InProceedings{ dehak.08.nist,
author = {R\'eda Dehak and Najim Dehak and Patrick Kenny},
title = {The {LRDE} Systems for the 2008 {NIST} Speaker Recognition
Evaluation},
booktitle = {NIST-SRE 2008},
year = 2008,
address = {Montr\'eal, Canada},
month = jun
}
@InProceedings{ dehak.08.odysseya,
author = {R\'eda Dehak and Najim Dehak and Patrick Kenny and Pierre
Dumouchel},
title = {Kernel Combination for {SVM} Speaker Verification},
booktitle = {Proceedings of the Speaker and Language Recognition
Workshop (IEEE-Odyssey 2008)},
year = 2008,
address = {Stellenbosch, South Africa},
month = jan,
abstract = {We present a new approach for constructing the kernels
used to build support vector machines for speaker
verification. The idea is to construct new kernels by
taking linear combination of many kernels such as the GLDS
and GMM supervector kernels. In this new kernel
combination, the combination weights are speaker dependent
rather than universal weights on score level fusion and
there is no need for extra-data to estimate them. An
experiment on the NIST 2006 speaker recognition evaluation
dataset (all trial) was done using three different kernel
functions (GLDS kernel, linear and Gaussian GMM supervector
kernels). We compared our kernel combination to the optimal
linear score fusion obtained using logistic regression.
This optimal score fusion was trained on the same test
data. We had an equal error rate of $\simeq 5,9\%$ using
the kernel combination technique which is better than the
optimal score fusion system ($\simeq 6,0\%$).},
lrdenewsdate = {2007-09-25}
}
@InProceedings{ dehak.08.odysseyb,
author = {Najim Dehak and R\'eda Dehak and Patrick Kenny and Pierre
Dumouchel},
title = {Comparison Between Factor Analysis and {GMM} Support
Vector Machines for Speaker Verification},
booktitle = {Proceedings of the Speaker and Language Recognition
Workshop (IEEE-Odyssey 2008)},
year = 2008,
address = {Stellenbosch, South Africa},
month = jan,
abstract = {We present a comparison between speaker verification
systems based on factor analysis modeling and support
vector machines using GMM supervectors as features. All
systems used the same acoustic features and they were
trained and tested on the same data sets. We test two types
of kernel (one linear, the other non-linear) for the GMM
support vector machines. The results show that factor
analysis using speaker factors gives the best results on
the core condition of the NIST 2006 speaker recognition
evaluation. The difference is particularly marked on the
English language subset. Fusion of all systems gave an
equal error rate of 4.2\% (all trials) and 3.2\% (English
trials only).},
lrdenewsdate = {2007-09-25}
}
@InProceedings{ dehak.09.icassp,
author = {Najim Dehak and Patrick Kenny and R\'eda Dehak and Ondrej
Glembek and Pierre Dumouchel and Lukas Burget and
Valiantsina Hubeika and Fabio Castaldo},
title = {Support Vector Machines and Joint Factor Analysis for
Speaker Verification},
booktitle = {IEEE-ICASSP},
year = 2009,
address = {Taipei, Taiwan},
month = apr,
abstract = {This article presents several techniques to combine
between Support vector machines (SVM) and Joint Factor
Analysis (JFA) model for speaker verification. In this
combination, the SVMs are applied on different sources of
information produced by the JFA. These informations are the
Gaussian Mixture Model supervectors and speakers and Common
factors. We found that the use of JFA factors gave the best
results especially when within class covariance
normalization method is applied in the speaker factors
space, in order to compensate for the channel effect. The
new combination results are comparable to other classical
JFA scoring techniques.},
lrdenewsdate = {2009-04-19}
}
@InProceedings{ dehak.09.interspeech,
author = {Najim Dehak and R\'eda Dehak and Patrick Kenny and Niko
Brummer and Pierre Ouellet and Pierre Dumouchel},
title = {Support Vector Machines versus Fast Scoring in the
Low-Dimensional Total Variability Space for Speaker
Verification},
booktitle = {Interspeech},
year = 2009,
month = sep,
abstract = {This paper presents a new speaker verification system
architecture based on Joint Factor Analysis (JFA) as
feature extractor. In this modeling, the JFA is used to
define a new low-dimensional space named the total
variability factor space, instead of both channel and
speaker variability spaces for the classical JFA. The main
contribution in this approach, is the use of the cosine
kernel in the new total factor space to design two
different systems: the first system is Support Vector
Machines based, and the second one uses directly this
kernel as a decision score. This last scoring method makes
the process faster and less computation complex compared to
others classical methods. We tested several intersession
compensation methods in total factors, and we found that
the combination of Linear Discriminate Analysis and Within
Class Covariance Normalization achieved the best
performance.},
lrdenewsdate = {2009-06-22}
}
@InProceedings{ dehak.09.interspeechb,
author = {Pierre Dumouchel and Najim Dehak and Yazid Attabi and
R\'eda Dehak and Narj\`es Boufaden},
title = {Cepstral and Long-Term Features for Emotion Recognition},
booktitle = {Interspeech},
year = 2009,
month = sep,
note = {Open Performance Sub-Challenge Prize},
abstract = {In this paper, we describe systems that were developed for
the Open Performance Sub-Challenge of the INTERSPEECH 2009
Emotion Challenge. We participate to both two-class and
five-class emotion detection. For the two-class problem,
the best performance is obtained by logistic regression
fusion of three systems. Theses systems use short- and
long-term speech features. This fusion achieved an absolute
improvement of 2.6\% on the unweighted recall value
compared with [6]. For the five-class problem, we submitted
two individual systems: cepstral GMM vs. long-term GMM-UBM.
The best result comes from a cepstral GMM and produced an
absolute improvement of 3.5\% compared to [6].},
lrdenewsdate = {2009-06-22}
}
@InProceedings{ dehak.10.nist,
author = {R\'eda Dehak and Najim Dehak},
title = {{LRDE} {S}peaker {R}ecognition {S}ystem for {NIST-SRE}
2010},
booktitle = {NIST 2010 Speaker Recognition Evaluation},
year = 2010,
address = {Brno, Czech Republic}
}
@InProceedings{ dehak.10.odyssey,
author = {Najim Dehak and R\'eda Dehak and J. Glass and D. Reynolds
and P. Kenny},
title = {Cosine {S}imilarity {S}coring without {S}core
{N}ormalization {T}echniques},
booktitle = {Odyssey 2010, The Speaker and Language Recognition
Workshop},
year = 2010,
address = {Brno, Czech Republic}
}
@InProceedings{ dehak.10.odyssey2,
author = {S. Shum and Najim Dehak and R\'eda Dehak and J. Glass},
title = {Unsupervised {S}peaker {A}daptation based on the {C}osine
{S}imilarity for {T}ext-{I}ndependent {S}peaker
{V}erification},
booktitle = {Odyssey 2010, The Speaker and Language Recognition
Workshop},
year = 2010,
address = {Brno, Czech Republic}
}
@InProceedings{ dehak.11.icassp,
author = {Najim Dehak and Z. Karam and D. Reynolds and R\'eda Dehak
and W. Campbell and J. Glass},
title = {A {C}hannel-{B}lind {S}ystem for {S}peaker
{V}erification},
booktitle = {International Conference on Acoustics, Speech and Signal
Processing (ICASSP)},
pages = {4536--4539},
year = 2011,
address = {Prague, Czech Republic},
month = may
}
@InProceedings{ dehak.11.interspeech,
author = {Najim Dehak and Pedro A. Torres-Carrasquillo and Douglas
Reynolds and R\'eda Dehak},
title = {Language {R}ecognition via {I}-{V}ectors and
{D}imensionality {R}eduction},
booktitle = {INTERSPEECH 2011},
pages = {857--860},
year = 2011,
address = {Florence, Italy},
month = aug
}
@Article{ dehak.11.taslp,
author = {Najim Dehak and P. Kenny and R\'eda Dehak and P. Dumouchel
and P. Ouellet},
title = {Front-{E}nd {F}actor {A}nalysis for {S}peaker
{V}erification},
year = 2011,
journal = {IEEE Transactions on Audio, Speech, and Language
Processing},
volume = 19,
number = 4,
pages = {788--798},
month = may
}
@InProceedings{ dehak.14.odyssey,
author = {Najim Dehak and O. Plchot and M.H. Bahari and L. Burget
and H. Van hamme and R\'eda Dehak},
title = {{GMM} Weights Adaptation Based on Subspace Approaches for
Speaker Verification},
booktitle = {Odyssey 2014, The Speaker and Language Recognition
Workshop},
year = 2014,
address = {Joensuu, Finland},
month = jun,
lrdeprojects = {SpeakerId},
lrdenewsdate = {2014-06-16},
abstract = {In this paper, we explored the use of Gaussian Mixture
Model (GMM) weights adaptation for speaker verification.
We compared two different subspace weight adaptation
approaches: Subspace Multinomial Model (SMM) and
Non-Negative factor Analysis (NFA). Both techniques
achieved similar results and seemed to outperform the
retraining maximum likelihood (ML) weight adaptation.
However, the training process for the NFA approach is
substantially faster than the SMM technique. The i-vector
fusion between each weight adaptation approach and the
classical i-vector yielded slight improvements on the
telephone part of the NIST 2010 Speaker Recognition
Evaluation dataset.},
pages = {48--53}
}
@InProceedings{ dehak.16.nistsre,
author = {Pedro A. Torres-Carrasquillo and Frederick Richardson and
Shahan Nercessian and Douglas Sturim and William Campbell
and Youngjune Gwon and Swaroop Vattam and R\'eda Dehak and
Harish Mallidi and Phani Sankar Nidadavolu and Ruizhi Li
and Raghavendra Reddy Pappagari and Nanxin Chen and Najim
Dehak and Ruben Zazo},
title = {The {M}IT {L}incoln {L}aboratory 2016 Speaker Recognition
System},
booktitle = {NIST Speaker Recognition Evaluation 2016},
year = 2016,
address = {San Diego, California},
month = dec,
lrdeprojects = {SpeakerId},
abstract = {This document presents the system submission for the group
composed of MIT Lincoln Laboratory, Johns Hopkins
University (JHU), Laboratoire de Recherche et de
D\'eveloppement de l'EPITA (LRDE) and Universidad
Aut\'onoma de Madrid (ATVS). The primary submission is a
combination of four systems focused on i-vector systems.
Two secondary submissions are also included.},
lrdenewsdate = {2016-12-12}
}
@InProceedings{ demaille.05.iticse,
author = {Akim Demaille},
title = {Making Compiler Construction Projects Relevant to Core
Curriculums},
booktitle = {Proceedings of the Tenth Annual Conference on Innovation
and Technology in Computer Science Education
({ITICSE'05})},
year = 2005,
address = {Universidade Nova de {L}isboa, {M}onte da {P}acarita,
{P}ortugal},
month = jun,
lrdeprojects = {Tiger},
isbn = {1-59593-024-8},
pages = {266--270},
abstract = {Having 300 students a year implement a compiler is a
debatable enterprise, since the industry will certainly
\emph{not} recruit them for this competence. Yet we made
that decision five years ago, for reasons not related to
compiler construction. We detail these motivations, the
resulting compiler design, and how we manage the
assignment. The project meets its goals, since the majority
of former students invariably refer to it as \emph{the}
project that taught them the most.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.05.iticse.pdf},
lrdenewsdate = {2005-02-06}
}
@InProceedings{ demaille.06.isola,
author = {Akim Demaille and Sylvain Peyronnet and Beno\^it Sigoure},
title = {Modeling of Sensor Networks Using {XRM}},
booktitle = {Proceedings of the 2nd International Symposium on
Leveraging Applications of Formal Methods, Verification and
Validation ({ISoLA'06})},
year = 2006,
address = {Coral Beach Resort, {P}aphos, {C}yprus},
month = nov,
lrdeprojects = {Transformers},
abstract = {Sensor networks are composed of small electronic devices
that embed processors, sensors, batteries, memory and
communication capabilities. One of the main goal in the
design of such systems is the handling of the inherent
complexity of the nodes, strengthened by the huge number of
nodes in the network. For these reasons, it becomes very
difficult to model and verify such systems. In this paper,
we investigate the main characteristics of sensor nodes,
discuss about the use of a language derived from Reactive
Modules for their modeling and propose a language (and a
tool set) that ease the modeling of this kind of systems.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.06.isola.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.06.isola.pdf},
lrdenewsdate = {2006-09-14}
}
@InProceedings{ demaille.06.rivf,
author = {Akim Demaille and Sylvain Peyronnet and Thomas H\'erault},
title = {Probabilistic Verification of Sensor Networks},
booktitle = {Proceedings of the Fourth International Conference on
Computer Sciences, Research, Innovation and Vision for the
Future (RIVF'06)},
year = 2006,
address = {Ho Chi Minh City, Vietnam},
isbn = {1-4244-0316-2},
month = feb,
abstract = {Sensor networks are networks consisting of miniature and
low-cost systems with limited computation power and energy.
Thanks to the low cost of the devices, one can spread a
huge number of sensors into a given area to monitor, for
example, physical change of the environment. Typical
applications are in defense, environment, and design of
ad-hoc networks areas. In this paper, we address the
problem of verifying the correctness of such networks
through a case study. We modelize a simple sensor network
whose aim is to detect the apparition of an event in a
bounded area (such as a fire in a forest). The behaviour of
the network is probabilistic, so we use APMC, a tool that
allows to approximately check the correctness of extremely
large probabilistic systems, to verify it.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.06.rivf.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.06.rivf.pdf},
lrdeprojects = {APMC}
}
@InProceedings{ demaille.08.fsmnlp,
author = {Akim Demaille and Alexandre Duret-Lutz and Florian Lesaint
and Sylvain Lombardy and Jacques Sakarovitch and Florent
Terrones},
title = {An {XML} format proposal for the description of weighted
automata, transducers, and regular expressions},
booktitle = {Post-proceedings of the seventh international workshop on
Finite-State Methods and Natural Language Processing
(FSMNLP'08)},
editor = {Jakub Piskorski and Bruce W. Watson and Anssi
Yli-Jyr{\"a}},
publisher = {IOS Press},
series = {Frontiers in Artificial Intelligence and Applications},
volume = 19,
pages = {199--206},
year = 2009,
address = {Ispra, Italy},
month = sep,
abstract = {We present an XML format that allows to describe a large
class of finite weighted automata and transducers. Our
design choices stem from our policy of making the
implementation as simple as possible. This format has been
tested for the communication between the modules of our
automata manipulation platform Vaucanson, but this document
is less an experiment report than a position paper intended
to open the discussion among the community of automata
software writers.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.08.fsmnlp.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.08.fsmnlp.pdf},
lrdeprojects = {Vaucanson},
lrdenewsdate = {2008-07-28}
}
@InProceedings{ demaille.08.iticse,
author = {Akim Demaille and Roland Levillain and Beno\^it Perrot},
title = {A Set of Tools to Teach Compiler Construction},
booktitle = {Proceedings of the Thirteenth Annual Conference on
Innovation and Technology in Computer Science Education
({ITICSE'08})},
pages = {68--72},
year = 2008,
address = {Universidad Polit\'ecnica de Madrid, Spain},
month = jun,
lrdeprojects = {Tiger},
abstract = {Compiler construction is a widely used software
engineering exercise, but because most students will not be
compiler writers, care must be taken to make it relevant in
a core curriculum. Auxiliary tools, such as generators and
interpreters, often hinder the learning: students have to
fight tool idiosyncrasies, mysterious errors, and other
poorly educative issues. We introduce a set of tools
especially designed or improved for compiler construction
educative projects in \Cxx. We also provide suggestions
about new approaches to compiler construction. We draw
guidelines from our experience to make tools suitable for
education purposes.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.08.iticse.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.08.iticse.pdf},
lrdenewsdate = {2008-03-29}
}
@Misc{ demaille.08.kex,
author = {Akim Demaille and Roland Levillain},
title = {Compiler Construction as an Effective Application to Teach
Object-Oriented Programming},
howpublished = {The seventh ``Killer Examples'' workshop, Worked Examples
for Sound OO Pedagogy, at OOPSLA'08},
address = {Nashville, USA},
month = oct,
year = 2008,
note = {Oral presentation},
lrdeprojects = {Tiger},
abstract = {Compiler construction, a course feared by most students,
and a competence seldom needed in the industry. Yet we
claim that compiler construction is wonderful topic that
benefits from virtually all the computer-science topics. In
this paper we show in particular why compiler construction
is a killer example for Object-Oriented Programming,
providing a unique opportunity for students to understand
what it is, what it can be used for, and how it works.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.08.kex.pdf},
lrdekeywords = {LRDE},
lrdenewsdate = {2008-09-15}
}
@InProceedings{ demaille.08.ldta,
oldkeys = {durlin.08.seminar},
author = {Akim Demaille and Renaud Durlin and Nicolas Pierron and
Beno\^it Sigoure},
title = {Semantics driven disambiguation: A comparison of
different approaches},
booktitle = {Proceedings of the 8th workshop on Language Descriptions,
Tools and Applications (LDTA'08)},
year = 2008,
abstract = {Context-sensitive languages such as \langC or \Cxx can be
parsed using a context-free but ambiguous grammar, which
requires another stage, disambiguation, in order to select
the single parse tree that complies with the language's
semantical rules. Naturally, large and complex languages
induce large and complex disambiguation stages. If, in
addition, the parser should be extensible, for instance to
enable the embedding of domain specific languages, the
disambiguation techniques should feature traditional
software-engineering qualities: modularity, extensibility,
scalability and expressiveness. \\ We evaluate three
approaches to write disambiguation filters for \acs{sdf}
grammars: algebraic equations with \acs{asf}, rewrite-rules
with programmable traversals for \stratego, and attribute
grammars with \acr{tag}, our system. To this end we
introduce \phenix, a highly ambiguous language. Its
``standard'' grammar exhibits ambiguities inspired by those
found in the \langC and \Cxx standard grammars. To evaluate
modularity, the grammar is layered: it starts with a small
core language, and several layers add new features, new
production rules, and new ambiguities.},
lrdeprojects = {Transformers},
keywords = {Transformers, context-free grammar, attribute grammar,
Stratego, ASF, SDF, disambiguation, parsing, program
transformation, term rewriting},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.08.ldta.pdf},
lrdenewsdate = {2008-02-08}
}
@InProceedings{ demaille.09.sac,
author = {Akim Demaille and Roland Levillain and Beno\^it Sigoure},
title = {{TWEAST}: A Simple and Effective Technique to Implement
Concrete-Syntax {AST} Rewriting Using Partial Parsing},
booktitle = {Proceedings of the 24th Annual ACM Symposium on Applied
Computing (SAC'09)},
pages = {1924--1929},
year = 2009,
address = {Waikiki Beach, Honolulu, Hawaii, USA},
month = mar,
lrdeprojects = {Tiger},
abstract = {ASTs are commonly used to represent an input/output
program in compilers and language processing tools. Many of
the tasks of these tools consist in generating and
rewriting ASTs. Such an approach can become tedious and
hard to maintain for complex operations, namely program
transformation, optimization, instrumentation, etc. On the
other hand, \emph{concrete syntax} provides a natural and
simpler representation of programs, but it is not usually
available as a direct feature of the aforementioned tools.
We propose a simple technique to implement AST generation
and rewriting in general purpose languages using concrete
syntax. Our approach relies on extensions made in the
scanner and the parser and the use of objects supporting
partial parsing called Text With Embedded Abstract Syntax
Trees (TWEASTS). A compiler for a simple language (Tiger)
written in \Cxx serves as an example, featuring
transformations in concrete syntax: syntactic desugaring,
optimization, code instrumentation such as bounds-checking,
etc. Extensions of this technique to provide a full-fledged
concrete-syntax rewriting framework are presented as well.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.09.sac.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.09.sac.pdf},
lrdenewsdate = {2008-10-11}
}
@InProceedings{ demaille.13.ciaa,
author = {Akim Demaille and Alexandre Duret-Lutz and Sylvain
Lombardy and Jacques Sakarovitch},
title = {Implementation Concepts in {V}aucanson 2},
booktitle = {Proceedings of Implementation and Application of Automata,
18th International Conference (CIAA'13)},
pages = {122--133},
year = 2013,
editor = {Stavros Konstantinidis},
publisher = {Springer},
isbn = {978-3-642-39274-0},
volume = 7982,
series = {Lecture Notes in Computer Science},
address = {Halifax, NS, Canada},
month = jul,
lrdeprojects = {Vaucanson},
abstract = {Vaucanson is an open source C++ platform dedicated to the
computation with finite weighted automata. It is generic:
it allows to write algorithms that apply on a wide set of
mathematical objects. Initiated ten years ago, several
shortcomings were discovered along the years, especially
problems related to code complexity and obfuscation as well
as performance issues. This paper presents the concepts
underlying Vaucanson 2, a complete rewrite of the platform
that addresses these issues.},
lrdeinc = {demaille.13.ciaa.inc},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.13.ciaa.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.13.ciaa.pdf},
lrdenewsdate = {2013-05-02},
doi = {10.1007/978-3-642-39274-0_12}
}
@InProceedings{ demaille.14.ciaa,
author = {Akim Demaille and Alexandre Duret-Lutz and Sylvain
Lombardy and Luca Saiu and Jacques Sakarovitch},
title = {A Type System for Weighted Automata and Rational
Expressions},
booktitle = {Proceedings of Implementation and Application of Automata,
19th International Conference (CIAA'14)},
year = 2014,
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 8587,
address = {Giessen, Germany},
month = jul,
lrdeprojects = {Vaucanson},
abstract = {We present a type system for automata and rational
expressions, expressive enough to encompass weighted
automata and transducers in a single coherent formalism.
The system allows to express useful properties about the
applicability of operations including binary heterogeneous
functions over automata.\\ We apply the type system to the
design of the \vcsnd platform, a library dedicated to the
computation with finite weighted automata, in which
genericity and high efficiency are obtained at the lowest
level through the use of template metaprogramming, by
letting the template system play the role of a static type
system for automata. Between such a low-level layer and the
interactive high-level interface, the type system plays the
crucial role of a mediator and allows for a
cleanly-structured use of dynamic compilation. },
lrdeinc = {demaille.14.ciaa.inc},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/demaille.14.ciaa.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.14.ciaa.pdf},
lrdenewsdate = {2014-05-20},
doi = {10.1007/978-3-319-08846-4_12}
}
@InProceedings{ demaille.16.ciaa,
author = {Akim Demaille},
title = {Derived-Term Automata of Multitape Rational Expressions},
booktitle = {Proceedings of Implementation and Application of Automata,
21st International Conference (CIAA'16)},
editor = {Yo-Sub Han and Kai Salomaa},
year = 2016,
publisher = {Springer},
address = {Seoul, South Korea},
pages = {51--63},
isbn = {978-3-319-40946-7},
doi = {10.1007/978-3-319-40946-7_5},
anurl = {http://dx.doi.org/10.1007/978-3-319-40946-7_5},
series = {Lecture Notes in Computer Science},
volume = 9705,
month = jul,
abstract = {We introduce (weighted) rational expressions to denote
series over Cartesian products of monoids. To this end, we
propose the operator $\mid$ to build multitape expressions
such as $(a^+\mid x + b^+\mid y)^*$. We define expansions,
which generalize the concept of derivative of a rational
expression, but relieved from the need of a free monoid. We
propose an algorithm based on expansions to build multitape
automata from multitape expressions.},
lrdeprojects = {Vcsn},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.16.ciaa.pdf},
lrdereport = {http://www.lrde.epita.fr/dload/papers/demaille.16.ciaa.report.pdf},
lrdenewsdate = {2016-04-26}
}
@InProceedings{ demaille.16.ictac,
author = {Akim Demaille},
title = {Derived-term Automata for Extended Weighted Rational
Expressions},
booktitle = {Proceedings of the Thirteenth International Colloquium on
Theoretical Aspects of Computing (ICTAC)},
year = 2016,
publisher = {Springer},
series = {Lecture Notes in Computer Science},
address = {Taipei, Taiwan},
month = oct,
abstract = {We present an algorithm to build an automaton from a
rational expression. This approach introduces support for
extended weighted expressions. Inspired by derived-term
based algorithms, its core relies on a different construct,
rational expansions. We introduce an inductive algorithm to
compute the expansion of an expression from which the
automaton follows. This algorithm is independent of the
size of the alphabet, and actually even supports infinite
alphabets. It can easily be accommodated to generate
deterministic (weighted) automata. These constructs are
implemented in Vcsn, a free-software platform dedicated to
weighted automata and rational expressions.},
lrdeprojects = {Vcsn},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.16.ictac.pdf},
lrdereport = {http://www.lrde.epita.fr/dload/papers/demaille.16.ictac.report.pdf},
lrdenewsdate = {2016-07-06}
}
@InProceedings{ demaille.17.ictac,
author = {Akim Demaille and Thibaud Michaud},
title = {Derived-Term Automata of Weighted Rational Expressions
with Quotient Operators},
booktitle = {Proceedings of the Fourteenth International Colloquium on
Theoretical Aspects of Computing (ICTAC)},
year = 2017,
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {10580},
pages = {155--173},
address = {Hanoi, Vietnam},
month = oct,
abstract = {Quotient operators have been rarely studied in the context
of weighted rational expressions and automaton
generation---in spite of the key role played by the
quotient of words in formal language theory. To handle both
left- and right-quotients we generalize an expansion-based
construction of the derived-term (or Antimirov, or
equation) automaton and rely on support for a transposition
(or reversal) operator. The resulting automata may have
spontaneous transitions, which requires different
techniques from the usual derived-term constructions. },
lrdeprojects = {Vcsn},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.17.ictac.pdf},
lrdenewsdate = {2017-07-05}
}
@Article{ demaille.17.sacs,
title = {Derived-Term Automata of Multitape Expressions with
Composition},
author = {Akim Demaille},
journal = {Scientific Annals of Computer Science},
volume = {27},
number = {2},
organization = {``A.I. Cuza'' University, Ia\c si, Rom\^ania},
year = {2017},
pages = {137--176},
doi = {10.7561/SACS.2017.2.137},
publisher = {``A.I. Cuza'' University Press, Ia\c si},
lrdeprojects = {Vcsn},
lrdenewsdate = {2017-12-29},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/demaille.17.sacs.pdf},
abstract = {Rational expressions are powerful tools to define
automata, but often restricted to single-tape automata. Our
goal is to unleash their expressive power for transducers,
and more generally, any multitape automaton; for instance
$(a^+\mathbin{\vert} x + b^+\mathbin{\vert} y)^*$. We
generalize the construction of the derived-term automaton
by using \emph{expansions}. This approach generates small
automata, and even allows us to support a composition
operator.}
}
@InProceedings{ denise.06.rt,
author = {Alain Denise and Marie-Claude Gaudel and
Sandrine-Dominique Gouraud and Richard Lassaigne and
Sylvain Peyronnet},
title = {Uniform Random Sampling of Traces in Very Large Models},
booktitle = {Proceedings of the 1st international workshop on Random
Testing 2006 (RT06)},
year = 2006,
series = {ACM digital library},
pages = {10--19},
lrdeprojects = {APMC},
abstract = {This paper presents some first results on how to perform
uniform random walks (where every trace has the same
probability to occur) in very large models. The models
considered here are described in a succinct way as a set of
communicating reactive modules. The method relies upon
techniques for counting and drawing uniformly at random
words in regular languages. Each module is considered as an
automaton defining such a language. It is shown how it is
possible to combine local uniform drawings of traces, and
to obtain some global uniform random sampling, without
construction of the global model.},
lrdenewsdate = {2006-05-30}
}
@Article{ dolstra.10.jfp,
author = {Eelco Dolstra and Andres L\"oh and Nicolas Pierron},
title = {{NixOS}: A purely functional {Linux} distribution},
journal = {Journal of Functional Programming},
year = 2010,
note = {Published online by Cambridge University Press 15 Oct
2010},
doi = {10.1017/S0956796810000195},
lrdekeywords = {Software engineering},
lrdenewsdate = {2010-10-15}
}
@InProceedings{ drapeau.17.grec,
author = {Jordan Drapeau and Thierry G\'eraud and Micka\"el Coustaty
and Joseph Chazalon and Jean-Christophe Burie and
V\'eronique Eglin and St\'ephane Bres},
title = {Extraction of Ancient Map Contents Using Trees of
Connected Components},
booktitle = {Proceedings of the 12th IAPR International Workshop on
Graphics Recognition (GREC)},
year = 2017,
address = {Kyoto, Japan},
month = nov,
doi = {10.1007/978-3-030-02284-6_9},
abstract = {Ancient maps are an historical and cultural heritage
widely recognized as a very important source of
information, but exploiting such maps is complicated. In
this project, we consider the Linguistic Atlas of France
(ALF), built between 1902 and 1910. This cartographical
heritage produces first-rate data for dialectological
researches. In this paper, we focus on the separation of
the content in layers for facilitating the extraction, the
analysis, the visualization and the diffusion of the data
contained in these ancient linguistic atlases.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/drapeau.17.grec.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2017-10-20}
}
@InProceedings{ drumetz.17.lva-ica,
author = {Lucas Drumetz and Guillaume Tochon and Jocelyn Chanussot
and Christian Jutten},
title = {Estimating the Number of Endmembers to Use in Spectral
Unmixing of Hyperspectral Data with Collaborative
Sparsity},
booktitle = {Proceedings of the 13th International Conference on Latent
Variable Analysis and Signal Separation (LVA-ICA)},
year = 2017,
address = {Grenoble, France},
month = feb,
abstract = {Spectral Umixing (SU) in hyperspectral remote sensing aims
at recovering the signatures of the pure materials in the
scene (endmembers) and their abundances in each pixel of
the image. The usual SU chain does not take spectral
variability (SV) into account, and relies on the estimation
of the Intrinsic Dimensionality (ID) of the data, related
to the number of endmembers (NOE) to use. However, the ID
can be significantly overestimated in difficult scenarios,
and sometimes does not correspond to the desired scale and
application dependent NOE. Spurious endmembers are then
frequently extracted and included in the model. We propose
an algorithm for SU incorporating SV, using collaborative
sparsity to discard the least explicative endmembers in the
whole image. We compute an algorithmic regularization path
for this problem to select the optimal set of endmembers
using a statistical criterion. Results on simulated and
real data show the interest of the approach.},
lrdekeywords = {Image},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/drumetz.17.lva-ica.pdf},
lrdenewsdate = {2016-11-22}
}
@InProceedings{ drumetz.20.icassp,
author = {Lucas Drumetz and Mauro Dalla Mura and Guillaume Tochon
and Ronan Fablet},
title = {Learning Endmember Dynamics in Multitemporal Hyperspectral
Data using a State-Space Model Formulation},
booktitle = {Proceedings of the 45th IEEE International Conference on
Acoustics, Speech, and Signal Processing (ICASSP)},
year = 2020,
month = may,
address = {Barcelona, Spain},
pages = {2483--2487},
abstract = {Hyperspectral image unmixing is an inverse problem aiming
at recovering the spectral signatures of pure materials of
interest (called endmembers) and estimating their
proportions (called abundances) in every pixel of the
image. However, in spite of a tremendous applicative
potential and the advent of new satellite sensors with high
temporal resolution, multitemporal hyperspectral unmixing
is still a relatively underexplored research avenue in the
community, compared to standard image unmixing. In this
paper, we propose a new framework for multitemporal
unmixing and endmember extraction based on a state-space
model, and present a proof of concept on simulated data to
show how this representation can be used to inform
multitemporal unmixing with external prior knowledge, or on
the contrary to learn the dynamics of the quantities
involved from data using neural network architectures
adapted to the identification of dynamical systems.},
lrdekeywords = {Image},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/drumetz.20.icassp.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2020-01-24},
doi = {10.1109/ICASSP40776.2020.9053787}
}
@InProceedings{ dubuisson.15.visapp,
author = {S\'everine Dubuisson and Myriam Robert-Seidowsky and
Jonathan Fabrizio},
title = {A Self-Adaptive Likelihood Function for Tracking with
Particle Filter},
booktitle = {Proceedings of the 10th International Conference on
Computer Vision Theory and Applications (VISAPP)},
month = mar,
year = 2015,
pages = {446--453},
abstract = {The particle filter is known to be efficient for visual
tracking. However, its parameters are empirically fixed,
depending on the target application, the video sequences
and the context. In this paper, we introduce a new
algorithm which automatically adjusts ``on-line'' two majors
of them: the correction and the propagation parameters. Our
purpose is to determine, for each frame of a video, the
optimal value of the correction parameter and to adjust the
propagation one to improve the tracking performance. On one
hand, our experimental results show that the common
settings of particle filter are sub-optimal. On another
hand, we prove that our approach achieves a lower tracking
error without needing tuning these parameters. Our adaptive
method allows to track objects in complex conditions
(illumination changes, cluttered background, etc.) without
adding any computational cost compared to the common usage
with fixed parameters.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/dubuisson.15.visapp.pdf},
lrdeprojects = {Olena},
doi = {10.5220/0005260004460453}
}
@InProceedings{ duflot.04.avocs,
author = {Marie Duflot and Laurent Fribourg and Thomas Herault and
Richard Lassaigne and Fr\'ed\'eric Magniette and Stephane
Messika and Sylvain Peyronnet and Claudine Picaronny},
title = {Probabilistic model checking of the {CSMA/CD} protocol
using {PRISM} and {APMC}},
booktitle = {Proceedings of the 4th International Workshop on Automated
Verification of Critical Systems (AVoCS)},
year = 2004,
series = {Electronic Notes in Theoretical Computer Science Series},
volume = 128,
number = 6,
pages = {195--214},
lrdeprojects = {APMC},
abstract = {Carrier Sense Multiple Access/Collision Detection
(CSMA/CD) is the protocol for carrier transmission access
in Ethernet networks (international standard IEEE 802.3).
On Ethernet, any Network Interface Card (NIC) can try to
send a packet in a channel at any time. If another NIC
tries to send a packet at the same time, a collision is
said to occur and the packets are discarded. The CSMA/CD
protocol was designed to avoid this problem, more precisely
to allow a NIC to send its packet without collision. This
is done by way of a randomized exponential backoff process.
In this paper, we analyse the correctness of the CSMA/CD
protocol, using techniques from probabilistic model
checking and approximate probabilistic model checking. The
tools that we use are PRISM and APMC. Moreover, we provide
a quantitative analysis of some CSMA/CD properties.}
}
@InCollection{ duflot.06.book,
author = {Marie Duflot and Marta Kwiatkowska and Gethin Norman and
Dave Parker and Sylvain Peyronnet and Claudine Picaronny
and Jeremy Sproston},
title = {Practical Application of Probabilistic Model Checking to
Communication Protocols},
booktitle = {FMICS Handbook on Industrial Critical Systems},
year = 2006,
editor = {Stefania Gnesi and Tiziana Margaria},
chapter = 7,
note = {To appear},
lrdeprojects = {APMC}
}
@InProceedings{ duret.00.gcse,
author = {Alexandre Duret-Lutz},
title = {Olena: a component-based platform for image processing,
mixing generic, generative and {OO} programming},
booktitle = {Proceedings of the 2nd International Symposium on
Generative and Component-Based Software Engineering
(GCSE)---Young Researchers Workshop; published in
``Net.ObjectDays2000''},
pages = {653--659},
year = 2000,
address = {Erfurt, Germany},
month = oct,
isbn = {3-89683-932-2},
lrdeprojects = {Olena},
abstract = {This paper presents Olena, a toolkit for programming and
designing image processing chains in which each processing
is a component. But since there exist many image types
(different structures such as 2D images, 3D images or
graphs, as well as different value types) the platform has
been designed with genericity and reusability in mind: each
component is written as a generic C++ procedure, \`a la
STL. Other libraries, such as Khoros have a different
approach where a processing component contains an
implementation for each type supported by the library. This
makes code maintenance hard and prevents easy addition of
new image types. Still, Olena is not only a generic
component library, it shall contain additional tools such
as a visual programming environment (VPE). Those tools may
be programmed in a classical object-oriented fashion (using
operation and inclusion polymorphism) which may seem
antagonistic with the generic programming paradigm used in
the library. Section 2 outlines the architecture of Olena
and elaborates more on the design problems resulting from
the use of generic components. Section 3 presents the
solution chosen to address these problems.}
}
@InProceedings{ duret.01.ae,
author = {Alexandre Duret-Lutz},
title = {Expression templates in {A}da~95},
booktitle = {Proceedings of the 6th International Conference on
Reliable Software Technologies (Ada-Europe)},
year = 2001,
series = {Lecture Notes in Computer Science Series},
volume = 2043,
address = {Leuven, Belgium},
month = may,
publisher = {Springer-Verlag},
pages = {191--202},
note = {Best Paper Award},
lrdeprojects = {Software},
abstract = {High-order matrix or vector expressions tend to be
penalized by the use of huge temporary variables.
Expression templates is a C++ technique which can be used
to avoid these temporaries, in a way that is transparent to
the user. We present an Ada adaptation of this technique
which - while not transparent - addresses the same
efficiency issue as the original. We make intensive use of
the signature idiom to combine packages together, and
discuss its importance in generic programming. Finally, we
express some concerns about generic programming in Ada.},
lrdekeywords = {Software engineering}
}
@InProceedings{ duret.01.coots,
author = {Alexandre Duret-Lutz and Thierry G\'eraud and Akim
Demaille},
title = {Generic design patterns in {C++}},
booktitle = {Proceedings of the 6th USENIX Conference on
Object-Oriented Technologies and Systems (COOTS)},
year = 2001,
address = {San Antonio, TX, USA},
pages = {189--202},
month = jan # "--" # feb,
publisher = {USENIX Association},
lrdeprojects = {Software},
abstract = {Generic programming is a paradigm whose wide adoption by
the C++ community is quite recent. In this approach most
classes and procedures are parameterized, leading to the
construction of general and efficient software components.
In this paper, we show how some design patterns from Gamma
et al. can be adapted to this paradigm. Although these
patterns rely highly on dynamic binding. We show that, by
making intensive use of parametric polymorphism in the
context of generic programming, the method calls in these
patterns can be resolved at compile-time. The speed-up
achieved using these patterns is significant.},
lrdekeywords = {Software engineering}
}
@InProceedings{ duret.09.atva,
author = {Alexandre Duret-Lutz and Denis Poitrenaud and Jean-Michel
Couvreur},
title = {On-the-fly Emptiness Check of Transition-based {S}treett
Automata},
booktitle = {Proceedings of the 7th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'09)},
year = 2009,
editor = {Zhiming Liu and Anders P. Ravn},
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
pages = {213--227},
volume = 5799,
abstract = {In the automata theoretic approach to model checking,
checking a state-space $S$ against a linear-time property
$\varphi$ can be done in $\mathrm{O}(|S|\times
2^{\mathrm{O}(|\varphi|)})$ time. When model checking under
$n$ strong fairness hypotheses expressed as a Generalized
B\"uchi automaton, this complexity becomes
$\mathrm{O}(|S|\times 2^{\mathrm{O}(|\varphi|+n)})$.\par
Here we describe an algorithm to check the emptiness of
Streett automata, which allows model checking under $n$
strong fairness hypotheses in $\mathrm{O}(|S|\times
2^{\mathrm{O}(|\varphi|)}\times n)$. We focus on
transition-based Streett automata, because it allows us to
express strong fairness hypotheses by injecting Streett
acceptance conditions into the state-space without any blowup.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.09.atva.pdf},
lrdeprojects = {Spot},
doi = {10.1007/978-3-642-04761-9_17}
}
@TechReport{ duret.11.arxiv,
author = {Alexandre Duret-Lutz and Kais Klai and Denis Poitrenaud
and Yann Thierry-Mieg},
title = {Combining Explicit and Symbolic Approaches for Better
On-the-Fly {LTL} Model Checking},
institution = {arXiv},
year = 2011,
number = {1106.5700},
month = jun,
note = {Extended version of our ATVA'11 paper, presenting two new
techniques instead of one.},
url = {http://arxiv.org/abs/1106.5700},
abstract = {We present two new hybrid techniques that replace the
synchronized product used in the automata-theoretic
approach for LTL model checking. The proposed products are
explicit graphs of aggregates (symbolic sets of states)
that can be interpreted as B\"uchi automata. These hybrid
approaches allow on the one hand to use classical
emptiness-check algorithms and build the graph on-the-fly,
and on the other hand, to have a compact encoding of the
state space thanks to the symbolic representation of the
aggregates. The \emph{Symbolic Observation Product} assumes
a globally stuttering property (e.g., LTL-X) to aggregate
states. The \emph{Self-Loop Aggregation Product} does not
require the property to be globally stuttering (i.e., it
can tackle full LTL), but dynamically detects and exploits
a form of stuttering where possible. Our experiments show
that these two variants, while incomparable with each
other, can outperform other existing approaches.}
}
@InProceedings{ duret.11.atva,
author = {Alexandre Duret-Lutz and Kais Klai and Denis Poitrenaud
and Yann Thierry-Mieg},
title = {Self-Loop Aggregation Product --- A New Hybrid Approach to
On-the-Fly {LTL} Model Checking},
booktitle = {Proceedings of the 9th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'11)},
year = 2011,
series = {Lecture Notes in Computer Science},
volume = 6996,
pages = {336--350},
address = {Taipei, Taiwan},
month = oct,
publisher = {Springer},
abstract = {We present the \emph{Self-Loop Aggregation Product}
(SLAP), a new hybrid technique that replaces the
synchronized product used in the automata-theoretic
approach for LTL model checking. The proposed product is an
explicit graph of aggregates (symbolic sets of states) that
can be interpreted as a B\"uchi automaton. The criterion
used by SLAP to aggregate states from the Kripke structure
is based on the analysis of self-loops that occur in the
B\"uchi automaton expressing the property to verify. Our
hybrid approach allows on the one hand to use classical
emptiness-check algorithms and build the graph on-the-fly,
and on the other hand, to have a compact encoding of the
state space thanks to the symbolic representation of the
aggregates. Our experiments show that this technique often
outperforms other existing (hybrid or fully symbolic)
approaches.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/duret.11.atva.slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.11.atva.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2011-06-23},
doi = {10.1007/978-3-642-24372-1_24}
}
@Misc{ duret.11.sumo,
author = {Alexandre Duret-Lutz},
title = {Building {LTL} Model Checkers using {T}ransition-based
{G}eneralized {B\"u}chi {A}utomata},
howpublished = {Invited talk to SUMo'11},
month = jun,
year = 2011
}
@InProceedings{ duret.11.vecos,
author = {Alexandre Duret-Lutz},
title = {{LTL} Translation Improvements in {Spot}},
booktitle = {Proceedings of the 5th International Workshop on
Verification and Evaluation of Computer and Communication
Systems (VECoS'11)},
year = 2011,
series = {Electronic Workshops in Computing},
address = {Tunis, Tunisia},
month = sep,
publisher = {British Computer Society},
abstract = {Spot is a library of model-checking algorithms. This paper
focuses on the module translating LTL formul{\ae} into
automata. We discuss improvements that have been
implemented in the last four years, we show how Spot's
translation competes on various benchmarks, and we give
some insight into its implementation.},
url = {http://ewic.bcs.org/category/15853},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.11.vecos.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2011-07-25}
}
@InProceedings{ duret.13.atva,
author = {Alexandre Duret-Lutz},
title = {Manipulating {LTL} formulas using {S}pot 1.0},
booktitle = {Proceedings of the 11th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'13)},
year = 2013,
series = {Lecture Notes in Computer Science},
volume = 8172,
pages = {442--445},
address = {Hanoi, Vietnam},
month = oct,
publisher = {Springer},
abstract = {We present a collection of command-line tools designed to
generate, filter, convert, simplify lists of Linear-time
Temporal Logic formulas. These tools were introduced in the
release 1.0 of Spot, and we believe they should be of
interest to anybody who has to manipulate LTL formulas. We
focus on two tools in particular: ltlfilt, to filter and
transform formulas, and ltlcross to cross-check
LTL-to-B\"{u}chi-Automata translators.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.13.atva.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2013-06-15},
doi = {10.1007/978-3-319-02444-8_31}
}
@Article{ duret.14.ijccbs,
author = {Alexandre Duret-Lutz},
title = {{LTL} Translation Improvements in {S}pot 1.0},
journal = {International Journal on Critical Computer-Based Systems},
year = 2014,
volume = 5,
number = {1/2},
pages = {31--54},
month = mar,
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.14.ijccbs.draft.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2014-03-06},
abstract = { Spot is a library of model-checking algorithms started in
2003. This paper focuses on its module for translating
linear-time temporal logic (LTL) formulas into B{\"u}chi
automata: one of the steps required in the
automata-theoretic approach to LTL model-checking. We
detail the different algorithms involved in this
translation: the core translation itself, which performs
many simplifications thanks to its use of binary decision
diagrams; the pre-processing of the LTL formulas with
rewriting rules chosen to help their translation; and
various post-processing algorithms whose use depends on the
intent of the translation: do we favor deterministic
automata, or small automata? Using different benchmarks, we
show how Spot competes with other LTL translators, and how
it has improved over the years.},
doi = {10.1504/IJCCBS.2014.059594}
}
@InProceedings{ duret.16.atva,
author = {Alexandre Duret-Lutz and Fabrice Kordon and Denis
Poitrenaud and Etienne Renault},
title = {Heuristics for Checking Liveness Properties with Partial
Order Reductions},
booktitle = {Proceedings of the 14th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'16)},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {9938},
pages = {340--356},
year = {2016},
month = oct,
abstract = {Checking liveness properties with partial-order reductions
requires a cycle proviso to ensure that an action cannot be
postponed forever. The proviso forces each cycle to contain
at least one fully expanded state. We present new
heuristics to select which state to expand, hoping to
reduce the size of the resulting graph. The choice of the
state to expand is done when encountering a
\emph{dangerous} edge. Almost all existing provisos expand
the source of this edge, while this paper also explores the
expansion of the destination and the use of SCC-based
information.},
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.16.atva.pdf},
lrdenewsdate = {2016-06-17},
doi = {10.1007/978-3-319-46520-3_22}
}
@InProceedings{ duret.16.atva2,
author = {Alexandre Duret-Lutz and Alexandre Lewkowicz and Amaury
Fauchille and Thibaud Michaud and Etienne Renault and
Laurent Xu},
title = {Spot 2.0 --- a framework for {LTL} and $\omega$-automata
manipulation},
booktitle = {Proceedings of the 14th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'16)},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {9938},
pages = {122--129},
year = {2016},
month = oct,
abstract = {We present Spot 2.0, a C++ library with Python bindings
and an assortment of command-line tools designed to
manipulate LTL and $\omega$-automata in batch. New
automata-manipulation tools were introduced in Spot 2.0;
they support arbitrary acceptance conditions, as
expressible in the Hanoi Omega Automaton format. Besides
being useful to researchers who have automata to process,
its Python bindings can also be used in interactive
environments to teach $\omega$-automata and model checking.},
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.16.atva2.pdf},
lrdenewsdate = {2016-06-17},
doi = {10.1007/978-3-319-46520-3_8}
}
@PhDThesis{ duret.17.hdr,
author = {Alexandre Duret-Lutz},
title = {Contributions to {LTL} and $\omega$-Automata for Model
Checking},
school = {Universit{\'e} Pierre et Marie Curie (Paris 6)},
year = {2017},
month = feb,
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/duret.17.hdr.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/duret.17.hdr.slides.pdf},
type = {Habilitation Thesis}
}
@InProceedings{ esteban.19.caip,
author = {Baptiste Esteban and Guillaume Tochon and Thierry
G\'eraud},
title = {Estimating the noise level function with the tree of
shapes and non-parametric statistics},
booktitle = {Proceedings of the 18th International Conference on
Computer Analysis of Images and Patterns (CAIP)},
year = 2019,
pages = {377--388},
series = {Lecture Notes in Computer Science Series},
volume = {11679},
publisher = {Springer},
address = {Salerno, Italy},
month = sep,
doi = {10.1007/978-3-030-29891-3_33},
abstract = {The knowledge of the noise level within an image is a
valuable information for many image processing applications.
Estimating the noise level function (NLF) requires the
identification of homogeneous regions, upon which the noise
parameters are computed. Sutour et al. have proposed a
method to estimate this NLF based on the search for
homogeneous regions of square shape. We generalize this
method to the search for homogeneous regions with arbitrary
shape thanks to the tree of shapes representation of the
image under study, thus allowing a more robust and precise
estimation of the noise level function.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/esteban.19.caip.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2019-06-07}
}
@InProceedings{ esteban.19.gretsi,
author = {Baptiste Esteban and Guillaume Tochon and Thierry
G\'eraud},
title = {Estimation du niveau de bruit par arbre des formes et
statistiques non param\'etriques},
booktitle = {Proceedings of the 27th Symposium on Signal and Image
Processing (GRETSI)},
year = 2019,
address = {Lille, France},
category = {national},
month = aug,
abstract = {La connaissance du niveau de bruit dans une image est
pr\'ecieuse pour de nombreuses applications en traitement
d'images. L'estimation de la fonction de niveau de bruit
requiert l'identification des zones homog\`enes sur
lesquelles les param\`etres du bruit peuvent \^etre
calcul\'es. Sutour et al. en 2015 ont propos\'e une
m\'ethode d'estimation de la fonction de niveau de bruit se
basant sur la recherche de zones homog\`enes de forme
carr\'ee, donc inadapt\'ees au contenu local de l'image.
Nous g\'en\'eralisons cette m\'ethode \`a la recherche de
zones homog\`enes de forme quelconque en nous basant sur la
repr\'esentation par arbre des formes de l'image
\'etudi\'ee, permettant ainsi une estimation plus robuste
de la fonction de niveau de bruit.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/esteban.19.gretsi.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2019-06-14}
}
@InProceedings{ estopinan.21.eusipco,
author = {Joaquim Estopinan and Guillaume Tochon and Lucas Drumetz},
title = {Learning {Sentinel-2} Spectral Dynamics for Long-Run
Predictions Using Residual Neural Networks},
booktitle = {Proceedings of the 29th European Signal Processing
Conference (EUSIPCO)},
year = 2021,
address = {Dublin, Ireland},
month = aug,
lrdeprojects = {Olena},
abstract = {Making the most of multispectral image time-series is a
promising but still relatively under-explored research
direction because of the complexity of jointly analyzing
spatial, spectral and temporal information. Capturing and
characterizing temporal dynamics is one of the important
and challenging issues. Our new method paves the way to
capture real data dynamics and should eventually benefit
applications like unmixing or classification. Dealing with
time-series dynamics classically requires the knowledge of
a dynamical model and an observation model. The former may
be incorrect or computationally hard to handle, thus
motivating data-driven strategies aiming at learning
dynamics directly from data. In this paper, we adapt neural
network architectures to learn periodic dynamics of both
simulated and real multispectral time-series. We emphasize
the necessity of choosing the right state variable to
capture periodic dynamics and show that our models can
reproduce the average seasonal dynamics of vegetation using
only one year of training data.},
lrdekeywords = {Image},
lrdenewsdate = {2021-05-04}
}
@InProceedings{ fabre.00.egve,
author = {Yoann Fabre and Guillaume Pitel and Laurent Soubrevilla
and Emmanuel Marchand and Thierry G\'eraud and Akim
Demaille},
title = {An asynchronous architecture to manage communication,
display, and user interaction in distributed virtual
environments},
booktitle = {Virtual Environments 2000, Proceedings of the 6th
Eurographics Workshop on Virtual Environments (EGVE)},
year = 2000,
address = {Amsterdam, The Netherlands},
month = jun,
pages = {105--113},
series = {Computer Science / Eurographics Series},
editor = {J.D. Mulder and R. van Liere},
publisher = {Springer-Verlag Wien New York},
lrdeprojects = {URBI},
abstract = {In Distributed Virtual Environments, each machine runs the
same software, which is in charge of handling the
communications over the network, providing the user with a
view of the world, and processing his requests. A major
issue in the design of such a software is to ensure that
network communication does not degrade the interactivity
between the machine and the user. In this paper, we present
a software designed to achieve this goal, based on tools
rarely used in this area.}
}
@InProceedings{ fabre.00.vsmm,
author = {Yoann Fabre and Guillaume Pitel and Didier Verna},
title = {Urbi et {O}rbi: unusual design and implementation choices
for distributed virtual environments},
booktitle = {Proceedings of the 6th International Conference on Virtual
Systems and MultiMedia (VSMM)---Intelligent Environments
Workshop},
pages = {714--724},
year = 2000,
address = {Gifu, Japan},
month = oct,
publisher = {IOS Press, USA},
isbn = {1-58603-108-2},
lrdeprojects = {URBI},
abstract = {This paper describes Urbi et Orbi, a distributed virtual
environment (DVE) project that is being conducted in the
Research and Development Laboratory at EPITA. Our ultimate
goal is to provide support for large scale multi-user
virtual worlds on end-user machines. The incremental
development of this project led us to take unusual design
and implementation decisions that we propose to relate in
this paper. Firstly, a general overview of the project is
given, along with the initial requirements we wanted to
meet. Then, we go on with a description of the system's
architecture. Lastly, we describe and justify the unusual
choices we have made in the project's internals.}
}
@InProceedings{ fabre.00.vw,
author = {Yoann Fabre and Guillaume Pitel and Laurent Soubrevilla
and Emmanuel Marchand and Thierry G\'eraud and Akim
Demaille},
title = {A framework to dynamically manage distributed virtual
environments},
booktitle = {Proceedings of the 2nd International Conference on Virtual
Worlds (VW)},
year = 2000,
address = {Paris, France},
month = jul,
pages = {54--64},
editor = {J.-C. Heudin},
publisher = {Springer Verlag},
series = {Lecture Notes in Computer Science Series},
volume = {LNAI 1834},
lrdeprojects = {URBI},
abstract = {In this paper, we present the project urbi, a framework to
dynamically manage distributed virtual environments (DVEs).
This framework relies on a dedicated scripting language,
goal, which is typed, object-oriented and dynamically
bound. goal is interpreted by the application hosted by
each machine and is designed to handle efficiently both
network communications and interactivity. Finally, we have
made an unusual design decision: our project is based on a
functional programming language, ocaml.}
}
@Article{ fabrizio.12.spic,
author = {Jonathan Fabrizio and S\'everine Dubuisson and Dominique
B\'er\'eziat},
title = {Motion compensation based on Tangent Distance prediction
for video compression},
journal = {Signal Processing: Image Communication},
year = 2012,
volume = 27,
number = 2,
publisher = {Elsevier},
pages = {113--208},
month = feb,
abstract = { We present a new algorithm for motion compensation that
uses a motion estimation method based on tangent distance.
The method is compared with a Block-Matching based approach
in various common situations. Whereas Block-Matching
algorithms usually only predict positions of blocks over
time, our method also predicts the evolution of pixels into
these blocks. The prediction error is then drastically
decreased. The method is implemented into the Theora codec
proving that this algorithm improves the video codec
performances. },
lrdekeywords = {Image},
lrdeprojects = {Olena},
lrdenewsdate = {2012-02-09}
}
@Article{ fabrizio.13.paa,
author = {Jonathan Fabrizio and Beatriz Marcotegui and Matthieu
Cord},
title = {Text detection in street level image},
journal = {Pattern Analysis and Applications},
year = 2013,
volume = 16,
number = 4,
month = nov,
publisher = {Springer},
pages = {519--533},
lrdeprojects = {Olena},
abstract = {Text detection system for natural images is a very
challenging task in Computer Vision. Image acquisition
introduces distortion in terms of perspective, blurring,
illumination, and characters which may have very different
shape, size, and color. We introduce in this article a full
text detection scheme. Our architecture is based on a new
process to combine a hypothesis generation step to get
potential boxes of text and a hypothesis validation step to
filter false detections. The hypothesis generation process
relies on a new efficient segmentation method based on a
morphological operator. Regions are then filtered and
classified using shape descriptors based on Fourier, Pseudo
Zernike moments and an original polar descriptor, which is
invariant to rotation. Classification process relies on
three SVM classifiers combined in a late fusion scheme.
Detected characters are finally grouped to generate our
text box hypotheses. Validation step is based on a global
SVM classification of the box content using dedicated
descriptors adapted from the HOG approach. Results on the
well-known ICDAR database are reported showing that our
method is competitive. Evaluation protocol and metrics are
deeply discussed and results on a very challenging
street-level database are also proposed.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/fabrizio.13.paa.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2013-11-05}
}
@InProceedings{ fabrizio.14.icip,
author = {Jonathan Fabrizio},
title = {A precise skew estimation algorithm for document images
using {KNN} clustering and Fourier transform},
booktitle = {Proceedings of the 21st International Conference on Image
Processing (ICIP)},
year = 2014,
address = {Paris, France},
pages = {2585--2588},
lrdeprojects = {Olena},
abstract = {In this article, we propose a simple and precise skew
estimation algorithm for binarized document images. The
estimation is performed in the frequency domain. To get a
precise result, the Fourier transform is not applied to the
document itself but the document is preprocessed: all
regions of the document are clustered using a KNN and
contours of grouped regions are smoothed using the convex
hull to form more regular shapes, with better orientation.
No assumption has been made concerning the nature or the
content of the document. This method has been shown to be
very accurate and was ranked first at the DISEC'13 contest,
during the ICDAR competitions.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/fabrizio.14.icip.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/fabrizio.14.icip.poster.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2014-05-26},
doi = {10.1109/ICIP.2014.7025523}
}
@Article{ fabrizio.16.ijdar,
author = {Jonathan Fabrizio and Myriam Robert-Seidowsky and
S\'everine Dubuisson and Stefania Calarasanu and Rapha\"el
Boissel},
title = {TextCatcher: {A} method to detect curved and challenging
text in natural scenes},
journal = {International Journal on Document Analysis and
Recognition},
year = 2016,
volume = 19,
number = 2,
publisher = {Springer},
pages = {99--117},
month = feb,
abstract = {In this paper, we propose a text detection algorithm which
is hybrid and multi-scale. First, it relies on a connected
component-based approach: After the segmentation of the
image, a classification step using a new wavelet descriptor
spots the letters. A new graph modeling and its traversal
procedure allow to form candidate text areas. Second, a
texture-based approach discards the false positives.
Finally, the detected text areas are precisely cut out and
a new binarization step is introduced. The main advantage
of our method is that few assumptions are put forward.
Thus, ``challenging texts'' like multi-sized,
multi-colored, multi-oriented or curved text can be
localized. The efficiency of TextCatcher has been validated
on three different datasets: Two come from the ICDAR
competition, and the third one contains photographs we have
taken with various daily life texts. We present both
qualitative and quantitative results.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/fabrizio.16.ijdar.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2016-04-08},
doi = {10.1007/s10032-016-0264-4}
}
@InProceedings{ fouquier.07.gbr,
author = {Geoffroy Fouquier and Jamal Atif and Isabelle Bloch},
title = {Local reasoning in fuzzy attribute graphs for optimizing
sequential segmentation},
booktitle = {Proceedings of the 6th IAPR TC-15 Workshop on Graph-based
Representations in Pattern Recognition (GBR)},
year = 2007,
month = jun,
address = {Alicante, Spain},
volume = {LNCS 4538},
editor = {F. Escolano and M. Vento},
publisher = {Springer Verlag},
isbn = {978-3-540-72902-0},
pages = {138--147},
abstract = {Spatial relations play a crucial role in model-based image
recognition and interpretation due to their stability
compared to many other image appearance characteristics.
Graphs are well adapted to represent such information.
Sequential methods for knowledge-based recognition of
structures require to define in which order the structures
have to be recognized. We propose to address this problem
of order definition by developing algorithms that
automatically deduce sequential segmentation paths from
fuzzy spatial attribute graphs. As an illustration, these
algorithms are applied on brain image understanding.},
lrdekeywords = {Image},
lrdenewsdate = {2007-02-15}
}
@InProceedings{ fouquier.07.icassp,
author = {Geoffroy Fouquier and Laurence Likforman and J\'er\^ome
Darbon and Bulent Sankur},
title = {The Biosecure Geometry-based System for Hand Modality},
booktitle = {Proceedings of the 32nd IEEE International Conference on
Acoustics, Speech, and Signal Processing (ICASSP)},
year = 2007,
month = apr,
address = {Honolulu, Hawaii, USA},
volume = {I},
pages = {801--804},
isbn = {1-4244-0728-1},
abstract = {We present an identification and authentification system
based on hand modality which is part of a reference system
for all modalities developed within the Biosecure
consortium. It relies on simple geometric features
extracted from hand boundary. The different steps of this
system are detailed, namely: pre-processing, feature
extraction and hand matching. This system has been tested
on the Biosecure hand database which consists of 4500 hand
images of 750 individuals. Results are detailed with
respect to different enrolment conditions such as
population size, enrolment size, and image resolution.},
lrdekeywords = {Image},
lrdenewsdate = {2006-12-18}
}
@InProceedings{ fronc.13.atva,
author = {{\L}ukasz Fronc and Alexandre Duret-Lutz},
title = {{LTL} Model Checking with {N}eco},
booktitle = {Proceedings of the 11th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'13)},
year = 2013,
series = {Lecture Notes in Computer Science},
volume = 8172,
pages = {451--454},
address = {Hanoi, Vietnam},
month = oct,
publisher = {Springer},
abstract = {We introduce neco-spot, an LTL model checker for Petri net
models. It builds upon Neco, a compiler turning Petri nets
into native shared libraries that allows fast on-the-fly
exploration of the state-space, and upon Spot, a C++
library of model-checking algorithms. We show the
architecture of Neco and explain how it was combined with
Spot to build an LTL model checker.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/fronc.13.atva.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2013-06-15},
doi = {10.1007/978-3-319-02444-8_33}
}
@InProceedings{ geraud.00.europlop,
author = {Thierry G\'eraud and Alexandre Duret-Lutz},
title = {Generic programming redesign of patterns},
booktitle = {Proceedings of the 5th European Conference on Pattern
Languages of Programs (EuroPLoP)},
year = 2000,
month = jul,
address = {Irsee, Germany},
pages = {283--294},
editor = {M. Devos and A. R\"uping},
publisher = {UVK, Univ. Verlag, Konstanz},
lrdeprojects = {Software},
abstract = {This pattern faces the problem of improving the
performances of design patterns when they are involved in
intensive algorithms. Generic programming is a paradigm in
which most classes and procedures are parameterized, thus
leading to the construction of general and efficient
software components. We demonstrate that some design
patterns from Gamma et al. can be translated into this
paradigm while handling operation polymorphism by
parametric polymorphism. We thus preserve their modularity
and reusability properties but we avoid the performance
penalty due to their dynamic behavior, which is a critical
issue in numerical computing.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/europlop00.pdf},
lrdekeywords = {Software engineering}
}
@InProceedings{ geraud.00.icpr,
author = {Thierry G\'eraud and Yoann Fabre and Alexandre Duret-Lutz
and Dimitri Papadopoulos-Orfanos and Jean-Fran\c{c}ois
Mangin},
title = {Obtaining genericity for image processing and pattern
recognition algorithms},
booktitle = {Proceedings of the 15th International Conference on
Pattern Recognition (ICPR)},
year = 2000,
month = sep,
address = {Barcelona, Spain},
volume = 4,
pages = {816--819},
publisher = {IEEE Computer Society},
lrdeprojects = {Olena},
abstract = {Algorithm libraries dedicated to image processing and
pattern recognition are not reusable; to run an algorithm
on particular data, one usually has either to rewrite the
algorithm or to manually ``copy, paste, and modify''. This
is due to the lack of genericity of the programming
paradigm used to implement the libraries. In this paper, we
present a recent paradigm that allows algorithms to be
written once and for all and to accept input of various
types. Moreover, this total reusability can be obtained
with a very comprehensive writing and without significant
cost at execution, compared to a dedicated algorithm. This
new paradigm is called ``generic programming'' and is fully
supported by the C++ language. We show how this paradigm
can be applied to image processing and pattern recognition
routines. The perspective of our work is the creation of a
generic library.}
}
@InProceedings{ geraud.00.rfia,
author = {Thierry G\'eraud and Isabelle Bloch and Henri Ma{\^\i}tre},
title = {Reconnaissance de structures c\'er\'ebrales \`a l'aide
d'un atlas et par fusion d'informations structurelles floues},
booktitle = {Actes du 12\`eme Congr\`es Francophone AFRIF-AFIA de
Reconnaissance des Formes et Intelligence Artificielle
(RFIA)},
year = 2000,
address = {Paris, France},
month = feb,
volume = 1,
pages = {287--295},
note = {EPITA as current address},
category = {national},
lrdeprojects = {Olena},
abstract = {Nous proposons une proc\'edure automatique de
reconnaissance progressive des structures internes du
cerveau guid\'ee par un atlas anatomique. L'originalit\'e
de notre proc\'edure est multiple. D'une part, elle prend
en compte des informations structurelles sous la forme de
contraintes spatiales flexibles, en utilisant les
formalismes de la th\'eorie des ensembles flous et de la
fusion d'informations. D'autre part, le calcul de la
correspondance entre volume IRM et atlas que nous proposons
permet d'inf\'erer un champ de d\'eformations discret,
respectant des contraintes sur la surface des objets.
Enfin, le caract\`ere s\'equentiel de la proc\'edure permet
de s'appuyer sur la connaissance des objets d\'ej\`a
segment\'es pour acc\'eder \`a des objets dont l'obtention
est a priori de plus en plus difficile.},
lrdekeywords = {Image}
}
@InProceedings{ geraud.01.ai,
author = {Thierry G\'eraud and Yoann Fabre and Alexandre Duret-Lutz},
title = {Applying generic programming to image processing},
booktitle = {Proceedings of the IASTED International Conference on
Applied Informatics (AI)---Symposium on Advances in
Computer Applications},
year = 2001,
publisher = {ACTA Press},
editor = {M.H.~Hamsa},
address = {Innsbruck, Austria},
pages = {577--581},
month = feb,
lrdeprojects = {Olena},
abstract = {This paper presents the evolution of algorithms
implementation in image processing libraries and discusses
the limits of these implementations in terms of
reusability. In particular, we show that in C++, an
algorithm can have a general implementation; said
differently, an implementation can be generic, i.e.,
independent of both the input aggregate type and the type
of the data contained in the input aggregate. A total
reusability of algorithms can therefore be obtained;
moreover, a generic implementation is more natural and does
not introduce a meaningful additional cost in execution
time as compared to an implementation dedicated to a
particular input type.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/ai01-slides.pdf}
}
@InProceedings{ geraud.01.icip,
author = {Thierry G\'eraud and Pierre-Yves Strub and J\'er\^ome
Darbon},
title = {Color image segmentation based on automatic morphological
clustering},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
year = 2001,
volume = 3,
pages = {70--73},
address = {Thessaloniki, Greece},
month = oct,
lrdeprojects = {Olena},
abstract = {We present an original method to segment color images
using a classification in the 3-D color space. In the case
of ordinary images, clusters that appear in 3-D histograms
usually do not fit a well-known statistical model. For that
reason, we propose a classifier that relies on mathematical
morphology, and more precisely on the watershed algorithm.
We show on various images that the expected color clusters
are correctly identified by our method. Last, to segment
color images into coherent regions, we perform a Markovian
labeling that takes advantage of the morphological
classification results.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/geraud.01.icip_slides.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/geraud.01.icip.pdf},
lrdekeywords = {Image}
}
@InProceedings{ geraud.01.icisp,
author = {Thierry G\'eraud and Pierre-Yves Strub and J\'er\^ome
Darbon},
title = {Segmentation d'images en couleur par classification
morphologique non supervis\'ee},
booktitle = {Proceedings of the International Conference on Image and
Signal Processing (ICISP)},
year = 2001,
pages = {387--394},
address = {Agadir, Morocco},
month = may,
publisher = {Faculty of Sciences at Ibn Zohr University, Morocco},
note = {In French},
lrdeprojects = {Olena},
abstract = {In this paper, we present an original method to segment
color images using a classification of the image histogram
in the 3D color space. As color modes in natural images
usually do not fit a well-known statistical model, we
propose a classifier that rely on mathematical morphology
and, more particularly, on the watershed algorithm. We show
on various images that the expected color modes are
correctly identified and, in order to obtain coherent
region, we extend the method to make the segmentation
contextual.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/icisp01.pdf},
lrdekeywords = {Image}
}
@InProceedings{ geraud.03.grec,
author = {Thierry G\'eraud and Geoffroy Fouquier and Quoc Peyrot and
Nicolas Lucas and Franck Signorile},
title = {Document type recognition using evidence theory},
booktitle = {Proceedings of the 5th IAPR International Workshop on
Graphics Recognition (GREC)},
year = 2003,
pages = {212--221},
editor = {Josep Llad\`os},
address = {Computer Vision Center, UAB, Barcelona, Spain},
month = jul,
lrdeprojects = {Olena},
abstract = {This paper presents a method to recognize the type of a
document when a database of models (document types) is
given. For instance, when all documents are forms and
when we know all the different types of forms, we want to be
able to assign to an input document its type of form. To
that aim, we define each model by a set of characteristics
whose nature can vary from one to another. For instance, a
characteristic can be having a flower-shaped logo on
top-left as well as having about 12pt fonts. This paper
does not intend to explain how to extract such knowledge
from documents but it describes how to use such information
to decide what the type of a given document is when
different document types are described by
characteristics.},
lrdekeywords = {Image},
lrdenewsdate = {2003-04-29}
}
@InProceedings{ geraud.03.ibpria,
author = {Thierry G\'eraud},
title = {Segmentation of curvilinear objects using a
watershed-based curve adjacency graph},
booktitle = {Proceedings of the 1st Iberian Conference on Pattern
Recognition and Image Analysis (IbPRIA)},
pages = {279--286},
year = 2003,
volume = 2652,
series = {Lecture Notes in Computer Science},
address = {Mallorca, Spain},
month = jun,
publisher = {Springer-Verlag},
lrdeprojects = {Olena},
abstract = {This paper presents a general framework to segment
curvilinear objects in 2D images. A pre-processing step
relies on mathematical morphology to obtain a connected
line which encloses curvilinear objects. Then, a graph is
constructed from this line and a Markovian Random Field is
defined to perform objects segmentation. Applications of
our framework are numerous: they go from simple curve
segmentation to complex road network extraction in
satellite images.},
lrdekeywords = {Image},
lrdenewsdate = {2003-03-10}
}
@InProceedings{ geraud.03.icisp,
author = {Thierry G\'eraud},
title = {Segmentation d'objets curvilignes \`a l'aide des champs de
Markov sur un graphe d'adjacence de courbes issu de
l'algorithme de la ligne de partage des eaux},
booktitle = {Proceedings of the International Conference on Image and
Signal Processing (ICISP)},
year = 2003,
volume = 2,
pages = {404--411},
address = {Agadir, Morocco},
month = jun,
publisher = {Faculty of Sciences at Ibn Zohr University, Morocco},
note = {In French},
lrdeprojects = {Olena},
abstract = {This paper presents a general framework to segment
curvilinear objects in 2D images. A pre-processing step
relies on mathematical morphology to obtain a connected
line which encloses curvilinear objects. Then, a graph is
constructed from this line and a Markovian Random Field is
defined to perform objects segmentation. Applications of
our framework are numerous: they go from simple curve
segmentation to complex road network extraction in
satellite images.},
lrdekeywords = {Image}
}
@InProceedings{ geraud.03.nsip,
author = {Thierry G\'eraud},
title = {Fast Road Network Extraction in Satellite Images using
Mathematical Morphology and {MRF}},
booktitle = {Proceedings of the EURASIP Workshop on Nonlinear Signal
and Image Processing (NSIP)},
year = 2003,
address = {Trieste, Italy},
month = jun,
lrdeprojects = {Olena},
abstract = {This paper presents a fast method to extract road network
in satellite images. A pre-processing stage relies on
mathematical morphology to obtain a connected line which
encloses road network. Then, a graph is constructed from
this line and a Markovian Random Field is defined to
perform road extraction.},
lrdekeywords = {Image},
lrdenewsdate = {2003-01-31}
}
@InProceedings{ geraud.04.iccvg,
author = {Thierry G\'eraud and Giovanni Palma and Niels {Van Vliet}},
title = {Fast color image segmentation based on levellings in
feature space},
booktitle = {Computer Vision and Graphics---International Conference on
Computer Vision and Graphics (ICCVG), Warsaw, Poland,
September 2004},
year = 2004,
series = {Computational Imaging and Vision},
volume = 32,
publisher = {Kluwer Academic Publishers},
pages = {800--807},
note = {On CD},
lrdeprojects = {Olena},
abstract = {This paper presents a morphological classifier with
application to color image segmentation. The basic idea of
a morphological classifier is to consider that a color
histogram is a 3D gray-level image and that morphological
operators can be applied to modify this image. The final
objective is to extract clusters in color space, that is,
identify regions in the 3D image. In this paper, we
particularly focus on a powerful class of morphology-based
filters called levellings to transform the 3D
histogram-image to identify clusters. We also show that our
method gives better results than the ones of
state-of-the-art methods.},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/iccvg04/geraud-04-iccvg-slides.pdf},
lrdenewsdate = {2004-08-11}
}
@Article{ geraud.04.jasp,
author = {Thierry G\'eraud and Jean-Baptiste Mouret},
title = {Fast road network extraction in satellite images using
mathematical morphology and {M}arkov random fields},
journal = {EURASIP Journal on Applied Signal Processing},
year = 2004,
number = 16,
volume = 2004,
pages = {2503--2514},
month = nov,
note = {Special issue on Nonlinear Signal and Image Processing -
Part II},
lrdeprojects = {Olena},
doi = {10.1155/S1110865704409093},
abstract = {This paper presents a fast method for road network
extraction in satellite images. It can be seen as a
transposition of the segmentation scheme ``watershed
transform + region adjacency graph + Markov random fields''
to the extraction of curvilinear objects. Many road
extractors can be found in the literature which are
composed of two stages. The first one acts like a filter
that can decide from a local analysis, at every image
point, if there is a road or not. The second stage aims at
obtaining the road network structure. In the method we
propose, we rely on a ``potential'' image, that is,
unstructured image data that can be derived from any road
extractor filter. In such a potential image, the value
assigned to a point is a measure of its likelihood to be
located in the middle of a road. A filtering step applied
on the potential image relies on the area closing operator
followed by the watershed transform to obtain a connected
line which encloses the road network. Then a graph
describing adjacency relationships between watershed lines
is built. Defining Markov random fields upon this graph,
associated with an energetic model of road networks, leads
to the expression of road network extraction as a global
energy minimization problem. This method can easily be
adapted to other image processing fields where the
recognition of curvilinear structures is involved.},
lrdekeywords = {Image},
lrdenewsdate = {2004-09-05}
}
@InProceedings{ geraud.05.ismm,
author = {Thierry G\'eraud},
title = {Ruminations on {T}arjan's {U}nion-{F}ind algorithm and
connected operators},
booktitle = {Proceedings of the 7th International Symposium on
Mathematical Morphology (ISMM'05)},
year = 2005,
volume = 30,
pages = {105--116},
abstract = {This paper presents a comprehensive and general form of
the Tarjan's union-find algorithm dedicated to connected
operators. An interesting feature of this form is to
introduce the notion of separated domains. The properties
of this form and its flexibility are discussed and
highlighted with examples. In particular, we give clues to
handle correctly the constraint of domain-disjointness
preservation and, as a consequence, we show how we can rely
on ``union-find'' to obtain algorithms for self-dual
filters approaches and levelings with a marker function.},
address = {Paris, France},
lrdekeywords = {Image},
lrdenewsdate = {2005-01-05},
lrdeprojects = {Olena},
month = apr,
publisher = {Springer},
series = {Computational Imaging and Vision}
}
@InProceedings{ geraud.08.mpool,
author = {Thierry G\'eraud and Roland Levillain},
title = {Semantics-Driven Genericity: A Sequel to the Static {C++}
Object-Oriented Programming Paradigm ({SCOOP 2})},
booktitle = {Proceedings of the 6th International Workshop on
Multiparadigm Programming with Object-Oriented Languages
(MPOOL)},
year = 2008,
address = {Paphos, Cyprus},
month = jul,
lrdeprojects = {Olena},
abstract = {Classical (unbounded) genericity in \Cxx{}03 defines the
interactions between generic data types and algorithms in
terms of concepts. Concepts define the requirements over a
type (or a parameter) by expressing constraints on its
methods and dependent types (typedefs). The upcoming
\Cxx{}0x standard will promote concepts from abstract
entities (not directly enforced by the tools) to language
constructs, enabling compilers and tools to perform
additional checks on generic constructs as well as enabling
new features (e.g., concept-based overloading). Most modern
languages support this notion of signature on generic
types. However, generic types built on other types and
relying on concepts to both ensure type conformance and
drive code specialization, restrain the interface and the
implementation of the newly created type: specific methods
and associated types not mentioned in the concept will not
be part of the new type. The paradigm of concept-based
genericity lacks the required semantics to transform types
while retaining or adapting their intrinsic capabilities.
We present a new form of semantically-enriched genericity
allowing static generic type transformations through a
simple form of type introspection based on type metadata
called properties. This approach relies on a new Static
\Cxx Object-Oriented Programming (SCOOP) paradigm, and is
adapted to the creation of generic and efficient libraries,
especially in the field of scientific computing. Our
proposal uses a metaprogramming facility built into a \Cxx
library called Static, and doesn't require any language
extension nor additional processing (preprocessor,
transformation tool).},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/geraud.08.mpool.pdf},
lrdenewsdate = {2008-05-26}
}
@InCollection{ geraud.10.book,
author = {Thierry G\'eraud and Hugues Talbot and Marc Van
Droogenbroeck},
title = {Algorithms for Mathematical Morphology},
booktitle = {Mathematical Morphology---From Theory to Applications},
pages = {323--353},
publisher = {Wiley-ISTE},
year = 2010,
editor = {Laurent Najman and Hugues Talbot},
isbn = {978-1-84821-215-2},
month = jul,
url = {http://eu.wiley.com/WileyCDA/WileyTitle/productCd-1848212151.html},
lrdekeywords = {Image}
}
@InCollection{ geraud.10.livre,
author = {Thierry G\'eraud and Hugues Talbot and Marc Van
Droogenbroeck},
title = {Morphologie et algorithmes},
booktitle = {Morphologie math\'ematique 2~: estimation, choix et mise
en {\oe}uvre},
pages = {151--180},
publisher = {Herm\`es Science Publications},
year = 2010,
series = {IC2 signal et image},
chapter = 6,
editor = {Laurent Najman and Hugues Talbot},
month = sep,
lrdekeywords = {Image}
}
@PhDThesis{ geraud.12.hdr,
author = {Thierry G\'eraud},
title = {Outil logiciel pour le traitement d'images:
Biblioth\`eque, paradigmes, types et algorithmes},
school = {Universit\'e Paris-Est},
year = 2012,
month = jun,
lrdeprojects = {Olena},
type = {Habilitation Thesis},
note = {In French},
lrdepaper = {http://www.lrde.epita.fr/~theo/papers/geraud.2012.hdr.pdf},
lrdeslides = {http://www.lrde.epita.fr/~theo/slides/geraud.2012.hdr_slides.pdf}
}
@InProceedings{ geraud.13.ismm,
author = {Thierry G\'eraud and Edwin Carlinet and S\'ebastien Crozet
and Laurent Najman},
title = {A Quasi-Linear Algorithm to Compute the Tree of Shapes of
{$n$-D} Images},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 11th International
Symposium on Mathematical Morphology (ISMM)},
year = 2013,
editor = {C.L. Luengo Hendriks and G. Borgefors and R. Strand},
volume = 7883,
series = {Lecture Notes in Computer Science},
address = {Uppsala, Sweden},
publisher = {Springer},
pages = {98--110},
lrdeprojects = {Olena},
abstract = {To compute the morphological self-dual representation of
images, namely the tree of shapes, the state-of-the-art
algorithms do not have a satisfactory time complexity.
Furthermore the proposed algorithms are only effective for
2D images and they are far from being simple to implement.
That is really penalizing since a self-dual
representation of images is a structure that gives rise to many
powerful operators and applications, and that could be very
useful for 3D images. In this paper we propose a
simple-to-write algorithm to compute the tree of shapes; it
works for nD images and has a quasi-linear complexity when
data quantization is low, typically 12 bits or less. To get
that result, this paper introduces a novel representation
of images that has some amazing properties of continuity,
while remaining discrete.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/geraud.13.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2013-03-14}
}
@InProceedings{ geraud.14.icip,
author = {Thierry G\'eraud},
title = {A Morphological Method for Music Score Staff Removal},
booktitle = {Proceedings of the 21st International Conference on Image
Processing (ICIP)},
year = 2014,
address = {Paris, France},
pages = {2599--2603},
lrdeprojects = {Olena},
abstract = {Removing the staff in music score images is a key to
improve the recognition of music symbols and, with ancient
and degraded handwritten music scores, it is not a
straightforward task. In this paper we present the method
that has won in 2013 the staff removal competition,
organized at the International Conference on Document
Analysis and Recognition (ICDAR). The main characteristics
of this method is that it essentially relies on
mathematical morphology filtering. So it is simple, fast,
and its full source code is provided to favor reproducible
research.},
lrdeinc = {Publications/geraud.14.icip.inc},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/geraud.14.icip.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/geraud.14.icip.poster.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2014-06-03},
doi = {10.1109/ICIP.2014.7025526}
}
@InProceedings{ geraud.15.ismm,
author = {Thierry G\'eraud and Edwin Carlinet and S\'ebastien Crozet},
title = {Self-Duality and Digital Topology: {L}inks Between the
Morphological Tree of Shapes and Well-Composed Gray-Level
Images},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 12th International
Symposium on Mathematical Morphology (ISMM)},
year = {2015},
series = {Lecture Notes in Computer Science},
volume = {9082},
address = {Reykjavik, Iceland},
publisher = {Springer},
editor = {J.A. Benediktsson and J. Chanussot and L. Najman and H.
Talbot},
pages = {573--584},
lrdeprojects = {Olena},
abstract = {In digital topology, the use of a pair of connectivities
is required to avoid topological paradoxes. In mathematical
morphology, self-dual operators and methods also rely on
such a pair of connectivities. There are several major
issues: self-duality is impure, the image graph structure
depends on the image values, it impacts the way small
objects and texture are processed, and so on. A sub-class
of images defined on the cubical grid, {\it well-composed}
images, has been proposed, where all connectivities are
equivalent, thus avoiding many topological problems. In
this paper we unveil the link existing between the notion
of well-composed images and the morphological tree of
shapes. We prove that a well-composed image has a
well-defined tree of shapes. We also prove that the only
self-dual well-composed interpolation of a 2D image is
obtained by the median operator. What follows from our
results is that we can have a purely self-dual
representation of images, and consequently, purely
self-dual operators.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/geraud.15.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2015-04-07},
doi = {10.1007/978-3-319-18720-4_48}
}
@InProceedings{ geraud.17.ismm,
author = {Thierry G\'eraud and Yongchao Xu and Edwin Carlinet and
Nicolas Boutry},
title = {Introducing the {D}ahu Pseudo-Distance},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 13th International
Symposium on Mathematical Morphology (ISMM)},
year = {2017},
editor = {J. Angulo and S. Velasco-Forero and F. Meyer},
volume = {10225},
series = {Lecture Notes in Computer Science},
pages = {55--67},
month = may,
address = {Fontainebleau, France},
publisher = {Springer},
lrdeinc = {Publications/geraud.17.ismm.inc},
doi = {10.1007/978-3-319-57240-6_5},
lrdeprojects = {Olena},
abstract = {The minimum barrier (MB) distance is defined as the
minimal interval of gray-level values in an image along a
path between two points, where the image is considered as a
vertex-valued graph. Yet this definition does not fit with
the interpretation of an image as an elevation map, i.e. a
somehow continuous landscape. In this paper, based on the
discrete set-valued continuity setting, we present a new
discrete definition for this distance, which is compatible
with this interpretation, while being free from digital
topology issues. Amazingly, we show that the proposed
distance is related to the morphological tree of shapes,
which in addition allows for a fast and exact computation
of this distance. That contrasts with the classical
definition of the MB distance, where its fast computation
is only an approximation.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/geraud.17.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2017-02-23}
}
@InProceedings{ geraud.99.cimaf,
author = {Thierry G\'eraud and Isabelle Bloch and Henri Ma\^{\i}tre},
title = {Atlas-guided recognition of cerebral structures in {MRI}
using fusion of fuzzy structural information},
booktitle = {Proceedings of the CIMAF Symposium on Artificial
Intelligence},
pages = {99--106},
year = 1999,
address = {La Havana, Cuba},
note = {EPITA as current address.},
lrdeprojects = {Olena}
}
@InProceedings{ geraud.99.gretsi,
author = {Thierry G\'eraud and Yoann Fabre and Dimitri
Papadopoulos-Orfanos and Jean-Fran\c{c}ois Mangin},
title = {Vers une r\'eutilisabilit\'e totale des algorithmes de
traitement d'images},
booktitle = {Proceedings of the 17th Symposium on Signal and Image
Processing (GRETSI)},
category = {national},
pages = {331--334},
volume = 2,
year = 1999,
address = {Vannes, France},
month = sep,
note = {In French},
lrdeprojects = {Olena},
abstract = {Cet article pr\'esente l'\'evolution des techniques de
programmation d'algorithmes de traitement d'images et
discute des limites de la r\'eutilisabilit\'e de ces
algorithmes. En particulier, nous montrons qu'en C++ un
algorithme peut s'\'ecrire sous une forme g\'en\'erale,
ind\'ependante aussi bien du type des donn\'ees que du type
des structures de donn\'ees sur lesquelles il peut
s'appliquer. Une r\'eutilisabilit\'e totale des algorithmes
peut donc \^etre obtenue ; mieux, leur \'ecriture est plus
naturelle et elle n'introduit pas de surco\^ut significatif
en temps d'ex\'ecution.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/gretsi99.pdf}
}
@Article{ goetz.18.tpds,
author = {Markus G\"otz and Gabriele Cavallaro and Thierry G\'eraud
and Matthias Book and Morris Riedel},
title = {Parallel Computation of Component Trees on Distributed
Memory Machines},
journal = {IEEE Transactions on Parallel and Distributed Systems},
year = 2018,
volume = {29},
number = {11},
pages = {2582--2598},
month = may,
doi = {10.1109/TPDS.2018.2829724},
abstract = {Component trees are region-based representations that
encode the inclusion relationship of the threshold sets of
an image. These representations are one of the most
promising strategies for the analysis and the
interpretation of spatial information of complex scenes as
they allow the simple and efficient implementation of
connected filters. This work proposes a new efficient
hybrid algorithm for the parallel computation of two
particular component trees---the max- and min-tree---in
shared and distributed memory environments. For the
node-local computation a modified version of the
flooding-based algorithm of Salembier is employed. A novel
tuple-based merging scheme allows to merge the acquired
partial images into a globally correct view. Using the
proposed approach a speed-up of up to 44.88 using 128
processing cores on eight-bit gray-scale images could be
achieved. This is more than a five-fold increase over the
state-of-the-art shared-memory algorithm, while also
requiring only one-thirty-second of the memory.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/goetz.18.tpds.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-04-02}
}
@Misc{ gournet.05.sud,
author = {Alexandre Borghi and Valentin David and Akim Demaille and
Olivier Gournet},
title = {Implementing attributes in {SDF}},
year = 2005,
address = {Utrecht {U}niversity, {N}etherlands},
note = {Communication to Stratego Users Day 2005},
month = may,
lrdeprojects = {Transformers},
abstract = {Attribute Grammars (AGs) provide a very convenient means
to bind semantics to syntax. They enjoy an extensive
bibliography and are used in several types of applications.
Yet, to our knowledge, their use to disambiguate is novel.
We present our implementation of an evaluator of attributes
for ambiguous AGs, tailored to ambiguous parse trees
disambiguation. This paper focuses on its implementation
that heavily relies on Stratego/XT, which is also used as
language to express the attribute rules. A companion paper
presents the disambiguation process in details
200505-SUD-disamb.}
}
@InProceedings{ grosicki.04.icc,
author = {Emmanuel Grosicki and Karim Abed-Meraim and R\'eda Dehak},
title = {A novel method to fight the non line of sight error in
{AOA} measurements for mobile location},
booktitle = {Proceedings of the IEEE International Conference on
Communications (ICC)},
year = 2004,
volume = 5,
pages = {2794--2798},
address = {Paris, France},
month = jun,
lrdeprojects = {Olena},
abstract = {In this contribution, a mobile location method is provided
using measurements from two different Base-Stations. Although
computationally simple, the proposed method is based on a
simple trilateration and takes into
account error measurements caused by Non-Line-Of-Sight
(NLOS) and near-far effect. The new method attributes an
index of confidence for each measure, in order to allow the
mobile to select the two most reliable measures and not to
use all measures, equally.},
lrdekeywords = {Image}
}
@InProceedings{ guirado.05.pdmc,
author = {Guillaume Guirado and Thomas Herault and Richard Lassaigne
and Sylvain Peyronnet},
title = {Distribution, approximation and probabilistic model
checking},
booktitle = {Proceedings of the 4th international workshop on Parallel
and Distributed Model Checking (PDMC)},
year = 2005,
lrdeprojects = {APMC},
abstract = {APMC is a model checker dedicated to the quantitative
verification of fully probabilistic systems against LTL
formulas. Using a Monte-Carlo method in order to
efficiently approximate the verification of probabilistic
specifications, it could be used naturally in a distributed
framework. We present here the tool and its distribution
scheme, together with extensive performance evaluation,
showing the scalability of the method, even on clusters
containing 500+ heterogeneous workstations.},
lrdenewsdate = {2005-05-23}
}
@InProceedings{ hacquard.21.els,
author = {Antoine Hacquard and Didier Verna},
title = {A Corpus Processing and Analysis Pipeline for {Q}uickref},
booktitle = {Proceedings of the 14th European Lisp Symposium (ELS)},
year = 2021,
pages = {27--35},
month = may,
address = {Online},
isbn = 9782955747452,
doi = {10.5281/zenodo.4714443},
abstract = {Quicklisp is a library manager working with your existing
Common Lisp implementation to download and install around
2000 libraries, from a central archive. Quickref, an
application itself written in Common Lisp, generates,
automatically and by introspection, a technical
documentation for every library in Quicklisp, and produces
a website for this documentation. In this paper, we present
a corpus processing and analysis pipeline for Quickref.
This pipeline consists of a set of natural language
processing blocks allowing us to analyze Quicklisp
libraries, based on natural language contents sources such
as README files, docstrings, or symbol names. The ultimate
purpose of this pipeline is the generation of a keyword
index for Quickref, although other applications such as
word clouds or topic analysis are also envisioned.}
}
@InProceedings{ hamez.07.pohll,
author = {Alexandre Hamez and Fabrice Kordon and Yann Thierry-Mieg},
title = {{libDMC}: a library to Operate Efficient Distributed Model
Checking},
booktitle = {Workshop on Performance Optimization for High-Level
Languages and Libraries --- associated to IPDPS'2007},
year = 2007,
lrdeprojects = {Verification},
abstract = {Model checking is a formal verification technique that
allows to automatically prove that a system's behavior is
correct. However it is often prohibitively expensive in
time and memory complexity, due to the so-called state
space explosion problem. We present a generic
multi-threaded and distributed infrastructure library
designed to allow distribution of the model checking
procedure over a cluster of machines. This library is
generic, and is designed to allow encapsulation of any
model checker in order to make it distributed. Performance
evaluations are reported and clearly show the advantages of
multi-threading to occupy processors while waiting for the
network, with linear speedup over the number of
processors.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/hamez.07.pohll.pdf},
lrdenewsdate = {2007-03-13}
}
@InProceedings{ hamez.08.atpn,
author = {Alexandre Hamez and Yann Thierry-Mieg and Fabrice Kordon},
title = {Hierarchical Set Decision Diagrams and Automatic
Saturation},
booktitle = {Petri Nets and Other Models of Concurrency -- ICATPN 2008},
year = 2008,
lrdeprojects = {Verification},
abstract = {Shared decision diagram representations of a state-space
have been shown to provide efficient solutions for
model-checking of large systems. However, decision diagram
manipulation is tricky, as the construction procedure is
liable to produce intractable intermediate structures
(a.k.a peak effect). The definition of the so-called
saturation method has empirically been shown to mostly
avoid this peak effect, and allows verification of much
larger systems. However, applying this algorithm currently
requires deep knowledge of the decision diagram
data-structures, of the model or formalism manipulated, and
a level of interaction that is not offered by the API of
public DD packages. Hierarchical Set Decision Diagrams
(SDD) are decision diagrams in which arcs of the structure
are labeled with sets, themselves stored as SDD. This data
structure offers an elegant and very efficient way of
encoding structured specifications using decision diagram
technology. It also offers, through the concept of
inductive homomorphisms, unprecedented freedom to the user
when defining the transition relation. Finally, with very
limited user input, the SDD library is able to optimize
evaluation of a transition relation to produce a saturation
effect at runtime. We further show that using recursive
folding, SDD are able to offer solutions in logarithmic
complexity with respect to other DD. We conclude with some
performances on well known examples.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/hamez.08.atpn.pdf},
lrdenewsdate = {2008-03-01}
}
@Article{ hamez.09.fi,
abstract = {Shared decision diagram representations of a state-space
provide efficient solutions for model-checking of large
systems. However, decision diagram manipulation is tricky,
as the construction procedure is liable to produce
intractable intermediate structures (a.k.a peak effect).
The definition of the so-called saturation method has
empirically been shown to mostly avoid this peak effect,
and allows verification of much larger systems. However,
applying this algorithm currently requires deep knowledge
of the decision diagram data-structures. Hierarchical Set
Decision Diagrams (SDD) are decision diagrams in which arcs
of the structure are labeled with sets, themselves stored
as SDD. This data structure offers an elegant and very
efficient way of encoding structured specifications using
decision diagram technology. It also offers, through the
concept of inductive homomorphisms, flexibility to a user
defining a transition relation. We show in this paper how,
with very limited user input, the SDD library is able to
optimize evaluation of a transition relation to produce a
saturation effect at runtime. We build as an example an SDD
model-checker for a compositional formalism: Instantiable
Petri Nets (IPN). IPN define a \emph{type} as an abstract
contract. Labeled P/T nets are used as an elementary type.
A composite type is defined to hierarchically contain
instances (of elementary or composite type). To compose
behaviors, IPN use classic label synchronization semantics
from process calculi. With a particular recursive folding
SDD are able to offer solutions for symmetric systems in
logarithmic complexity with respect to other DD. Even in
less regular cases, the use of hierarchy in the
specification is shown to be well supported by SDD.
Experimentations and performances are reported on some well
known examples. },
author = {Alexandre Hamez and Yann Thierry-Mieg and Fabrice Kordon},
date-added = {2009-05-06 16:39:07 +0200},
date-modified = {2009-05-06 16:48:10 +0200},
journal = {Fundamenta Informaticae},
title = {Building Efficient Model checkers using Hierarchical Set
Decision Diagrams and automatic Saturation},
year = 2009
}
@PhDThesis{ hamez.09.phd,
author = {Alexandre Hamez},
title = {G{\'{e}}n{\'{e}}ration efficace de grands espaces
d'{\'{e}}tats},
year = {2009},
month = dec,
school = {{Universit{\'e} Pierre et Marie Curie - Paris VI}},
address = {Paris, France},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/hamez.09.phd.pdf}
}
@InProceedings{ hemon.08.sagt,
author = {S\'ebastien H\'emon and Michel de Rougemont and Miklos
Santha},
title = {Approximate {N}ash Equilibria for Multi-Player Games},
titre = {\'Equilibres de Nash approch\'es dans les jeux
multi-joueurs},
booktitle = {1st International Symposium on Algorithmic Games Theory},
year = 2008,
address = {Paderborn, Germany},
month = apr,
resume = {Les \'equilibres de Nash sont des positions-cl\'es de tout
jeu admettant une repr\'esentation finie : en effet, quel
que soit le nombre de joueurs et de strat\'egies, une telle
position existe toujours. Lorsqu'elle est atteinte, elle
dissuade tout joueur de vouloir se d\'etourner de sa
strat\'egie actuelle, d'o\`u la notion d'\'equilibre. De
nombreux probl\`emes y font appel mais calculer de
fa\c{c}on effective l'\'equilibre demeure un probl\`eme
difficile. En effet, le meilleur algorithme connu pour,
dans le cas g\'en\'eral, calculer un \'equilibre est
exponentiel en le nombre de strat\'egies. Nous
pr\'esenterons ici la notion d'\'equilibres approch\'es, et
donnerons des r\'esultats concernant leur calcul. Nous
montrerons qu'il ne saurait exister d'algorithmes pouvant
calculer un \'equilibre, m\^eme approch\'e, sans utiliser
au moins, pour un joueur, un nombre logarithmique de
strat\'egies. Nous montrerons comment calculer un
\'equilibre approch\'e en temps sub-exponentiel
$n^{\mathcal{O}(\frac{\ln n}{\varepsilon^2})}$, ce qui
demeure actuellement, pour le cas g\'en\'eral, la meilleure
complexit\'e en pire cas. Enfin, nous pr\'esenterons une
approche inductive de transfert d'approximation d'une
position d'un jeu \`a deux joueurs en une approximation
pour un jeu \`a $r$ joueurs, ce qui conf\`ere des r\'esultats novateurs dans le domaine.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/hemon.08.sagt.pdf},
lrdenewsdate = {2008-01-18}
}
@InProceedings{ herault.06.qest,
author = {Thomas H\'erault and Richard Lassaigne and Sylvain
Peyronnet},
title = {{APMC 3.0}: Approximate verification of Discrete and
Continuous Time Markov Chains},
booktitle = {Proceedings of QEST 2006},
year = 2006,
pages = {129--130},
lrdeprojects = {APMC},
abstract = {In this paper, we give a brief overview of APMC
(Approximate Probabilistic Model Checker). APMC is a model
checker that implements approximate probabilistic
verification of probabilistic systems. It is based on
Monte-Carlo method and the theory of randomized
approximation schemes and allows to verify extremely large
models without explicitly representing the global
transition system. To avoid the state-space explosion
phenomenon, APMC gives an accurate approximation of the
satisfaction probability of the property instead of the
exact value, but using only a very small amount of memory.
The version of APMC we present in this paper can now handle
efficiently both discrete and continuous time probabilistic
systems.}
}
@InProceedings{ huynh.16.icpr,
author = {L\^e Duy {Hu\`ynh} and Yongchao Xu and Thierry G\'eraud},
title = {Morphology-Based Hierarchical Representation with
Application to Text Segmentation in Natural Images},
booktitle = {Proceedings of the 23rd International Conference on
Pattern Recognition (ICPR)},
year = 2016,
address = {Canc\'un, M\'exico},
month = dec,
pages = {4029--4034},
publisher = {IEEE Computer Society},
lrdeprojects = {Olena},
abstract = { Many text segmentation methods are elaborate and thus are
not suitable to real-time implementation on mobile devices.
Having an efficient and effective method, robust to noise,
blur, or uneven illumination, is interesting due to the
increasing number of mobile applications needing text
extraction. We propose a hierarchical image representation,
based on the morphological Laplace operator, which is used
to give a robust text segmentation. This representation
relies on several very sound theoretical tools; its
computation eventually translates to a simple labeling
algorithm, and for text segmentation and grouping, to an
easy tree-based processing. We also show that this method
can also be applied to document binarization, with the
interesting feature of getting also reverse-video text.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/huynh.16.icpr.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2016-07-13},
doi = {10.1109/ICPR.2016.7900264}
}
@InProceedings{ huynh.17.ismm,
author = {L\^e Duy {Hu\`ynh} and Yongchao Xu and Thierry G\'eraud},
title = {Morphological Hierarchical Image Decomposition Based on
{L}aplacian 0-Crossings},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 13th International
Symposium on Mathematical Morphology (ISMM)},
year = {2017},
editor = {J. Angulo and S. Velasco-Forero and F. Meyer},
volume = {10225},
series = {Lecture Notes in Computer Science},
month = may,
pages = {159--171},
address = {Fontainebleau, France},
publisher = {Springer},
doi = {10.1007/978-3-319-57240-6_13},
abstract = {A method of text detection in natural images, to be turned
into an effective embedded software on a mobile device,
shall be both efficient and lightweight. We observed that a
simple method based on the morphological Laplace operator
can do the trick: we can construct in quasi-linear time a
hierarchical image decomposition / simplification based on
its 0-crossings, and search for some text in the resulting
tree. Yet, for this decomposition to be sound, we need
``0-crossings'' to be Jordan curves, and to that aim, we
rely on some discrete topology tools. Eventually, the
hierarchical representation is the morphological tree of
shapes of the Laplacian sign. Moreover, we provide an
algorithm with linear time complexity to compute this
representation. We expect that the proposed hierarchical
representation can be useful in some applications other
than text detection.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/huynh.17.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2017-02-23}
}
@PhDThesis{ huynh.18.phd,
author = {L\^e Duy {Hu\`ynh}},
title = {Taking into account inclusion and adjacency information in
morphological hierarchical representations, with
application to the extraction of text in natural images and
videos.},
school = {Sorbonne Universit\'e},
year = 2018,
address = {Paris, France},
month = dec,
abstract = {The inclusion and adjacency relationship between image
regions usually carry contextual information. The later is
widely used since it tells how regions are arranged in
images. The former is usually not taken into account
although it parallels the object-background relationship.
The mathematical morphology framework provides several
hierarchical image representations. They include the Tree
of Shapes (ToS), which encodes the inclusion of level-line,
and the hierarchies of segmentation (e.g., alpha-tree,
BPT), which is useful in the analysis of the adjacency
relationship. In this work, we take advantage of both
inclusion and adjacency information in these
representations for computer vision applications. We
introduce the spatial alignment graph w.r.t inclusion that
is constructed by adding a new adjacency relationship to
nodes of the ToS. In a simple ToS such as our Tree of
Shapes of Laplacian sign, which encodes the inclusion of
Morphological Laplacian 0-crossings, the graph is reduced
to a disconnected graph where each connected component is a
semantic group. In other cases, e.g., classic ToS, the
spatial alignment graph is more complex. To address this
issue, we expand the shape-spaces morphology. Our expansion
has two primary results: 1) It allows the manipulation of
any graph of shapes. 2) It allows any tree filtering
strategy proposed by the connected operators frameworks.
With this expansion, the spatial graph could be analyzed
with the help of an alpha-tree. We demonstrated the
application aspect of our method in the application of text
detection. The experiment results show the efficiency and
effectiveness of our methods, which is appealing to mobile
applications.},
lrdepaper = {https://www.lrde.epita.fr/dload/papers/huynh.18.phd.pdf},
lrdeslides = {https://www.lrde.epita.fr/dload/papers/huynh.18.phd.slides.pdf},
lrdekeywords = {Image},
lrdeprojects = {Olena}
}
@Article{ huynh.19.prl,
author = {L\^e Duy {Hu\`ynh} and Nicolas Boutry and Thierry G\'eraud},
title = {Connected Filters on Generalized Shape-Spaces},
journal = {Pattern Recognition Letters},
year = 2019,
volume = {128},
pages = {348--354},
month = dec,
doi = {10.1016/j.patrec.2019.09.018},
abstract = {Classical hierarchical image representations and connected
filters work on sets of connected components (CC). These
approaches can be defective to describe the relations
between disjoint objects or partitions on images. In
practice, objects can be made of several connected
components in images (due to occlusions for example),
therefore it can be interesting to be able to take into
account the relationship between these components to be
able to detect the whole object. In Mathematical
Morphology, second-generation connectivity (SGC) and
tree-based shape-space study this relation between the
connected components of an image. However, they have
limitations. For this reason, we propose in this paper an
extension of the usual shape-space paradigm into what we
call a Generalized Shape-Space (GSS). This new paradigm
allows to analyze any graph of connected components
hierarchically and to filter them thanks to connected
operators.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/huynh.19.prl.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2019-09-20}
}
@InProceedings{ jacobs.17.synt,
author = {Jacobs, Swen and Basset, Nicolas and Bloem, Roderick and
Brenguier, Romain and Colange, Maximilien and Faymonville,
Peter and Finkbeiner, Bernd and Khalimov, Ayrat and Klein,
Felix and Michaud, Thibaud and P\'erez, Guillermo A. and
Raskin, Jean-Fran\c{c}ois and Sankur, Ocan and Tentrup,
Leander},
title = {The 4th Reactive Synthesis Competition (SYNTCOMP 2017):
Benchmarks, Participants \& Results},
booktitle = {Proceedings Sixth Workshop on Synthesis},
address = {Heidelberg, Germany},
year = {2017},
month = jul,
editor = {Fisman, Dana and Jacobs, Swen},
volume = {260},
series = {Electronic Proceedings in Theoretical Computer Science},
pages = {116--143},
publisher = {Open Publishing Association},
doi = {10.4204/EPTCS.260.10},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/jacobs.17.synt.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2018-01-10}
}
@InProceedings{ kenny.08.odyssey,
author = {Patrick Kenny and Najim Dehak and R\'eda Dehak and Vishwa
Gupta and Pierre Dumouchel},
title = {The Role of Speaker Factors in the {NIST} Extended Data
Task},
booktitle = {Proceedings of the Speaker and Language Recognition
Workshop (IEEE-Odyssey 2008)},
year = 2008,
address = {Stellenbosch, South Africa},
month = jan,
abstract = {We tested factor analysis models having various numbers of
speaker factors on the core condition and the extended data
condition of the 2006 NIST speaker recognition evaluation.
In order to ensure strict disjointness between training and
test sets, the factor analysis models were trained without
using any of the data made available for the 2005
evaluation. The factor analysis training set consisted
primarily of Switchboard data and so was to some degree
mismatched with the 2006 test data (drawn from the Mixer
collection). Consequently, our initial results were not as
good as those submitted for the 2006 evaluation. However we
found that we could compensate for this by a simple
modification to our score normalization strategy, namely by
using 1000 z-norm utterances in zt-norm. Our purpose in
varying the number of speaker factors was to evaluate the
eigenvoiceMAP and classicalMAP components of the
inter-speaker variability model in factor analysis. We
found that on the core condition (i.e. 2--3 minutes of
enrollment data), only the eigenvoice MAP component plays a
useful role. On the other hand, on the extended data
condition (i.e. 15--20 minutes of enrollment data) both the
classical MAP component and the eigenvoice component proved
to be useful provided that the number of speaker factors
was limited. Our best result on the extended data condition
(all trials) was an equal error rate of 2.2\% and a
detection cost of 0.011. },
lrdenewsdate = {2007-09-25}
}
@InProceedings{ kheireddine.21.cp,
author = {Anissa Kheireddine and \'Etienne Renault and Souheib
Baarrir},
title = {Towards better Heuristics for solving Bounded Model
Checking Problems},
booktitle = {Proceedings of the 27th International Conference on
Principles and Practice of Constraint Programming (CP)},
year = {2021},
month = oct,
abstract = {This paper presents a new way to improve the performance
of the SAT-based bounded model checking problem by
exploiting relevant information identified through the
characteristics of the original problem. This led us to
design a new way of building interesting heuristics based
on the structure of the underlying problem. The proposed
methodology is generic and can be applied for any SAT
problem. This paper compares the state-of-the-art approach
with two new heuristics: Structure-based and Linear
Programming heuristics, and shows promising results.},
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/kheireddine.21.cp.pdf},
lrdenewsdate = {2021-08-31},
}
@Article{ kim.20.media,
title = {{PAIP} 2019: {L}iver Cancer Segmentation Challenge},
journal = {Medical Image Analysis},
volume = {67},
pages = {101854},
year = {2021},
month = jan,
issn = {1361-8415},
doi = {10.1016/j.media.2020.101854},
author = {Yoo Jung Kim and Hyungjoon Jang and Kyoungbun Lee and
Seongkeun Park and Sung-Gyu Min and Choyeon Hong and Jeong
Hwan Park and Kanggeun Lee and Jisoo Kim and Wonjae Hong
and Hyun Jung and Yanling Liu and Haran Rajkumar and
Mahendra Khened and Ganapathy Krishnamurthi and Sen Yang
and Xiyue Wang and Chang Hee Han and Jin Tae Kwak and
Jianqiang Ma and Zhe Tang and Bahram Marami and Jack Zeineh
and Zixu Zhao and Pheng-Ann Heng and R{\"u}diger Schmitz and
Frederic Madesta and Thomas R{\"o}sch and Ren\'e Werner and Jie
Tian and \'Elodie Puybareau and Matteo Bovio and Xiufeng
Zhang and Yifeng Zhu and Se Young Chun and Won-Ki Jeong and
Peom Park and Jinwook Choi},
keywords = {Liver cancer, Tumor burden, Digital pathology, Challenge,
Segmentation},
abstract = {Pathology Artificial Intelligence Platform (PAIP) is a
free research platform in support of pathological
artificial intelligence (AI). The main goal of the platform
is to construct a high-quality pathology learning data set
that will allow greater accessibility. The PAIP Liver
Cancer Segmentation Challenge, organized in conjunction
with the Medical Image Computing and Computer Assisted
Intervention Society (MICCAI 2019), is the first image
analysis challenge to apply PAIP datasets. The goal of the
challenge was to evaluate new and existing algorithms for
automated detection of liver cancer in whole-slide images
(WSIs). Additionally, the PAIP of this year attempted to
address potential future problems of AI applicability in
clinical settings. In the challenge, participants were
asked to use analytical data and statistical metrics to
evaluate the performance of automated algorithms in two
different tasks. The participants were given the two
different tasks: Task 1 involved investigating Liver Cancer
Segmentation and Task 2 involved investigating Viable Tumor
Burden Estimation. There was a strong correlation between
high performance of teams on both tasks, in which teams
that performed well on Task 1 also performed well on Task
2. After evaluation, we summarized the top 11 teams'
algorithms. We then gave pathological implications on the
easily predicted images for cancer segmentation and the
challenging images for viable tumor burden estimation. Out
of the 231 participants of the PAIP challenge datasets, a
total of 64 were submitted from 28 team participants. The
submitted algorithms predicted the automatic segmentation
on the liver cancer with WSIs to an accuracy of a score
estimation of 0.78. The PAIP challenge was created in an
effort to combat the lack of research that has been done to
address Liver cancer using digital pathology. It remains
unclear of how the applicability of AI algorithms created
during the challenge can affect clinical diagnoses.
However, the results of this dataset and evaluation metric
provided has the potential to aid the development and
benchmarking of cancer diagnosis and segmentation.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/kim.20.media.pdf},
lrdenewsdate = {2020-11-10}
}
@InProceedings{ kirszenberg.21.dgmm,
author = {Alexandre Kirszenberg and Guillaume Tochon and \'{E}lodie
Puybareau and Jesus Angulo},
title = {Going beyond p-convolutions to learn grayscale
morphological operators},
booktitle = {Proceedings of the IAPR International Conference on
Discrete Geometry and Mathematical Morphology (DGMM)},
year = {2021},
series = {Lecture Notes in Computer Science},
volume = {12708},
month = may,
address = {Uppsala, Sweden},
publisher = {Springer},
pages = {470--482},
abstract = {Integrating mathematical morphology operations within deep
neural networks has been subject to increasing attention
lately. However, replacing standard convolution layers with
erosions or dilations is particularly challenging because
the min and max operations are not differentiable. Relying
on the asymptotic behavior of the counter-harmonic mean,
p-convolutional layers were proposed as a possible
workaround to this issue since they can perform
pseudo-dilation or pseudo-erosion operations (depending on
the value of their inner parameter p), and very promising
results were reported. In this work, we present two new
morphological layers based on the same principle as the
p-convolutional layer while circumventing its principal
drawbacks, and demonstrate their potential interest in
further implementations within deep convolutional neural
network architectures.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/kirszie.2021.dgmm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2021-02-16},
doi = {10.1007/978-3-030-76657-3_34}
}
@InProceedings{ kirszenberg.21.spin,
author = {Alexandre Kirszenberg and Antoine Martin and Hugo Moreau
and Etienne Renault},
title = {{Go2Pins}: {A} Framework for the {LTL} Verification of
{Go} Programs},
booktitle = {Proceedings of the 27th International SPIN Symposium on
Model Checking of Software (SPIN'21)},
year = {2021},
series = {Lecture Notes in Computer Science},
volume = {12864},
month = may,
address = {Aarhus, Denmark (online)},
publisher = {Springer, Cham},
pages = {140--156},
abstract = {We introduce Go2Pins, a tool that takes a program written
in Go and links it with two model-checkers: LTSMin [19] and
Spot [7]. Go2Pins is an effort to promote the integration
of both formal verification and testing inside
industrial-size projects. With this goal in mind, we
introduce black-box transitions, an efficient and scalable
technique for handling the Go runtime. This approach,
inspired by hardware verification techniques, allows
easy, automatic and efficient abstractions. Go2Pins also
handles basic concurrent programs through the use of a
dedicated scheduler. In this paper we demonstrate the usage
of Go2Pins over benchmarks inspired by industrial problems
and a set of LTL formulae. Even if Go2Pins is still at the
early stages of development, our results are promising and
show the benefits of using black-box transitions.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/kirszenberg.21.spin.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2021-06-08},
doi = {10.1007/978-3-030-84629-9_8}
}
@Article{ kuijf.19.tmi,
author = {H. J. Kuijf and J. M. Biesbroek and J. de Bresser and R.
Heinen and S. Andermatt and M. Bento and M. Berseth and M.
Belyaev and M. J. Cardoso and A. Casamitjana and D. L.
Collins and M. Dadar and A. Georgiou and M. Ghafoorian and
D. Jin and A. Khademi and J. Knight and H. Li and X.
Llad\'{o} and M. Luna and Q. Mahmood and R. McKinley and A.
Mehrtash and S. Ourselin and B. Park and H. Park and S. H.
Park and S. Pezold and \'{E}lodie Puybareau and L. Rittner
and C. H. Sudre and S. Valverde and V. Vilaplana and R.
Wiest and Yongchao Xu and Z. Xu and G. Zeng and J. Zhang
and G. Zheng and C. Chen and W. van der Flier and F.
Barkhof and M. A. Viergever and G. J. Biessels},
journal = {IEEE Transactions on Medical Imaging},
title = {Standardized Assessment of Automatic Segmentation of White
Matter Hyperintensities: {R}esults of the {WMH}
Segmentation Challenge},
year = {2019},
month = nov,
volume = {38},
number = {11},
pages = {2556--2568},
abstract = {Quantification of cerebral white matter hyperintensities
(WMH) of presumed vascular origin is of key importance in
many neurological research studies. Currently, measurements
are often still obtained from manual segmentations on brain
MR images, which is a laborious procedure. Automatic WMH
segmentation methods exist, but a standardized comparison
of the performance of such methods is lacking. We organized
a scientific challenge, in which developers could evaluate
their method on a standardized multi-center/-scanner image
dataset, giving an objective comparison: the WMH
Segmentation Challenge (https://wmh.isi.uu.nl/). Sixty
T1+FLAIR images from three MR scanners were released with
manual WMH segmentations for training. A test set of 110
images from five MR scanners was used for evaluation.
Segmentation methods had to be containerized and submitted
to the challenge organizers. Five evaluation metrics were
used to rank the methods: (1) Dice similarity coefficient,
(2) modified Hausdorff distance (95th percentile), (3)
absolute log-transformed volume difference, (4) sensitivity
for detecting individual lesions, and (5) F1-score for
individual lesions. Additionally, methods were ranked on
their inter-scanner robustness. Twenty participants
submitted their method for evaluation. This paper provides
a detailed analysis of the results. In brief, there is a
cluster of four methods that rank significantly better than
the other methods, with one clear winner. The inter-scanner
robustness ranking shows that not all methods generalize to
unseen scanners. The challenge remains open for future
submissions and provides a public platform for method
evaluation.},
keywords = {Image segmentation; Three-dimensional displays; Manuals;
White matter; Biomedical imaging; Radiology; Magnetic
resonance imaging (MRI); Brain; Evaluation and performance;
Segmentation},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/kuijf.19.tmi.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2019-04-10},
doi = {10.1109/TMI.2019.2905770}
}
@Article{ laplante.07.tocl,
author = {Sophie Laplante and Richard Lassaigne and Fr\'ed\'eric
Magniez and Sylvain Peyronnet and Michel de Rougemont},
title = {Probabilistic abstraction for model checking: an approach
based on property testing},
journal = {ACM Transactions on Computational Logic},
year = 2007,
lrdeprojects = {APMC},
month = aug,
volume = 8,
number = 4,
abstract = {The goal of model checking is to verify the correctness of
a given program, on all its inputs. The main obstacle, in
many cases, is the intractably large size of the program's
transition system. Property testing is a randomized method
to verify whether some fixed property holds on individual
inputs, by looking at a small random part of that input. We
join the strengths of both approaches by introducing a new
notion of probabilistic abstraction, and by extending the
framework of model checking to include the use of these
abstractions. Our abstractions map transition systems
associated with large graphs to small transition systems
associated with small random subgraphs. This reduces the
original transition system to a family of small, even
constant-size, transition systems. We prove that with high
probability, ``sufficiently'' incorrect programs will be
rejected ($\varepsilon$-robustness). We also prove that under a
certain condition (exactness), correct programs will never
be rejected (soundness). Our work applies to programs for
graph properties such as bipartiteness, $k$-colorability,
or any $\exists\forall$ first order graph properties. Our
main contribution is to show how to apply the ideas of
property testing to syntactic programs for such properties.
We give a concrete example of an abstraction for a program
for bipartiteness. Finally, we show that the relaxation of
the test alone does not yield transition systems small
enough to use the standard model checking method. More
specifically, we prove, using methods from communication
complexity, that the OBDD size remains exponential for
approximate bipartiteness.},
lrdenewsdate = {2005-11-21}
}
@InProceedings{ lassaigne.05.wollic,
author = {Richard Lassaigne and Sylvain Peyronnet},
title = {Probabilistic verification and approximation},
booktitle = {Proceedings of 12th Workshop on Logic, Language,
Information and Computation (Wollic)},
year = 2005,
volume = 143,
series = {Electronic Notes in Theoretical Computer Science},
pages = {101--114},
lrdeprojects = {APMC},
abstract = {Model checking is an algorithmic method allowing to
automatically verify if a system which is represented as a Kripke
model satisfies a given specification. Specifications are usually
expressed by formulas of temporal logic. The first objective of
this paper is to give an overview of methods able to verify
probabilistic systems. Models of such systems are labelled
discrete time Markov chains and specifications are expressed in
extensions of temporal logic by probabilistic operators. The
second objective is to focus on complexity of these methods and
to answer the question: can probabilistic verification be
efficiently approximated? In general, the answer is negative.
However, in many applications, the specification formulas can be
expressed in some positive fragment of linear time temporal
logic. In this paper, we show how some simple randomized
approximation algorithms can improve the efficiency of the
verification of such probabilistic specifications.},
lrdenewsdate = {2005-04-11}
}
@InProceedings{ lazzara.11.icdar,
author = {Guillaume Lazzara and Roland Levillain and Thierry
G\'eraud and Yann Jacquelet and Julien Marquegnies and
Arthur Cr\'epin-Leblond},
title = {The {SCRIBO} Module of the {Olena} Platform: a Free
Software Framework for Document Image Analysis},
booktitle = {Proceedings of the 11th International Conference on
Document Analysis and Recognition (ICDAR)},
year = 2011,
pages = {252--258},
address = {Beijing, China},
month = sep,
organization = {International Association for Pattern Recognition (IAPR)},
lrdeprojects = {Olena},
abstract = {Electronic documents are being more and more usable thanks
to better and more affordable network, storage and
computational facilities. But in order to benefit from
computer-aided document management, paper documents must be
digitized and analyzed. This task may be challenging at
several levels. Data may be of multiple types thus
requiring different adapted processing chains. The tools to
be developed should also take into account the needs and
knowledge of users, ranging from a simple graphical
application to a complete programming framework. Finally,
the data sets to process may be large. In this paper, we
expose a set of features that a Document Image Analysis
framework should provide to handle the previous issues. In
particular, a good strategy to address both flexibility and
efficiency issues is the Generic Programming (GP) paradigm.
These ideas are implemented as an open source module,
SCRIBO, built on top of Olena, a generic and efficient
image processing platform. Our solution features services
such as preprocessing filters, text detection, page
segmentation and document reconstruction (as XML, PDF or
HTML documents). This framework, composed of reusable
software components, can be used to create full-fledged
graphical applications, small utilities, or processing
chains to be integrated into third-party projects.},
keywords = {Document Image Analysis, Software Design, Reusability,
Free Software},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/lazzara.11.icdar.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/lazzara.11.icdar.poster.pdf},
lrdenewsdate = {2011-06-01}
}
@Article{ lazzara.13.ijdar,
author = {Guillaume Lazzara and Thierry G\'eraud},
title = {Efficient Multiscale {S}auvola's Binarization},
journal = {International Journal of Document Analysis and Recognition
(IJDAR)},
year = 2014,
volume = 17,
number = 2,
pages = {105--123},
month = jun,
publisher = {Springer Berlin Heidelberg},
doi = {10.1007/s10032-013-0209-0},
lrdeprojects = {Olena},
abstract = {This work is focused on the most commonly used
binarization method: Sauvola's. It performs relatively well
on classical documents. However, three main defects remain:
the window parameter of Sauvola's formula do not fit
automatically to the content; it is not robust to low
contrasts; it is non-invariant with respect to contrast
inversion. Thus, on documents such as magazines, the
content may not be retrieved correctly which is crucial for
indexing purpose. In this paper, we describe how to
implement an efficient multiscale implementation of
Sauvola's algorithm in order to guarantee good binarization
for both small and large objects inside a single document
without adjusting the window size to the content. We also
describe on how to implement it in an efficient way, step
by step. Pixel-based accuracy and OCR evaluations are
performed on more than 120 documents. This implementation
remains notably fast compared to the original algorithm.
For fixed parameters, text recognition rates and
binarization quality are equal or better than other methods
on small and medium text and is significantly improved on
large text. Thanks to the way it is implemented, it is also
more robust on textured text and on image binarization.
This implementation extends the robustness of Sauvola's
algorithm by making the results almost insensible to the
window size whatever the object sizes. Its properties make
it usable in full document analysis toolchains.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/lazzara.13.ijdar.pdf},
lrdeinc = {Publications/lazzara.13.ijdar.inc},
lrdenewsdate = {2013-04-25}
}
@InProceedings{ lazzara.14.das,
author = {Guillaume Lazzara and Thierry G\'eraud and Roland
Levillain},
title = {Planting, Growing and Pruning Trees: {C}onnected Filters
Applied to Document Image Analysis},
booktitle = {Proceedings of the 11th IAPR International Workshop on
Document Analysis Systems (DAS)},
year = 2014,
address = {Tours, France},
pages = {36--40},
month = apr,
organization = {IAPR},
lrdeprojects = {Olena},
abstract = {Mathematical morphology, when used in the field of
document image analysis and processing, is often limited to
some classical yet basic tools. The domain however features
a lesser-known class of powerful operators, called
connected filters. These operators present an important
property: they do not shift nor create contours. Most
connected filters are linked to a tree-based representation
of an image's contents, where nodes represent connected
components while edges express an inclusion relation. By
computing attributes for each node of the tree from the
corresponding connected component, then selecting nodes
according to an attribute-based criterion, one can either
filter or recognize objects in an image. This strategy is
very intuitive, efficient, easy to implement, and actually
well-suited to processing images of magazines. Examples of
applications include image simplification, smart
binarization, and object identification.},
lrdeinc = {Publications/201404-DAS-Ressources},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/lazzara.14.das.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/lazzara.14.das.slides.pdf},
lrdenewsdate = {2013-12-10},
doi = {10.1109/DAS.2014.36}
}
@InProceedings{ le-frioux.17.sat,
author = {Ludovic {Le Frioux} and Souheib Baarir and Julien Sopena
and Fabrice Kordon},
title = {{PaInleSS}: a Framework for Parallel {SAT} Solving},
booktitle = {Proceedings of the 20th International Conference on Theory
and Applications of Satisfiability Testing (SAT'17)},
year = 2017,
month = aug,
pages = {233--250},
volume = {10491},
series = {Lecture Notes in Computer Science},
publisher = {Springer, Cham},
abstract = {Over the last decade, parallel SAT solving has been widely
studied from both theoretical and practical aspects. There
are now numerous solvers that differ by parallelization
strategies, programming languages, concurrent programming,
involved libraries, etc. Hence, comparing the efficiency of
the theoretical approaches is a challenging task. Moreover,
the introduction of a new approach needs either a deep
understanding of the existing solvers, or to start from
scratch the implementation of a new tool. We present
PaInleSS: a framework to build parallel SAT solvers for
many-core environments. Thanks to its genericity and
modularity, it provides the implementation of basics for
parallel SAT solving like clause exchanges, Portfolio and
Divide-and-Conquer strategies. It also enables users to
easily create their own parallel solvers based on new
strategies. Our experiments show that our framework
compares well with some of the best state-of-the-art
solvers.},
lrdenewsdate = {2017-06-30},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/le-frioux.17.sat.pdf},
lrdekeywords = {parallel, satisfiability, clause sharing, portfolio, cube
and conquer}
}
@PhDThesis{ le-frioux.19.phd,
author = {Ludovic {Le Frioux}},
title = {Towards more efficient parallel {SAT} solving},
school = {Sorbonne Universit\'e},
year = 2019,
address = {Paris, France},
month = jul,
abstract = {Boolean SATisfiability has been used successfully in many
applicative contexts. This is due to the capability of
modern SAT solvers to solve complex problems involving
millions of variables. Most SAT solvers have long been
sequential and based on the CDCL algorithm. The emergence
of many-core machines opens new possibilities in this
domain. There are numerous parallel SAT solvers that differ
by their strategies, programming languages, etc. Hence,
comparing the efficiency of the theoretical approaches in a
fair way is a challenging task. Moreover, the introduction
of a new approach needs a deep understanding of the
existing solvers' implementations. We present Painless: a
framework to build parallel SAT solvers for many-core
environments. Thanks to its genericness and modularity, it
provides the implementation of basics for parallel SAT
solving. It also enables users to easily create their
parallel solvers based on new strategies. Painless allowed
to build and test existing strategies by using different
chunk of solutions present in the literature. We were able
to easily mimic the behaviour of three state-of-the-art
solvers by factorising many parts of their implementations.
The efficiency of Painless was highlighted as these
implementations are at least efficient as the original
ones. Moreover, one of our solvers won the SAT
Competition'18. Going further, Painless enabled to conduct
fair experiments in the context of divide-and-conquer
solvers, and allowed us to highlight original compositions
of strategies performing better than already known ones.
Also, we were able to create and test new original
strategies exploiting the modular structure of SAT
instances.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/le-frioux.19.phd.pdf},
lrdenewsdate = {2019-07-03},
lrdekeywords = {parallelisation, Boolean satisfiability, portfolio,
divide-and-conquer, clause sharing}
}
@InProceedings{ le-frioux.19.tacas,
author = {Ludovic {Le Frioux} and Souheib Baarir and Julien Sopena
and Fabrice Kordon},
title = {Modular and Efficient Divide-and-Conquer {SAT} Solver on
Top of the {Painless} Framework},
booktitle = {Proceedings of the 25th International Conference on Tools
and Algorithms for the Construction and Analysis of Systems
(TACAS'19)},
year = 2019,
month = apr,
volume = {11427},
pages = {135--151},
series = {Lecture Notes in Computer Science},
publisher = {Springer, Cham},
abstract = {Over the last decade, parallel SATisfiability solving has
been widely studied from both theoretical and practical
aspects. There are two main approaches. First,
divide-and-conquer (D\&C) splits the search space, each
solver being in charge of a particular subspace. The second
one, portfolio launches multiple solvers in parallel, and
the first to find a solution ends the computation. However
although D\&C based approaches seem to be the natural way
to work in parallel, portfolio ones experimentally provide
better performances. An explanation resides on the
difficulties to use the native formulation of the SAT
problem (i.e., the CNF form) to compute an a priori good
search space partitioning (i.e., all parallel solvers
process their subspaces in comparable computational time).
To avoid this, dynamic load balancing of the search
subspaces is implemented. Unfortunately, this is difficult
to compare load balancing strategies since state-of-the-art
SAT solvers appropriately dealing with these aspects are
hardly adaptable to various strategies than the ones they
have been designed for. This paper aims at providing a way
to overcome this problem by proposing an implementation and
evaluation of different types of divide-and-conquer
inspired from the literature. These are relying on the
Painless framework, which provides concurrent facilities to
elaborate such parallel SAT solvers. Comparison of the
various strategies are then discussed.},
lrdenewsdate = {2019-02-13},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/le-frioux.19.tacas.pdf},
lrdekeywords = {Divide-and-conquer, parallel satisfiability, tool}
}
@InProceedings{ le-quoc.07.ntms,
author = {Cuong {Le Quoc} and Patrick Bellot and Akim Demaille},
title = {On the security of quantum networks: a proposal framework
and its capacity},
booktitle = {Proceedings of the 2007 International Conference on New
Technologies, Mobility and Security (NTMS'07)},
year = 2007,
address = {Paris, France},
month = may,
abstract = {In large Quantum Key Distribution (QKD)-based networks,
intermediate nodes are necessary because of the short
length of QKD links. They have tendency to be used more
than classical networks. A realistic assumption is that
there are eavesdropping operations in these nodes without
knowledge of legitimate network participants. We develop a
QKD-based network framework. We present a percolation-based
approach to discuss about conditions of extremely high
secret key transmission. We propose also an adaptive
stochastic routing algorithm that helps on protecting keys
from reasonable eavesdroppers in a dense QKD network. We
show that under some assumptions, one could prevent
eavesdroppers from sniffing the secrets with an arbitrarily
large probability.},
lrdekeywords = {Software engineering},
lrdenewsdate = {2007-03-10}
}
@InProceedings{ le-quoc.07.rivf,
author = {Cuong {Le Quoc} and Patrick Bellot and Akim Demaille},
title = {Stochastic routing in large grid-shaped quantum networks},
booktitle = {Proceedings of the Fifth International Conference on
Computer Sciences, Research, Innovation and Vision for the
Future (RIVF'07)},
year = 2007,
address = {Hanoi, Vietnam},
month = mar,
isbn = {1-4244-0695-1},
abstract = {This paper investigates the problem of secret key
transmissions for an arbitrary Alice-Bob pair in Quantum
Key Distribution-based networks. We develop a realistic
QKD-based network framework and we show that the key
transmission problem on such a framework can be considered
as a variant of the classical percolation problem. We also
present an adaptive stochastic routing algorithm protect
from inevitable eavesdroppers. Simulations were carried out
not only to validate our approach, but also to compute
critical parameters ensuring security. These results show
that large quantum networks with eavesdroppers do provide
security.},
keywords = {Quantum Key Distribution, QKD network, percolation theory,
stochastic routing},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/le-quoc.07.rivf.pdf},
lrdekeywords = {Software engineering},
lrdenewsdate = {2006-12-04}
}
@InProceedings{ le-quoc.08.ispec,
author = {Cuong {Le Quoc} and Patrick Bellot and Akim Demaille},
title = {Towards the World-Wide Quantum Network},
booktitle = {Proceedings of the 4th Information Security Practice and
Experience Conference (ISPEC'08)},
year = 2008,
address = {Sydney, Australia},
month = apr,
abstract = {Quantum Key Distribution (QKD) networks are of much
interest due to their capacity of providing extremely high
security keys to network participants. Most QKD network
studies so far focus on trusted models where all the
network nodes are assumed to be perfectly secured. This
restricts QKD networks to be small. In this paper, we first
develop a novel model dedicated to large-scale QKD
networks, some of whose nodes could be eavesdropped
secretly. Then, we investigate the key transmission problem
in the new model by an approach based on percolation theory
and stochastic routing. Analyses show that under computable
conditions large-scale QKD networks could protect secret
keys with an extremely high probability. Simulations
validate our results.},
keywords = {Quantum Key Distribution, QKD network, percolation theory,
stochastic routing},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/le-quoc.08.ispec.pdf},
lrdenewsdate = {2008-01-25}
}
@TechReport{ lefebvre.04.tr,
author = {Sylvain Lefebvre and J\'er\^ome Darbon and Fabrice Neyret},
title = {Unified texture management for arbitrary meshes},
institution = {INRIA Rh\^one-Alpes},
year = 2004,
number = {RR-5210},
address = {France},
month = may,
lrdeprojects = {Olena},
abstract = {Video games and simulators commonly use very detailed
textures, whose cumulative size is often larger than the
GPU memory. Textures may be loaded progressively, but
dynamically loading and transferring this large amount of
data in GPU memory results in loading delays and poor
performance. Therefore, managing texture memory has become
an important issue. While this problem has been (partly)
addressed early for the specific case of terrain rendering,
there is no generic texture management system for arbitrary
meshes. We propose such a system, implemented on today's
GPUs, which unifies classical solutions aimed at reducing
memory transfer: progressive loading, texture compression,
and caching strategies. For this, we introduce a new
algorithm -- running on GPU -- to solve the major
difficulty of detecting which parts of the texture are
required for rendering. Our system is based on three
components manipulating a tile pool which stores texture
data in GPU memory. First, the Texture Load Map determines
at every frame the appropriate list of texture tiles (i.e.
location and MIP-map level) to render from the current
viewpoint. Second, the Texture Cache manages the tile pool.
Finally, the Texture Producer loads and decodes required
texture tiles asynchronously in the tile pool. Decoding of
compressed texture data is implemented on GPU to minimize
texture transfer. The Texture Producer can also generate
procedural textures. Our system is transparent to the user,
and the only parameter that must be supplied at runtime is
the current viewpoint. No modifications of the mesh are
required. We demonstrate our system on large scenes
displayed in real time. We show that it achieves
interactive frame rates even in low-memory low-bandwidth
situations.},
lrdenewsdate = {2004-04-26}
}
@InProceedings{ lesage.06.isvc,
author = {David Lesage and J\'er\^ome Darbon and Ceyhun Burak Akg\"ul},
title = {An Efficient Algorithm for Connected Attribute Thinnings
and Thickenings},
booktitle = {Proceedings of the second International Conference on
Visual Computing},
year = 2006,
address = {Lake Tahoe, Nevada, USA},
month = nov,
lrdeprojects = {Olena},
pages = {393--404},
volume = 4292,
series = {Lecture Notes in Computer Science},
publisher = {Springer-Verlag},
abstract = {Connected attribute filters are anti-extensive
morphological operators widely used for their ability of
simplifying the image without moving its contours. In this
paper, we present a fast, versatile and easy-to-implement
algorithm for grayscale connected attribute thinnings and
thickenings, a subclass of connected filters for the wide
range of non-increasing attributes. We show that our
algorithm consumes less memory and is computationally more
efficient than other available methods on natural images.},
lrdekeywords = {Image},
lrdenewsdate = {2006-08-09}
}
@Misc{ levillain.05.olenaposter,
author = {Roland Levillain},
title = {{Olena} {Project} poster},
month = oct,
year = 2005,
lrdeposter = {http://www.lrde.epita.fr/dload/posters/poster-olena.pdf},
lrdeprojects = {Olena}
}
@Misc{ levillain.05.tigerposter,
author = {Roland Levillain},
title = {{Tiger} {Project} poster},
month = oct,
year = 2005,
lrdeposter = {http://www.lrde.epita.fr/dload/posters/poster-tiger.pdf},
lrdeprojects = {Tiger}
}
@InProceedings{ levillain.09.ismm,
author = {Roland Levillain and Thierry G\'eraud and Laurent Najman},
title = {{Milena}: Write Generic Morphological Algorithms Once, Run
on Many Kinds of Images},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the Ninth International
Symposium on Mathematical Morphology (ISMM)},
year = 2009,
editor = {Michael H. F. Wilkinson and Jos B. T. M. Roerdink},
volume = 5720,
series = {Lecture Notes in Computer Science},
pages = {295--306},
address = {Groningen, The Netherlands},
month = aug,
publisher = {Springer Berlin / Heidelberg},
lrdeprojects = {Olena},
abstract = {We present a programming framework for discrete
mathematical morphology centered on the concept of
genericity. We show that formal definitions of
morphological algorithms can be translated into actual
code, usable on virtually any kind of compatible images,
provided a general definition of the concept of image is
given. This work is implemented in Milena, a generic,
efficient, and user-friendly image processing library.},
keywords = {mathematical morphology, image processing operator,
genericity, programming},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/levillain.09.ismm.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/levillain.09.ismm.slides.pdf},
lrdenewsdate = {2009-04-09}
}
@InProceedings{ levillain.10.icip,
author = {Roland Levillain and Thierry G\'eraud and Laurent Najman},
title = {Why and How to Design a Generic and Efficient Image
Processing Framework: The Case of the {Milena} Library},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
year = 2010,
pages = {1941--1944},
address = {Hong Kong},
month = sep,
lrdeprojects = {Olena},
abstract = {Most image processing frameworks are not generic enough to
provide true reusability of data structures and algorithms.
In fact, genericity allows users to write and experiment
virtually any method on any compatible input(s). In this
paper, we advocate the use of generic programming in the
design of image processing software, while preserving
performances close to dedicated code. The implementation of
our proposal, Milena, a generic and efficient library,
illustrates the benefits of our approach.},
keywords = {Genericity, Image Processing, Software Design,
Reusability, Efficiency},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/levillain.10.icip.pdf},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/levillain.10.icip.poster.pdf},
lrdenewsdate = {2010-05-26}
}
@InProceedings{ levillain.10.wadgmm,
author = {Roland Levillain and Thierry G\'eraud and Laurent Najman},
title = {Writing Reusable Digital Geometry Algorithms in a Generic
Image Processing Framework},
booktitle = {Proceedings of the Workshop on Applications of Digital
Geometry and Mathematical Morphology (WADGMM)},
year = 2010,
pages = {96--100},
address = {Istanbul, Turkey},
month = aug,
url = {http://mdigest.jrc.ec.europa.eu/wadgmm2010/},
lrdeprojects = {Olena},
abstract = {Digital Geometry software should reflect the generality of
the underlying mathematics: mapping the latter to the
former requires genericity. By designing generic solutions,
one can effectively reuse digital geometry data structures
and algorithms. We propose an image processing framework
centered on the Generic Programming paradigm in which an
algorithm on the paper can be turn into a single code,
written once and usable with various input types. This
approach enables users to design and implement new methods
at a lower cost, try cross-domain experiments and help
generalize results.},
keywords = {Generic Programming, Interface, Skeleton, Complex},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/levillain.10.wadgmm.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/levillain.10.wadgmm.slides.pdf},
lrdenewsdate = {2012-07-30}
}
@InProceedings{ levillain.11.gretsi,
author = {Roland Levillain and Thierry G\'eraud and Laurent Najman},
title = {Une approche g\'en\'erique du logiciel pour le traitement
d'images pr\'eservant les performances},
booktitle = {Proceedings of the 23rd Symposium on Signal and Image
Processing (GRETSI)},
category = {national},
year = 2011,
address = {Bordeaux, France},
month = sep,
note = {In French.},
lrdeprojects = {Olena},
abstract = {De plus en plus d'outils logiciels modernes pour le
traitement d'images sont con\c{c}us en prenant en compte le
probl\`eme de la g\'en\'ericit\'e du code, c'est-\`a-dire
la possibilit\'e d'\'ecrire des algorithmes
r\'eutilisables, compatibles avec de nombreux types
d'entr\'ees. Cependant, ce choix de conception se fait
souvent au d\'etriment des performances du code
ex\'ecut\'e. Du fait de la grande vari\'et\'e des types
d'images existants et de la n\'ecessit\'e d'avoir des
impl\'ementations rapides, g\'en\'ericit\'e et performance
apparaissent comme des qualit\'es essentielles du logiciel
en traitement d'images. Cet article pr\'esente une approche
pr\'eservant les performances dans un framework logiciel
g\'en\'erique tirant parti des caract\'eristiques des types
de donn\'ees utilis\'es. Gr\^ace \`a celles-ci, il est
possible d'\'ecrire des variantes d'algorithmes
g\'en\'eriques offrant un compromis entre g\'en\'ericit\'e
et performance. Ces alternatives sont capables de
pr\'eserver une partie des aspects g\'en\'eriques d'origine
tout en apportant des gains substantiels \`a l'ex\'ecution.
D'apr\`es nos essais, ces optimisations g\'en\'eriques
fournissent des performances supportant la comparaison avec
du code d\'edi\'e, allant parfois m\^eme jusqu'\`a
surpasser des routines optimis\'ees manuellement.},
lrdeposter = {http://www.lrde.epita.fr/dload/papers/levillain.11.gretsi.poster.pdf},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/levillain.11.gretsi.pdf},
lrdenewsdate = {2011-05-13}
}
@PhDThesis{ levillain.11.phd,
author = {Roland Levillain},
title = {Towards a Software Architecture for Generic Image
Processing},
school = {Universit\'e Paris-Est},
year = 2011,
address = {Marne-la-Vall\'ee, France},
month = nov,
lrdeprojects = {Olena},
abstract = {In the context of software engineering for image
processing (IP), we consider the notion of reusability of
algorithms. In many software tools, an algorithm's
implementation often depends on the type of processed data.
In a broad definition, discrete digital images may have
various forms---classical 2D images, 3D volumes,
non-regular graphs, cell complexes, and so on---thus
leading to a combinatorial explosion of the theoretical
number of implementations.\\ Generic programming (GP) is a
framework suited to the development of reusable software
tools. We present a programming paradigm based on GP
designed for the creation of scientific software such as IP
tools. This approach combines the benefits of reusability,
expressive power, extensibility, and efficiency.\\ We then
propose a software architecture for IP using this
programming paradigm based on a generic IP library. The
foundations of this framework define essential IP concepts,
enabling the development of algorithms compatible with many
image types.\\ We finally present a strategy to build
high-level tools on top of this library, such as bridges to
dynamic languages or graphical user interfaces. This
mechanism has been designed to preserve the genericity and
efficiency of the underlying software tools, while making
them simpler to use and more flexible.},
lrdepaper = {http://www.lrde.epita.fr/~roland/phd/levillain-phd.pdf}
}
@InProceedings{ levillain.12.wadgmm-lncs,
oldkeys = {levillain.12.lncs},
author = {Roland Levillain and Thierry G\'eraud and Laurent Najman},
title = {Writing Reusable Digital Topology Algorithms in a Generic
Image Processing Framework},
booktitle = {WADGMM 2010},
year = 2012,
editor = {Ullrich K\"othe and Annick Montanvert and Pierre Soille},
volume = 7346,
series = {Lecture Notes in Computer Science},
pages = {140--153},
publisher = {Springer-Verlag Berlin Heidelberg},
lrdeprojects = {Olena},
abstract = {Digital Topology software should reflect the generality of
the underlying mathematics: mapping the latter to the
former requires genericity. By designing generic solutions,
one can effectively reuse digital topology data structures
and algorithms. We propose an image processing framework
focused on the Generic Programming paradigm in which an
algorithm on the paper can be turned into a single code,
written once and usable with various input types. This
approach enables users to design and implement new methods
at a lower cost, try cross-domain experiments and help
generalize results.},
keywords = {Generic Programming, Interface, Skeleton, Complex},
annote = {This paper is an extended version of levillain.10.wadgmm
(201008-WADGMM).},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/levillain.12.wadgmm-lncs.pdf},
lrdenewsdate = {2012-08-08}
}
@InProceedings{ levillain.14.ciarp,
author = {Roland Levillain and Thierry G\'eraud and Laurent Najman
and Edwin Carlinet},
title = {Practical Genericity: {W}riting Image Processing
Algorithms Both Reusable and Efficient},
booktitle = {Progress in Pattern Recognition, Image Analysis, Computer
Vision, and Applications -- Proceedings of the 19th
Iberoamerican Congress on Pattern Recognition (CIARP)},
address = {Puerto Vallarta, Mexico},
month = nov,
year = {2014},
pages = {70--79},
editor = {Eduardo Bayro and Edwin Hancock},
publisher = {Springer-Verlag},
series = {Lecture Notes in Computer Science},
volume = {8827},
lrdeprojects = {Olena},
abstract = {An important topic for the image processing and pattern
recognition community is the construction of open source
and efficient libraries. An increasing number of software
frameworks are said to be generic: they allow users to
write reusable algorithms compatible with many input image
types. However, this design choice is often made at the
expense of performance. We present an approach to preserve
efficiency in a generic image processing framework, by
leveraging data types features. Variants of generic
algorithms taking advantage of image types properties can
be defined, offering an adjustable trade-off between
genericity and efficiency. Our experiments show that these
generic optimizations can match dedicated code in terms of
execution times, and even sometimes perform better than
routines optimized by hand.},
keywords = {Generic Programming, Image Processing, Performance,
Olena},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/levillain.14.ciarp.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/levillain.14.ciarp.slides.pdf},
lrdenewsdate = {2014-09-10},
doi = {10.1007/978-3-319-12568-8_9}
}
@PhDThesis{ linard.09.phd,
author = {Alban Linard},
title = {S\'emantique param\'etrable des Diagrammes de
D\'ecision : une d\'emarche vers l'unification},
school = {Universit\'e Pierre et Marie Curie - Paris VI},
year = 2009,
address = {Paris, France},
month = nov
}
@InProceedings{ linard.10.acsd,
  author       = {Alban Linard and Emmanuel Paviot-Adet and Fabrice Kordon
                  and Didier Buchs and Samuel Charron},
  title        = {{polyDD}: Towards a Framework Generalizing Decision
                  Diagrams},
  booktitle    = {Proceedings of the 10th International Conference on
                  Application of Concurrency to System Design (ACSD)},
  pages        = {124--133},
  address      = {Braga, Portugal},
  year         = 2010,
  month        = jun,
  publisher    = {IEEE Computer Society},
  abstract     = {Decision Diagrams are now widely used in model checking as
                  extremely compact representations of state spaces. Many
                  Decision Diagram categories have been developed over the
                  past twenty years based on the same principles. Each one
                  targets a specific domain with its own characteristics.
                  Moreover, each one provides its own definition. It prevents
                  sharing concepts and techniques between these structures.
                  This paper aims to propose a basis for a common Framework
                  for Decision Diagrams. It should help users of this
                  technology to define new Decision Diagram categories thanks
                  to a simple specification mechanism called Controller. This
                  enables the building of efficient Decision Diagrams
                  dedicated to a given problem.},
  lrdeprojects = {Verification}
}
@InProceedings{ lombardy.03.ciaa,
  author       = {Sylvain Lombardy and Rapha{\"e}l Poss and Yann
                  R{\'e}gis-Gianas and Jacques Sakarovitch},
  title        = {Introducing {V}aucanson},
  booktitle    = {Proceedings of Implementation and Application of Automata,
                  8th International Conference (CIAA'03)},
  pages        = {96--107},
  year         = 2003,
  editor       = {Oscar H. Ibarra and Zhe Dang},
  publisher    = {Springer},
  volume       = 2759,
  series       = {Lecture Notes in Computer Science},
  address      = {Santa Barbara, CA, USA},
  month        = jul,
  lrdeprojects = {Vaucanson},
  abstract     = {This paper reports on a new software platform dedicated to
                  the computation with automata and transducers, called
                  Vaucanson, the main feature of which is the capacity of
                  dealing with automata whose labels may belong to various
                  algebraic structures. The paper successively shows how
                  Vaucanson allows to program algorithms on automata in a way
                  which is very close to the mathematical expression of the
                  algorithm, describes some features of the Vaucanson
                  platform, including the fact that the very rich data
                  structure used to implement automata does not weight too
                  much on the performance and finally explains the main
                  issues of the programming design that allow to achieve both
                  genericity and efficiency.},
  lrdenewsdate = {2003-05-05}
}
@Article{ lombardy.04.tcs,
  author       = {Sylvain Lombardy and Yann R{\'e}gis-Gianas and Jacques
                  Sakarovitch},
  title        = {Introducing {V}aucanson},
  journal      = {Theoretical Computer Science},
  volume       = 328,
  year         = 2004,
  pages        = {77--96},
  month        = nov,
  lrdeprojects = {Vaucanson},
  abstract     = {This paper reports on a new software platform called
                  VAUCANSON and dedicated to the computation with automata
                  and transducers. Its main feature is the capacity of
                  dealing with automata whose labels may belong to various
                  algebraic structures. The paper successively describes the
                  main features of the VAUCANSON platform, including the fact
                  that the very rich data structure used to implement
                  automata does not weigh too much on the performance, shows
                  how VAUCANSON allows to program algorithms on automata in a
                  way which is very close to the mathematical expression of
                  the algorithm and finally explains the main choices of the
                  programming design that enable to achieve both genericity
                  and efficiency.}
}
@InProceedings{ maes.03.dpcool,
  author       = {Francis Maes},
  title        = {Program templates: expression templates applied to program
                  evaluation},
  booktitle    = {Proceedings of the Workshop on Declarative Programming in
                  the Context of Object-Oriented Languages (DP-COOL; in
                  conjunction with PLI)},
  year         = 2003,
  address      = {Uppsala, Sweden},
  number       = {FZJ-ZAM-IB-2003-10},
  pages        = {67--86},
  editor       = {J\"org Striegnitz and Kei Davis},
  month        = aug,
  series       = {John von Neumann Institute for Computing (NIC)},
  lrdeprojects = {Software},
  abstract     = {The C++ language provides a two-layer execution model:
                  static execution of meta-programs and dynamic execution of
                  resulting programs. The Expression Templates technique
                  takes advantage of this dual execution model through the
                  construction of C++ types expressing simple arithmetic
                  formulas. Our intent is to extend this technique to a whole
                  programming language. The Tiger language is a small,
                  imperative language with types, variables, arrays, records,
                  flow control structures and nested functions. The first
                  step is to show how to express a Tiger program as a C++
                  type. The second step concerns operational analysis which
                  is done through the use of meta-programs. Finally an
                  implementation of our Tiger evaluator is proposed. Our
                  technique goes much deeper than the Expression Templates
                  one. It shows how the generative power of C++
                  meta-programming can be used in order to compile abstract
                  syntax trees of a fully featured programming language.},
  lrdenewsdate = {2003-07-31}
}
@InProceedings{ maes.04.mpool,
  author       = {Francis Maes},
  title        = {Metagene, a {C++} meta-program generation tool},
  booktitle    = {Proceedings of the Workshop on Multiple Paradigm with OO
                  Languages (MPOOL; in conjunction with ECOOP)},
  address      = {Oslo, Norway},
  year         = 2004,
  month        = jun,
  abstract     = {The C++ language offers a two layer evaluation model.
                  Thus, it is possible to evaluate a program in two steps:
                  the so-called static and dynamic evaluations. Static
                  evaluation is used for reducing the amount of work done at
                  execution-time. Programs executed statically (called
                  metaprograms) are written in C++ through an intensive use
                  of template classes. Due to the complexity of these
                  structures, writing, debugging and maintaining C++
                  meta-programs is a difficult task. Metagene is a program
                  transformation tool which simplifies the development of
                  such programs. Due to the similarities between C++
                  meta-programming and functional programming, the input
                  language of Metagene is an ML language. Given a functional
                  input program, Metagene outputs the corresponding C++
                  meta-program expressed using template classes.},
  lrdeprojects = {Software},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/maes-04-mtg.pdf},
  lrdenewsdate = {2004-05-25}
}
@InProceedings{ menouer.17.iccs,
  author       = {Tarek Menouer and Souheib Baarir},
  title        = {Parallel Learning Portfolio-Based Solvers},
  booktitle    = {Proceedings of the International Conference on
                  Computational Science (ICCS)},
  month        = jun,
  address      = {Zurich, Switzerland},
  pages        = {335--344},
  year         = {2017},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/menouer.17.iccs.pdf},
  abstract     = {Exploiting multi-core architectures is a way to tackle the
                  CPU time consumption when solving SATisfiability (SAT)
                  problems. Portfolio is one of the main techniques that
                  implements this principle. It consists in making several
                  solvers competing, on the same problem, and the winner will
                  be the first that answers. In this work, we improved this
                  technique by using a learning schema, namely the
                  Exploration-Exploitation using Exponential weight (EXP3),
                  that allows smart resource allocations. Our contribution is
                  adapted to situations where we have to solve a bench of SAT
                  instances issued from one or several sequence of problems.
                  Our experiments show that our approach achieves good
                  results.},
  lrdenewsdate = {2017-06-01},
  lrdeprojects = {Spot}
}
@InProceedings{ menouer.17.pdp,
  author       = {Tarek Menouer and Souheib Baarir},
  title        = {Parallel Satisfiability Solver Based on Hybrid
                  Partitioning Method},
  booktitle    = {Proceedings of the 25th Euromicro International Conference
                  on Parallel, Distributed and Network-based Processing
                  (PDP)},
  address      = {St. Petersburg, Russia},
  month        = mar,
  pages        = {54--60},
  year         = {2017},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/menouer.17.pdp.pdf},
  abstract     = {This paper presents a hybrid partitioning method used to
                  improve the performance of solving a Satisfiability (SAT)
                  problems. The principle of our approach consist firstly to
                  apply a static partitioning to decompose the search tree in
                  finite set of disjoint sub-trees, than assign each sub-tree
                  to one computing core. However it is not easy to choose the
                  relevant branching variables to partition the search tree.
                  We propose in this context to partition the search tree
                  according to the variables that occur more frequently then
                  others. The advantage of this method is that it gives a
                  good disjoint sub-trees. However, the drawback is the
                  imbalance load between all computing cores of the system.
                  To overcome this drawback, we propose as novelty to extend
                  the static partitioning by combining with a new dynamic
                  partitioning that assure a good load balancing between
                  cores. Each time a new waiting core is detected, the
                  dynamic partitioning selects automatically using an
                  estimation function the computing core which has the most
                  work to do in order to partition dynamically its sub-tree
                  in two parts. It keeps one part and gives the second part
                  to the waiting core. Preliminary result show that a good
                  speedup is achieved using our hybrid method.},
  lrdenewsdate = {2017-03-01},
  lrdeprojects = {Spot}
}
@InProceedings{ metin.18.tacas,
  author       = {Hakan Metin and Souheib Baarir and Maximilien Colange and
                  Fabrice Kordon},
  title        = {{CDCLSym}: Introducing Effective Symmetry Breaking in
                  {SAT} Solving},
  booktitle    = {Proceedings of the 24th International Conference on Tools
                  and Algorithms for the Construction and Analysis of Systems
                  (TACAS'18)},
  series       = {Lecture Notes in Computer Science},
  volume       = {10805},
  pages        = {99--114},
  address      = {Thessaloniki, Greece},
  year         = 2018,
  month        = apr,
  publisher    = {Springer},
  abstract     = {SAT solvers are now widely used to solve a large variety
                  of problems, including formal verification of systems. SAT
                  problems derived from such applications often exhibit
                  symmetry properties that could be exploited to speed up
                  their solving. Static symmetry breaking is so far the most
                  popular approach to take advantage of symmetries. It relies
                  on a symmetry preprocessor which augments the initial
                  problem with constraints that force the solver to consider
                  only a few configurations among the many symmetric ones.
                  This paper presents a new way to handle symmetries, that
                  avoids the main problem of the current static approaches:
                  the prohibitive cost of the preprocessing phase. Extensive
                  experiments on the benchmarks of last six SAT competitions
                  show that our approach is competitive with the best
                  state-of-the-art static symmetry breaking solutions.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/metin.18.tacas.pdf},
  lrdenewsdate = {2018-01-05},
  lrdeprojects = {Spot}
}
@InProceedings{ michaud.15.spin,
  author       = {Thibaud Michaud and Alexandre Duret-Lutz},
  title        = {Practical Stutter-Invariance Checks for {$\omega$}-Regular
                  Languages},
  booktitle    = {Proceedings of the 22nd International SPIN Symposium on
                  Model Checking of Software (SPIN'15)},
  year         = 2015,
  month        = aug,
  pages        = {84--101},
  volume       = 9232,
  series       = {Lecture Notes in Computer Science},
  publisher    = {Springer},
  abstract     = {We propose several automata-based constructions that check
                  whether a specification is stutter-invariant. These
                  constructions assume that a specification and its negation
                  can be translated into B{\"u}chi automata, but aside from
                  that, they are independent of the specification formalism.
                  These transformations were inspired by a construction due
                  to Holzmann and Kupferman, but that we broke down into two
                  operations that can have different realizations, and that
                  can be combined in different ways. As it turns out,
                  implementing only one of these operations is needed to
                  obtain a functional stutter-invariant check. Finally we
                  have implemented these techniques in a tool so that users
                  can easily check whether an LTL or PSL formula is
                  stutter-invariant.},
  lrdeprojects = {Spot},
  lrdenewsdate = {2015-06-15},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/michaud.15.spin.pdf},
  doi          = {10.1007/978-3-319-23404-5_7}
}
@InProceedings{ michaud.18.synt,
  author       = {Thibaud Michaud and Maximilien Colange},
  title        = {Reactive Synthesis from {LTL} Specification with {S}pot},
  booktitle    = {Proceedings of the 7th Workshop on Synthesis, SYNT@CAV
                  2018},
  series       = {Electronic Proceedings in Theoretical Computer Science},
  year         = 2018,
  annote       = {The volume and pages fields of this entry contained the
                  placeholder value "xx"; they are omitted until the final
                  publication data is available.},
  abstract     = {We present \texttt{ltlsynt}, a new tool for reactive
                  synthesis from LTL specifications. It relies on the
                  efficiency of Spot to translate the input LTL specification
                  to a deterministic parity automaton. The latter yields a
                  parity game, which we solve with Zielonka's recursive
                  algorithm.
                  The approach taken in \texttt{ltlsynt} was widely believed
                  to be impractical, due to the double-exponential size of
                  the parity game, and to the open status of the complexity
                  of parity games resolution. \texttt{ltlsynt} ranked second
                  of its track in the $2017$ edition of the SYNTCOMP
                  competition. This demonstrates the practical applicability
                  of the parity game approach, when backed by efficient
                  manipulations of $\omega$-automata such as the ones
                  provided by Spot. We present our approach and report on our
                  experimental evaluation of \texttt{ltlsynt} to better
                  understand its strengths and weaknesses.},
  lrdeprojects = {Spot},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/michaud.18.synt.pdf},
  lrdenewsdate = {2018-06-07}
}
@InProceedings{ minetto.10.icip,
  author       = {Rodrigo Minetto and Nicolas Thome and Matthieu Cord and
                  Jonathan Fabrizio and Beatriz Marcotegui},
  title        = {{SnooperText}: A Multiresolution System for Text Detection
                  in Complex Visual Scenes},
  booktitle    = {Proceedings of the IEEE International Conference on Image
                  Processing (ICIP)},
  pages        = {3861--3864},
  year         = 2010,
  address      = {Hong Kong},
  month        = sep,
  abstract     = {Text detection in natural images remains a very
                  challenging task. For instance, in an urban context, the
                  detection is very difficult due to large variations in
                  terms of shape, size, color, orientation, and the image may
                  be blurred or have irregular illumination, etc. In this
                  paper, we describe a robust and accurate multiresolution
                  approach to detect and classify text regions in such
                  scenarios. Based on generation/validation paradigm, we
                  first segment images to detect character regions with a
                  multiresolution algorithm able to manage large character
                  size variations. The segmented regions are then filtered
                  out using shapebased classification, and neighboring
                  characters are merged to generate text hypotheses. A
                  validation step computes a region signature based on
                  texture analysis to reject false positives. We evaluate our
                  algorithm in two challenging databases, achieving very good
                  results},
  keywords     = {Text detection, multiresolution, image segmentation,
                  machine learning},
  lrdeprojects = {Olena}
}
@InProceedings{ morel.16.embc,
  author       = {Baptiste Morel and Yongchao Xu and Alessio Virzi and
                  Thierry G\'eraud and Catherine Adamsbaum and Isabelle
                  Bloch},
  title        = {A Challenging Issue: {D}etection of White Matter
                  Hyperintensities in Neonatal Brain {MRI}},
  booktitle    = {Proceedings of the Annual International Conference of the
                  IEEE Engineering in Medicine and Biology Society},
  pages        = {93--96},
  address      = {Orlando, Florida, USA},
  year         = {2016},
  month        = aug,
  doi          = {10.1109/EMBC.2016.7590648},
  abstract     = {The progress of magnetic resonance imaging (MRI) allows
                  for a precise exploration of the brain of premature infants
                  at term equivalent age. The so-called DEHSI (diffuse
                  excessive high signal intensity) of the white matter of
                  premature brains remains a challenging issue in terms of
                  definition, and thus of interpretation. We propose a
                  semi-automatic detection and quantification method of white
                  matter hyperintensities in MRI relying on morphological
                  operators and max-tree representations, which constitutes a
                  powerful tool to help radiologists to improve their
                  interpretation. Results show better reproducibility and
                  robustness than interactive segmentation.},
  lrdekeywords = {Image},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/morel.16.embc.pdf},
  lrdenewsdate = {2016-05-20}
}
@InProceedings{ movn.18.das,
  author       = {Minh {\^On V\~{u} Ng\d{o}c} and Jonathan Fabrizio and
                  Thierry G\'eraud},
  title        = {Saliency-Based Detection of Identity Documents Captured by
                  Smartphones},
  booktitle    = {Proceedings of the IAPR International Workshop on Document
                  Analysis Systems (DAS)},
  pages        = {387--392},
  address      = {Vienna, Austria},
  year         = {2018},
  month        = apr,
  doi          = {10.1109/DAS.2018.17},
  abstract     = {Smartphones have became an easy and convenient mean to
                  acquire documents. In this paper, we focus on the automatic
                  segmentation of identity documents in smartphone photos or
                  videos using visual saliency (VS). VS-based approaches,
                  which pertain to computer vision, have not be considered
                  yet for this particular task. Here we compare different VS
                  methods, and we propose a new VS scheme, based on a recent
                  distance belonging to the scope of mathematical morphology.
                  We show that the saliency maps we obtain are competitive
                  with state-of-the-art visual saliency methods and, that
                  such approaches are very promising for use in identity
                  document detection and segmentation, even without taking
                  into account any prior knowledge about document contents.
                  In particular they can work in real-time on smartphones.},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/movn.18.das.pdf},
  lrdeinc      = {Publications/movn.18.das.inc},
  lrdenewsdate = {2018-02-02}
}
@Article{ movn.20.cviu,
  author       = {Minh {\^On V\~{u} Ng\d{o}c} and Nicolas Boutry and
                  Jonathan Fabrizio and Thierry G\'eraud},
  title        = {A New Minimum Barrier Distance for Multivariate Images
                  with Applications to Salient Object Detection, Shortest
                  Path Finding, and Segmentation},
  journal      = {Computer Vision and Image Understanding},
  year         = {2020},
  month        = aug,
  volume       = {197--198},
  doi          = {10.1016/j.cviu.2020.102993},
  abstract     = {Distance transforms and the saliency maps they induce are
                  widely used in image processing, computer vision, and
                  pattern recognition. One of the most commonly used distance
                  transform is the geodesic one. Unfortunately, this distance
                  does not always achieve satisfying results on noisy or
                  blurred images. Recently, a new (pseudo-)distance, called
                  the minimum barrier distance (MBD), more robust to pixel
                  variations, has been introduced. Some years after, G\'eraud
                  et al. have proposed a good and fast-to-compute
                  approximation of this distance: the Dahu pseudo-distance.
                  Since this distance was initially developed for grayscale
                  images, we propose here an extension of this transform to
                  multivariate images; we call it vectorial Dahu
                  pseudo-distance. An efficient way to compute it is provided
                  in this paper. Besides, we provide benchmarks demonstrating
                  how much the vectorial Dahu pseudo-distance is more robust
                  and competitive compared to other MB-based distances, which
                  shows how much this distance is promising for salient
                  object detection, shortest path finding, and object
                  segmentation.},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/movn.20.cviu.pdf},
  lrdenewsdate = {2020-06-02}
}
@PhDThesis{ movn.20.phd,
  author       = {Minh {\^On V\~{u} Ng\d{o}c}},
  title        = {Improvement of a text detection chain and the proposition
                  of a new evaluation protocol for text detection
                  algorithms},
  school       = {Sorbonne Universit\'e},
  address      = {Paris, France},
  year         = 2020,
  month        = feb,
  abstract     = {Hierarchical image representations are widely used in
                  image processing to model the content of an image in the
                  multi-scale structure. A well-known hierarchical
                  representation is the tree of shapes (ToS) which encodes
                  the inclusion relationship between connected components
                  from different thresholded levels. This kind of tree is
                  self-dual, contrast-change invariant and popular in
                  computer vision community. Typically, in our work, we use
                  this representation to compute the new distance which
                  belongs to the mathematical morphology domain. Distance
                  transforms and the saliency maps they induce are generally
                  used in image processing, computer vision, and pattern
                  recognition. One of the most commonly used distance
                  transforms is the geodesic one. Unfortunately, this
                  distance does not always achieve satisfying results on
                  noisy or blurred images. Recently, a new pseudo-distance,
                  called the minimum barrier distance (MBD), more robust to
                  pixel fluctuation, has been introduced. Some years after,
                  G\'{e}raud et al. have proposed a good and fast-to-compute
                  approximation of this distance: the Dahu pseudo-distance.
                  Since this distance was initially developed for grayscale
                  images, we propose here an extension of this transform to
                  multivariate images; we call it vectorial Dahu
                  pseudo-distance. This new distance is easily and
                  efficiently computed thanks to the multivariate tree of
                  shapes (MToS). We propose an efficient way to compute this
                  distance and its deduced saliency map in this thesis. We
                  also investigate the properties of this distance in dealing
                  with noise and blur in the image. This distance has been
                  proved to be robust for pixel invariant. To validate this
                  new distance, we provide benchmarks demonstrating how the
                  vectorial Dahu pseudo-distance is more robust and
                  competitive compared to other MB-based distances. This
                  distance is promising for salient object detection,
                  shortest path finding, and object segmentation. Moreover,
                  we apply this distance to detect the document in videos.
                  Our method is a region-based approach which relies on
                  visual saliency deduced from the Dahu pseudo-distance. We
                  show that the performance of our method is competitive with
                  state-of-the-art methods on the ICDAR Smartdoc 2015
                  Competition dataset.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/movn.20.phd.pdf},
  lrdeprojects = {Olena},
  lrdenewsdate = {2020-02-18}
}
@InProceedings{ najman.13.ismm,
  author       = {Laurent Najman and Thierry G\'eraud},
  title        = {Discrete set-valued continuity and interpolation},
  booktitle    = {Mathematical Morphology and Its Application to Signal and
                  Image Processing -- Proceedings of the 11th International
                  Symposium on Mathematical Morphology (ISMM)},
  year         = 2013,
  editor       = {C.L. Luengo Hendriks and G. Borgefors and R. Strand},
  volume       = 7883,
  series       = {Lecture Notes in Computer Science},
  address      = {Uppsala, Sweden},
  publisher    = {Springer},
  pages        = {37--48},
  lrdeprojects = {Olena},
  abstract     = {The main question of this paper is to retrieve some
                  continuity properties on (discrete) T0-Alexandroff spaces.
                  One possible application, which will guide us, is the
                  construction of the so-called "tree of shapes"
                  (intuitively, the tree of level lines). This tree, which
                  should allow to process maxima and minima in the same way,
                  faces quite a number of theoretical difficulties that we
                  propose to solve using set-valued analysis in a purely
                  discrete setting. We also propose a way to interpret any
                  function defined on a grid as a "continuous" function
                  thanks to an interpolation scheme. The continuity
                  properties are essential to obtain a quasi-linear algorithm
                  for computing the tree of shapes in any dimension, which is
                  exposed in a companion paper.},
  lrdekeywords = {Image},
  lrdepaper    = {https://www.lrde.epita.fr/dload/papers/najman.13.ismm.pdf},
  lrdenewsdate = {2013-03-14}
}
@InProceedings{ nejati.20.cp,
  author       = {Saeed Nejati and Ludovic {Le Frioux} and Vijay Ganesh},
  title        = {A Machine Learning Based Splitting Heuristic for
                  Divide-and-Conquer Solvers},
  booktitle    = {Proceedings of the 26th International Conference on
                  Principles and Practice of Constraint Programming (CP'20)},
  year         = 2020,
  month        = sep,
  volume       = {12333},
  pages        = {899--916},
  series       = {Lecture Notes in Computer Science},
  publisher    = {Springer},
  abstract     = {In this paper, we present a machine learning based
                  splitting heuristic for divide-and-conquer parallel Boolean
                  SAT solvers. Splitting heuristics, whether they are
                  look-ahead or look-back, are designed using proxy metrics,
                  which when optimized, approximate the true metric of
                  minimizing solver runtime on sub-formulas resulting from a
                  split. The rationale for such metrics is that they have
                  been empirically shown to be excellent proxies for runtime
                  of solvers, in addition to being cheap to compute in an
                  online fashion. However, the design of traditional
                  splitting methods are often ad-hoc and do not leverage the
                  copious amounts of data that solvers generate. To address
                  the above-mentioned issues, we propose a machine learning
                  based splitting heuristic that leverages the features of
                  input formulas and data generated during the run of a
                  divide-and-conquer (DC) parallel solver. More precisely, we
                  reformulate the splitting problem as a ranking problem and
                  develop two machine learning models for pairwise ranking
                  and computing the minimum ranked variable. Our model can
                  compare variables according to their splitting quality,
                  which is based on a set of features extracted from
                  structural properties of the input formula, as well as
                  dynamic probing statistics, collected during the solver's
                  run. We derive the true labels through offline collection
                  of runtimes of a parallel DC solver on sample formulas and
                  variables within them. At each splitting point, we generate
                  a predicted ranking (pairwise or minimum rank) of candidate
                  variables and split the formula on the top variable. We
                  implemented our heuristic in the Painless parallel SAT
                  framework and evaluated our solver on a set of
                  cryptographic instances encoding the SHA-1 preimage as well
                  as SAT competition 2018 and 2019 benchmarks. We solve
                  significantly more instances compared to the baseline
                  Painless solver and outperform top divide-and-conquer
                  solvers from recent SAT competitions, such as Treengeling.
                  Furthermore, we are much faster than these top solvers on
                  cryptographic benchmarks.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/nejati.20.cp.pdf},
  lrdekeywords = {Parallel satisfiability, splitting heuristic,
                  divide-and-conquer, machine learning}
}
@TechReport{ newton.16.dag.report,
  author       = {Jim Newton},
  title        = {Finding maximal common joins in a DAG},
  institution  = {LRDE},
  address      = {Paris, France},
  year         = 2016,
  month        = nov,
  abstract     = { Given a directed acyclic graph (DAG) and two arbitrary
                  nodes, find maximal common joins of the two nodes. In this
                  technical report I suggest an algorithm for efficiently
                  calculating the minimal set of nodes which derive from a
                  pair of nodes.},
  lrdekeywords = {CLOS, graph, lisp},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/newton.16.dag.report.pdf},
  lrdeprojects = {Climb},
  lrdenewsdate = {2016-11-17}
}
@InProceedings{ newton.16.els,
  author       = {Jim Newton and Akim Demaille and Didier Verna},
  title        = {Type-Checking of Heterogeneous Sequences in {C}ommon
                  {L}isp},
  booktitle    = {European Lisp Symposium},
  address      = {Krak{\'o}w, Poland},
  year         = 2016,
  month        = may,
  abstract     = {We introduce the abstract concept of rational type
                  expression and show its relationship to rational language
                  theory. We further present a concrete syntax, regular type
                  expression, and a Common Lisp implementation thereof which
                  allows the programmer to declaratively express the types of
                  heterogeneous sequences in a way which is natural in the
                  Common Lisp language. The implementation uses techniques
                  well known and well founded in rational language theory, in
                  particular the use of the Brzozowski derivative and
                  deterministic automata to reach a solution which can match
                  a sequence in linear time. We illustrate the concept with
                  several motivating examples, and finally explain many
                  details of its implementation.},
  lrdeinc      = {Publications/newton.16.els.inc},
  lrdekeywords = {Rational languages, typechecking, finite automata},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/newton.16.els.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/newton.16.els.slides.pdf},
  lrdeprojects = {Climb},
  lrdenewsdate = {2016-03-25}
}
@TechReport{ newton.16.monad.report,
  author       = {Jim Newton},
  title        = {Monads in Common Lisp},
  institution  = {LRDE},
  address      = {Paris, France},
  year         = 2016,
  month        = nov,
  abstract     = {In this article we explain monads so they can be
                  understood to the Lisp programmer. We base the explanation
                  on a very clean explanation presented in the Scala
                  programming language. We then proceed to re-present the
                  concepts using mostly simple Common Lisp concepts. We do
                  not attempt to justify the motivation behind the
                  definitions, and we do not attempt to give any examples of
                  applications. Most notably, we do not attempt to explain
                  the connection monads have to modeling side effects. },
  lrdekeywords = {monad, scala, lisp},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/newton.16.monad.report.pdf},
  lrdeprojects = {Climb},
  lrdenewsdate = {2016-11-18}
}
@TechReport{ newton.16.rte.report,
  author       = {Jim Newton},
  title        = {Efficient dynamic type checking of heterogeneous
                  sequences},
  institution  = {LRDE},
  year         = 2016,
  number       = {2005D002},
  address      = {Paris, France},
  month        = feb,
  annote       = {This technical report corresponds to the publication
                  newton.16.edtchs},
  abstract     = { This report provides detailed background of our
                  development of the rational type expression, concrete
                  syntax, regular type expression, and a Common Lisp
                  implementation which allows the programmer to declaratively
                  express the types of heterogeneous sequences in a way which
                  is natural in the Common Lisp language. We present a brief
                  theoretical background in rational language theory, which
                  facilitates the development of rational type expressions,
                  in particular the use of the Brzozowski derivative and
                  deterministic automata to arrive at a solution which can
                  match a sequence in linear time. We illustrate the concept
                  with several motivating examples, and finally explain many
                  details of its implementation. },
  lrdeinc      = {Publications/newton.16.els.inc},
  lrdekeywords = {Rational languages, typechecking, finite automata},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/newton.16.rte.report.pdf},
  lrdeprojects = {Climb},
  lrdenewsdate = {2016-02-22}
}
@TechReport{ newton.17.dtd.report,
  author       = {Jim Newton},
  title        = {Analysis of Algorithms Calculating the Maximal Disjoint
                  Decomposition of a Set},
  institution  = {LRDE},
  address      = {Paris, France},
  year         = 2017,
  month        = jan,
  abstract     = { In this article we demonstrate 4 algorithms for
                  calculating the maximal disjoint decomposition of a given
                  set of types. We discuss some advantages and disadvantages
                  of each, and compare their performance. We extended
                  currently known work to describe an efficient algorithm for
                  manipulating binary decision diagrams representing types in
                  a programming language which supports subtyping viewed as
                  subsets.},
  lrdekeywords = {types, set theory, graph, lisp},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/newton.17.dtd.report.pdf},
  lrdeprojects = {Climb},
  lrdenewsdate = {2017-02-02}
}
@InProceedings{ newton.17.els,
  author       = {Jim Newton and Didier Verna and Maximilien Colange},
  title        = {Programmatic Manipulation of {C}ommon {L}isp Type
                  Specifiers},
  booktitle    = {European Lisp Symposium},
  address      = {Brussels, Belgium},
  year         = 2017,
  month        = apr,
  abstract     = {In this article we contrast the use of the s-expression
                  with the BDD (Binary Decision Diagram) as a data structure
                  for programmatically manipulating Common Lisp type
                  specifiers. The s-expression is the de facto standard
                  surface syntax and also programmatic representation of the
                  type specifier, but the BDD data structure offers
                  advantages: most notably, type equivalence checks using
                  s-expressions can be computationally intensive, whereas the
                  type equivalence check using BDDs is a check for object
                  identity. As an implementation and performance experiment,
                  we define the notion of maximal disjoint type
                  decomposition, and discuss implementations of algorithms to
                  compute it: a brute force iteration, and as a tree
                  reduction. The experimental implementations represent type
                  specifiers by both aforementioned data structures, and we
                  compare the performance observed in each approach.},
  lrdeinc      = {Publications/newton.17.els.inc},
  lrdekeywords = {Graph algorithms, typechecking, Boolean functions, Binary
                  decision diagrams, data structures},
  lrdestatus   = {accepted},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/newton.17.els.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/newton.17.els.slides.pdf},
  lrdeprojects = {Climb},
  lrdenewsdate = {2017-02-06}
}
@InProceedings{ newton.18.els,
author = {Jim Newton and Didier Verna},
title = {Approaches in Typecase Optimization},
booktitle = {European Lisp Symposium},
year = 2018,
lrdekeywords = {typechecking, Boolean functions, Binary decision
diagrams},
lrdenewsdate = {2018-04-05},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.18.els.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/newton.18.slides.pdf},
lrdeprojects = {Climb},
address = {Marbella, Spain},
month = apr,
abstract = {We contrast two approaches to optimizing the Common Lisp
typecase macro expansion. The first approach is based on
heuristics intended to estimate run time performance of
certain type checks involving Common Lisp type specifiers.
The technique may, depending on code size, exhaustively
search the space of permutations of the type checks, intent
on finding the optimal order. With the second technique, we
represent a typecase form as a type specifier,
encapsulating the side-effecting non-Boolean parts so as to
appear compatible with the Common Lisp type algebra
operators. The encapsulated expressions are specially
handled so that the Common Lisp type algebra functions
preserve them, and we can unwrap them after a process of
Boolean reduction into efficient Common Lisp code,
maintaining the appropriate side effects but eliminating
unnecessary type checks. Both approaches allow us to
identify unreachable code, test for exhaustiveness of the
clauses and eliminate type checks which are calculated to
be redundant. }
}
@InProceedings{ newton.18.meta,
author = {Jim Newton and Didier Verna},
title = {Recognizing heterogeneous sequences by rational type
expression},
booktitle = {Proceedings of the Meta'18: Workshop on Meta-Programming
Techniques and Reflection},
year = 2018,
address = {Boston, MA USA},
month = nov,
lrdeprojects = {Climb},
abstract = { We summarize a technique for writing functions which
recognize types of heterogeneous sequences in Common Lisp.
The technique employs sequence recognition functions,
generated at compile time, and evaluated at run-time. The
technique we demonstrate extends the Common Lisp type
system, exploiting the theory of rational languages, Binary
Decision Diagrams, and the Turing complete macro facility
of Common Lisp. The resulting system uses meta-programming
to move an exponential complexity operation from run-time
to a compile-time operation, leaving a highly optimized
linear complexity operation for run-time.},
lrdestatus = {accepted},
lrdenewsdate = {2018-09-14},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.18.meta.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/newton.18.meta.slides.pdf},
lrdekeywords = {rational language, types, lisp, reflection}
}
@PhDThesis{ newton.18.phd,
author = {Jim Newton},
title = {Representing and Computing with Types in Dynamically Typed
Languages},
school = {Sorbonne Universit\'e},
year = 2018,
address = {Paris, France},
month = nov,
abstract = {In this report, we present code generation techniques
related to run-time type checking of heterogeneous
sequences. Traditional regular expressions can be used to
recognize well defined sets of character strings called
rational languages or sometimes regular languages. Newton
et al. present an extension whereby a dynamic programming
language may recognize a well defined set of heterogeneous
sequences, such as lists and vectors. As with the analogous
string matching regular expression theory, matching these
regular type expressions can also be achieved by using a
finite state machine (deterministic finite automata, DFA).
Constructing such a DFA can be time consuming. The approach
we chose, uses meta-programming to intervene at
compile-time, generating efficient functions specific to
each DFA, and allowing the compiler to further optimize the
functions if possible. The functions are made available for
use at run-time. Without this use of meta-programming, the
program might otherwise be forced to construct the DFA at
run-time. The excessively high cost of such a construction
would likely far outweigh the time needed to match a string
against the expression. Our technique involves hooking into
the Common Lisp type system via the DEFTYPE macro. The
first time the compiler encounters a relevant type
specifier, the appropriate DFA is created, which may be a
Omega(2^n) operation, from which specific low-level code is
generated to match that specific expression. Thereafter,
when the type specifier is encountered again, the same
pre-generated function can be used. The code generated is
Theta(n) complexity at run-time. A complication of this
approach, which we explain in this report, is that to build
the DFA we must calculate a disjoint type decomposition
which is time consuming, and also leads to sub-optimal use
of TYPECASE in machine generated code. To handle this
complication, we use our own macro OPTIMIZED-TYPECASE in
our machine generated code. Uses of this macro are also
implicitly expanded at compile time. Our macro expansion
uses BDDs (Binary Decision Diagrams) to optimize the
OPTIMIZED-TYPECASE into low level code, maintaining the
TYPECASE semantics but eliminating redundant type checks.
In the report we also describe an extension of BDDs to
accommodate subtyping in the Common Lisp type system as well
as an in-depth analysis of worst-case sizes of BDDs. },
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.18.phd.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/newton.18.phd.slides.pdf},
lrdekeywords = {rational language, automata, lisp, bdd},
lrdeprojects = {Climb},
lrdenewsdate = {2018-11-01}
}
@Article{ newton.18.tocl,
author = {Jim Newton and Didier Verna},
title = {A Theoretical and Numerical Analysis of the Worst-Case
Size of Reduced Ordered Binary Decision Diagrams},
journal = {ACM Transactions on Computational Logic},
year = 2019,
volume = {20},
number = {1},
month = jan,
pages = {1--36},
lrdeprojects = {Climb},
abstract = {Binary Decision Diagrams (BDDs) and in particular ROBDDs
(Reduced Ordered BDDs) are a common data structure for
manipulating Boolean expressions, integrated circuit
design, type inferencers, model checkers, and many other
applications. Although the ROBDD is a lightweight data
structure to implement, the behavior, in terms of memory
allocation, may not be obvious to the program architect. We
explore experimentally, numerically, and theoretically the
typical and worst-case ROBDD sizes in terms of number of
nodes and residual compression ratios, as compared to
unreduced BDDs. While our theoretical results are not
surprising, as they are in keeping with previously known
results, we believe our method contributes to the current
body of research by our experimental and statistical
treatment of ROBDD sizes. In addition, we provide an
algorithm to calculate the worst-case size. Finally, we
present an algorithm for constructing a worst-case ROBDD of
a given number of variables. Our approach may be useful to
projects deciding whether the ROBDD is the appropriate data
structure to use, and in building worst-case examples to
test their code.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.18.tocl.pdf},
lrdestatus = {accepted},
lrdenewsdate = {2018-08-28}
}
@InProceedings{ newton.19.els,
author = {Jim Newton and Didier Verna},
title = {Finite Automata Theory Based Optimization of Conditional
Variable Binding},
booktitle = {European Lisp Symposium},
year = 2019,
lrdestatus = {accepted},
lrdekeywords = {finite automata, infinite alphabets, type systems, Common
Lisp, meta programming},
lrdenewsdate = {2019-01-14},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.19.els.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/newton.19.els.slides.pdf},
lrdeprojects = {Climb},
address = {Genova, Italy},
month = apr,
abstract = {We present an efficient and highly optimized
implementation of destructuring-case in Common Lisp. This
macro allows the selection of the most appropriate
destructuring lambda list of several given based on
structure and types of data at run-time and thereafter
dispatches to the corresponding code branch. We examine an
optimization technique, based on finite automata theory
applied to conditional variable binding and execution, and
type-based pattern matching on Common Lisp sequences. A
risk of inefficiency associated with a naive implementation
of destructuring-case is that the candidate expression
being examined may be traversed multiple times, once for
each clause whose format fails to match, and finally once
for the successful match. We have implemented
destructuring-case in such a way to avoid multiple
traversals of the candidate expression. This article
explains how this optimization has been implemented.}
}
@Misc{ newton.20.tfp,
author = {Jim Newton},
title = {Performance Comparison of Several Folding Strategies},
booktitle = {Trends in Functional Programming},
year = 2020,
note = {Accepted},
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.20.tfp.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/newton.20.tfp.slides.pdf},
lrdestatus = {accepted},
lrdekeywords = {fold, binary decision diagram, scala, lisp, rational
numbers, functional programming},
lrdenewsdate = {2020-01-14},
address = {Krak{\'o}w, Poland},
month = feb,
abstract = {In this article we examine the computation order and
consequent performance of three different conceptual
implementations of the fold function. We explore a set of
performance based experiments on different implementations
of this function. In particular, we contrast the fold-left
implementation with two other implementations we refer to as
pair-wise-fold and tree-like-fold. We explore two
application areas: ratio arithmetic and Binary Decision
Diagram construction. We demonstrate several cases where
the performance of certain algorithms is very different
depending on the approach taken. In particular iterative
computations where the object size accumulates are good
candidates for the tree-like-fold.}
}
@InProceedings{ newton.21.els,
author = {Jim Newton and Adrien Pommellet},
title = {A Portable, Simple, Embeddable Type System},
booktitle = {Proceedings of the 14th European Lisp Symposium (ELS)},
year = 2021,
lrdestatus = {accepted},
lrdekeywords = {infinite alphabets, type systems, Common Lisp, Clojure,
Scala},
lrdenewsdate = {2021-04-26},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/newton.21.els.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/newton.21.els.slides.pdf},
lrdeprojects = {Spot},
address = {Online},
month = may,
abstract = { We present a simple type system inspired by that of
Common Lisp. The type system is intended to be embedded
into a host language and accepts certain fundamental types
from that language as axiomatically given. The type
calculus provided in the type system is capable of
expressing union, intersection, and complement types, as
well as membership, subtype, disjoint, and habitation
(non-emptiness) checks. We present a theoretical foundation
and two sample implementations, one in Clojure and one in
Scala.},
doi = {10.5281/zenodo.4709777}
}
@InProceedings{ perrot.06.nist,
author = {Patrick Perrot and R\'eda Dehak and G\'erard Chollet},
title = {{ENST-IRCGN} System Description},
booktitle = {NIST SRE'06 Workshop: speaker recognition evaluation
campaign},
year = 2006,
address = {San Juan, Puerto Rico},
month = jun,
lrdenewsdate = {2006-05-30}
}
@InProceedings{ poitrenaud.19.icfem,
author = {Denis Poitrenaud and Etienne Renault},
title = {Combining Parallel Emptiness Checks with Partial Order
Reductions},
booktitle = {Proceedings of the 21st International Conference on Formal
Engineering Methods (ICFEM'19)},
editor = {Yamine Ait Ameur and Shengchao Qin},
address = {Shenzhen, China},
year = 2019,
month = nov,
pages = {??--??},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {11852},
abstract = { In explicit state model checking of concurrent systems,
multicore emptiness checks and partial order reductions
(POR) are two major techniques to handle large state
spaces. The first one tries to take advantage of multi-core
architectures while the second one may decrease
exponentially the size of the state space to explore. For
checking LTL properties, Bloemen and van de Pol [2] showed
that the best performance is currently obtained using their
multi-core SCC-based emptiness check. However, combining
the latest SCC-based algorithm with POR is not trivial
since a condition on cycles, the proviso, must be enforced
on an algorithm which processes collaboratively cycles. In
this paper, we suggest a pessimistic approach to tackle
this problem for liveness properties. For safety ones, we
propose an algorithm which takes benefit from the
information computed by the SCC-based algorithm. We also
present new parallel provisos for both safety and liveness
properties that rely on other multi-core emptiness
checks. We observe that all presented algorithms maintain
good reductions and scalability.},
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/poitrenaud.19.icfem.pdf},
lrdenewsdate = {2019-08-02}
}
@Article{ pommellet.20.isse,
author = {Adrien Pommellet and Tayssir Touili},
title = {{LTL} Model Checking for Communicating Concurrent
Programs},
journal = {Innovations in Systems and Software Engineering: a NASA
journal (ISSE)},
year = 2020,
volume = {16},
number = {2},
pages = {161--179},
month = jun,
publisher = {Springer},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/pommellet.20.isse.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2020-05-15},
abstract = {We present in this paper a new approach to the static
analysis of concurrent programs with procedures. To this
end, we model multi-threaded programs featuring recursive
procedure calls and synchronisation by rendez-vous between
parallel threads with communicating pushdown systems (from
now on CPDSs).
The reachability problem for this particular class of
automata is unfortunately undecidable. However, it has been
shown that an efficient abstraction of the execution traces
language can nonetheless be computed. To this end, an
algebraic framework to over-approximate context-free
languages has been introduced by Bouajjani et al.
In this paper, we combine this framework with an
automata-theoretic approach in order to approximate an
answer to the model checking problem of the linear-time
temporal logic (from now on LTL) on CPDSs. We then present
an algorithm that, given a single-indexed or
stutter-invariant LTL formula, allows us to prove that no
run of a CPDS verifies this formula if the procedure
ends.},
doi = {10.1007/s11334-020-00363-6}
}
@Misc{ pouillard.05.sud,
author = {Akim Demaille and Thomas Largillier and Nicolas
Pouillard},
title = {{ESDF}: A proposal for a more flexible {SDF} handling},
note = {Communication to Stratego Users Day 2005},
year = 2005,
address = {Utrecht {U}niversity, {N}etherlands},
month = may,
lrdeprojects = {Transformers},
abstract = {By means of its annotations, Syntax Definition
Formalism (SDF) seems to be extensible: the user is tempted
to tailor its grammar syntax by adding new annotation
kinds. Unfortunately the standard SDF crunching tools from
Stratego/XT do not support the extension of SDF, and the
user has to develop the whole set of tools for her home
grown extension(s). We present the SDF tool set that
provides ``weak'' genericity with respect to the grammar
grammar: support for arbitrary SDF annotations. We would
like to contribute it to Stratego/XT since its components
subsume their stock peers. Finally, we present a set of
four extensions we find useful.},
lrdepaper = {http://www.lrde.epita.fr/dload/200505-SUD/esdf/article-200505-SUD-esdf.pdf}
}
@InProceedings{ puybareau.17.gretsi,
author = {\'Elodie Puybareau and Hugues Talbot and Laurent Najman},
title = {Caract\'erisation des zones de mouvement p\'eriodiques
pour applications bio-m\'edicales},
booktitle = {Actes du 26e Colloque GRETSI},
year = {2017},
address = {Juan-les-Pins, France},
category = {national},
month = sep,
abstract = {De nombreuses applications biom\'edicales impliquent
l'analyse de s\'equences pour la caract\'erisation du
mouvement. Dans cet article, nous consid\'erons des
s\'equences 2D+t o\`u un mouvement particulier (par exemple
un flux sanguin) est associ\'e \`a une zone sp\'ecifique de
l'image 2D (par exemple une art\`ere). Mais de nombreux
mouvements peuvent co-exister dans les s\'equences (par
exemple, il peut y avoir plusieurs vaisseaux sanguins
pr\'esents, chacun avec leur flux sp\'ecifique). La
caract\'erisation de ce type de mouvement implique d'abord
de trouver les zones o\`u le mouvement est pr\'esent, puis
d'analyser ces mouvements : vitesse, r\'egularit\'e,
fr\'equence, etc. Dans cet article, nous proposons une
m\'ethode appropri\'ee pour d\'etecter et caract\'eriser
simultan\'ement les zones o\`u le mouvement est pr\'esent
dans une s\'equence. Nous pouvons ensuite classer ce
mouvement en zones coh\'erentes en utilisant un
apprentissage non supervis\'e et produire des m\'etriques
directement utilisables pour diverses applications. Nous
illustrons et validons cette m\^eme m\'ethode sur l'analyse
du flux sanguin chez l'embryon de poisson.},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2017-06-28},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.17.gretsi.pdf}
}
@InProceedings{ puybareau.17.isbi,
author = {\'Elodie Puybareau and Hugues Talbot and Laurent Najman},
title = {Periodic Area-of-Motion characterization for Bio-Medical
applications},
booktitle = {Proceedings of the IEEE International Symposium on
Bio-Medical Imaging (ISBI)},
year = 2017,
address = {Melbourne, Australia},
month = apr,
abstract = {Many bio-medical applications involve the analysis of
sequences for motion characterization. In this article, we
consider 2D+t sequences where a particular motion (e.g. a
blood flow) is associated with a specific area of the 2D
image (e.g. an artery) but multiple motions may exist
simultaneously in the same sequences (e.g. there may be
several blood vessels present, each with their specific
flow). The characterization of this type of motion
typically involves first finding the areas where motion is
present, followed by an analysis of these motions: speed,
regularity, frequency, etc. In this article, we propose a
methodology called ``area-of-motion characterization''
suitable for simultaneously detecting and characterizing
areas where motion is present in a sequence. We can then
classify this motion into consistent areas using
unsupervised learning and produce directly usable metrics
for various applications. We illustrate this methodology
for the analysis of cilia motion on ex-vivo human samples,
and we apply and validate the same methodology for blood
flow analysis in fish embryo.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.17.isbi.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2017-02-20},
doi = {10.1109/ISBI.2017.7950503}
}
@InProceedings{ puybareau.17.ismm,
author = {\'Elodie Puybareau and Hugues Talbot and Noha Gaber and
Tarik Bourouina},
title = {Morphological Analysis of Brownian Motion for Physical
Measurements},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 13th International
Symposium on Mathematical Morphology (ISMM)},
year = {2017},
editor = {J. Angulo and S. Velasco-Forero and F. Meyer},
volume = {10225},
series = {Lecture Notes in Computer Science},
pages = {486--497},
month = may,
address = {Fontainebleau, France},
publisher = {Springer},
abstract = {Brownian motion is a well-known, apparently chaotic
motion affecting microscopic objects in fluid media. The
mathematical and physical basis of Brownian motion have
been well studied but not often exploited. In this article
we propose a particle tracking methodology based on
mathematical morphology, suitable for Brownian motion
analysis, which can provide difficult physical measurements
such as the local temperature and viscosity. We illustrate
our methodology on simulation and real data, showing that
interesting phenomena and good precision can be achieved.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.17.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2017-02-23},
doi = {10.1007/978-3-319-57240-6_40}
}
@InProceedings{ puybareau.18.brainles,
author = {\'Elodie Puybareau and Guillaume Tochon and Joseph
Chazalon and Jonathan Fabrizio},
title = {Segmentation of Gliomas and Prediction of Patient Overall
Survival: {A} Simple and Fast Procedure},
booktitle = {Proceedings of the Workshop on Brain Lesions (BrainLes),
in conjunction with MICCAI},
year = 2018,
series = {Lecture Notes in Computer Science},
volume = {11384},
pages = {199--209},
publisher = {Springer},
abstract = {In this paper, we propose a fast automatic method that
segments glioma without any manual assistance, using a
fully convolutional network (FCN) and transfer learning.
From this segmentation, we predict the patient overall
survival using only the results of the segmentation and a
home made atlas. The FCN is the base network of VGG-16,
pretrained on ImageNet for natural image classification,
and fine tuned with the training dataset of the MICCAI 2018
BraTS Challenge. It relies on the ``pseudo-3D'' method
published at ICIP 2017, which allows for segmenting objects
from 2D color images which contain 3D information of MRI
volumes. For each n th slice of the volume to segment, we
consider three images, corresponding to the (n-1)th, nth,
and (n+1)th slices of the original volume. These three
gray-level 2D images are assembled to form a 2D RGB color
image (one image per channel). This image is the input of
the FCN to obtain a 2D segmentation of the n th slice. We
process all slices, then stack the results to form the 3D
output segmentation. With such a technique, the
segmentation of a 3D volume takes only a few seconds. The
prediction is based on Random Forests, and has the
advantage of not being dependent on the acquisition
modality, making it robust to inter-base data.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.18.brainles.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-11-05},
doi = {10.1007/978-3-030-11726-9_18}
}
@Misc{ puybareau.18.fish,
author = {Diane Genest and \'Elodie Puybareau and Jean Cousty and Marc
L\'eonard and Hugues Talbot and No\'emie De Croz\'e},
title = {High throughput automated detection of axial malformations
in fish embryo},
howpublished = {Communication at the 5th International Symposium and
Workshop on Fish and Amphibian Embryos as Alternative
Models in Toxicology and Teratology},
month = nov,
year = {2018},
abstract = {Fish embryo models are widely used as screening tools to
assess the efficacy and /or toxicity of chemicals. This
assessment involves analysing embryo morphological
abnormalities. In this article, we propose a multi-scale
pipeline to allow automated classification of fish embryos
(Medaka: Oryzias latipes) based on the presence or absence
of spine malformations. The proposed pipeline relies on the
acquisition of fish embryo 2D images, on feature extraction
due to mathematical morphology operators and on machine
learning classification. After image acquisition,
segmentation tools are used to focus on the embryo before
analysing several morphological features. An approach based
on machine learning is then applied to these features to
automatically classify embryos according to the detection
of axial malformations. We built and validated our learning
model on 1,459 images with a 10-fold cross-validation by
comparison with the gold standard of 3D observations
performed under a microscope by a trained operator. Our
pipeline results in correct classification in 85\% of the
cases included in the database. This percentage is similar
to the percentage of success of a trained human operator
working on 2D images. Indeed, most of the errors are due to
the inherent limitations of 2D images compared to 3D
observations. The key benefit of our approach is the low
computational cost of our image analysis pipeline, which
guarantees optimal throughput analysis.},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-11-29}
}
@InProceedings{ puybareau.18.icip,
author = {\'Elodie Puybareau and Thierry G\'eraud},
title = {Real-Time Document Detection in Smartphone Videos},
booktitle = {Proceedings of the 24th IEEE International Conference on
Image Processing (ICIP)},
year = {2018},
pages = {1498--1502},
month = oct,
address = {Athens, Greece},
doi = {10.1109/ICIP.2018.8451533},
abstract = {Smartphones are more and more used to capture photos of
any kind of important documents in many different
situations, yielding to new image processing needs. One of
these is the ability of detecting documents in real time on
smartphones' video stream while being robust to classical
defects such as low contrast, fuzzy images, flares,
shadows, etc. This feature is interesting to help the user
to capture his document in the best conditions and to guide
this capture (evaluating appropriate distance, centering
and tilt). In this paper we propose a solution to detect in
real time documents taking very few assumptions concerning
their contents and background. This method is based on
morphological operators which contrasts with classical line
detectors or gradient based thresholds. The use of such
invariant operators makes our method robust to the defects
encountered in video stream and suitable for real time
document detection on smartphones.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.18.icip.pdf},
lrdekeywords = {Image},
lrdeprojects = {Olena},
lrdenewsdate = {2018-05-10}
}
@InProceedings{ puybareau.18.rfiap,
author = {\'Elodie Puybareau and Yongchao Xu and Joseph Chazalon and
Isabelle Bloch and Thierry G\'eraud},
title = {Segmentation des hyperintensit\'es de la mati\`ere blanche
en quelques secondes \`a l'aide d'un r\'eseau de neurones
convolutif et de transfert d'apprentissage},
booktitle = {Actes du congr\`es Reconnaissance des Formes, Image,
Apprentissage et Perception (RFIAP), session sp\'eciale
``Deep Learning, deep in France''},
year = {2018},
month = jun,
address = {Marne-la-Vall\'ee, France},
category = {national},
abstract = {Dans cet article, nous proposons une m\'ethode automatique
et rapide pour segmenter les hyper-intensit\'es de la
mati\`ere blanche (WMH) dans des images IRM c\'er\'ebrales
3D, en utilisant un r\'eseau de neurones enti\`erement
convolutif (FCN) et du transfert d'apprentissage. Ce FCN
est le r\'eseau neuronal du Visual Geometry Group (VGG)
pr\'e-entra\^in\'e sur la base ImageNet pour la
classification des images naturelles, et affin\'e avec
l'ensemble des donn\'ees d'entra\^inement du concours
MICCAI WMH. Nous consid\'erons trois images pour chaque
coupe du volume \`a segmenter, provenant des acquisitions
en T1, en FLAIR, et le r\'esultat d'un op\'erateur
morphologique appliqu\'e sur le FLAIR, le top-hat, qui met
en \'evidence les petites structures de forte intensit\'e.
Ces trois images 2D sont assembl\'ees pour former une image
2D-3 canaux interpr\'et\'ee comme une image en couleurs,
ensuite pass\'ee au FCN pour obtenir la segmentation 2D de
la coupe correspondante. Nous traitons ainsi toutes les
coupes pour former la segmentation de sortie 3D. Avec une
telle technique, la segmentation de WMH sur un volume
c\'er\'ebral 3D prend environ 10 secondes, pr\'e-traitement
compris. Notre technique a \'et\'e class\'ee 6e sur 20
participants au concours MICCAI WMH.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.18.rfiap.pdf},
lrdekeywords = {Image},
lrdeprojects = {Olena},
lrdenewsdate = {2018-05-04}
}
@InProceedings{ puybareau.18.stacom,
author = {\'Elodie Puybareau and Zhou Zhao and Younes Khoudli and
Edwin Carlinet and Yongchao Xu and J\'er\^ome Lacotte and
Thierry G\'eraud},
title = {Left Atrial Segmentation In a Few Seconds Using Fully
Convolutional Network and Transfer Learning},
booktitle = {Proceedings of the Workshop on Statistical Atlases and
Computational Modelling of the Heart (STACOM 2018), in
conjunction with MICCAI},
year = 2019,
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {11395},
pages = {339--347},
doi = {10.1007/978-3-030-12029-0_37},
abstract = {In this paper, we propose a fast automatic method that
segments left atrial cavity from 3D GE-MRIs without any
manual assistance, using a fully convolutional network
(FCN) and transfer learning. This FCN is the base network
of VGG-16, pre-trained on ImageNet for natural image
classification, and fine tuned with the training dataset of
the MICCAI 2018 Atrial Segmentation Challenge. It relies on
the ``pseudo-3D'' method published at ICIP 2017, which allows
for segmenting objects from 2D color images which contain
3D information of MRI volumes. For each $n^{\text{th}}$
slice of the volume to segment, we consider three images,
corresponding to the $(n-1)^{\text{th}}$, $n^{\text{th}}$,
and $(n+1)^{\text{th}}$ slices of the original volume.
These three gray-level 2D images are assembled to form a 2D
RGB color image (one image per channel). This image is the
input of the FCN to obtain a 2D segmentation of the
$n^{\text{th}}$ slice. We process all slices, then stack
the results to form the 3D output segmentation. With such a
technique, the segmentation of the left atrial cavity on a
3D volume takes only a few seconds. We obtain a Dice score
of 0.92 both on the training set in our experiments before
the challenge, and on the test set of the challenge.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.18.stacom.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-10-25}
}
@Article{ puybareau.19.cbm,
author = {Diane Genest and \'Elodie Puybareau and Marc L\'eonard and
Jean Cousty and No\'emie De Croz\'e and Hugues Talbot},
title = {High Throughput Automated Detection of Axial Malformations
in {M}edaka Embryo},
journal = {Computers in Biology and Medicine},
year = 2019,
month = feb,
pages = {157--168},
volume = {105},
lrdeprojects = {Olena},
abstract = {Fish embryo models are widely used as screening tools to
assess the efficacy and/or toxicity of chemicals. This
assessment involves the analysis of embryo morphological
abnormalities. In this article, we propose a multi-scale
pipeline to allow automated classification of fish embryos
(Medaka: Oryzias latipes) based on the presence or absence
of spine malformations. The proposed pipeline relies on the
acquisition of fish embryo 2D images, on feature extraction
based on mathematical morphology operators and on machine
learning classification. After image acquisition,
segmentation tools are used to detect the embryo before
analysing several morphological features. An approach based
on machine learning is then applied to these features to
automatically classify embryos according to the presence of
axial malformations. We built and validated our learning
model on 1459 images with a 10-fold cross-validation by
comparison with the gold standard of 3D observations
performed under a microscope by a trained operator. Our
pipeline results in correct classification in 85\% of the
cases included in the database. This percentage is similar
to the percentage of success of a trained human operator
working on 2D images. The key benefit of our approach is
the low computational cost of our image analysis pipeline,
which guarantees optimal throughput analysis..},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.19.cbm.pdf},
lrdenewsdate = {2019-01-22},
lrdekeywords = {Image},
doi = {10.1016/j.compbiomed.2018.12.016}
}
@InProceedings{ puybareau.19.ismm,
author = {\'{E}lodie Puybareau and Edwin Carlinet and Alessandro
Benfenati and Hugues Talbot},
title = {Spherical fluorescent particle segmentation and tracking
in {3D} confocal microscopy},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 14th International
Symposium on Mathematical Morphology (ISMM)},
year = 2019,
series = {Lecture Notes in Computer Science Series},
address = {Saarbr\"ucken, Germany},
publisher = {Springer},
pages = {1--12},
month = jul,
lrdeprojects = {Olena},
abstract = {Spherical fluorescent particles are micrometer-scale
spherical beads used in various areas of physics, chemistry
or biology as markers associated with local physical media.
They are useful for example in fluid dynamics to
characterize flows, diffusion coefficients, viscosity or
temperature; they are used in cells dynamics to estimate
mechanical strain and stress at the micrometer scale. In
order to estimate these physical measurements, tracking
these particles is necessary. Numerous approaches and
existing packages, both open-source and proprietary are
available to achieve tracking with a high degree of
precision in 2D. However, little such software is available
to achieve tracking in 3D. One major difficulty is that 3D
confocal microscopy acquisition is not typically fast
enough to assume that the beads are stationary during the
whole 3D scan. As a result, beads may move between planar
scans. Classical approaches to 3D segmentation may yield
objects that are not spherical. In this article, we propose a 3D
bead segmentation that deals with this situation.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/puybareau.19.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2019-03-13},
doi = {10.1007/978-3-030-20867-7_40}
}
@InProceedings{ regisgianas.03.poosc,
author = {Yann R\'egis-Gianas and Rapha\"el Poss},
title = {On orthogonal specialization in {C++}: dealing with
efficiency and algebraic abstraction in {V}aucanson},
booktitle = {Proceedings of the Parallel/High-performance
Object-Oriented Scientific Computing (POOSC; in conjunction
with ECOOP)},
year = 2003,
number = {FZJ-ZAM-IB-2003-09},
pages = {71--82},
editor = {J\"org Striegnitz and Kei Davis},
series = {John von Neumann Institute for Computing (NIC)},
address = {Darmstadt, Germany},
month = jul,
lrdeprojects = {Vaucanson},
abstract = {Vaucanson is a C++ generic library for weighted finite
state machine manipulation. For the sake of generality, FSM
are defined using algebraic structures such as alphabet
(for the letters), free monoid (for the words), semiring
(for the weights) and series (mapping from words to
weights). As usual, what is at stake is to maintain
efficiency while providing a high-level layer for the
writing of generic algorithms. Yet, one of the
particularities of FSM manipulation is the need of a fine
grained specialization power on an object which is both an
algebraic concept and an intensive computing machine.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/poosc03-vaucanson.pdf},
lrdenewsdate = {2003-05-26}
}
@InProceedings{ renault.13.lpar,
author = {Etienne Renault and Alexandre Duret-Lutz and Fabrice
Kordon and Denis Poitrenaud},
title = {Three {SCC}-based Emptiness Checks for Generalized
{B\"u}chi Automata},
booktitle = {Proceedings of the 19th International Conference on Logic
for Programming, Artificial Intelligence, and Reasoning
(LPAR'13)},
editor = {Ken McMillan and Aart Middeldorp and Andrei Voronkov },
year = 2013,
month = dec,
pages = {668--682},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 8312,
abstract = {The automata-theoretic approach for the verification of
linear time properties involves checking the emptiness of a
B{\"u}chi automaton. However generalized B{\"u}chi
automata, with multiple acceptance sets, are preferred when
verifying under weak fairness hypotheses. Existing
emptiness checks for which the complexity is independent of
the number of acceptance sets are all based on the
enumeration of Strongly Connected Components (SCCs). In
this paper, we review the state of the art SCC enumeration
algorithms to study how they can be turned into emptiness
checks. This leads us to define two new emptiness check
algorithms (one of them based on the Union Find data
structure), introduce new optimizations, and show that one
of these can be of benefit to a classic SCCs enumeration
algorithm. We have implemented all these variants to
compare their relative performances and the overhead
induced by the emptiness check compared to the
corresponding SCCs enumeration algorithm. Our experiments
show that these three algorithms are comparable.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.13.lpar.pdf},
lrdenewsdate = {2013-10-09},
doi = {10.1007/978-3-642-45221-5_44}
}
@InProceedings{ renault.13.tacas,
author = {Etienne Renault and Alexandre Duret-Lutz and Fabrice
Kordon and Denis Poitrenaud},
title = {Strength-Based Decomposition of the Property {B\"u}chi
Automaton for Faster Model Checking},
booktitle = {Proceedings of the 19th International Conference on Tools
and Algorithms for the Construction and Analysis of Systems
(TACAS'13)},
editor = {Nir Piterman and Scott A. Smolka},
year = 2013,
month = mar,
pages = {580--593},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 7795,
abstract = {The automata-theoretic approach for model checking of
linear-time temporal properties involves the emptiness
check of a large B{\"u}chi automaton. Specialized
emptiness-check algorithms have been proposed for the cases
where the property is represented by a weak or terminal
automaton. When the property automaton does not fall into
these categories, a general emptiness check is required.
This paper focuses on this class of properties. We refine
previous approaches by classifying strongly-connected
components rather than automata, and suggest a
decomposition of the property automaton into three smaller
automata capturing the terminal, weak, and the remaining
strong behaviors of the property. The three corresponding
emptiness checks can be performed independently, using the
most appropriate algorithm. Such a decomposition approach
can be used with any automata-based model checker. We
illustrate the interest of this new approach using explicit
and symbolic LTL model checkers.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.13.tacas.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2013-01-08},
doi = {10.1007/978-3-642-36742-7_42}
}
@PhDThesis{ renault.14.phd,
author = {Etienne Renault},
title = {{Contribution aux tests de vacuit\'e pour le model
checking explicite}},
school = {{Universit{\'e} Pierre et Marie Curie - Paris VI}},
year = 2014,
address = {Paris, France},
month = dec,
abstract = {The automata-theoretic approach to linear time
model-checking is a standard technique for formal
verification of concurrent systems. The system and the
property to check are modeled with omega-automata that
recognize infinite words. Operations over these automata
(synchronized product and emptiness checks) allow one to
determine whether the system satisfies the property or not.
In this thesis we focus on a particular type of
omega-automata that enable a concise representation of weak
fairness properties: transitions-based generalized B\"uchi
automata (TGBA). First we outline existing verification
algorithms, and we propose new efficient algorithms for
strong automata. In a second step, the analysis of the
strongly connected components of the property automaton led
us to develop a decomposition of this automata. This
decomposition focuses on multi-strength property automata
and allows a natural parallelization for already existing
model-checkers. Finally, we proposed, for the first time,
new parallel emptiness checks for generalized B\"uchi
automata. Moreover, all these emptiness checks are
lock-free, unlike those of the state-of-the-art. All these
techniques have been implemented and then evaluated on a
large benchmark.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.14.phd.pdf},
lrdenewsdate = {2014-12-05},
lrdeprojects = {Spot}
}
@InProceedings{ renault.15.tacas,
author = {Etienne Renault and Alexandre Duret-Lutz and Fabrice
Kordon and Denis Poitrenaud},
title = {Parallel Explicit Model Checking for Generalized {B\"u}chi
Automata},
booktitle = {Proceedings of the 19th International Conference on Tools
and Algorithms for the Construction and Analysis of Systems
(TACAS'15)},
editor = {Christel Baier and Cesare Tinelli},
year = 2015,
month = apr,
pages = {613--627},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = 9035,
abstract = {We present new parallel emptiness checks for LTL model
checking. Unlike existing parallel emptiness checks, these
are based on an SCC enumeration, support generalized Buchi
acceptance, and require no synchronization points nor
repair procedures. A salient feature of our algorithms is
the use of a global union-find data structure in which
multiple threads share structural information about the
automaton being checked. Our prototype implementation has
encouraging performances: the new emptiness checks have
better speedup than existing algorithms in half of our
experiments.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.15.tacas.pdf},
lrdeprojects = {Spot},
lrdenewsdate = {2015-01-13},
doi = {10.1007/978-3-662-46681-0_56}
}
@Article{ renault.16.sttt,
author = {Etienne Renault and Alexandre Duret-Lutz and Fabrice
Kordon and Denis Poitrenaud},
title = {Variations on Parallel Explicit Model Checking for
Generalized {B\"u}chi Automata},
journal = {International Journal on Software Tools for Technology
Transfer (STTT)},
year = 2017,
note = {First published online on 26 April 2016.},
volume = 19,
number = 6,
pages = {653--673},
month = apr,
publisher = {Springer},
lrdeprojects = {Spot},
lrdenewsdate = {2015-10-26},
abstract = { We present new parallel explicit emptiness checks for LTL
model checking. Unlike existing parallel emptiness checks,
these are based on a Strongly Connected Component (SCC)
enumeration, support generalized {B\"u}chi acceptance, and
require no synchronization points nor recomputing
procedures. A salient feature of our algorithms is the use
of a global union-find data structure in which multiple
threads share structural information about the automaton
checked. Besides these basic algorithms, we present one
architectural variant isolating threads that write to the
union-find, and one extension that decomposes the automaton
based on the strength of its SCCs to use more optimized
emptiness checks. The results from an extensive
experimentation of our algorithms and their variations show
encouraging performances, especially when the decomposition
technique is used.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.16.sttt.pdf},
doi = {10.1007/s10009-016-0422-5}
}
@InProceedings{ renault.18.vecos,
author = {Etienne Renault},
title = {Improving Parallel State-Space Exploration Using Genetic
Algorithms},
booktitle = {Proceedings of the 12th International Conference on
Verification and Evaluation of Computer and Communication
Systems (VECOS'18)},
editor = {Mohamed Faouzi Atig and Saddek Bensalem and Simon Bliudze
and Bruno Monsuez},
address = {Grenoble, France},
year = 2018,
month = sep,
pages = {133--149},
publisher = {Springer},
series = {Lecture Notes in Computer Science},
volume = {11181},
abstract = { The verification of temporal properties against a given
system may require the exploration of its full state space.
In explicit model-checking this exploration uses a
Depth-First-Search (DFS) and can be achieved with multiple
randomized threads to increase performance. Nonetheless the
topology of the state-space and the exploration order can
cap the speedup up to a certain number of threads. This
paper proposes a new technique that aims to tackle this
limitation by generating artificial initial states, using
genetic algorithms. Threads are then launched from these
states and thus explore different parts of the state space.
Our prototype implementation runs 10\% faster than
state-of-the-art algorithms. These results demonstrate that
this novel approach is worth considering as a way to
overcome existing limitations.},
lrdeprojects = {Spot},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.18.vecos.pdf},
lrdenewsdate = {2018-06-14}
}
@Article{ renault.20.isse,
author = {Etienne Renault},
title = {Improving swarming using genetic algorithms},
journal = {Innovations in Systems and Software Engineering: a NASA
journal (ISSE)},
year = 2020,
volume = {16},
number = {2},
pages = {143--159},
month = jun,
publisher = {Springer},
lrdeprojects = {Spot},
lrdenewsdate = {2020-06-02},
abstract = { The verification of temporal properties against a given
system may require the exploration of its full state space.
In explicit model checking, this exploration uses a
depth-first search and can be achieved with multiple
randomized threads to increase performance. Nonetheless,
the topology of the state space and the exploration order
can cap the speedup up to a certain number of threads. This
paper proposes a new technique that aims to tackle this
limitation by generating artificial initial states, using
genetic algorithms. Threads are then launched from these
states and thus explore different parts of the state space.
Our prototype implementation is 10\% faster than
state-of-the-art algorithms on a general benchmark and 40\%
on a specialized benchmark. Even if we expected a decrease
in an order of magnitude, these results are still
encouraging since they suggest a new way to handle existing
limitations. Empirically, our technique seems well suited
for ``linear topology'', i.e., the one we can obtain when
combining model checking algorithms with partial-order
reduction techniques. },
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renault.20.isse.pdf},
doi = {10.1007/s11334-020-00362-7}
}
@InProceedings{ renkin.20.atva,
author = {Florian Renkin and Alexandre Duret-Lutz and Adrien
Pommellet},
title = {Practical ``Paritizing'' of {E}merson--{L}ei Automata},
booktitle = {Proceedings of the 18th International Symposium on
Automated Technology for Verification and Analysis
(ATVA'20)},
year = {2020},
volume = {12302},
series = {Lecture Notes in Computer Science},
pages = {127--143},
month = oct,
publisher = {Springer},
abstract = {We introduce a new algorithm that takes a
\emph{Transition-based Emerson-Lei Automaton} (TELA), that
is, an $\omega$-automaton whose acceptance condition is an
arbitrary Boolean formula on sets of transitions to be seen
infinitely or finitely often, and converts it into a
\emph{Transition-based Parity Automaton} (TPA). To reduce
the size of the output TPA, the algorithm combines and
optimizes two procedures based on a \emph{latest appearance
record} principle, and introduces a \emph{partial
degeneralization}. Our motivation is to use this algorithm
to improve our LTL synthesis tool, where producing
deterministic parity automata is an intermediate step.},
lrdeprojects = {Spot},
lrdenewsdate = {2020-07-07},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/renkin.20.atva.pdf},
doi = {10.1007/978-3-030-59152-6_7}
}
@InProceedings{ ricou.07.adass,
author = {Olivier Ricou and Anthony Baillard and Emmanuel Bertin and
Frederic Magnard and Chiara Marmo and Yannick Mellier},
title = {Web services at {TERAPIX}},
booktitle = {Proceedings of the XVII conference on Astronomical Data
Analysis Software \& Systems (ADASS)},
month = sep,
year = 2007,
abstract = {We present an implementation of V.O.-compliant web
services built around software tools developed at the
TERAPIX centre. These services allow to operate from a
remote site several pipeline tasks dedicated to
astronomical data processing on the TERAPIX cluster,
including the latest EFIGI morphological analysis tool.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/ricou.07.adass.pdf},
lrdenewsdate = {2007-09-23}
}
@InProceedings{ ricou.07.eceg,
author = {Olivier Ricou},
title = {10 years of confrontation between {French} {Internet}
users and their successive governments},
booktitle = {Proceedings of the 7th European Conference on e-Government
(ECEG)},
month = jun,
year = 2007,
abstract = {This paper is a testimony on the relations between the
Internet users and their governments in France during the
last decade. It shows the complexity of communication
between two worlds that are strangers to each other. Since
most of the confrontation occurred over law proposals, it
analyses their impact on Internet users and focuses on two
examples. These examples show the failure of Internet as a
political medium. French politicians do not seem to want an
active participation of the citizens in decision-making
processes. In order to end this paper on an optimistic
note, the last section enumerates the achievements of
e-government which contributed to preparing for a better
democracy by increasing transparency, accountability, and
education. This might push citizens to ask for more. },
lrdepaper = {http://www.lrde.epita.fr/dload/papers/ricou.07.eceg.pdf},
lrdekeywords = {LRDE}
}
@InProceedings{ ricou.08.eceg,
author = {Olivier Ricou},
title = {A Survey of {French} Local e-Democracy},
booktitle = {Proceedings of the 8th European Conference on e-Government
(ECEG)},
month = jul,
year = 2008,
abstract = {Since the end of the last century, the Internet has shown
that it is a different media, a media of citizen
journalists. This paper surveys e-democratic tools used at
the local level in France in order to see how the Internet
can change our democracy and people's participation. It
describes the official tools provided by municipalities and
administrations as well as citizens' tools, like blogs,
which become more and more important in today's democratic
debate. It analyses how they help for more transparency,
accountability and participation, which might lead to
define new democratic rules.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/ricou.08.eceg.pdf}
}
@InProceedings{ rivet.19.isbi,
author = {Julie Rivet and Guillaume Tochon and Serge Meimon and
Michel Paques and Michael Atlan and Thierry G\'eraud},
title = {Motion Compensation in Digital Holography for Retinal
Imaging},
booktitle = {Proceedings of the IEEE International Symposium on
Biomedical Imaging (ISBI)},
month = apr,
year = 2019,
address = {Venice, Italy},
pages = {1428--1431},
doi = {10.1109/ISBI.2019.8759564},
abstract = {The measurement of medical images can be hindered by blur
and distortions caused by the physiological motion.
Specially for retinal imaging, images are greatly affected
by sharp movements of the eye. Stabilization methods have
been developed and applied to state-of-the-art retinal
imaging modalities; here we intend to adapt them for
coherent light detection schemes. In this paper, we
demonstrate experimentally cross-correlation-based lateral
and axial motion compensation in laser Doppler imaging and
optical coherence tomography by digital holography. Our
methods improve lateral and axial image resolution in those
innovative instruments and allow a better visualization
during motion.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/rivet.19.isbi.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-12-19}
}
@InProceedings{ rivet.19.spie,
author = {Julie Rivet and Guillaume Tochon and Serge Meimon and
Michel P\^aques and Thierry G\'eraud and Michael Atlan},
title = {Deep Neural Networks for Aberrations Compensation in
Digital Holographic Imaging of the Retina},
booktitle = {Proceedings of the SPIE Conference on Adaptive Optics and
Wavefront Control for Biological Systems V},
month = feb,
year = 2019,
address = {San Francisco, CA, USA},
doi = {10.1117/12.2509711},
abstract = {In computational imaging by digital holography, lateral
resolution of retinal images is limited to about 20 microns
by the aberrations of the eye. To overcome this limitation,
the aberrations have to be canceled. Digital aberration
compensation can be performed by post-processing of
full-field digital holograms. Aberration compensation was
demonstrated from wavefront measurement by reconstruction
of digital holograms in subapertures, and by measurement of
a guide star hologram. Yet, these wavefront measurement
methods have limited accuracy in practice. For holographic
tomography of the human retina, image reconstruction was
demonstrated by iterative digital aberration compensation,
by minimization of the local entropy of speckle-averaged
tomographic volumes. However image-based aberration
compensation is time-consuming, preventing real-time image
rendering. We are investigating a new digital aberration
compensation scheme with a deep neural network to
circumvent the limitations of these aberrations correction
methods. To train the network, 28.000 anonymized images of
eye fundus from patients of the 15-20 hospital in Paris
have been collected, and synthetic interferograms have been
reconstructed digitally by simulating the propagation of
eye fundus images recorded with standard cameras. With a
U-Net architecture, we demonstrate defocus correction of
these complex-valued synthetic interferograms. Other
aberration orders will be corrected with the same method,
to improve lateral resolution up to the diffraction limit
in digital holographic imaging of the retina.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/rivet.19.spie.pdf},
lrdeslides = {http://www.lrde.epita.fr/dload/papers/rivet.19.spie.slides.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-10-25}
}
@PhDThesis{ rivet.20.phd,
author = {Julie Rivet},
title = {Non-iterative methods for image improvement in digital
holography of the retina},
school = {Sorbonne Universit\'e},
year = 2020,
address = {Paris, France},
month = jul,
abstract = {With the increase of the number of people with moderate to
severe visual impairment, monitoring and treatment of
vision disorders have become major issues in medicine
today. At the Quinze-Vingts national ophthalmology hospital
in Paris, two optical benches have been settled in recent
years to develop two real-time digital holography
techniques for the retina: holographic optical coherence
tomography (OCT) and laser Doppler holography. The first
reconstructs three-dimensional images, while the second
allows visualization of blood flow in vessels. Besides
problems inherent to the imaging system itself, optical
devices are subject to external disturbance, bringing also
difficulties in imaging and loss of accuracy. The main
obstacles these technologies face are eye motion and eye
aberrations. In this thesis, we have introduced several
methods for image quality improvement in digital
holography, and validated them experimentally. The
resolution of holographic images has been improved by
robust non-iterative methods: lateral and axial tracking
and compensation of translation movements, and measurement
and compensation of optical aberrations. This allows us to
be optimistic that structures on holographic images of the
retina will be more visible and sharper, which could
ultimately provide very valuable information to
clinicians.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/rivet.20.phd.pdf},
lrdenewsdate = {2020-07-17},
lrdeprojects = {Olena}
}
@InProceedings{ robert-seidowsky.15.visapp,
author = {Myriam Robert-Seidowsky and Jonathan Fabrizio and
S\'everine Dubuisson},
title = {{TextTrail}: {A} Robust Text Tracking Algorithm In Wild
Environments},
booktitle = {Proceedings of the 10th International Conference on
Computer Vision Theory and Applications (VISAPP)},
month = mar,
year = {2015},
pages = {268--276},
abstract = {In this paper, we propose TextTrail, a robust new
algorithm dedicated to text tracking in uncontrolled
environments (strong motion of camera and objects, partial
occlusions, blur, etc.). It is based on a particle filter
framework whose correction step has been improved. First,
we compare some likelihood functions and introduce a new
one that integrates tangent distance. We show that the
likelihood function has a strong influence on the text
tracking performances. Secondly, we compare our tracker
with another and finally present an example of application.
TextTrail has been tested on real video sequences and has
proven its efficiency. In particular, it can track texts in
complex situations starting from only one detection step
without needing another one to reinitialize the tracking
model.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/robert-seidowsky.15.visapp.pdf},
lrdeprojects = {Olena},
doi = {10.5220/0005292002680276}
}
@InProceedings{ royer.17.icdar,
title = {Benchmarking Keypoint Filtering Approaches for Document
Image Matching},
author = {E. Royer and J. Chazalon and M. Rusi{\~n}ol and F.
Bouchara},
booktitle = {Proceedings of the 14th International Conference on
Document Analysis and Recognition (ICDAR)},
year = {2017},
month = nov,
pages = {343--348},
address = {Kyoto, Japan},
abstract = {Reducing the amount of keypoints used to index an image is
particularly interesting to control processing time and
memory usage in real-time document image matching
applications, like augmented documents or smartphone
applications. This paper benchmarks two keypoint selection
methods on a task consisting of reducing keypoint sets
extracted from document images, while preserving detection
and segmentation accuracy. We first study the different
forms of keypoint filtering, and we introduce the use of
the CORE selection method on keypoints extracted from
document images. Then, we extend a previously published
benchmark by including evaluations of the new method, by
adding the SURF-BRISK detection/description scheme, and by
reporting processing speeds. Evaluations are conducted on
the publicly available dataset of ICDAR2015 SmartDOC
challenge 1. Finally, we prove that reducing the original
keypoint set is always feasible and can be beneficial not
only to processing speed but also to accuracy.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/royer.17.icdar.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2017-07-04},
doi = {10.1109/ICDAR.2017.64}
}
@InProceedings{ roynard.18.rrpr,
title = {An Image Processing Library in Modern {C++}: Getting
Simplicity and Efficiency with Generic Programming},
author = {Micha\"el Roynard and Edwin Carlinet and Thierry G\'eraud},
booktitle = {Proceedings of the 2nd Workshop on Reproducible Research
in Pattern Recognition (RRPR 2018)},
volume = {11455},
series = {Lecture Notes in Computer Science},
pages = {121--137},
year = {2019},
doi = {10.1007/978-3-030-23987-9_12},
abstract = {As there are as many clients as many usages of an Image
Processing library, each one may expect different services
from it. Some clients may look for efficient and
production-quality algorithms, some may look for a large
tool set, while others may look for extensibility and
genericity to inter-operate with their own code base... but
in most cases, they want a simple-to-use and stable
product. For a C++ Image Processing library designer, it is
difficult to conciliate genericity, efficiency and
simplicity at the same time. Modern C++ (post 2011) brings
new features for library developers that will help
designing a software solution combining those three points.
In this paper, we develop a method using these facilities
to abstract the library components and augment the
genericity of the algorithms. Furthermore, this method is
not specific to image processing; it can be applied to any
C++ scientific library.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/roynard.18.rrpr.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2018-10-25}
}
@Article{ rusinol.17.mtap,
title = {Augmented Songbook: an Augmented Reality Educational
Application for Raising Music Awareness},
author = {Rusi{\~{n}}ol, Mar{\c{c}}al and Chazalon, Joseph and
Diaz-Chito, Katerine},
journal = {Multimedia Tools and Applications},
year = {2018},
volume = {77},
number = {11},
pages = {13773--13798},
month = jun,
abstract = {This paper presents the development of an Augmented
Reality mobile application which aims at sensibilizing
young children to abstract concepts of music. Such concepts
are, for instance, the musical notation or the concept of
rhythm. Recent studies in Augmented Reality for education
suggest that such technologies have multiple benefits for
students, including younger ones. As mobile document image
acquisition and processing gains maturity on mobile
platforms, we explore how it is possible to build a
markerless and real-time application to augment the
physical documents with didactical animations and
interactive content. Given a standard image processing
pipeline, we compare the performance of different local
descriptors at two key stages of the process. Results
suggest alternatives to the SIFT local descriptors,
regarding result quality and computational efficiency,
both for document model identification and perspective
transform estimation. All experiments are performed on an
original and public dataset we introduce here.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/rusinol.17.mtap.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
doi = {10.1007/s11042-017-4991-4},
lrdenewsdate = {2017-06-29}
}
@Article{ sekuboyina.21.media,
author = {Anjany Sekuboyina and Malek E. Husseini and Amirhossein
Bayat and Maximilian L\"offler and Hans Liebl and Hongwei
Li and Giles Tetteh and Jan Kuka\v{c}ka and Christian Payer
and Darko Stern and Martin Urschler and Maodong Chen and
Dalong Cheng and Nikolas Lessmann and Yujin Hu and Tianfu
Wang and Dong Yang and Daguang Xu and Felix Ambellan
and Tamaz Amiranashvili and Moritz Ehlke and Hans Lamecker
and Sebastian Lehnert and Marilia Lirio and Nicol\'as
{P\'erez de Olaguer} and Heiko Ramm and Manish Sahu and
Alexander Tack and Stefan Zachow and Tao Jiang and Xinjun
Ma and Christoph Angerman and Xin Wang and Kevin Brown and
Matthias Wolf and Alexandre Kirszenberg and \'Elodie
Puybareau and Di Chen and Yiwei Bai and Brandon H. Rapazzo
and Timyoas Yeah and Amber Zhang and Shangliang Xu and Feng
Houa and Zhiqiang He and Chan Zeng and Zheng Xiangshang and
Xu Liming and Tucker J. Netherton and Raymond P. Mumme and
Laurence E. Court and Zixun Huang and Chenhang He and
Li-Wen Wang and Sai Ho Ling and L\^e Duy Hu\`ynh and
Nicolas Boutry and Roman Jakubicek and Jiri Chmelik and
Supriti Mulay and Mohanasankar Sivaprakasam and Johannes C.
Paetzold and Suprosanna Shit and Ivan Ezhov and Benedikt
Wiestler and Ben Glocker and Alexander Valentinitsch and
Markus Rempfler and Bj\"orn H. Menze and Jan S. Kirschke},
title = {{VerSe}: {A} Vertebrae Labelling and Segmentation
Benchmark for Multi-detector {CT} Images},
journal = {Medical Image Analysis},
number = {102166},
year = {2021},
month = jul,
doi = {10.1016/j.media.2021.102166},
abstract = {Vertebral labelling and segmentation are two fundamental
tasks in an automated spine processing pipeline. Reliable
and accurate processing of spine images is expected to
benefit clinical decision support systems for diagnosis,
surgery planning, and population-based analysis of spine
and bone health. However, designing automated algorithms
for spine processing is challenging predominantly due to
considerable variations in anatomy and acquisition
protocols and due to a severe shortage of publicly
available data. Addressing these limitations, the Large
Scale Vertebrae Segmentation Challenge (VerSe) was
organised in conjunction with the International Conference
on Medical Image Computing and Computer Assisted
Intervention (MICCAI) in 2019 and 2020, with a call for
algorithms tackling the labelling and segmentation of
vertebrae. Two datasets containing a total of 374
multi-detector CT scans from 355 patients were prepared and
4505 vertebrae have individually been annotated at voxel
level by a human-machine hybrid algorithm
(\url{https://osf.io/nqjyw/}, \url{https://osf.io/t98fz/}).
A total of 25 algorithms were benchmarked on these
datasets. In this work, we present the results of this
evaluation and further investigate the performance
variation at the vertebra level, scan level, and different
fields of view. We also evaluate the generalisability of
the approaches to an implicit domain shift in data by
evaluating the top-performing algorithms of one challenge
iteration on data from the other iteration. The principal
takeaway from VerSe: the performance of an algorithm in
labelling and segmenting a spine scan hinges on its ability
to correctly identify vertebrae in cases of rare anatomical
variations. The VerSe content and code can be accessed at:
\url{https://github.com/anjany/verse}.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/sekuboyina.21.media.pdf},
lrdeprojects = {Olena},
lrdenewsdate = {2021-07-22}
}
%% NOTE(review): given names expanded from initials (file convention is
%% full names). The surname appears as "Sennoussaoui" here (and in the
%% key) but the author usually publishes as "Senoussaoui" -- verify.
@InProceedings{ sennoussaoui.12.odyssey,
  author       = {Mohammed Sennoussaoui and Najim Dehak and Patrick Kenny
                  and R\'eda Dehak and Pierre Dumouchel},
  title        = {First Attempt at {Boltzmann} Machines for Speaker
                  Recognition},
  booktitle    = {Odyssey Speaker and Language Recognition Workshop},
  year         = 2012,
  address      = {Singapore},
  month        = jun,
  abstract     = {Frequently organized by NIST, Speaker Recognition
                  evaluations (SRE) show high accuracy rates. This
                  demonstrates that this field of research is mature. The
                  latest progresses came from the proposition of low
                  dimensional i-vectors representation and new classifiers
                  such as Probabilistic Linear Discriminant Analysis (PLDA)
                  or Cosine Distance classifier. In this paper, we study some
                  variants of Boltzmann Machines (BM). BM is used in image
                  processing but still unexplored in Speaker Verification
                  (SR). Given two utterances, the SR task consists to decide
                  whether they come from the same speaker or not. Based on
                  this definition, we can illustrate SR as two-classes (same
                  vs. different speakers classes) classification problem. Our
                  first attempt of using BM is to model each class with one
                  generative Restricted Boltzmann Machine (RBM) with
                  symmetric Log-Likelihood Ratio on both models as decision
                  score. This new approach achieved an Equal Error Rate (EER)
                  of 7\% and a minimum Detection Cost Function (DCF) of 0.035
                  on the female content of the NIST SRE 2008. The objective
                  of this research is mainly to explore a new paradigm i.e.
                  BM without necessarily obtaining better performance than
                  the state-of-the-art system.}
}
%% ELS 2012 (Zadar) was the 5th European Lisp Symposium; booktitle
%% normalized to the "Proceedings of the Nth European Lisp Symposium"
%% form used by the other ELS entries in this file.
@InProceedings{ senta.12.els,
  author       = {Laurent Senta and Christopher Chedeau and Didier Verna},
  title        = {Generic Image Processing with {C}limb},
  booktitle    = {Proceedings of the 5th European Lisp Symposium},
  doi          = {10.5281/zenodo.3248934},
  year         = 2012,
  address      = {Zadar, Croatia},
  month        = may,
  abstract     = {We present Climb, an experimental generic image processing
                  library written in Common Lisp. Most image processing
                  libraries are developed in static languages such as C or
                  C++ (often for performance reasons). The motivation behind
                  Climb is to provide an alternative view of the same domain,
                  from the perspective of dynamic languages. More precisely,
                  the main goal of Climb is to explore the dynamic way(s) of
                  addressing the question of genericity, while applying the
                  research to a concrete domain. Although still a prototype,
                  Climb already features several levels of genericity and
                  ships with a set of built-in algorithms as well as means to
                  combine them.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/senta.12.els.pdf},
  lrdekeywords = {Software engineering},
  lrdeprojects = {Climb}
}
%% NOTE(review): given names expanded from initials ("S. Shum",
%% "J. Glass") to match the file convention of full names -- verify.
@Article{ shum.13.taslp,
  author       = {Stephen Shum and Najim Dehak and R\'eda Dehak and James
                  Glass},
  title        = {Unsupervised Methods for Speaker Diarization: An
                  Integrated and Iterative Approach},
  journal      = {IEEE Transactions on Audio, Speech, and Language
                  Processing},
  year         = 2013,
  volume       = 21,
  number       = 10,
  pages        = {2015--2028},
  month        = oct,
  abstract     = {In speaker diarization, standard approaches typically
                  perform speaker clustering on some initial segmentation
                  before refining the segment boundaries in a re-segmentation
                  step to obtain a final diarization hypothesis. In this
                  paper, we integrate an improved clustering method with an
                  existing re-segmentation algorithm and, in iterative
                  fashion, optimize both speaker cluster assignments and
                  segmentation boundaries jointly. For clustering, we extend
                  our previous research using factor analysis for speaker
                  modeling. In continuing to take advantage of the
                  effectiveness of factor analysis as a front-end for
                  extracting speaker-specific features (i.e., i-vectors), we
                  develop a probabilistic approach to speaker clustering by
                  applying a Bayesian Gaussian Mixture Model (GMM) to
                  principal component analysis (PCA)-processed i-vectors. We
                  then utilize information at different temporal resolutions
                  to arrive at an iterative optimization scheme that, in
                  alternating between clustering and re-segmentation steps,
                  demonstrates the ability to improve both speaker cluster
                  assignments and segmentation boundaries in an unsupervised
                  manner. Our proposed methods attain results that are
                  comparable to those of a state-of-the-art benchmark set on
                  the multi-speaker CallHome telephone corpus. We further
                  compare our system with a Bayesian nonparametric approach
                  to diarization and attempt to reconcile their differences
                  in both methodology and performance.},
  lrdenewsdate = {2013-06-07}
}
%% Fixed editor name: the editor-in-chief of "Comprehensive Remote
%% Sensing" (Elsevier, 2017) is Shunlin Liang (was: "Shunling").
@InCollection{ tochon.17.chapter,
  author       = {Guillaume Tochon and Mauro {Dalla Mura} and {Miguel-Angel}
                  Veganzones and Silvia Valero and Philippe Salembier and
                  Jocelyn Chanussot},
  title        = {Advances in Utilization of Hierarchical Representations in
                  Remote Sensing Data Analysis},
  booktitle    = {Comprehensive Remote Sensing, 1st Edition},
  publisher    = {Elsevier},
  editor       = {Shunlin Liang},
  year         = {2017},
  month        = nov,
  volume       = {2},
  chapter      = {5},
  pages        = {77--107},
  abstract     = {The latest developments in sensor design for remote
                  sensing and Earth observation purposes are leading to
                  images always more complex to analyze. Low-level
                  pixel-based processing is becoming unadapted to efficiently
                  handle the wealth of information they contain, and higher
                  levels of abstraction are required. Region-based
                  representations intend to exploit images as collections of
                  regions of interest bearing some semantic meaning, thus
                  easing their interpretation. However, the scale of analysis
                  of the images has to be fixed beforehand, which can be
                  problematic as different applications may not require the
                  same scale of analysis. On the other hand, hierarchical
                  representations are multiscale descriptions of images, as
                  they encompass in their structures all potential regions of
                  interest, organized in a hierarchical manner. Thus, they
                  allow to explore the image at various levels of details and
                  can serve as a single basis for many different further
                  processings. Thanks to its flexibility, the binary
                  partition tree (BPT) representation is one of the most
                  popular hierarchical representations, and has received a
                  lot of attention lately. This article draws a comprehensive
                  review of the most recent works involving BPT
                  representations for various remote sensing data analysis
                  tasks, such as image segmentation and filtering, object
                  detection or hyperspectral classification, and anomaly
                  detection.},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/tochon.17.chapter.pdf},
  lrdenewsdate = {2017-11-08}
}
%% TGRS journal article: object (chemical gas plume) tracking via
%% hierarchical decomposition of hyperspectral video sequences.
@Article{ tochon.17.tgrs,
  author       = {Guillaume Tochon and Jocelyn Chanussot and Mauro {Dalla
                  Mura} and Andrea Bertozzi},
  title        = {Object tracking by hierarchical decomposition of
                  hyperspectral video sequences: {A}pplication to chemical
                  gas plume tracking},
  journal      = {IEEE Transactions on Geoscience and Remote Sensing},
  volume       = 55,
  number       = 8,
  pages        = {4567--4585},
  month        = aug,
  year         = 2017,
  abstract     = {It is now possible to collect hyperspectral video
                  sequences at a near real-time frame rate. The wealth of
                  spectral, spatial and temporal information of those
                  sequences is appealing for various applications, but
                  classical video processing techniques must be adapted to
                  handle the high dimensionality and huge size of the data to
                  process. In this article, we introduce a novel method based
                  on the hierarchical analysis of hyperspectral video
                  sequences to perform object tracking. This latter operation
                  is tackled as a sequential object detection process,
                  conducted on the hierarchical representation of the
                  hyperspectral video frames. We apply the proposed
                  methodology to the chemical gas plume tracking scenario and
                  compare its performances with state-of-the-art methods, for
                  two real hyperspectral video sequences, and show that the
                  proposed approach performs at least equally well.},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/tochon.17.tgrs.pdf},
  lrdenewsdate = {2017-04-20},
  doi          = {10.1109/TGRS.2017.2694159}
}
%% Series name normalized to "Lecture Notes in Computer Science" (the
%% form used by the other LNCS entries; trailing "Series" dropped),
%% official proceedings title uses "Its Applications", and the missing
%% LNCS volume (11564, per the book DOI prefix 978-3-030-20867-7) added.
@InProceedings{ tochon.19.ismm,
  author       = {Guillaume Tochon and Mauro {Dalla Mura} and Jocelyn
                  Chanussot},
  title        = {Constructing a braid of partitions from hierarchies of
                  partitions},
  booktitle    = {Mathematical Morphology and Its Applications to Signal and
                  Image Processing -- Proceedings of the 14th International
                  Symposium on Mathematical Morphology (ISMM)},
  year         = 2019,
  series       = {Lecture Notes in Computer Science},
  volume       = {11564},
  address      = {Saarbr\"ucken, Germany},
  publisher    = {Springer},
  pages        = {1--12},
  month        = jul,
  lrdeprojects = {Olena},
  abstract     = {Braids of partitions have been introduced in a theoretical
                  framework as a generalization of hierarchies of partitions,
                  but practical guidelines to derive such structures remained
                  an open question. In a previous work, we proposed a
                  methodology to build a braid of partitions by
                  experimentally composing cuts extracted from two
                  hierarchies of partitions, notably paving the way for the
                  hierarchical representation of multimodal images. However,
                  we did not provide the formal proof that our proposed
                  methodology was yielding a braid structure. We remedy to
                  this point in the present paper and give a brief insight on
                  the structural properties of the resulting braid of
                  partitions.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/tochon.19.ismm.pdf},
  lrdekeywords = {Image},
  lrdenewsdate = {2019-03-13},
  doi          = {10.1007/978-3-030-20867-7_9}
}
%% Pattern Recognition article: journal-length treatment of the
%% braid-of-partitions construction also discussed in tochon.19.ismm.
@Article{ tochon.19.pr,
  author       = {Guillaume Tochon and Mauro {Dalla Mura} and Miguel Angel
                  Veganzones and Thierry G\'eraud and Jocelyn Chanussot},
  title        = {Braids of Partitions for the Hierarchical Representation
                  and Segmentation of Multimodal Images},
  journal      = {Pattern Recognition},
  volume       = 95,
  pages        = {162--172},
  year         = 2019,
  month        = nov,
  abstract     = {Hierarchical data representations are powerful tools to
                  analyze images and have found numerous applications in
                  image processing. When it comes to multimodal images
                  however, the fusion of multiple hierarchies remains an open
                  question. Recently, the concept of braids of partitions has
                  been proposed as a theoretical tool and possible solution
                  to this issue. In this paper, we demonstrate the relevance
                  of the braid structure for the hierarchical representation
                  of multimodal images. We first propose a fully operable
                  procedure to build a braid of partitions from two
                  hierarchical representations. We then derive a framework
                  for multimodal image segmentation, relying on an energetic
                  minimization scheme conducted on the braid structure. The
                  proposed approach is investigated on different multimodal
                  images scenarios, and the obtained results confirm its
                  ability to efficiently handle the multimodal information to
                  produce more accurate segmentation outputs.},
  lrdeprojects = {Olena},
  lrdekeywords = {Image},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/tochon.19.pr.pdf},
  lrdenewsdate = {2019-07-01}
}
%% Booktitle normalized to the "Proceedings of the Nth European Lisp
%% Symposium" form used by the other ELS entries.
%% NOTE(review): lrdestatus = {accepted} looks stale for a published
%% 2019 paper -- verify whether the field should be dropped.
@InProceedings{ valais.19.els,
  author       = {L\'eo Valais and Jim Newton and Didier Verna},
  title        = {Implementing Baker's \texttt{SUBTYPEP} decision procedure},
  address      = {Genova, Italy},
  booktitle    = {Proceedings of the 12th European Lisp Symposium},
  year         = 2019,
  month        = apr,
  abstract     = {We present here our partial implementation of Baker's
                  decision procedure for SUBTYPEP. In his article ``A
                  Decision Procedure for Common Lisp's SUBTYPEP Predicate'',
                  he claims to provide implementation guidelines to obtain a
                  SUBTYPEP more accurate and as efficient as the average
                  implementation. However, he did not provide any serious
                  implementation and his description is sometimes obscure. In
                  this paper we present our implementation of part of his
                  procedure, only supporting primitive types, CLOS classes,
                  member, range and logical type specifiers. We explain in
                  our words our understanding of his procedure, with much
                  more detail and examples than in Baker's article. We
                  therefore clarify many parts of his description and fill in
                  some of its gaps or omissions. We also argue in favor and
                  against some of his choices and present our alternative
                  solutions. We further provide some proofs that might be
                  missing in his article and some early efficiency results.
                  We have not released any code yet but we plan to open
                  source it as soon as it is presentable.},
  lrdekeywords = {type systems, Common Lisp},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/valais.19.els.pdf},
  lrdeslides   = {http://www.lrde.epita.fr/dload/papers/valais.19.els.slides.pdf},
  lrdeprojects = {Climb},
  lrdestatus   = {accepted},
  lrdenewsdate = {2019-04-01}
}
%% Publisher trimmed to {Springer}: "Springer, Cham" is a publisher-site
%% export artifact (location mixed into the name); the file's other
%% Springer entries use the bare name.
@InProceedings{ vallade.20.nfm,
  author       = {Vincent Vallade and Ludovic {Le Frioux} and Souheib Baarir
                  and Julien Sopena and Fabrice Kordon},
  title        = {On the Usefulness of Clause Strengthening in Parallel
                  {SAT} Solving},
  booktitle    = {Proceedings of the 12th NASA Formal Methods Symposium
                  (NFM'20)},
  year         = 2020,
  month        = aug,
  volume       = {12229},
  pages        = {222--229},
  series       = {Lecture Notes in Computer Science},
  publisher    = {Springer},
  abstract     = {In the context of parallel SATisfiability solving, this
                  paper presents an implementation and evaluation of a clause
                  strengthening algorithm. The developed component can be
                  easily combined with (virtually) any CDCL-like SAT solver.
                  Our implementation is integrated as a part of Painless, a
                  generic and modular framework for building parallel SAT
                  solvers.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/vallade.20.nfm.pdf},
  lrdekeywords = {Parallel satisfiability, tool, strengthening, clause
                  sharing, portfolio, divide-and-conquer}
}
%% Straight double quotes in the abstract replaced with proper LaTeX
%% quotes (``...''): a literal " typesets as a closing quote on both
%% sides. Publisher trimmed to {Springer} (export artifact, see the
%% file's other Springer entries).
@InProceedings{ vallade.20.sat,
  author       = {Vincent Vallade and Ludovic {Le Frioux} and Souheib Baarir
                  and Julien Sopena and Vijay Ganesh and Fabrice Kordon},
  title        = {Community and {LBD}-based Clause Sharing Policy for
                  Parallel {SAT} Solving},
  booktitle    = {Proceedings of the 23rd International Conference on Theory
                  and Applications of Satisfiability Testing (SAT'20)},
  year         = 2020,
  month        = jun,
  volume       = {12178},
  pages        = {11--27},
  series       = {Lecture Notes in Computer Science},
  publisher    = {Springer},
  abstract     = {Modern parallel SAT solvers rely heavily on effective
                  clause sharing policies for their performance. The core
                  problem being addressed by these policies can be succinctly
                  stated as ``the problem of identifying high-quality learnt
                  clauses'' that when shared between the worker nodes of
                  parallel solvers results in improved performance than
                  otherwise. The term ``high-quality clauses'' is often
                  defined in terms of metrics that solver designers have
                  identified over years of empirical study. Some of the more
                  well-known metrics to identify high-quality clauses for
                  sharing include clause length, literal block distance
                  (LBD), and clause usage in propagation. In this paper, we
                  propose a new metric aimed at identifying high-quality
                  learnt clauses and a concomitant clause-sharing policy
                  based on a combination of LBD and community structure of
                  Boolean formulas. The concept of community structure has
                  been proposed as a possible explanation for the
                  extraordinary performance of SAT solvers in industrial
                  instances. Hence, it is a natural candidate as a basis for
                  a metric to identify high-quality clauses. To be more
                  precise, our metric identifies clauses that have low LBD
                  and low community number as ones that are high-quality for
                  applications such as verification and testing. The
                  community number of a clause C measures the number of
                  different communities of a formula that the variables in C
                  span. We perform extensive empirical analysis of our metric
                  and clause-sharing policy, and show that our method
                  significantly outperforms state-of-the-art techniques on
                  the benchmark from the parallel track of the last four SAT
                  competitions.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/vallade.20.sat.pdf},
  lrdekeywords = {Parallel satisfiability, clause sharing, community
                  structure}
}
%% Corporate author wrapped in an extra pair of braces: with a single
%% brace level BibTeX parses "group" as the surname and the rest as
%% given names, which breaks sorting and label generation.
@TechReport{ vaucanson.04.techrep,
  author       = {{The \textsc{Vaucanson} group}},
  title        = {Proposal: an {XML} representation for automata},
  institution  = {EPITA Research and Development Laboratory (LRDE)},
  year         = 2004,
  number       = 0414,
  address      = {France},
  month        = nov,
  url          = {http://www.lrde.epita.fr/cgi-bin/twiki/view/Publications/200414-TR},
  lrdeprojects = {Vaucanson},
  lrdepaper    = {http://www.lrde.epita.fr/dload/techrep/xml_proposal_2004.pdf}
}
%% VSMM 2000 (Intelligent Environments Workshop) paper on action
%% recognition for human-machine interaction in virtual environments;
%% describes the TOASt demonstrator.
@InProceedings{ verna.00.vsmm,
  author       = {Didier Verna},
  title        = {Action recognition: how intelligent virtual environments
                  can ease human-machine interaction},
  booktitle    = {Proceedings of the 6th International Conference on Virtual
                  Systems and MultiMedia (VSMM)---Intelligent Environments
                  Workshop},
  pages        = {703--713},
  year         = 2000,
  address      = {Gifu, Japan},
  month        = oct,
  publisher    = {IOS Press, USA},
  isbn         = {1-58603-108-2},
  lrdeprojects = {URBI},
  abstract     = {This paper describes a research that has been conducted in
                  the field of cognitive assistance to human-machine
                  interaction in virtual environments. The idea is to design
                  a system which, bearing in mind the actions performed by
                  the operator at present and the current state of the
                  environment, attempts to determine the global operation
                  that the user is in the process of executing, and
                  eventually takes control of the same process in order to
                  complete it automatically. This idea implies the conception
                  of an action recognition mechanism based on a specific
                  knowledge representation model. This mechanism is
                  implemented in a computer demonstrator, known as the TOASt
                  system, which is also presented.}
}
%% Fixed invalid field name "editors": BibTeX's standard field is
%% "editor" (unknown fields are silently ignored, so the editor list
%% was dropped from the output).
%% NOTE(review): "N. Callas" may be a typo for Nagib Callaos, the usual
%% SCI organizer -- verify.
@InProceedings{ verna.01.sci,
  author       = {Didier Verna},
  title        = {Virtual reality and tele-operation: a common framework},
  booktitle    = {Proceedings of the 5th World Multi-Conference on
                  Systemics, Cybernetics and Informatics (SCI)---Emergent
                  Computing and Virtual Engineering},
  year         = 2001,
  volume       = 3,
  pages        = {499--504},
  address      = {Orlando, Florida, USA},
  month        = jul,
  editor       = {N. Callas and S. Esquivel and J. Burge},
  lrdeprojects = {URBI},
  abstract     = {This paper proposes an overview of a study that
                  conceptually unify the fields of virtual reality and
                  tele-operation, by analyzing the notion of ``assistance''
                  to the operator of a virtual reality or tele-operation
                  system. This analysis demonstrates that cases of assistance
                  that are usually considered to belong to virtual reality
                  are not conceptually different from what has been done in
                  tele-operation since long before virtual reality appeared.
                  With this common framework for virtual reality and
                  tele-operation, we hope to provide a theoretical
                  formalization of many ideas acquired empirically, and hence
                  a basis onto which further discussion could be undertaken
                  in a constructive manner.}
}
%% European Lisp Workshop (ECOOP 2006) paper, best paper award; same
%% Lisp-vs-C performance study as verna.06.imecs / verna.06.ijcs.
@InProceedings{ verna.06.ecoop,
  author       = {Didier Verna},
  title        = {Beating {C} in Scientific Computing Applications},
  booktitle    = {Third European Lisp Workshop at ECOOP},
  year         = 2006,
  address      = {Nantes, France},
  month        = jul,
  note         = {Best paper award.},
  lrdeprojects = {Climb},
  abstract     = {This paper presents an ongoing research on the behavior
                  and performance of Lisp with respect to C in the context of
                  scientific numerical computing. Several simple image
                  processing algorithms are used to evaluate the performance
                  of pixel access and arithmetic operations in both
                  languages. We demonstrate that the behavior of equivalent
                  Lisp and C code is similar with respect to the choice of
                  data structures and types, and also to external parameters
                  such as hardware optimization. We further demonstrate that
                  properly typed and optimized Lisp code runs as fast as the
                  equivalent C code, or even faster in some cases.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.06.ecoop.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2006-07-11}
}
%% Journal version of verna.06.imecs (same title and abstract).
@Article{ verna.06.ijcs,
  author       = {Didier Verna},
  title        = {How to make Lisp go faster than {C}},
  journal      = {IAENG International Journal of Computer Science},
  year         = 2006,
  volume       = 32,
  number       = 4,
  month        = dec,
  issn         = {1819-656X},
  lrdeprojects = {Climb},
  abstract     = {Contrary to popular belief, Lisp code can be very
                  efficient today: it can run as fast as equivalent C code or
                  even faster in some cases. In this paper, we explain how to
                  tune Lisp code for performance by introducing the proper
                  type declarations, using the appropriate data structures
                  and compiler information. We also explain how efficiency is
                  achieved by the compilers. These techniques are applied to
                  simple image processing algorithms in order to demonstrate
                  the announced performance on pixel access and arithmetic
                  operations in both languages.}
}
%% IMECS 2006 conference version; see verna.06.ijcs for the journal
%% version (same abstract).
%% NOTE(review): lrdenewsdate (2007) postdates the 2006 conference --
%% presumably the announcement date; verify.
@InProceedings{ verna.06.imecs,
  author       = {Didier Verna},
  title        = {How to make Lisp go faster than {C}},
  booktitle    = {Proceedings of the International MultiConference of
                  Engineers and Computer Scientists},
  year         = 2006,
  address      = {Hong Kong},
  month        = jun,
  organization = {International Association of Engineers},
  isbn         = {988-98671-3-3},
  lrdeprojects = {Climb},
  abstract     = {Contrary to popular belief, Lisp code can be very
                  efficient today: it can run as fast as equivalent C code or
                  even faster in some cases. In this paper, we explain how to
                  tune Lisp code for performance by introducing the proper
                  type declarations, using the appropriate data structures
                  and compiler information. We also explain how efficiency is
                  achieved by the compilers. These techniques are applied to
                  simple image processing algorithms in order to demonstrate
                  the announced performance on pixel access and arithmetic
                  operations in both languages.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.06.imecs.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2007-06-11}
}
%% TODO(review): this @Article lacked the required title and journal
%% fields. The values below were reconstructed from the abstract
%% (a paper presenting the CurVe class) and from the volume/number,
%% which match The PracTeX Journal 2006, issue 3 -- verify against
%% the published issue.
@Article{ verna.06.practex,
  author       = {Didier Verna},
  title        = {The {\CurVe} class},
  journal      = {The Prac\TeX{} Journal},
  year         = 2006,
  volume       = 2006,
  number       = 3,
  month        = aug,
  lrdeprojects = {Typesetting},
  abstract     = {This paper presents {\CurVe}, a curriculum vitae class for
                  \LaTeX2e, in a progressive approach going from a first
                  contact with the class, through concrete examples of
                  customization, and some aspects of advanced usage.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.06.practex.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2006-08-20}
}
%% IMECS 2007 paper on CLOS solutions to binary methods; the topic is
%% developed further in verna.08.els and verna.08.jucs.
@InProceedings{ verna.07.imecs,
  author       = {Didier Verna},
  title        = {{CLOS} solutions to binary methods},
  booktitle    = {Proceedings of the International MultiConference of
                  Engineers and Computer Scientists},
  year         = 2007,
  address      = {Hong Kong},
  month        = mar,
  organization = {International Association of Engineers},
  lrdeprojects = {Climb},
  abstract     = {Implementing binary methods in traditional object oriented
                  languages is difficult: numerous problems arise, such as
                  typing (covariance vs. contra-variance of the arguments),
                  polymorphism on multiple arguments (lack of multi-methods)
                  etc. The purpose of this paper is to demonstrate how those
                  problems are either solved, or nonexistent in the Common
                  Lisp Object System (CLOS). Several solutions for different
                  levels of binary methods support in CLOS are proposed. They
                  mainly consist in re-programming a binary method specific
                  object system through the CLOS meta-object protocol.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.07.imecs.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2007-01-22}
}
%% ELS 2008 paper; extended as verna.08.jucs (journal version).
@InProceedings{ verna.08.els,
  author       = {Didier Verna},
  title        = {Binary Methods Programming: the {CLOS} Perspective},
  booktitle    = {Proceedings of the First European Lisp Symposium},
  doi          = {10.5281/zenodo.3248977},
  pages        = {91--105},
  year         = 2008,
  address      = {Bordeaux, France},
  month        = may,
  lrdeprojects = {Climb},
  abstract     = {Implementing binary methods in traditional object-oriented
                  languages is difficult: numerous problems arise regarding
                  the relationship between types and classes in the context
                  of inheritance, or the need for privileged access to the
                  internal representation of objects. Most of these problems
                  occur in the context of statically typed languages that
                  lack multi-methods (polymorphism on multiple arguments).
                  The purpose of this paper is twofold: first, we show why
                  some of these problems are either non-issues, or easily
                  solved in Common Lisp. Then, we demonstrate how the Common
                  Lisp Object System (CLOS) allows us not only to implement
                  binary methods in a straightforward way, but also to
                  support the concept directly, and even enforce it at
                  different levels (usage and implementation).},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.08.els.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2009-03-31}
}
%% Journal of Universal Computer Science article: extended version of
%% verna.08.els (same abstract).
@Article{ verna.08.jucs,
  author       = {Didier Verna},
  title        = {Binary Methods Programming: the {CLOS} Perspective
                  (extended version)},
  journal      = {Journal of Universal Computer Science},
  doi          = {10.3217/jucs-014-20-3389},
  year         = 2008,
  volume       = 14,
  number       = 20,
  pages        = {3389--3411},
  lrdeprojects = {Climb},
  abstract     = {Implementing binary methods in traditional object-oriented
                  languages is difficult: numerous problems arise regarding
                  the relationship between types and classes in the context
                  of inheritance, or the need for privileged access to the
                  internal representation of objects. Most of these problems
                  occur in the context of statically typed languages that
                  lack multi-methods (polymorphism on multiple arguments).
                  The purpose of this paper is twofold: first, we show why
                  some of these problems are either non-issues, or easily
                  solved in Common Lisp. Then, we demonstrate how the Common
                  Lisp Object System (CLOS) allows us not only to implement
                  binary methods in a straightforward way, but also to
                  support the concept directly, and even enforce it at
                  different levels (usage and implementation).},
  lrdekeywords = {Software engineering}
}
%% Editorial report on the 5th European Lisp Workshop (ECOOP 2008
%% workshop reader); no abstract by nature of the entry.
@InProceedings{ verna.08.lncs,
  author       = {Didier Verna and Charlotte Herzeel and Christophe Rhodes
                  and Hans H\"ubner},
  title        = {Report on the 5th Workshop {ELW} at {ECOOP 2008}},
  booktitle    = {Object-Oriented Technology. ECOOP 2008 Workshop Reader},
  pages        = {1--6},
  year         = 2008,
  editor       = {Patrick Eugster},
  volume       = 5475,
  series       = {Lecture Notes in Computer Science},
  month        = jul,
  publisher    = {Springer}
}
%% Removed the junk empty field "abstract = { }" (whitespace-only
%% value); the journal version verna.10.jucs carries the full abstract.
@InProceedings{ verna.09.accu,
  author       = {Didier Verna},
  title        = {Revisiting the Visitor: the Just Do It Pattern},
  booktitle    = {Proceedings of the ACCU Conference 2009},
  year         = 2009,
  address      = {Oxford},
  lrdeprojects = {Climb},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2008-12-29}
}
%% ILC 2009 paper: first part (instantiation) of an experimental study
%% of CLOS performance versus C++.
@InProceedings{ verna.09.ilc,
  author       = {Didier Verna},
  title        = {{CLOS} Efficiency: Instantiation},
  booktitle    = {Proceedings of the International Lisp Conference},
  year         = 2009,
  month        = mar,
  pages        = {76--90},
  organization = {Association of Lisp Users},
  abstract     = {This article reports the results of an ongoing
                  experimental research on the behavior and performance of
                  CLOS, the Common Lisp Object System. Our purpose is to
                  evaluate the behavior and performance of the 3 most
                  important characteristics of any dynamic Object Oriented
                  system: class instantiation, slot access and dynamic
                  dispatch. This paper describes the results of our
                  experiments on instantiation. We evaluate the efficiency of
                  the instantiation process in both C++ and Lisp under a
                  combination of parameters such as slot types or classes
                  hierarchy. We show that in a non-optimized configuration
                  where safety is given priority on speed, the behavior of
                  C++ and Lisp instantiation can be quite different, which is
                  also the case amongst different Lisp compilers. On the
                  other hand, we demonstrate that when compilation is tuned
                  for speed, instantiation in Lisp becomes faster than in
                  C++.},
  lrdeprojects = {Climb},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.09.ilc.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2009-01-13}
}
%% ELS 2010 paper on CLoX, a CLOS/MOP implementation for XEmacs (port
%% of Closette to Emacs Lisp).
@InProceedings{ verna.10.els,
  author       = {Didier Verna},
  title        = {{CLoX}: {C}ommon {L}isp objects for {XEmacs}},
  booktitle    = {Proceedings of the 3rd European Lisp Symposium},
  doi          = {10.5281/zenodo.3248958},
  year         = 2010,
  address      = {Lisbon, Portugal},
  month        = may,
  lrdeprojects = {Software},
  abstract     = {CLoX is an ongoing attempt to provide a full Emacs Lisp
                  implementation of the Common Lisp Object System, including
                  its underlying meta-object protocol, for XEmacs. This paper
                  describes the early development stages of this project.
                  CLoX currently consists in a port of Closette to Emacs
                  Lisp, with some additional features, most notably, a deeper
                  integration between types and classes and a comprehensive
                  test suite. All these aspects are described in the paper,
                  and we also provide a feature comparison with an
                  alternative project called Eieio.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.10.els.pdf},
  lrdekeywords = {Software engineering},
  lrdenewsdate = {2010-03-09}
}
%% Added the missing issue number: the entry's DOI
%% (10.3217/jucs-016-02-0246) encodes volume 16, issue 2.
%% Journal version of verna.09.accu (same title).
@Article{ verna.10.jucs,
  author       = {Didier Verna},
  title        = {Revisiting the Visitor: the Just Do It Pattern},
  journal      = {Journal of Universal Computer Science},
  doi          = {10.3217/jucs-016-02-0246},
  year         = 2010,
  volume       = 16,
  number       = 2,
  pages        = {246--271},
  lrdeprojects = {Climb},
  abstract     = {While software design patterns are a generally useful
                  concept, they are often (and mistakenly) seen as ready-made
                  universal recipes for solving common problems. In a way,
                  the danger is that programmers stop thinking about their
                  actual problem, and start looking for pre-cooked solutions
                  in some design pattern book instead. What people usually
                  forget about design patterns is that the underlying
                  programming language plays a major role in the exact shape
                  such or such pattern will have on the surface. The purpose
                  of this paper is twofold: we show why design pattern
                  expression is intimately linked to the expressiveness of
                  the programming language in use, and we also demonstrate
                  how a blind application of them can in fact lead to very
                  poorly designed code.},
  lrdekeywords = {Software engineering}
}
%% Fixed truncated ISSN: TUGboat's ISSN is 0896-3207 (the entry had
%% the seven-digit "0896320"), and braced it like the file's other
%% ISBN/ISSN values.
@InProceedings{ verna.10.tug,
  author       = {Didier Verna},
  title        = {Classes, Styles, Conflicts: the Biological Realm of
                  {\LaTeX}},
  booktitle    = {TUGboat},
  issn         = {0896-3207},
  pages        = {162--172},
  year         = 2010,
  editor       = {Barbara Beeton and Karl Berry},
  volume       = 31,
  number       = 2,
  lrdeprojects = {Typesetting},
  abstract     = {The \LaTeX{} world is composed of thousands of software
                  components, most notably classes and styles. Classes and
                  styles are born, evolve or die, interact with each other,
                  compete or cooperate, very much as living organisms do at
                  the cellular level. This paper attempts to draw an extended
                  analogy between the \LaTeX{} biotope and cellular biology.
                  By considering \LaTeX{} documents as living organisms and
                  styles as viruses that infect them, we are able to exhibit
                  a set of behavioral patterns common to both worlds. We
                  analyze infection methods, types and cures, and we show how
                  \LaTeX{} or cellular organisms are able to survive in a
                  world of perpetual war.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.10.tug.pdf},
  lrdenewsdate = {2010-03-09}
}
%% ISBN braced and hyphenated (978-1-4503-0941-7) for consistency with
%% the file's other ISBN values, which are braced and hyphenated.
@InProceedings{ verna.11.onward,
  author       = {Didier Verna},
  title        = {Biological Realms in Computer Science: the Way You Don't
                  (Want To) Think About Them},
  booktitle    = {Onward! 2011},
  isbn         = {978-1-4503-0941-7},
  doi          = {10.1145/2089131.2089140},
  year         = 2011,
  lrdeprojects = {Software},
  pages        = {167--176},
  abstract     = {In biology, evolution is usually seen as a tinkering
                  process, different from what an engineer does when he plans
                  the development of his systems. Recently, studies have
                  shown that even in biology, there is a part of good
                  engineering. As computer scientists, we have much more
                  difficulty to admit that there is also a great deal of
                  tinkering in what we do, and that our software systems
                  behave more and more like biological realms every day. This
                  essay relates my personal experience about this
                  discovery.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.11.onward.pdf},
  lrdekeywords = {Software engineering}
}
%% Fixed truncated ISSN: TUGboat's ISSN is 0896-3207 (the entry had
%% the seven-digit "0896320"), and braced it like the file's other
%% ISBN/ISSN values.
@InProceedings{ verna.11.tug,
  author       = {Didier Verna},
  title        = {Towards {\LaTeX} Coding Standards},
  booktitle    = {TUGboat},
  issn         = {0896-3207},
  pages        = {309--328},
  year         = 2011,
  editor       = {Barbara Beeton and Karl Berry},
  volume       = 32,
  number       = 3,
  lrdeprojects = {Typesetting},
  abstract     = {Because \LaTeX{} is only a macro-expansion system, the
                  language does not impose any kind of good software
                  engineering practice, program structure or coding style.
                  Maybe because in the \LaTeX{} world, collaboration is not
                  so widespread, the idea of some \LaTeX{} Coding Standards
                  is not so pressing as with other programming languages.
                  Over the years, the permanent flow of personal development
                  experiences contributed to shape our own taste in terms of
                  coding style. In this paper, we report on all these
                  experiences and describe what we think are good programming
                  practices.},
  lrdepaper    = {http://www.lrde.epita.fr/dload/papers/verna.11.tug.pdf}
}
@InCollection{ verna.12.dsl,
  author        = {Didier Verna},
  title         = {Extensible languages: blurring the distinction between
                  {DSL}s and {GPL}s},
  booktitle     = {Formal and Practical Aspects of Domain-Specific Languages:
                  Recent Developments},
  editor        = {Marjan Mernik},
  chapter       = 1,
  month         = sep,
  year          = 2012,
  publisher     = {{IGI} Global},
  isbn          = {9781466620926},
  doi           = {10.4018/978-1-4666-2092-6.ch001},
  abstract      = {Out of a concern for focus and concision, domain-specific
                  languages (DSLs) are usually very different from general
                  purpose programming languages (GPLs), both at the syntactic
                  and the semantic levels. One approach to DSL implementation
                  is to write a full language infrastructure, including
                  parser, interpreter or even compiler. Another approach
                  however, is to ground the DSL into an extensible GPL,
                  giving you control over its own syntax and semantics. The
                  DSL may then be designed merely as an \emph{extension} to
                  the original GPL, and its implementation may boil down to
                  expressing only the differences with it. The task of DSL
                  implementation is hence considerably eased. The purpose of
                  this chapter is to provide a tour of the features that make
                  a GPL extensible, and to demonstrate how, in this context,
                  the distinction between DSL and GPL can blur, sometimes to
                  the point of complete disappearance.},
  lrdeprojects  = {Climb},
  lrdekeywords  = {Software Engineering}
}
@InProceedings{ verna.12.tug,
  author        = {Didier Verna},
  title         = {Star {\TeX}: the Next Generation},
  booktitle     = {TUGboat},
  editor        = {Barbara Beeton and Karl Berry},
  volume        = 33,
  number        = 2,
  year          = 2012,
  issn          = {0896-3207},
  abstract      = {While \TeX{} is unanimously praised for its typesetting
                  capabilities, it is also regularly blamed for its poor
                  programmatic offerings. A macro-expansion system is indeed
                  far from the best choice in terms of general-purpose
                  programming. Several solutions have been proposed to
                  modernize \TeX{} on the programming side. All of them are
                  mixed with a full-blown programming language. This paper
                  advocates another, homogeneous approach in which \TeX{} is
                  first rewritten in a modern language, \cl, which serves
                  both at the core of the program and at the scripting level.
                  All programmatic macros of \TeX{} are hence rendered
                  obsolete, as the underlying language itself can be used for
                  user-level programming.},
  lrdeprojects  = {Typesetting},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.12.tug.pdf}
}
@InProceedings{ verna.13.tug-1,
  author        = {Didier Verna},
  title         = {The incredible tale of the author who didn't want to do
                  the publisher's job},
  booktitle     = {TUGboat},
  editor        = {Barbara Beeton and Karl Berry},
  volume        = 34,
  number        = 3,
  year          = 2013,
  issn          = {0896-3207},
  abstract      = {In this article, I relate on a recent experience of mine:
                  writing a book chapter for a publisher who doesn't have a
                  clue about typesetting. I confess my futile attempt at
                  using \TeX{} for writing the chapter in question. I
                  describe the hell that descended upon me for daring to do
                  that. I however admit that the hell in question would have
                  been even greater, hadn't I done so. This article is both a
                  nervous breakdown and a laughter, and I am seeking for the
                  reader's comfort.},
  lrdeprojects  = {Typesetting},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.13.tug-1.pdf}
}
@InProceedings{ verna.13.tug-2,
  author        = {Didier Verna},
  title         = {{TiCL}: the Prototype ({Star \TeX}: the Next Generation,
                  Season 2)},
  booktitle     = {TUGboat},
  editor        = {Barbara Beeton and Karl Berry},
  volume        = 34,
  number        = 3,
  year          = 2013,
  issn          = {0896-3207},
  abstract      = {At TUG 2012, we presented some ideas about using one of
                  the oldest programming languages (Lisp), in order to
                  modernize one of the oldest typesetting systems (\TeX).
                  This talk was mostly focused on justifying the technical
                  fitness of Lisp for this task. This time, we would like to
                  take the opposite view and demonstrate a prototype, from
                  the user's perspective. This involves showing what a TiCL
                  document could look like, the implications in terms of
                  typesetting vs. programmatic features, and also in terms of
                  extensibility (relate this to class / style authoring).},
  lrdeprojects  = {Typesetting},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.13.tug-2.pdf}
}
@InProceedings{ verna.15.cop,
  author        = {Didier Verna and Fran{\c{c}}ois Ripault},
  title         = {Context-Oriented Image Processing},
  booktitle     = {Context-Oriented Programming Workshop},
  year          = 2015,
  isbn          = {9781450336543},
  doi           = {10.1145/2786545.2786547},
  abstract      = {Genericity aims at providing a very high level of
                  abstraction in order, for instance, to separate the general
                  shape of an algorithm from specific implementation details.
                  Reaching a high level of genericity through regular
                  object-oriented techniques has two major drawbacks,
                  however: code cluttering (e.g. class / method
                  proliferation) and performance degradation (e.g. dynamic
                  dispatch). In this paper, we explore a potential use for
                  the Context-Oriented programming paradigm in order to
                  maintain a high level of genericity in an experimental
                  image processing library, without sacrificing either the
                  performance or the original object-oriented design of the
                  application.},
  lrdeprojects  = {Climb},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.15.cop.pdf}
}
@InProceedings{ verna.18.els,
  author        = {Didier Verna},
  title         = {Method Combinators},
  booktitle     = {11th European Lisp Symposium},
  address       = {Marbella, Spain},
  month         = apr,
  year          = 2018,
  isbn          = {9782955747421},
  doi           = {10.5281/zenodo.3247610},
  abstract      = {In traditional object-oriented languages, the dynamic
                  dispatch algorithm is hardwired: for every polymorphic
                  call, only the most specific method is used. \textsc{Clos},
                  the Common Lisp Object System, goes beyond the traditional
                  approach by providing an abstraction known as \emph{method
                  combinations}: when several methods are applicable, it is
                  possible to select several of them, decide in which order
                  they will be called, and how to combine their results,
                  essentially making the dynamic dispatch algorithm
                  user-programmable.\par Although a powerful abstraction,
                  method combinations are under-specified in the Common Lisp
                  standard, and the \textsc{Mop}, the Meta-Object Protocol
                  underlying many implementations of \textsc{Clos}, worsens
                  the situation by either contradicting it or providing
                  unclear protocols. As a consequence, too much freedom is
                  granted to conforming implementations. The exact or
                  intended behavior of method combinations is unclear and not
                  necessarily coherent with the rest of \textsc{Clos}.\par In
                  this paper, we provide a detailed analysis of the problems
                  posed by method combinations, the consequences of their
                  lack of proper specification in one particular
                  implementation, and a \textsc{Mop}-based extension called
                  \emph{method combinators}, aiming at correcting these
                  problems and possibly offer new functionality.},
  lrdeprojects  = {Climb},
  lrdekeywords  = {Software Engineering},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.18.els.pdf},
  lrdenewsdate  = {2018-03-25}
}
@Article{ verna.18.programming,
  author        = {Didier Verna},
  title         = {Lisp, Jazz, Aikido},
  journal       = {The Art, Science and Engineering of Programming Journal},
  volume        = 2,
  number        = 3,
  month         = mar,
  year          = 2018,
  doi           = {10.22152/programming-journal.org/2018/2/10},
  abstract      = {The relation between Science (what we can explain) and Art
                  (what we can't) has long been acknowledged and while every
                  science contains an artistic part, every art form also
                  needs a bit of science. Among all scientific disciplines,
                  programming holds a special place for two reasons. First,
                  the artistic part is not only undeniable but also
                  essential. Second, and much like in a purely artistic
                  discipline, the act of programming is driven partly by the
                  notion of aesthetics: the pleasure we have in creating
                  beautiful things.\par Even though the importance of
                  aesthetics in the act of programming is now unquestioned,
                  more could still be written on the subject. The field
                  called ``psychology of programming'' focuses on the
                  cognitive aspects of the activity, with the goal of
                  improving the productivity of programmers. While many
                  scientists have emphasized their concern for aesthetics and
                  the impact it has on their activity, few computer
                  scientists have actually written about their thought
                  process while programming.\par What makes us like or
                  dislike such and such language or paradigm? Why do we shape
                  our programs the way we do? By answering these questions
                  from the angle of aesthetics, we may be able to shed some
                  new light on the art of programming. Starting from the
                  assumption that aesthetics is an inherently transversal
                  dimension, it should be possible for every programmer to
                  find the same aesthetic driving force in every creative
                  activity they undertake, not just programming, and in doing
                  so, get deeper insight on why and how they do things the
                  way they do.\par On the other hand, because our aesthetic
                  sensitivities are so personal, all we can really do is
                  relate our own experiences and share it with others, in the
                  hope that it will inspire them to do the same. My personal
                  life has been revolving around three major creative
                  activities, of equal importance: programming in Lisp,
                  playing Jazz music, and practicing Aikido. But why so many
                  of them, why so different ones, and why these
                  specifically?\par By introspecting my personal aesthetic
                  sensitivities, I eventually realized that my tastes in the
                  scientific, artistic, and physical domains are all
                  motivated by the same driving forces, hence unifying Lisp,
                  Jazz, and Aikido as three expressions of a single essence,
                  not so different after all. Lisp, Jazz, and Aikido are
                  governed by a limited set of rules which remain simple and
                  unobtrusive. Conforming to them is a pleasure. Because
                  Lisp, Jazz, and Aikido are inherently introspective
                  disciplines, they also invite you to transgress the rules
                  in order to find your own. Breaking the rules is fun.
                  Finally, if Lisp, Jazz, and Aikido unify so many paradigms,
                  styles, or techniques, it is not by mere accumulation but
                  because they live at the meta-level and let you reinvent
                  them. Working at the meta-level is an enlightening
                  experience.\par Understand your aesthetic sensitivities and
                  you may gain considerable insight on your own psychology of
                  programming. Mine is perhaps common to most lispers.
                  Perhaps also common to other programming communities, but
                  that, is for the reader to decide\ldots},
  lrdeprojects  = {Software},
  lrdekeywords  = {Software Engineering},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.18.programming.pdf},
  lrdenewsdate  = {2018-02-05}
}
@InProceedings{ verna.19.els,
  author        = {Didier Verna},
  title         = {Parallelizing Quickref},
  booktitle     = {12th European Lisp Symposium},
  pages         = {89--96},
  address       = {Genova, Italy},
  month         = apr,
  year          = 2019,
  isbn          = {9782955747438},
  doi           = {10.5281/zenodo.2632534},
  abstract      = {Quickref is a global documentation project for Common Lisp
                  software. It builds a website containing reference manuals
                  for Quicklisp libraries. Each library is first compiled,
                  loaded, and introspected. From the collected information, a
                  Texinfo file is generated, which is then processed into an
                  HTML one. Because of the large number of libraries in
                  Quicklisp, doing this sequentially may require several
                  hours of processing. We report on our experiments in
                  parallelizing Quickref. Experimental data on the morphology
                  of Quicklisp libraries has been collected. Based on this
                  data, we are able to propose a number of parallelization
                  schemes that reduce the total processing time by a factor
                  of 3.8 to 4.5, depending on the exact situation.},
  lrdeprojects  = {Typesetting},
  lrdekeywords  = {Software Engineering},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.19.els.pdf},
  lrdenewsdate  = {2019-04-01}
}
@InProceedings{ verna.19.tug,
  author        = {Didier Verna},
  title         = {{Quickref}: {Common} {Lisp} Reference Documentation as a
                  Stress Test for {Texinfo}},
  booktitle     = {TUGboat},
  editor        = {Barbara Beeton and Karl Berry},
  volume        = 40,
  number        = 2,
  pages         = {119--125},
  month         = sep,
  year          = 2019,
  organization  = {\TeX{} Users Group},
  publisher     = {\TeX{} Users Group},
  issn          = {0896-3207},
  abstract      = {Quickref is a global documentation project for the Common
                  Lisp ecosystem. It creates reference manuals automatically
                  by introspecting libraries and generating corresponding
                  documentation in Texinfo format. The Texinfo files may
                  subsequently be converted into PDF or HTML. Quickref is
                  non-intrusive: software developers do not have anything to
                  do to get their libraries documented by the system.\par
                  Quickref may be used to create a local website documenting
                  your current, partial, working environment, but it is also
                  able to document the whole Common Lisp ecosystem at once.
                  The result is a website containing almost two thousand
                  reference manuals. Quickref provides a Docker image for an
                  easy recreation of this website, but a public version is
                  also available and actively maintained.\par Quickref
                  constitutes an enormous and successful stress test for
                  Texinfo. In this paper, we give an overview of the design
                  and architecture of the system, describe the challenges and
                  difficulties in generating valid Texinfo code
                  automatically, and put some emphasis on the currently
                  remaining problems and deficiencies.},
  lrdeprojects  = {Typesetting},
  lrdekeywords  = {Software Engineering},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.19.tug.pdf},
  lrdenewsdate  = {2019-11-06}
}
@PhDThesis{ verna.20.hdr,
  author        = {Didier Verna},
  title         = {(Dynamic (Programming Paradigms)) ;; Performance and
                  Expressivity},
  school        = {Sorbonne Universit\'e},
  type          = {Habilitation Thesis},
  month         = jul,
  year          = 2020,
  doi           = {10.5281/zenodo.4244393},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/verna.20.hdr.pdf},
  lrdenewsdate  = {2020-07-10}
}
@Article{ wang.19.tmi,
  author        = {Li Wang and Dong Nie and Guannan Li and \'{E}lodie
                  Puybareau and Jose Dolz and Qian Zhang and Fan Wang and
                  Jing Xia and Zhengwang Wu and Jiawei Chen and Kim-Han Thung
                  and Toan Duc Bui and Jitae Shin and Guodong Zeng and Guoyan
                  Zheng and Vladimir S. Fonov and Andrew Doyle and Yongchao
                  Xu and Pim Moeskops and Josien P. W. Pluim and Christian
                  Desrosiers and Ismail Ben Ayed and Gerard Sanroma and
                  Oualid M. Benkarim and Adri\`{a} Casamitjana and
                  Ver\'{o}nica Vilaplana and Weili Lin and Gang Li and
                  Dinggang Shen},
  title         = {Benchmark on Automatic 6-month-old Infant Brain
                  Segmentation Algorithms: {T}he {iSeg}-2017 Challenge},
  journal       = {IEEE Transactions on Medical Imaging},
  volume        = {38},
  number        = {9},
  pages         = {2219--2230},
  month         = sep,
  year          = {2019},
  doi           = {10.1109/TMI.2019.2901712},
  keywords      = {Image segmentation; Magnetic resonance imaging; Manuals;
                  Pediatrics; Biomedical imaging; Testing; White matter},
  abstract      = {Accurate segmentation of infant brain magnetic resonance
                  (MR) images into white matter (WM), gray matter (GM), and
                  cerebrospinal fluid (CSF) is an indispensable foundation
                  for early studying of brain growth patterns and
                  morphological changes in neurodevelopmental disorders.
                  Nevertheless, in the isointense phase (approximately 6-9
                  months of age), due to inherent myelination and maturation
                  process, WM and GM exhibit similar levels of intensity in
                  both T1-weighted (T1w) and T2-weighted (T2w) MR images,
                  making tissue segmentation very challenging. Despite many
                  efforts were devoted to brain segmentation, only few
                  studies have focused on the segmentation of 6-month infant
                  brain images. With the idea of boosting methodological
                  development in the community, iSeg-2017 challenge
                  (http://iseg2017.web.unc.edu) provides a set of 6-month
                  infant subjects with manual labels for training and testing
                  the participating methods. Among the 21 automatic
                  segmentation methods participating in iSeg-2017, we review
                  the 8 top-ranked teams, in terms of Dice ratio, modified
                  Hausdorff distance and average surface distance, and
                  introduce their pipelines, implementations, as well as
                  source codes. We further discuss limitations and possible
                  future directions. We hope the dataset in iSeg-2017 and
                  this review article could provide insights into
                  methodological development for the community.},
  lrdeprojects  = {Olena},
  lrdenewsdate  = {2019-04-11}
}
@InProceedings{ widynski.14.ius,
  author        = {Nicolas Widynski and Thierry G\'eraud and Damien Garcia},
  title         = {Speckle Spot Detection in Ultrasound Images: Application
                  to Speckle Reduction and Speckle Tracking},
  booktitle     = {Proceedings of the IEEE International Ultrasonics
                  Symposium (IUS)},
  pages         = {1734--1737},
  address       = {Chicago, IL, USA},
  year          = {2014},
  doi           = {10.1109/ULTSYM.2014.0430},
  abstract      = {This paper investigates the speckle spot detection task in
                  ultrasound images. Speckle spots are described by
                  structural criteria: dimensions, shape, and topology. We
                  propose to represent the image using a morphological
                  inclusion tree, from which speckle spots are detected using
                  their structural appearance. This makes the method
                  independent of contrast, and hence robust to intensity
                  correction. The detection was applied to speckle reduction
                  and speckle tracking, and experiments showed that this
                  approach performs well compared to state-of-the-art
                  methods.},
  lrdekeywords  = {Image},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/widynski.14.ius.pdf},
  lrdenewsdate  = {2014-09-10}
}
@Article{ xiong.20.media,
  author        = {Zhaohan Xiong and Qing Xia and Zhiqiang Hu and Ning Huang
                  and Cheng Bian and Yefeng Zheng and Sulaiman Vesal and
                  Nishant Ravikumar and Andreas Maier and Xin Yang and
                  Pheng-Ann Heng and Dong Ni and Caizi Li and Qianqian Tong
                  and Weixin Si and \'Elodie Puybareau and Younes Khoudli and
                  Thierry G\'{e}raud and Chen Chen and Wenjia Bai and Daniel
                  Rueckert and Lingchao Xu and Xiahai Zhuang and Xinzhe Luo
                  and Shuman Jia and Maxime Sermesant and Yashu Liu and
                  Kuanquan Wang and Davide Borra and Alessandro Masci and
                  Cristiana Corsi and Coen {de Vente} and Mitko Veta and
                  Rashed Karim and Chandrakanth Jayachandran Preetha and
                  Sandy Engelhardt and Menyun Qiao and Yuanyuan Wang and Qian
                  Tao and Marta Nunez-Garcia and Oscar Camara and Nicolo
                  Savioli and Pablo Lamata and Jichao Zhao},
  title         = {A Global Benchmark of Algorithms for Segmenting the Left
                  Atrium from Late Gadolinium-Enhanced Cardiac Magnetic
                  Resonance Imaging},
  journal       = {Medical Image Analysis},
  volume        = {67},
  pages         = {101832},
  month         = jan,
  year          = {2021},
  issn          = {1361-8415},
  doi           = {10.1016/j.media.2020.101832},
  keywords      = {Left atrium, Convolutional neural networks, Late
                  gadolinium-enhanced magnetic resonance imaging, Image
                  segmentation},
  abstract      = {Segmentation of medical images, particularly late
                  gadolinium-enhanced magnetic resonance imaging (LGE-MRI)
                  used for visualizing diseased atrial structures, is a
                  crucial first step for ablation treatment of atrial
                  fibrillation. However, direct segmentation of LGE-MRIs is
                  challenging due to the varying intensities caused by
                  contrast agents. Since most clinical studies have relied on
                  manual, labor-intensive approaches, automatic methods are
                  of high interest, particularly optimized machine learning
                  approaches. To address this, we organized the 2018 Left
                  Atrium Segmentation Challenge using 154 3D LGE-MRIs,
                  currently the world's largest atrial LGE-MRI dataset, and
                  associated labels of the left atrium segmented by three
                  medical experts, ultimately attracting the participation of
                  27 international teams. In this paper, extensive analysis
                  of the submitted algorithms using technical and biological
                  metrics was performed by undergoing subgroup analysis and
                  conducting hyper-parameter analysis, offering an overall
                  picture of the major design choices of convolutional neural
                  networks (CNNs) and practical considerations for achieving
                  state-of-the-art left atrium segmentation. Results show
                  that the top method achieved a Dice score of 93.2\% and a
                  mean surface to surface distance of 0.7 mm, significantly
                  outperforming prior state-of-the-art. Particularly, our
                  analysis demonstrated that double sequentially used CNNs,
                  in which a first CNN is used for automatic
                  region-of-interest localization and a subsequent CNN is
                  used for refined regional segmentation, achieved superior
                  results than traditional methods and machine learning
                  approaches containing single CNNs. This large-scale
                  benchmarking study makes a significant step towards
                  much-improved segmentation methods for atrial LGE-MRIs, and
                  will serve as an important benchmark for evaluating and
                  comparing the future works in the field. Furthermore, the
                  findings from this study can potentially be extended to
                  other imaging datasets and modalities, having an impact on
                  the wider medical imaging community.},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xiong.20.media.pdf},
  lrdenewsdate  = {2020-11-10}
}
@InProceedings{ xu.12.icip,
  author        = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
  title         = {Context-Based Energy Estimator: Application to Object
                  Segmentation on the Tree of Shapes},
  booktitle     = {Proceedings of the 19th International Conference on Image
                  Processing (ICIP)},
  pages         = {1577--1580},
  address       = {Orlando, Florida, USA},
  month         = oct,
  year          = 2012,
  organization  = {IEEE},
  abstract      = {Image segmentation can be defined as the detection of
                  closed contours surrounding objects of interest. Given a
                  family of closed curves obtained by some means, a
                  difficulty is to extract the relevant ones. A classical
                  approach is to define an energy minimization framework,
                  where interesting contours correspond to local minima of
                  this energy. Active contours, graph cuts or minimum ratio
                  cuts are instances of such approaches. In this article, we
                  propose a novel, efficient ratio-cut estimator, which is
                  both context-based and can be interpreted as an active
                  contour. As a first example of the effectiveness of our
                  formulation, we consider the tree of shapes, which provides
                  a family of level lines organized in a tree hierarchy
                  through an inclusion relationship. Thanks to the tree
                  structure, the estimator can be computed incrementally in
                  an efficient fashion. Experimental results on synthetic and
                  real images demonstrate the robustness and usefulness of
                  our method.},
  lrdeprojects  = {Olena},
  lrdekeywords  = {Image},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.12.icip.pdf},
  lrdenewsdate  = {2012-04-17}
}
@InProceedings{ xu.12.icpr,
  author        = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
  title         = {Morphological Filtering in Shape Spaces: Applications
                  using Tree-Based Image Representations},
  booktitle     = {Proceedings of the 21st International Conference on
                  Pattern Recognition (ICPR)},
  pages         = {485--488},
  address       = {Tsukuba Science City, Japan},
  month         = nov,
  year          = 2012,
  publisher     = {IEEE Computer Society},
  abstract      = {Connected operators are filtering tools that act by
                  merging elementary regions of an image. A popular strategy
                  is based on tree-based image representations: for example,
                  one can compute a shape-based attribute on each node of the
                  tree and keep only the nodes for which the attribute is
                  sufficiently strong. This operation can be seen as a
                  thresholding of the tree, seen as a graph whose nodes are
                  weighted by the attribute. Rather than being satisfied with
                  a mere thresholding, we propose to expand on this idea, and
                  to apply connected filters on this latest graph.
                  Consequently, the filtering is done not in the space of the
                  image, but on the space of shapes built from the image.
                  Such a processing is a generalization of the existing
                  tree-based connected operators. Indeed, the framework
                  includes classical existing connected operators by
                  attributes. It also allows us to propose a class of novel
                  connected operators from the leveling family, based on
                  shape attributes. Finally, we also propose a novel class of
                  self-dual connected operators that we call morphological
                  shapings.},
  lrdeprojects  = {Olena},
  lrdekeywords  = {Image},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.12.icpr.pdf},
  lrdenewsdate  = {2012-06-16}
}
@InProceedings{ xu.13.icip,
  author        = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
  title         = {Salient Level Lines Selection Using the {Mumford-Shah}
                  Functional},
  booktitle     = {Proceedings of the 20th International Conference on Image
                  Processing (ICIP)},
  pages         = {1227--1231},
  address       = {Melbourne, Australia},
  month         = sep,
  year          = 2013,
  organization  = {IEEE},
  abstract      = {Many methods relying on the morphological notion of
                  shapes, (i.e., connected components of level sets) have
                  been proved to be very useful for pattern analysis and
                  recognition. Selecting meaningful level lines (boundaries
                  of level sets) yields to simplify images while preserving
                  salient structures. Many image simplification and/or
                  segmentation methods are driven by the optimization of an
                  energy functional, for instance the Mumford-Shah
                  functional. In this article, we propose an efficient
                  shape-based morphological filtering that very quickly
                  compute to a locally (subordinated to the tree of shapes)
                  optimal solution of the piecewise-constant Mumford-Shah
                  functional. Experimental results demonstrate the
                  efficiency, usefulness, and robustness of our method, when
                  applied to image simplification, pre-segmentation, and
                  detection of affine regions with viewpoint changes.},
  lrdeprojects  = {Olena},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.13.icip.pdf},
  lrdenewsdate  = {2013-05-27}
}
@InProceedings{ xu.13.ismm,
  author        = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
  title         = {Two applications of shape-based morphology: blood vessels
                  segmentation and a generalization of constrained
                  connectivity},
  booktitle     = {Mathematical Morphology and Its Application to Signal and
                  Image Processing -- Proceedings of the 11th International
                  Symposium on Mathematical Morphology (ISMM)},
  editor        = {C.L. Luengo Hendriks and G. Borgefors and R. Strand},
  volume        = 7883,
  series        = {Lecture Notes in Computer Science Series},
  pages         = {390--401},
  address       = {Uppsala, Sweden},
  year          = 2013,
  publisher     = {Springer},
  abstract      = {Connected filtering is a popular strategy that relies on
                  tree-based image representations: for example, one can
                  compute an attribute on each node of the tree and keep only
                  the nodes for which the attribute is sufficiently strong.
                  This operation can be seen as a thresholding of the tree,
                  seen as a graph whose nodes are weighted by the attribute.
                  Rather than being satisfied with a mere thresholding, we
                  propose to expand on this idea, and to apply connected
                  filters on this latest graph. Consequently, the filtering
                  is done not in the space of the image, but on the space of
                  shapes built from the image. Such a processing, that we
                  called shape-based morphology, is a generalization of the
                  existing tree-based connected operators. In this paper, two
                  different applications are studied: in the first one, we
                  apply our framework to blood vessels segmentation in
                  retinal images. In the second one, we propose an extension
                  of constrained connectivity. In both cases, quantitative
                  evaluations demonstrate that shape-based filtering, a mere
                  filtering step that we compare to more evolved processings,
                  achieves state-of-the-art results.},
  lrdeprojects  = {Olena},
  lrdekeywords  = {Image},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.13.ismm.pdf},
  lrdenewsdate  = {2013-03-14}
}
@PhDThesis{ xu.13.phd,
  author        = {Yongchao Xu},
  title         = {Tree-based shape spaces: Definition and applications in
                  image processing and computer vision},
  school        = {Universit\'e Paris-Est},
  address       = {Marne-la-Vall\'ee, France},
  month         = dec,
  year          = 2013,
  abstract      = {The classical framework of connected filters relies on the
                  removal of some connected components of a graph. To apply
                  those filters, it is often useful to transform an image
                  into a component tree, and to prune the tree to simplify
                  the original image. Those trees have some remarkable
                  properties for computer vision. A first illustration of
                  their usefulness is the proposition of a local feature
                  detector, truly invariant to change of contrast, which
                  allows us to obtain the state-of-the-art results in image
                  registration and in multi-view 3D reconstruction. Going
                  further in the use of those trees, we propose to expand the
                  classical framework of connected filters. For this, we
                  introduce the notion of tree-based shape spaces: instead of
                  filtering the connected components of the graph
                  corresponding to the image, we propose to filter the
                  connected components of the graph given by the component
                  tree of the image. This general framework, which we call
                  shape-based morphology can be used for object detection and
                  segmentation, hierarchical segmentation, and image
                  filtering. Many applications and illustrations show the
                  usefulness of the proposed framework.},
  lrdeprojects  = {Olena},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.13.phd.pdf},
  lrdenewsdate  = {2013-12-12}
}
@InProceedings{ xu.14.icip,
  author        = {Yongchao Xu and Edwin Carlinet and Thierry G\'eraud and
                  Laurent Najman},
  title         = {Meaningful disjoint level lines selection},
  booktitle     = {Proceedings of the 21st International Conference on Image
                  Processing (ICIP)},
  pages         = {2938--2942},
  address       = {Paris, France},
  year          = 2014,
  doi           = {10.1109/ICIP.2014.7025594},
  abstract      = {Many methods based on the morphological notion of
                  \textit{shapes} (\textit{i.e.}, connected components of
                  level sets) have been proved to be very efficient in shape
                  recognition and shape analysis. The inclusion relationship
                  of the level lines (boundaries of level sets) forms the
                  tree of shapes, a tree-based image representation with a
                  high potential. Numerous applications using this tree
                  representation have been proposed. In this article, we
                  propose an efficient algorithm that extracts a set of
                  disjoint level lines in the image. These selected level
                  lines yield a simplified image with clean contours, which
                  also provides an intuitive idea about the main structure of
                  the tree of shapes. Besides, we obtain a saliency map
                  without transition problems around the contours by
                  weighting level lines with their significance. Experimental
                  results demonstrate the efficiency and usefulness of our
                  method.},
  lrdeprojects  = {Olena},
  lrdekeywords  = {Image},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.14.icip.pdf},
  lrdeposter    = {http://www.lrde.epita.fr/dload/papers/xu.14.icip.poster.pdf},
  lrdenewsdate  = {2014-05-26}
}
@Article{ xu.14.itip,
  author        = {Yongchao Xu and Thierry G\'eraud and Pascal Monasse and
                  Laurent Najman},
  title         = {Tree-Based Morse Regions: A Topological Approach to Local
                  Feature Detection},
  journal       = {IEEE Transactions on Image Processing},
  volume        = {23},
  number        = {12},
  pages         = {5612--5625},
  month         = dec,
  year          = {2014},
  doi           = {10.1109/TIP.2014.2364127},
  abstract      = {This paper introduces a topological approach to local
                  invariant feature detection motivated by Morse theory. We
                  use the critical points of the graph of the intensity
                  image, revealing directly the topology information as
                  initial ``interest'' points. Critical points are selected
                  from what we call a tree-based shape-space. Specifically,
                  they are selected from both the connected components of the
                  upper level sets of the image (the Max-tree) and those of
                  the lower level sets (the Min-tree). They correspond to
                  specific nodes on those two trees: (1) to the leaves
                  (extrema) and (2) to the nodes having bifurcation (saddle
                  points). We then associate to each critical point the
                  largest region that contains it and is topologically
                  equivalent in its tree. We call such largest regions the
                  Tree-Based Morse Regions (TBMR). TBMR can be seen as a
                  variant of MSER, which are contrasted regions. Contrarily
                  to MSER, TBMR relies only on topological information and
                  thus fully inherit the invariance properties of the space
                  of shapes ({\em e.g.}, invariance to affine contrast
                  changes and covariance to continuous transformations). In
                  particular, TBMR extracts the regions independently of the
                  contrast, which makes it truly contrast invariant.
                  Furthermore, it is quasi parameter-free. TBMR extraction is
                  fast, having the same complexity as MSER. Experimentally,
                  TBMR achieves a repeatability on par with state-of-the-art
                  methods, but obtains a significantly higher number of
                  features. Both the accuracy and the robustness of TBMR are
                  demonstrated by applications to image registration and 3D
                  reconstruction.},
  lrdeprojects  = {Olena},
  lrdekeywords  = {Image},
  lrdepaper     = {http://www.lrde.epita.fr/dload/papers/xu.14.itip.pdf},
  lrdenewsdate  = {2014-10-03}
}
@InProceedings{ xu.14.rfia,
author = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
title = {Espaces des formes bas\'es sur des arbres : d\'efinition
et applications en traitement d'images et vision par
ordinateur},
booktitle = {Actes du 19\`eme Congr\`es National sur Reconnaissance des
Formes et l'Intelligence Artificielle (RFIA)},
year = 2014,
address = {Rouen, France},
month = jul,
volume = 1,
category = {national},
lrdeprojects = {Olena},
abstract = {Le cadre classique des filtres connexes consiste \`a
enlever d'un graphe certaines de ses composantes connexes.
Pour appliquer ces filtres, il est souvent utile de
transformer une image en un arbre de composantes, et on
\'elague cet arbre pour simplifier l'image de d\'epart. Les
arbres ainsi form\'es ont des propri\'et\'es remarquables
pour la vision par ordinateur. Une premi\`ere illustration
de leur int\'er\^et est la d\'efinition d'un d\'etecteur de
zones d'int\'er\^et, vraiment invariant aux changements de
contraste, qui nous permet d'obtenir des r\'esultats \`a
l'\'etat de l'art en recalage d'images et en reconstruction
3D \`a base d'images. Poursuivant dans l'utilisation de ces
arbres, nous proposons d'\'elargir le cadre des filtres
connexes. Pour cela, nous introduisons la notion d'espaces
des formes bas\'es sur des arbres~: au lieu de filtrer des
composantes connexes du graphe correspondant \`a l'image,
nous proposons de filtrer des composantes connexes du
graphe donn\'e par l'arbre des composantes de l'image. Ce
cadre g\'en\'eral, que nous appelons morphologie bas\'ee
sur les formes, peut \^etre utilis\'e pour la d\'etection
et la segmentation d'objets, l'obtention de segmentations
hi\'erarchiques, et le filtrage d'images. De nombreuses
applications et illustrations montrent l'int\'er\^et de ce cadre.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.14.rfia.pdf},
lrdekeywords = {Image}
}
@InProceedings{ xu.15.ismm,
author = {Yongchao Xu and Edwin Carlinet and Thierry G\'eraud and
Laurent Najman},
title = {Efficient Computation of Attributes and Saliency Maps on
Tree-Based Image Representations},
booktitle = {Mathematical Morphology and Its Application to Signal and
Image Processing -- Proceedings of the 12th International
Symposium on Mathematical Morphology (ISMM)},
year = {2015},
series = {Lecture Notes in Computer Science},
volume = {9082},
address = {Reykjavik, Iceland},
publisher = {Springer},
editor = {J.A. Benediktsson and J. Chanussot and L. Najman and H.
Talbot},
pages = {693--704},
lrdeprojects = {Olena},
abstract = {Tree-based image representations are popular tools for
many applications in mathematical morphology and image
processing. Classically, one computes an attribute on each
node of a tree and decides whether to preserve or remove
some nodes upon the attribute function. This attribute
function plays a key role for the good performance of
tree-based applications. In this paper, we propose several
algorithms to compute efficiently some attribute
information. The first one is incremental computation of
information on region, contour, and context. Then we show
how to compute efficiently extremal information along the
contour (e.g., minimal gradient's magnitude along the
contour). Lastly, we depict computation of extinction-based
saliency map using tree-based image representations. The
computation complexity and the memory cost of these
algorithms are analyzed. To the best of our knowledge,
except information on region, none of the other algorithms
is presented explicitly in any state-of-the-art paper.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.15.ismm.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2015-04-07},
doi = {10.1007/978-3-319-18720-4_58}
}
@Article{ xu.15.pami,
author = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
title = {Connected Filtering on Tree-Based Shape-Spaces},
journal = {IEEE Transactions on Pattern Analysis and Machine
Intelligence},
year = {2016},
volume = {38},
number = {6},
pages = {1126--1140},
month = jun,
doi = {10.1109/TPAMI.2015.2441070},
lrdeprojects = {Olena},
abstract = {Connected filters are well-known for their good contour
preservation property. A popular implementation strategy
relies on tree-based image representations: for example,
one can compute an attribute characterizing the connected
component represented by each node of the tree and keep
only the nodes for which the attribute is sufficiently
high. This operation can be seen as a thresholding of the
tree, seen as a graph whose nodes are weighted by the
attribute. Rather than being satisfied with a mere
thresholding, we propose to expand on this idea, and to
apply connected filters on this latest graph. Consequently,
the filtering is performed not in the space of the image,
but in the space of shapes built from the image. Such a
processing of shape-space filtering is a generalization of
the existing tree-based connected operators. Indeed, the
framework includes the classical existing connected
operators by attributes. It also allows us to propose a
class of novel connected operators from the leveling
family, based on non-increasing attributes. Finally, we
also propose a new class of connected operators that we
call morphological {\em shapings}. Some illustrations and
quantitative evaluations demonstrate the usefulness and
robustness of the proposed shape-space filters.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.15.pami.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2015-06-05}
}
@Article{ xu.16.pami,
author = {Yongchao Xu and Edwin Carlinet and Thierry G\'eraud and
Laurent Najman},
title = {Hierarchical Segmentation Using Tree-Based Shape Spaces},
journal = {IEEE Transactions on Pattern Analysis and Machine
Intelligence},
year = {2017},
volume = {39},
number = {3},
pages = {457--469},
month = apr,
doi = {10.1109/TPAMI.2016.2554550},
lrdeprojects = {Olena},
abstract = {Current trends in image segmentation are to compute a
hierarchy of image segmentations from fine to coarse. A
classical approach to obtain a single meaningful image
partition from a given hierarchy is to cut it in an optimal
way, following the seminal approach of the scale-set
theory. While interesting in many cases, the resulting
segmentation, being a non-horizontal cut, is limited by the
structure of the hierarchy. In this paper, we propose a
novel approach that acts by transforming an input hierarchy
into a new saliency map. It relies on the notion of shape
space: a graph representation of a set of regions extracted
from the image. Each region is characterized with an
attribute describing it. We weigh the boundaries of a
subset of meaningful regions (local minima) in the shape
space by extinction values based on the attribute. This
extinction-based saliency map represents a new hierarchy of
segmentations highlighting regions having some specific
characteristics. Each threshold of this map represents a
segmentation which is generally different from any cut of
the original hierarchy. This new approach thus enlarges the
set of possible partition results that can be extracted
from a given hierarchy. Qualitative and quantitative
illustrations demonstrate the usefulness of the proposed
method.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.16.pami.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2016-04-11}
}
@Article{ xu.16.prl,
author = {Yongchao Xu and Thierry G\'eraud and Laurent Najman},
title = {Hierarchical image simplification and segmentation based
on {M}umford-{S}hah-salient level line selection},
journal = {Pattern Recognition Letters},
year = {2016},
month = nov,
volume = {83},
number = {3},
pages = {278--286},
doi = {10.1016/j.patrec.2016.05.006},
lrdeprojects = {Olena},
abstract = {Hierarchies, such as the tree of shapes, are popular
representations for image simplification and segmentation
thanks to their multiscale structures. Selecting meaningful
level lines (boundaries of shapes) yields to simplify image
while preserving intact salient structures. Many image
simplification and segmentation methods are driven by the
optimization of an energy functional, for instance the
celebrated Mumford-Shah functional. In this paper, we
propose an efficient approach to hierarchical image
simplification and segmentation based on the minimization
of the piecewise-constant Mumford-Shah functional. This
method conforms to the current trend that consists in
producing hierarchical results rather than a unique
partition. Contrary to classical approaches which compute
optimal hierarchical segmentations from an input hierarchy
of segmentations, we rely on the tree of shapes, a unique
and well-defined representation equivalent to the image.
Simply put, we compute for each level line of the image an
attribute function that characterizes its persistence under
the energy minimization. Then we stack the level lines from
meaningless ones to salient ones through a saliency map
based on extinction values defined on the tree-based shape
space. Qualitative illustrations and quantitative
evaluation on Weizmann segmentation evaluation database
demonstrate the state-of-the-art performance of our
method.},
lrdeinc = {Publications/xu.15.prl},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.16.prl.pdf},
lrdekeywords = {Image},
lrdenewsdate = {2016-05-20}
}
@InProceedings{ xu.17.gretsi,
author = {Yongchao Xu and Thierry G\'eraud and Isabelle Bloch},
title = {Segmentation d'{IRM} de cerveaux de nouveau-n\'es en
quelques secondes \`a l'aide d'un r\'eseau de neurones
convolutif {\em pseudo-{3D}} et de transfert d'apprentissage},
booktitle = {Actes du 26e Colloque GRETSI},
year = {2017},
address = {Juan-les-Pins, France},
category = {national},
month = sep,
abstract = {L'imagerie par r\'esonance magn\'etique (IRM) du cerveau
est utilis\'ee sur les nouveau-n\'es pour \'evaluer
l'\'evolution du cerveau et diagnostiquer des maladies
neurologiques. Ces examens n\'ecessitent souvent une
analyse quantitative des diff\'erents tissus du cerveau, de
sorte qu'avoir une segmentation pr\'ecise est essentiel.
Dans cet article, nous proposons une m\'ethode automatique
rapide de segmentation en diff\'erents tissus des images
IRM 3D de cerveaux de nouveau-n\'es ; elle utilise un
r\'eseau de neurones totalement convolutif (FCN) et du
transfert d'apprentissage. Par rapport aux approches
similaires qui reposent soit sur des patchs 2D ou 3D, soit
sur des FCN totalement 3D, notre m\'ethode est beaucoup
plus rapide : elle ne prend que quelques secondes, et une
seule modalit\'e (T2) est n\'ecessaire. Afin de prendre les
informations 3D en compte, trois coupes 2D successives sont
empil\'ees pour former une image 2D en couleurs, dont
l'ensemble sur tout le volume sert d'entr\'ee \`a un FCN,
pr\'e-entra\^in\'e sur ImageNet pour la classification
d'images naturelles. Nos exp\'eriences sur un ensemble de
donn\'ees de r\'ef\'erence montrent que notre m\'ethode
obtient des r\'esultats du niveau de ceux de l'\'etat de l'art.},
lrdeinc = {Publications/xu.17.icip.inc},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.17.gretsi.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2017-06-20}
}
@InProceedings{ xu.17.icip,
author = {Yongchao Xu and Thierry G\'eraud and Isabelle Bloch},
title = {From Neonatal to Adult Brain {MR} Image Segmentation in a
Few Seconds Using {3D}-Like Fully Convolutional Network and
Transfer Learning},
booktitle = {Proceedings of the 24th IEEE International Conference on
Image Processing (ICIP)},
year = {2017},
pages = {4417--4421},
month = sep,
address = {Beijing, China},
doi = {10.1109/ICIP.2017.8297117},
abstract = {Brain magnetic resonance imaging (MRI) is widely used to
assess brain developments in neonates and to diagnose a
wide range of neurological diseases in adults. Such studies
are usually based on quantitative analysis of different
brain tissues, so it is essential to be able to classify
them accurately. In this paper, we propose a fast automatic
method that segments 3D brain MR images into different
tissues using fully convolutional network (FCN) and
transfer learning. As compared to existing deep
learning-based approaches that rely either on 2D patches or
on fully 3D FCN, our method is way much faster: it only
takes a few seconds, and only a single modality (T1 or T2)
is required. In order to take the 3D information into
account, all 3 successive 2D slices are stacked to form a
set of 2D color images, which serve as input for the FCN
pre-trained on ImageNet for natural image classification.
To the best of our knowledge, this is the first method that
applies transfer learning to segment both neonatal and
adult brain 3D MR images. Our experiments on two public
datasets show that our method achieves state-of-the-art
results.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.17.icip.pdf},
lrdeinc = {Publications/xu.17.icip.inc},
lrdekeywords = {Image},
lrdeprojects = {Olena},
lrdenewsdate = {2017-06-12}
}
@InProceedings{ xu.18.brainles,
author = {Yongchao Xu and Thierry G{\'e}raud and \'Elodie Puybareau
and Isabelle Bloch and Joseph Chazalon},
title = {White Matter Hyperintensities Segmentation In a Few
Seconds Using Fully Convolutional Network and Transfer
Learning},
booktitle = {Brainlesion: Glioma, Multiple Sclerosis, Stroke and
Traumatic Brain Injuries---3rd International Workshop,
BrainLes 2017, Held in Conjunction with MICCAI 2017, Quebec
City, QC, Canada, September 14 2017, Revised Selected
Papers},
publisher = {Springer, Cham},
year = {2018},
editor = {A. Crimi and S. Bakas and H. Kuijf and B. Menze and M.
Reyes},
series = {Lecture Notes in Computer Science},
volume = {10670},
pages = {501--514},
doi = {10.1007/978-3-319-75238-9_42},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.18.brainles.pdf},
lrdekeywords = {Image},
lrdeprojects = {Olena},
lrdenewsdate = {2018-02-06}
}
@Article{ xu.18.media,
author = {Yongchao Xu and Baptiste Morel and Sonia Dahdouh and
\'Elodie Puybareau and Alessio Virz\`i and H\'el\`ene Urien
and Thierry G\'eraud and Catherine Adamsbaum and Isabelle
Bloch},
title = {The Challenge of Cerebral Magnetic Resonance Imaging in
Neonates: {A} New Method using Mathematical Morphology for
the Segmentation of Structures Including Diffuse Excessive
High Signal Intensities},
journal = {Medical Image Analysis},
year = 2018,
month = aug,
pages = {75--94},
volume = {48},
doi = {10.1016/j.media.2018.05.003},
lrdeprojects = {Olena},
abstract = {Preterm birth is a multifactorial condition associated
with increased morbidity and mortality. Diffuse excessive
high signal intensity (DEHSI) has been recently described
on T2-weighted MR sequences in this population and thought
to be associated with neuropathologies. To date, no robust
and reproducible method to assess the presence of white
matter hyperintensities has been developed, perhaps
explaining the current controversy over their prognostic
value. The aim of this paper is to propose a new
semi-automated framework to detect DEHSI on neonatal brain
MR images having a particular pattern due to the
physiological lack of complete myelination of the white
matter. A novel method for semi-automatic segmentation of
neonatal brain structures and DEHSI, based on mathematical
morphology and on max-tree representations of the images is
thus described. It is a mandatory first step to identify
and clinically assess homogeneous cohorts of neonates for
DEHSI and/or volume of any other segmented structures.
Implemented in a user-friendly interface, the method makes
it straightforward to select relevant markers of structures
to be segmented, and if needed, apply manual
corrections. This method responds to the increasing need
for providing medical experts with semi-automatic tools for
image analysis, and overcomes the limitations of visual
analysis alone, prone to subjectivity and variability.
Experimental results demonstrate that the method is
accurate, with excellent reproducibility and with very few
manual corrections needed. Although the method was intended
initially for images acquired at 1.5T, which corresponds to
usual clinical practice, preliminary results on images
acquired at 3T suggest that the proposed approach can be
generalized.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/xu.18.media.pdf},
lrdeinc = {Publications/xu.18.media.inc},
lrdenewsdate = {2018-03-22},
lrdekeywords = {Image}
}
@InProceedings{ xue.03.icip,
author = {Heru Xue and Thierry G\'eraud and Alexandre Duret-Lutz},
title = {Multi-band segmentation using morphological clustering and
fusion application to color image segmentation},
booktitle = {Proceedings of the IEEE International Conference on Image
Processing (ICIP)},
year = 2003,
pages = {353--356},
volume = 1,
address = {Barcelona, Spain},
month = sep,
lrdeprojects = {Olena},
abstract = {In this paper we propose a novel approach for color image
segmentation. Our approach is based on segmentation of
subsets of bands using mathematical morphology followed by
the fusion of the resulting segmentation channels. For
color images the band subsets are chosen as RG, RB and GB
pairs, whose 2D histograms are processed as projections of
a 3D histogram. The segmentations in 2D color spaces are
obtained using the watershed algorithm. These 2D
segmentations are then combined to obtain a final result
using a region split-and-merge process. The CIE L*a*b* color
space is used to measure the color distance. Our approach
results in improved performance and can be generalized for
multi-band segmentation of images such as multi-spectral
satellite images information.},
lrdekeywords = {Image},
lrdenewsdate = {2003-04-10}
}
@InProceedings{ yoruk.04.eusipco,
author = {Erdem Yoruk and Ender Konukoglu and Bulent Sankur and
J\'er\^ome Darbon},
title = {Person authentication based on hand shape},
booktitle = {Proceedings of the 12th European Signal Processing Conference
(EUSIPCO)},
year = 2004,
address = {Vienna, Austria},
month = sep,
lrdeprojects = {Olena},
abstract = {The problem of person identification based on their hand
images has been addressed. The system is based on the
images of the right hands of the subjects, captured by a
flatbed scanner in an unconstrained pose. In a
preprocessing stage of the algorithm, the silhouettes of
hand images are registered to a fixed pose, which involves
both rotation and translation of the hand and, separately,
of the individual fingers. Independent component features
of the hand silhouette images are used for recognition. The
classification performance is found to be very satisfactory
and it was shown that, at least for groups of one hundred
subjects, hand-based recognition is a viable secure access
control scheme.},
lrdekeywords = {Image}
}
@Article{ yoruk.06.itip,
author = {Erdem Y\"or\"uk and Ender Konukoglu and B\"ulent Sankur
and J\'er\^ome Darbon},
title = {Shape-based hand recognition},
journal = {IEEE Transactions on Image Processing},
year = 2006,
volume = 15,
number = 7,
pages = {1803--1815},
month = jul,
lrdeprojects = {Olena},
abstract = {The problem of person recognition and verification based
on their hand images has been addressed. The system is
based on the images of the right hands of the subjects,
captured by a flatbed scanner in an unconstrained pose at
45 dpi. In a preprocessing stage of the algorithm, the
silhouettes of hand images are registered to a fixed pose,
which involves both rotation and translation of the hand
and, separately, of the individual fingers. Two feature
sets have been comparatively assessed, Hausdorff distance
of the hand contours and independent component features of
the hand silhouette images. Both the classification and the
verification performances are found to be very satisfactory
as it was shown that, at least for groups of about five
hundred subjects, hand-based recognition is a viable secure
access control scheme.},
lrdekeywords = {Image}
}
@InProceedings{ zhao.19.stacom,
author = {Zhou Zhao and Nicolas Boutry and \'Elodie Puybareau and
Thierry G\'eraud},
title = {A Two-Stage Temporal-Like Fully Convolutional Network
Framework for Left Ventricle Segmentation and
Quantification on {MR} Images},
booktitle = {Statistical Atlases and Computational Models of the Heart.
Multi-Sequence CMR Segmentation, CRT-EPiggy and LV Full
Quantification Challenges---10th International Workshop,
STACOM 2019, Held in Conjunction with MICCAI 2019,
Shenzhen, China, October 13, 2019, Revised Selected
Papers},
year = 2020,
editor = {Mihaela Pop and Maxime Sermesant and Oscar Camara and
Xiahai Zhuang and Shuo Li and Alistair Young and Tommaso
Mansi and Avan Suinesiaputra},
series = {Lecture Notes in Computer Science},
publisher = {Springer},
volume = {12009},
pages = {405--413},
doi = {10.1007/978-3-030-39074-7_42},
abstract = {Automatic segmentation of the left ventricle (LV) of a
living human heart in a magnetic resonance (MR) image
(2D+t) allows to measure some clinical significant indices
like the regional wall thicknesses (RWT), cavity
dimensions, cavity and myocardium areas, and cardiac phase.
Here, we propose a novel framework made of a sequence of
two fully convolutional networks (FCN). The first is a
modified temporal-like VGG16 (the ``localization network'')
and is used to localize roughly the LV (filled-in)
epicardium position in each MR volume. The second FCN is a
modified temporal-like VGG16 too, but devoted to segment
the LV myocardium and cavity (the ``segmentation network'').
We evaluate the proposed method with
5-fold-cross-validation on the MICCAI 2019 LV Full
Quantification Challenge dataset. For the network used to
localize the epicardium, we obtain an average dice index of
0.8953 on validation set. For the segmentation network, we
obtain an average dice index of 0.8664 on validation set
(there, data augmentation is used). The mean absolute error
(MAE) of average cavity and myocardium areas, dimensions,
RWT are 114.77~mm$^2$; 0.9220~mm; 0.9185~mm respectively. The
computation time of the pipeline is less than 2~s for an
entire 3D volume. The error rate of phase classification is
7.6364\%, which indicates that the proposed approach has a
promising performance to estimate all these parameters.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/zhao.19.stacom.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2020-02-07}
}
@InProceedings{ zhao.20.icpr.1,
author = {Zhou Zhao and Nicolas Boutry and \'Elodie Puybareau and
Thierry G\'eraud},
title = {{FOANet}: {A} Focus of Attention Network with Application
to Myocardium Segmentation},
booktitle = {Proceedings of the 25th International Conference on
Pattern Recognition (ICPR)},
year = 2021,
pages = {1120--1127},
month = jan,
address = {Milan, Italy},
publisher = {IEEE},
abstract = {In myocardium segmentation of cardiac magnetic resonance
images, ambiguities often appear near the boundaries of the
target domains due to tissue similarities. To address this
issue, we propose a new architecture, called FOANet, which
can be decomposed in three main steps: a localization step,
a Gaussian-based contrast enhancement step, and a
segmentation step. This architecture is supplied with a
hybrid loss function that guides the FOANet to study the
transformation relationship between the input image and the
corresponding label in a three-level hierarchy (pixel-,
patch- and map-level), which is helpful to improve
segmentation and recovery of the boundaries. We demonstrate
the efficiency of our approach on two public datasets in
terms of regional and boundary segmentations.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/zhao.20.icpr.1.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2020-11-02},
doi = {10.1109/ICPR48806.2021.9412016}
}
@InProceedings{ zhao.20.icpr.2,
author = {Zhou Zhao and Nicolas Boutry and \'Elodie Puybareau and
Thierry G\'eraud},
title = {Do not Treat Boundaries and Regions Differently: {A}n
Example on Heart Left Atrial Segmentation},
booktitle = {Proceedings of the 25th International Conference on
Pattern Recognition (ICPR)},
year = 2021,
pages = {7447--7453},
month = jan,
address = {Milan, Italy},
publisher = {IEEE},
abstract = {Atrial fibrillation is the most common heart rhythm
disease. Due to a lack of understanding in matter of
underlying atrial structures, current treatments are still
not satisfying. Recently, with the popularity of deep
learning, many segmentation methods based on fully
convolutional networks have been proposed to analyze atrial
structures, especially from late gadolinium-enhanced
magnetic resonance imaging. However, two problems still
occur: 1) segmentation results include the atrial-like
background; 2) boundaries are very hard to segment. Most
segmentation approaches design a specific network that
mainly focuses on the regions, to the detriment of the
boundaries. Therefore, this paper proposes an attention
full convolutional network framework based on the
ResNet-101 architecture, which focuses on boundaries as
much as on regions. The additional attention module is
added to have the network pay more attention on regions and
then to reduce the impact of the misleading similarity of
neighboring tissues. We also use a hybrid loss composed of
a region loss and a boundary loss to treat boundaries and
regions at the same time. We demonstrate the efficiency of
the proposed approach on the MICCAI 2018 Atrial
Segmentation Challenge public dataset.},
lrdepaper = {http://www.lrde.epita.fr/dload/papers/zhao.20.icpr.2.pdf},
lrdeprojects = {Olena},
lrdekeywords = {Image},
lrdenewsdate = {2020-11-02},
doi = {10.1109/ICPR48806.2021.9412755}
}
%% Local Variables:
%% fill-column: 76
%% ispell-local-dictionary: "american"
%% End: