https://www.lrde.epita.fr/index.php?title=Special:NewPages&feed=atom&limit=50&offset=20210728125300&namespace=0&username=&tagfilter=&size-mode=max&size=0LRDE - New pages [en]2024-03-29T15:47:32ZFrom LRDEMediaWiki 1.35.3https://www.lrde.epita.fr/wiki/Publications/sekuboyina.21.mediaPublications/sekuboyina.21.media2021-07-22T09:47:00Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-07-22<br />
| authors = Anjany Sekuboyina, Malek E Husseini, Amirhossein Bayat, Maximilian Löffler, Hans Liebl, Hongwei Li, Giles Tetteh, Jan Kukačka, Christian Payer, Darko Stern, Martin Urschler, Maodong Chen, Dalong Cheng, Nikolas Lessmann, Yujin Hu, Tianfu Wang, Dong Yang, Daguang Xu, and Felix Ambellan, Tamaz Amiranashvili, Moritz Ehlke, Hans Lamecker, Sebastian Lehnert, Marilia Lirio, Nicolás Pérez de Olaguer, Heiko Ramm, Manish Sahu, Alexander Tack, Stefan Zachow, Tao Jiang, Xinjun Ma, Christoph Angerman, Xin Wang, Kevin Brown, Matthias Wolf, Alexandre Kirszenberg, Élodie Puybareau, Di Chen, Yiwei Bai, Brandon H Rapazzo, Timyoas Yeah, Amber Zhang, Shangliang Xu, Feng Houa, Zhiqiang He, Chan Zeng, Zheng Xiangshang, Xu Liming, Tucker J Netherton, Raymond P Mumme, Laurence E Court, Zixun Huang, Chenhang He, Li-Wen Wang, Sai Ho Ling, Lê Duy Huỳnh, Nicolas Boutry, Roman Jakubicek, Jiri Chmelik, Supriti Mulay, Mohanasankar Sivaprakasam, Johannes C Paetzold, Suprosanna Shit, Ivan Ezhov, Benedikt Wiestler, Ben Glocker, Alexander Valentinitsch, Markus Rempfler, Björn H Menze, Jan S Kirschke<br />
| title = VerSe: A Vertebrae Labelling and Segmentation Benchmark for Multi-detector CT Images<br />
| journal = Medical Image Analysis<br />
| number = 102166<br />
| abstract = Vertebral labelling and segmentation are two fundamental tasks in an automated spine processing pipeline. Reliable and accurate processing of spine images is expected to benefit clinical decision support systems for diagnosis, surgery planning, and population-based analysis of spine and bone health. However, designing automated algorithms for spine processing is challenging predominantly due to considerable variations in anatomy and acquisition protocols and due to a severe shortage of publicly available data. Addressing these limitations, the Large Scale Vertebrae Segmentation Challenge (VerSe) was organised in conjunction with the International Conference on Medical Image Computing and Computer Assisted Intervention (MICCAI) in 2019 and 2020, with a call for algorithms tackling the labelling and segmentation of vertebrae. Two datasets containing a total of 374 multi-detector CT scans from 355 patients were prepared and 4505 vertebrae have individually been annotated at voxel level by a human-machine hybrid algorithm (https://osf.io/nqjyw/, https://osf.io/t98fz/). A total of 25 algorithms were benchmarked on these datasets. In this work, we present the results of this evaluation and further investigate the performance variation at the vertebra level, scan level, and different fields of view. We also evaluate the generalisability of the approaches to an implicit domain shift in data by evaluating the top-performing algorithms of one challenge iteration on data from the other iteration. The principal takeaway from VerSe: the performance of an algorithm in labelling and segmenting a spine scan hinges on its ability to correctly identify vertebrae in cases of rare anatomical variations. The VerSe content and code can be accessed at: https://github.com/anjany/verse.<br />
| volume = 73<br />
| issue = 102166<br />
| lrdepaper = http://www.lrde.epita.fr/dload/papers/sekuboyina.21.media.pdf<br />
| lrdeprojects = Olena<br />
| lrdenewsdate = 2021-07-22<br />
| type = article<br />
| id = sekuboyina.21.media<br />
| identifier = doi:10.1016/j.media.2021.102166<br />
| bibtex = <br />
@Article<nowiki>{</nowiki> sekuboyina.21.media,<br />
author = <nowiki>{</nowiki>Anjany Sekuboyina and Malek E. Husseini and Amirhossein<br />
Bayat and Maximilian L\"offler and Hans Liebl and Hongwei<br />
Li and Giles Tetteh and Jan Kuka\v<nowiki>{</nowiki>c<nowiki>}</nowiki>ka and Christian Payer<br />
and Darko Stern and Martin Urschler and Maodong Chen and<br />
Dalong Cheng and Nikolas Lessmann and Yujin Hu and Tianfu<br />
Wang and Dong Yang and Daguang Xu and and Felix Ambellan<br />
and Tamaz Amiranashvili and Moritz Ehlke and Hans Lamecker<br />
and Sebastian Lehnert and Marilia Lirio and Nicol\'as<br />
<nowiki>{</nowiki>P\'erez de Olaguer<nowiki>}</nowiki> and Heiko Ramm and Manish Sahu and<br />
Alexander Tack and Stefan Zachow and Tao Jiang and Xinjun<br />
Ma and Christoph Angerman and Xin Wang and Kevin Brown and<br />
Matthias Wolf and Alexandre Kirszenberg and \'Elodie<br />
Puybareau and Di Chen and Yiwei Bai and Brandon H. Rapazzo<br />
and Timyoas Yeah and Amber Zhang and Shangliang Xu and Feng<br />
Houa and Zhiqiang He and Chan Zeng and Zheng Xiangshang and<br />
Xu Liming and Tucker J. Netherton and Raymond P. Mumme and<br />
Laurence E. Court and Zixun Huang and Chenhang He and<br />
Li-Wen Wang and Sai Ho Ling and L\^e Duy Hu\`ynh and<br />
Nicolas Boutry and Roman Jakubicek and Jiri Chmelik and<br />
Supriti Mulay and Mohanasankar Sivaprakasam and Johannes C.<br />
Paetzold and Suprosanna Shit and Ivan Ezhov and Benedikt<br />
Wiestler and Ben Glocker and Alexander Valentinitsch and<br />
Markus Rempfler and Bj\"orn H. Menze and Jan S. Kirschke<nowiki>}</nowiki>,<br />
title = <nowiki>{</nowiki><nowiki>{</nowiki>VerSe<nowiki>}</nowiki>: <nowiki>{</nowiki>A<nowiki>}</nowiki> Vertebrae Labelling and Segmentation<br />
Benchmark for Multi-detector <nowiki>{</nowiki>CT<nowiki>}</nowiki> Images<nowiki>}</nowiki>,<br />
journal = <nowiki>{</nowiki>Medical Image Analysis<nowiki>}</nowiki>,<br />
number = <nowiki>{</nowiki>102166<nowiki>}</nowiki>,<br />
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,<br />
month = jul,<br />
doi = <nowiki>{</nowiki>10.1016/j.media.2021.102166<nowiki>}</nowiki>,<br />
abstract = <nowiki>{</nowiki>Vertebral labelling and segmentation are two fundamental<br />
tasks in an automated spine processing pipeline. Reliable<br />
and accurate processing of spine images is expected to<br />
benefit clinical decision support systems for diagnosis,<br />
surgery planning, and population-based analysis of spine<br />
and bone health. However, designing automated algorithms<br />
for spine processing is challenging predominantly due to<br />
considerable variations in anatomy and acquisition<br />
protocols and due to a severe shortage of publicly<br />
available data. Addressing these limitations, the Large<br />
Scale Vertebrae Segmentation Challenge (VerSe) was<br />
organised in conjunction with the International Conference<br />
on Medical Image Computing and Computer Assisted<br />
Intervention (MICCAI) in 2019 and 2020, with a call for<br />
algorithms tackling the labelling and segmentation of<br />
vertebrae. Two datasets containing a total of 374<br />
multi-detector CT scans from 355 patients were prepared and<br />
4505 vertebrae have individually been annotated at voxel<br />
level by a human-machine hybrid algorithm<br />
(\url<nowiki>{</nowiki>https://osf.io/nqjyw/<nowiki>}</nowiki>, \url<nowiki>{</nowiki>https://osf.io/t98fz/<nowiki>}</nowiki>).<br />
A total of 25 algorithms were benchmarked on these<br />
datasets. In this work, we present the results of this<br />
evaluation and further investigate the performance<br />
variation at the vertebra level, scan level, and different<br />
fields of view. We also evaluate the generalisability of<br />
the approaches to an implicit domain shift in data by<br />
evaluating the top-performing algorithms of one challenge<br />
iteration on data from the other iteration. The principal<br />
takeaway from VerSe: the performance of an algorithm in<br />
labelling and segmenting a spine scan hinges on its ability<br />
to correctly identify vertebrae in cases of rare anatomical<br />
variations. The VerSe content and code can be accessed at:<br />
\url<nowiki>{</nowiki>https://github.com/anjany/verse<nowiki>}</nowiki>.<nowiki>}</nowiki>,<br />
volume = <nowiki>{</nowiki>73<nowiki>}</nowiki>,<br />
issue = <nowiki>{</nowiki>102166<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/dubois.21.seminar/frPublications/dubois.21.seminar/fr2021-07-20T17:23:29Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Jérôme Dubois<br />
| titre = Réduction basée sur des simulations d'ω-automates<br />
| year = 2021<br />
| number = 2123<br />
| resume = Les ω-automates, capables de modéliser des comportements infinis, sont utilisés dans de nombreux domaines dont la vérification de modèle. Les algorithmes utilisés sont en général très coûteux. Pour cette raison, on veut réduire la taille des automates, tout en préservant le langage reconnu en appliquant de nombreuses réductions. L'une d'entre elles, basée sur les simulations, est très lente. Dans ce rapport, nous allons montrer comment l'accélérer en utilisant des méthodes algorithmiques et de la parallélisation.<br />
| type = techreport<br />
| id = dubois.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/dubois.21.seminarPublications/dubois.21.seminar2021-07-20T17:23:26Z<p>Bot: Created page with "{{CSIReport | authors = Jérôme Dubois | title = Fast simulation based reduction for ω-automaton | year = 2021 | number = 2123 | abstract = The <math>\omega</math>-automata,..."</p>
<hr />
<div>{{CSIReport<br />
| authors = Jérôme Dubois<br />
| title = Fast simulation based reduction for ω-automaton<br />
| year = 2021<br />
| number = 2123<br />
| abstract = The <math>\omega</math>-automata, capable of modeling infinite behavior, are used in numerous domains including model checking. The algorithms used are in general very costly. For this reason, we want to reduce the automata size while preserving the recognized language by applying numerous reductions. One of them, based on simulation, is very slow. In this paper, we will show how to speed it up using algorithmic methods and parallelization.<br />
| type = techreport<br />
| id = dubois.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/hermary.21.seminar/frPublications/hermary.21.seminar/fr2021-07-05T14:27:14Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Romain Hermary<br />
| titre = Intégration des Opérateurs de la Morphologie Mathématique dans des Réseaux de Neurones<br />
| year = 2021<br />
| number = 2122<br />
| resume = Le traitement d'image est un domaine d'étude très vaste qui englobe une multitude d'opérations, chacune ayant des spécificités, des circonstances d'utilisation, des complexités et des résultats différents. Aujourd'hui, les meilleurs résultats pour l'étude automatique des images (segmentation d'images, classification d'images, détection des objets, etc.) sont obtenus en utilisant l'apprentissage profond, et plus spécifiquement les réseaux neuronaux convolutifs. Nous explorons et menons des expériences sur une partie spécifique du traitement d'image, la morphologie mathématique, en recherchant la meilleure façon de contourner les complexités des opérations concernant leur intégration dans un pipeline d'apprentissage supervisé.<br />
| type = techreport<br />
| id = hermary.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/hermary.21.seminarPublications/hermary.21.seminar2021-07-05T14:27:12Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Romain Hermary<br />
| title = Integration of Morphological Operators in Neural Networks<br />
| year = 2021<br />
| number = 2122<br />
| abstract = Image processing is a very broad field of study that encompasses a multitude of operations, each of them with different purposes and circumstances of use, complexities and results. Nowadays, the best results for automatic image study (image segmentation, image classification, object detection, etc.) are obtained using deep learning, and more specifically convolutional neural networks. We explore and conduct experiments on a specific part of image processing, mathematical morphology, investigating on the best way of circumventing operations' complexities regarding their integration in a supervised learning pipeline.<br />
| type = techreport<br />
| id = hermary.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/guillet.21.seminar/frPublications/guillet.21.seminar/fr2021-06-28T06:46:42Z<p>Bot: Created page with "{{CSIReportFR | authors = William Guillet | titre = Segmentation d'IRM de cerveau en utilisant un reseau de neurones convolutif | year = 2023 | resume = La detection des hyper..."</p>
<hr />
<div>{{CSIReportFR<br />
| authors = William Guillet<br />
| titre = Segmentation d'IRM de cerveau en utilisant un reseau de neurones convolutif<br />
| year = 2023<br />
| resume = La detection des hyperintensites de la matiere blanche de facon efficace est un enjeu important dans le medical. Une detection efficace de ces dernieres permettrait notamment de mieux diagnostiquer certaines maladies neuro- degeneratives mais aussi d'eviter certaines erreurs medicales. C'est la problematique a laquelle nous avons tente de repondre dans ce rapport. On propose une solution se basant sur un reseau de neurones convolutif accompagne d'un pretraitement effectue sur les entrees<br />
| type = techreport<br />
| id = guillet.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/guillet.21.seminarPublications/guillet.21.seminar2021-06-28T06:46:41Z<p>Bot: Created page with "{{CSIReport | authors = William Guillet | title = Brain MRI Segmentation using fully convolutional network | year = 2023 | abstract = The detection of white matter hyperintens..."</p>
<hr />
<div>{{CSIReport<br />
| authors = William Guillet<br />
| title = Brain MRI Segmentation using fully convolutional network<br />
| year = 2023<br />
| abstract = The detection of white matter hyperintensities in an efficient way is an important issue in the medical field. Indeed, these hyperintensities are complicated to detect with the naked eye, even for medical personnel. An efficient detection of these hyperintensities would allow a better diagnosis of certain neuro-degenerative diseases but also to avoid certain medical errors. This is the issue we have tried to address in this report. We propose a solution based on a convolutional neural network accompanied by a preprocessing performed on these inputs<br />
| type = techreport<br />
| id = guillet.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/moreau.21.seminar/frPublications/moreau.21.seminar/fr2021-06-26T12:36:30Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Hugo Moreau<br />
| titre = Gestion des transitions blackbox dans go2pins<br />
| year = 2021<br />
| number = 2108<br />
| resume = go2pins est un outil utilisé pour interfacer un programme Go avec des outils de vérification formelle. À l'aide d'une série de transformations, un programme Go est compilé vers un programme au comportement identique, mais exposant une interface permettant d'itérer l'espace d'états de celui ci. Nous introduisons les transitions black-box, une technique efficace et évolutive pour gérer le runtime Go. Cette approche permet des abstractions faciles, automatiques et efficaces. Dans ce rapport, nous présentons le développement derrière l'introduction des transitions black-box dans go2pins et les problèmes rencontrés.<br />
| type = techreport<br />
| id = moreau.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/moreau.21.seminarPublications/moreau.21.seminar2021-06-26T12:36:29Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Hugo Moreau<br />
| title = Handling blackbox transitions in go2pins<br />
| year = 2021<br />
| number = 2108<br />
| abstract = go2pins is a tool used to interface Go programs with model checking algorithms. Through a series of transformations, a standard Go program is compiled to another behaving the same way, but exposing an interface allowing to iterate over its various states. We introduce black-box transitions, an efficient and scalable technique for handling the Go runtime. This approach, inspired from the hardware verification techniques, allows easy, automatic and efficient abstractions. In this report, we introduce the development behind the introduction of the black-box transitions in go2pins and the problems encountered.<br />
| type = techreport<br />
| id = moreau.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/oueslati.21.seminar/frPublications/oueslati.21.seminar/fr2021-06-25T23:10:09Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Mehdi OUESLATI<br />
| titre = genus.py: un Système Simple de Typage Embarqué pour le language Python<br />
| year = 2021<br />
| number = 2120<br />
| resume = Ce rapport technique résume le travail réalisé dans le cadre du projet genus.py tout en le remettant dans son contexte parmi l'état de l'art des projets de genus. De plus, il explicite les contributions de son auteur, et conclut en tirant des erreurs commises par son auteur des leçons utiles aux générations futures d'étudiants.<br />
| type = techreport<br />
| id = oueslati.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/oueslati.21.seminarPublications/oueslati.21.seminar2021-06-25T23:10:08Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Mehdi OUESLATI<br />
| title = genus.py: a Simple Embeddable Type System for the Python language<br />
| year = 2021<br />
| number = 2120<br />
| abstract = This technical report explores the work behind genus.py and contextualizes it within the state of the art of the genus projects. It carries on by expliciting its author's contribution, and ends by reflecting on the mistakes the author made while focusing on drawing lessons for the next generations of students.<br />
| type = techreport<br />
| id = oueslati.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/simonin.21.seminar/frPublications/simonin.21.seminar/fr2021-06-25T19:21:36Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Victor Simonin<br />
| titre = Implementations efficaces des representations hierarchiques morphologiques<br />
| year = 2021<br />
| number = 2111<br />
| resume = La morphologie mathématique est devenu un outil indispensable pour une bibliotheque de traitement d'image. Les algorithmes produit grace a cette technique sont tres efficaces et permettent des résultats tres satisfaisant notamment pour des opérations de segmentation d'image. Le travail produit a donc pour objectif d'implémenter et d'intégrer dans la bibliotheque Pylene les représentations hiérarchique morphologique. Notre travail s'est concentré principalement sur la pipeline globale de segmentation utilisant ces méthodes, ainsi que la visualisation grace aux cartes de saillance. L'objectif etant bien entendu de réaliser les algorithmes les plus optimisés possible pour pouvoir les utiliser sur de grandes images.<br />
| type = techreport<br />
| id = simonin.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/simonin.21.seminarPublications/simonin.21.seminar2021-06-25T19:21:35Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Victor Simonin<br />
| title = Efficient implementations of hierarchical morphological representations<br />
| year = 2021<br />
| number = 2111<br />
| abstract = Mathematical morphology has become an indispensable tool for an image processing library. The algorithms produced using this technique are very efficient and allow very satisfactory results, in particular for image segmentation operations. The work produced therefore aims to implement and integrate hierarchical morphological representations into the Pylene library. Our work has mainly focused on the global segmentation pipeline using these methods, as well as visualization using saliency maps. The objective being of course to achieve the most optimized algorithms possible to be able to use them on large images.<br />
| type = techreport<br />
| id = simonin.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/gasnault.21.seminar/frPublications/gasnault.21.seminar/fr2021-06-25T00:23:45Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Louis Gasnault<br />
| titre = Segmentation d'IRM du cerveau avec la morphologie mathematique<br />
| year = 2021<br />
| number = 2119<br />
| resume = Le developpement du cerveau peut etre evalue grace a l'imagerie par resonance magnetique (IRM) du cerveau. Elle est utile en cas de naissance prematuree pour s'assurer qu'aucune maladie cerebrale ne se developpe pendant la periode postnatale. De telles maladies sont visibles sur un image IRM ponderee en T2 sous la forme d'hyperintensites de la matiere blanche. Pour evaluer la presence de ces hyperintensites, ce travail met en oeuvre une nouvelle implementation d'un outil, semi-automatique, base sur la morphologie mathematique, specialise dans la segmentation du cerveau des nouveau-nes. Nous passerons en revue les travaux connexes, l'implementation des differentes etapes et les difficultes rencontrees. Au final, la version developpee au cours de ce stage n'est pas completement terminee mais elle est en bonne voie pour une finalisation ulterieure.<br />
| type = techreport<br />
| id = gasnault.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/gasnault.21.seminarPublications/gasnault.21.seminar2021-06-25T00:23:44Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Louis Gasnault<br />
| title = Brain MRI segmentation using mathematical morphology<br />
| year = 2021<br />
| number = 2119<br />
| abstract = Brain development can be evaluated using brain Magnetic Resonance Imaging (MRI). It is useful in cases of preterm birth to ensure that no brain disease develops during the postnatal period. Such diseases can be visible on T2-weighted MR image as high signal intensity (DEHSI). To assess the presence of white matter hyperintensities, this work implements a new robust, semi-automated framework, based on mathematical morphology, specialized on neonate brain segmentation. We will go over the related work, the implementation of the different steps and the difficulties encountered. In the end, the version developed during this internship is not completely finished but it is in good shape for a later finalization.<br />
| type = techreport<br />
| id = gasnault.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/habib.21.seminar/frPublications/habib.21.seminar/fr2021-06-24T17:43:01Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Nathan Habib<br />
| titre = Construire un perceptron quantique<br />
| year = 2021<br />
| number = 2117<br />
| resume = Les technologies quantiques étant en évolution constante, il est normal de se demander s'il est possible de prendre avantage de ces technologies dans le domaine de l'intelligence artificielle. Dans ce papier, nous présentons les rudimentaires de l'informatique quantique ainsi que l'implémentation d'un perceptron avec comme support une machine quantique et une explication de son fonctionnement.<br />
| type = techreport<br />
| id = habib.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/habib.21.seminarPublications/habib.21.seminar2021-06-24T17:43:00Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Nathan Habib<br />
| title = Building a quantum perceptron<br />
| year = 2021<br />
| number = 2117<br />
| abstract = With the continuous improvement of quantum technologies, one might wonder if it is possible to take advantage of them for machine learning. In this paper we present a first approach of quantum computing as well as the implementation of a quantum perceptron; we then explain the reasoning behind these algorithms.<br />
| type = techreport<br />
| id = habib.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/pape.21.seminar/frPublications/pape.21.seminar/fr2021-06-18T12:57:26Z<p>Bot: </p>
<hr />
<div>{{CSIReportFR<br />
| authors = Corentin Pape<br />
| titre = Identification Polynomiale des omega-Langages<br />
| year = 2021<br />
| number = 2113<br />
| resume = Ce rapport présente l'implémentation en C++ de l'algorithme ID décrit par Dana Angluin et al dans Polynomial Identification of omega-Automata. Il permet l'identification, ou l'apprentissage passif, d'omega-langages réguliers et des omega-automates associés, dans un temps et une mémoire polynomiaux. C'est un travail préliminaire à l'étude de l'apprentissage actif d'omega-langages. Le code est disponible sur https://gitlab.lrde.epita.fr/cpape/ID<br />
| type = techreport<br />
| id = pape.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/pape.21.seminarPublications/pape.21.seminar2021-06-18T12:57:24Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Corentin Pape<br />
| title = Polynomial Identification of omega-Language<br />
| year = 2021<br />
| number = 2113<br />
| abstract = The present report is the C++ implementation of the algorithm ID described by Dana Angluin and al in Polynomial Identification of omega-Automata. It allows identification, or passive learning, of regular omega-languages and omega-automata, in polynomial time and data. It is a preliminary work to the study of active learning of omega-languages. The code is available at https://gitlab.lrde.epita.fr/cpape/ID<br />
| type = techreport<br />
| id = pape.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/mom.21.seminar/frPublications/mom.21.seminar/fr2021-06-09T09:39:49Z<p>Bot: Created page with "{{CSIReportFR | authors = Ferdinand Mom | titre = ... Insert a title in French here ... | year = 2021 | number = 2109 | resume = ... Insert an abstract in French here ... | ty..."</p>
<hr />
<div>{{CSIReportFR<br />
| authors = Ferdinand Mom<br />
| titre = ... Insert a title in French here ...<br />
| year = 2021<br />
| number = 2109<br />
| resume = ... Insert an abstract in French here ...<br />
| type = techreport<br />
| id = mom.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/mom.21.seminarPublications/mom.21.seminar2021-06-09T09:39:48Z<p>Bot: </p>
<hr />
<div>{{CSIReport<br />
| authors = Ferdinand Mom<br />
| title = Real Time Face Expression Recognition<br />
| year = 2021<br />
| number = 2109<br />
| abstract = ... Insert an abstract in English here ...<br />
| type = techreport<br />
| id = mom.21.seminar<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/kirszenberg.21.spinPublications/kirszenberg.21.spin2021-06-08T06:29:20Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-06-08<br />
| authors = Alexandre Kirszenberg, Antoine Martin, Hugo Moreau, Etienne Renault<br />
| title = Go2Pins: A Framework for the LTL Verification of Go Programs<br />
| booktitle = Proceedings of the 27th International SPIN Symposium on Model Checking of Software (SPIN'21)<br />
| series = Lecture Notes in Computer Science<br />
| volume = 12864<br />
| address = Aarhus, Denmark (online)<br />
| publisher = Springer, Cham<br />
| pages = 140 to 156<br />
| abstract = We introduce Go2Pins, a tool that takes a program written in Go and links it with two model-checkers: LTSMin [19] and Spot [7]. Go2Pins is an effort to promote the integration of both formal verification and testing inside industrial-size projects. With this goal in mind, we introduce black-box transitions, an efficient and scalable technique for handling the Go runtime. This approach, inspired by hardware verification techniques, allows easy, automatic and efficient abstractions. Go2Pins also handles basic concurrent programs through the use of a dedicated scheduler. In this paper we demonstrate the usage of Go2Pins over benchmarks inspired by industrial problems and a set of LTL formulae. Even if Go2Pins is still at the early stages of development, our results are promising and show the benefits of using black-box transitions.<br />
| lrdepaper = http://www.lrde.epita.fr/dload/papers/kirszenberg.21.spin.pdf<br />
| lrdekeywords = Spot<br />
| lrdenewsdate = 2021-06-08<br />
| type = inproceedings<br />
| id = kirszenberg.21.spin<br />
| identifier = doi:10.1007/978-3-030-84629-9_8<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> kirszenberg.21.spin,<br />
author = <nowiki>{</nowiki>Alexandre Kirszenberg and Antoine Martin and Hugo Moreau<br />
and Etienne Renault<nowiki>}</nowiki>,<br />
title = <nowiki>{</nowiki><nowiki>{</nowiki>Go2Pins<nowiki>}</nowiki>: <nowiki>{</nowiki>A<nowiki>}</nowiki> Framework for the <nowiki>{</nowiki>LTL<nowiki>}</nowiki> Verification of<br />
<nowiki>{</nowiki>Go<nowiki>}</nowiki> Programs<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the 27th International SPIN Symposium on<br />
Model Checking of Software (SPIN'21)<nowiki>}</nowiki>,<br />
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,<br />
series = <nowiki>{</nowiki>Lecture Notes in Computer Science<nowiki>}</nowiki>,<br />
volume = <nowiki>{</nowiki>12864<nowiki>}</nowiki>,<br />
month = may,<br />
address = <nowiki>{</nowiki>Aarhus, Denmark (online)<nowiki>}</nowiki>,<br />
publisher = <nowiki>{</nowiki>Springer, Cham<nowiki>}</nowiki>,<br />
pages = <nowiki>{</nowiki>140--156<nowiki>}</nowiki>,<br />
abstract = <nowiki>{</nowiki>We introduce Go2Pins, a tool that takes a program written<br />
in Go and links it with two model-checkers: LTSMin [19] and<br />
Spot [7]. Go2Pins is an effort to promote the integration<br />
of both formal verifica- tion and testing inside<br />
industrial-size projects. With this goal in mind, we<br />
introduce black-box transitions, an efficient and scalable<br />
technique for handling the Go runtime. This approach,<br />
inspired by hardware ver- ification techniques, allows<br />
easy, automatic and efficient abstractions. Go2Pins also<br />
handles basic concurrent programs through the use of a<br />
dedicated scheduler. In this paper we demonstrate the usage<br />
of Go2Pins over benchmarks inspired by industrial problems<br />
and a set of LTL formulae. Even if Go2Pins is still at the<br />
early stages of development, our results are promising and<br />
show the the benefits of using black-box transitions.<nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>10.1007/978-3-030-84629-9_8<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/bloch.21.dgmmPublications/bloch.21.dgmm2021-05-21T18:00:26Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-02-16<br />
| authors = Isabelle Bloch, Samy Blusseau, Ramón Pino Pérez, Élodie Puybareau, Guillaume Tochon<br />
| editors = Joakim Lindblad, Filip Malmberg, Nataša Sladoje<br />
| title = On Some Associations Between Mathematical Morphology and Artificial Intelligence<br />
| booktitle = Proceedings of the IAPR International Conference on Discrete Geometry and Mathematical Morphology (DGMM)<br />
| address = Uppsala, Sweden<br />
| series = Lecture Notes in Computer Science<br />
| volume = 12708<br />
| publisher = Springer<br />
| pages = 457 to 469<br />
| abstract = This paper aims at providing an overview of the use of mathematical morphology, in its algebraic setting, in several fields of artificial intelligence (AI). Three domains of AI will be covered. In the first domain, mathematical morphology operators will be expressed in some logics (propositional, modal, description logics) to answer typical questions in knowledge representation and reasoning, such as revision, fusion, explanatory relations, satisfying usual postulates. In the second domain, spatial reasoning will benefit from spatial relations modeled using fuzzy sets and morphological operators, with applications in model-based image understanding. In the third domain, interactions between mathematical morphology and deep learning will be detailed. Morphological neural networks were introduced as an alternative to classical architectures, yielding a new geometry in decision surfaces. Deep networks were also trained to learn morphological operators and pipelines, and morphological algorithms were used as companion tools to machine learning, for pre/post processing or even regularization purposes. These ideas have known a large resurgence in the last few years and new ones are emerging.<br />
| lrdeprojects = Olena<br />
| lrdekeywords = Image<br />
| lrdenewsdate = 2021-02-16<br />
| type = inproceedings<br />
| id = bloch.21.dgmm<br />
| identifier = doi:10.1007/978-3-030-76657-3_33<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> bloch.21.dgmm,<br />
doi = <nowiki>{</nowiki>10.1007/978-3-030-76657-3_33<nowiki>}</nowiki>,<br />
author = <nowiki>{</nowiki>Isabelle Bloch and Samy Blusseau and Ram\'on <nowiki>{</nowiki>Pino<br />
P\'erez<nowiki>}</nowiki> and \'Elodie Puybareau and Guillaume Tochon<nowiki>}</nowiki>,<br />
editor = <nowiki>{</nowiki>Lindblad, Joakim and Malmberg, Filip and Sladoje,<br />
Nata<nowiki>{</nowiki>\v<nowiki>{</nowiki>s<nowiki>}</nowiki><nowiki>}</nowiki>a<nowiki>}</nowiki>,<br />
title = <nowiki>{</nowiki>On Some Associations Between Mathematical Morphology and<br />
Artificial Intelligence<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the IAPR International Conference on<br />
Discrete Geometry and Mathematical Morphology (DGMM)<nowiki>}</nowiki>,<br />
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,<br />
address = <nowiki>{</nowiki>Uppsala, Sweden<nowiki>}</nowiki>,<br />
series = <nowiki>{</nowiki>Lecture Notes in Computer Science<nowiki>}</nowiki>,<br />
volume = <nowiki>{</nowiki>12708<nowiki>}</nowiki>,<br />
publisher = <nowiki>{</nowiki>Springer<nowiki>}</nowiki>,<br />
pages = <nowiki>{</nowiki>457--469<nowiki>}</nowiki>,<br />
month = may,<br />
abstract = <nowiki>{</nowiki>This paper aims at providing an overview of the use of<br />
mathematical morphology, in its algebraic setting, in<br />
several fields of artificial intelligence (AI). Three<br />
domains of AI will be covered. In the first domain,<br />
mathematical morphology operators will be expressed in some<br />
logics (propositional, modal, description logics) to answer<br />
typical questions in knowledge representation and<br />
reasoning, such as revision, fusion, explanatory relations,<br />
satisfying usual postulates. In the second domain, spatial<br />
reasoning will benefit from spatial relations modeled using<br />
fuzzy sets and morphological operators, with applications<br />
in model-based image understanding. In the third domain,<br />
interactions between mathematical morphology and deep<br />
learning will be detailed. Morphological neural networks<br />
were introduced as an alternative to classical<br />
architectures, yielding a new geometry in decision<br />
surfaces. Deep networks were also trained to learn<br />
morphological operators and pipelines, and morphological<br />
algorithms were used as companion tools to machine<br />
learning, for pre/post processing or even regularization<br />
purposes. These ideas have known a large resurgence in the<br />
last few years and new ones are emerging.<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/chen.21.icdarPublications/chen.21.icdar2021-05-21T15:44:09Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-05-17<br />
| title = Vectorization of Historical Maps Using Deep Edge Filtering and Closed Shape Extraction<br />
| authors = Yizi Chen, Edwin Carlinet, Joseph Chazalon, Clément Mallet, Bertrand Duménieu, Julien Perret<br />
| booktitle = Proceedings of the 16th International Conference on Document Analysis and Recognition (ICDAR'21)<br />
| pages = 510 to 525<br />
| series = Lecture Notes in Computer Science<br />
| publisher = Springer, Cham<br />
| volume = 12824<br />
| address = Lausanne, Switzerland<br />
| abstract = Maps have been a unique source of knowledge for centuries. Such historical documents provide invaluable information for analyzing the complex spatial transformation of landscapes over important time frames. This is particularly true for urban areas that encompass multiple interleaved research domains (social sciences, economy, etc.). The large amount and significant diversity of map sources call for automatic image processing techniques in order to extract the relevant objects under a vectorial shape. The complexity of maps (text, noise, digitization artifacts, etc.) has hindered the capacity of proposing a versatile and efficient raster-to-vector approaches for decades. We propose a learnable, reproducible, and reusable solution for the automatic transformation of raster maps into vector objects (building blocks, streets, rivers). It is built upon the complementary strength of mathematical morphology and convolutional neural networks through efficient edge filtering. Evenmore, we modify ConnNet and combine with deep edge filtering architecture to make use of pixel connectivity information and built an end-to-end system without requiring any post-processing techniques. In this paper, we focus on the comprehensive benchmark on various architectures on multiple datasets coupled with a novel vectorization step. Our experimental results on a new public dataset using COCO Panoptic metric exhibit very encouraging results confirmed by a qualitative analysis of the success and failure cases of our approach. Code, dataset, results and extra illustrations are freely available at https://github.com/soduco/ICDAR-2021-Vectorization.<br />
| lrdepaper = http://www.lrde.epita.fr/dload/papers/chen.21.icdar.pdf<br />
| lrdeprojects = Olena<br />
| lrdekeywords = Image<br />
| lrdenewsdate = 2021-05-17<br />
| type = inproceedings<br />
| id = chen.21.icdar<br />
| identifier = doi:10.1007/978-3-030-86337-1_34<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> chen.21.icdar,<br />
title = <nowiki>{</nowiki>Vectorization of Historical Maps Using Deep Edge Filtering<br />
and Closed Shape Extraction<nowiki>}</nowiki>,<br />
author = <nowiki>{</nowiki>Yizi Chen and Edwin Carlinet and Joseph Chazalon and<br />
Cl\'ement Mallet and Bertrand Dum\'enieu and Julien Perret<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the 16th International Conference on<br />
Document Analysis and Recognition (ICDAR'21)<nowiki>}</nowiki>,<br />
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,<br />
month = sep,<br />
pages = <nowiki>{</nowiki>510--525<nowiki>}</nowiki>,<br />
series = <nowiki>{</nowiki>Lecture Notes in Computer Science<nowiki>}</nowiki>,<br />
publisher = <nowiki>{</nowiki>Springer, Cham<nowiki>}</nowiki>,<br />
volume = <nowiki>{</nowiki>12824<nowiki>}</nowiki>,<br />
address = <nowiki>{</nowiki>Lausanne, Switzerland<nowiki>}</nowiki>,<br />
abstract = <nowiki>{</nowiki>Maps have been a unique source of knowledge for centuries.<br />
Such historical documents provide invaluable information<br />
for analyzing the complex spatial transformation of<br />
landscapes over important time frames. This is particularly<br />
true for urban areas that encompass multiple interleaved<br />
research domains (social sciences, economy, etc.). The<br />
large amount and significant diversity of map sources call<br />
for automatic image processing techniques in order to<br />
extract the relevant objects under a vectorial shape. The<br />
complexity of maps (text, noise, digitization artifacts,<br />
etc.) has hindered the capacity of proposing a versatile<br />
and efficient raster-to-vector approaches for decades. We<br />
propose a learnable, reproducible, and reusable solution<br />
for the automatic transformation of raster maps into vector<br />
objects (building blocks, streets, rivers). It is built<br />
upon the complementary strength of mathematical morphology<br />
and convolutional neural networks through efficient edge<br />
filtering. Evenmore, we modify ConnNet and combine with<br />
deep edge filtering architecture to make use of pixel<br />
connectivity information and built an end-to-end system<br />
without requiring any post-processing techniques. In this<br />
paper, we focus on the comprehensive benchmark on various<br />
architectures on multiple datasets coupled with a novel<br />
vectorization step. Our experimental results on a new<br />
public dataset using COCO Panoptic metric exhibit very<br />
encouraging results confirmed by a qualitative analysis of<br />
the success and failure cases of our approach. Code,<br />
dataset, results and extra illustrations are freely<br />
available at<br />
\url<nowiki>{</nowiki>https://github.com/soduco/ICDAR-2021-Vectorization<nowiki>}</nowiki>. <nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>10.1007/978-3-030-86337-1_34<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/chazalon.21.icdar.2Publications/chazalon.21.icdar.22021-05-21T15:44:07Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-05-17<br />
| title = ICDAR 2021 Competition on Historical Map Segmentation<br />
| authors = Joseph Chazalon, Edwin Carlinet, Yizi Chen, Julien Perret, Bertrand Duménieu, Clément Mallet, Thierry Géraud, Vincent Nguyen, Nam Nguyen, Josef Baloun, Ladislav Lenc, Pavel Král<br />
| booktitle = Proceedings of the 16th International Conference on Document Analysis and Recognition (ICDAR'21)<br />
| pages = 693 to 707<br />
| series = Lecture Notes in Computer Science<br />
| publisher = Springer, Cham<br />
| volume = 12824<br />
| address = Lausanne, Switzerland<br />
| abstract = This paper presents the final results of the ICDAR 2021 Competition on Historical Map Segmentation (MapSeg), encouraging research on a series of historical atlases of Paris, France, drawn at 1/5000 scale between 1894 and 1937. The competition featured three tasks, awarded separately. Task&nbsp;1 consists in detecting building blocks and was won by the L3IRIS team using a DenseNet-121 network trained in a weakly supervised fashion. This task is evaluated on 3 large images containing hundreds of shapes to detect. Task&nbsp;2 consists in segmenting map content from the larger map sheet, and was won by the UWB team using a U-Net-like FCN combined with a binarization method to increase detection edge accuracy. Task&nbsp;3 consists in locating intersection points of geo-referencing lines, and was also won by the UWB team who used a dedicated pipeline combining binarization, line detection with Hough transform, candidate filtering, and template matching for intersection refinement. Tasks&nbsp;2 and&nbsp;3 are evaluated on 95 map sheets with complex content. Dataset, evaluation tools and results are available under permissive licensing at https://icdar21-mapseg.github.io/.<br />
| lrdepaper = http://www.lrde.epita.fr/dload/papers/chazalon.21.icdar.2.pdf<br />
| lrdeprojects = Olena<br />
| lrdekeywords = Image<br />
| lrdenewsdate = 2021-05-17<br />
| type = inproceedings<br />
| id = chazalon.21.icdar.2<br />
| identifier = doi:10.1007/978-3-030-86337-1_46<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> chazalon.21.icdar.2,<br />
title = <nowiki>{</nowiki><nowiki>{</nowiki>ICDAR<nowiki>}</nowiki> 2021 Competition on Historical Map Segmentation<nowiki>}</nowiki>,<br />
author = <nowiki>{</nowiki>Joseph Chazalon and Edwin Carlinet and Yizi Chen and<br />
Julien Perret and Bertrand Dum\'enieu and Cl\'ement Mallet<br />
and Thierry G\'eraud and Vincent Nguyen and Nam Nguyen and<br />
Josef Baloun and Ladislav Lenc and Pavel Kr\'al<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the 16th International Conference on<br />
Document Analysis and Recognition (ICDAR'21)<nowiki>}</nowiki>,<br />
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,<br />
month = sep,<br />
pages = <nowiki>{</nowiki>693--707<nowiki>}</nowiki>,<br />
series = <nowiki>{</nowiki>Lecture Notes in Computer Science<nowiki>}</nowiki>,<br />
publisher = <nowiki>{</nowiki>Springer, Cham<nowiki>}</nowiki>,<br />
volume = <nowiki>{</nowiki>12824<nowiki>}</nowiki>,<br />
address = <nowiki>{</nowiki>Lausanne, Switzerland<nowiki>}</nowiki>,<br />
abstract = <nowiki>{</nowiki>This paper presents the final results of the ICDAR 2021<br />
Competition on Historical Map Segmentation (MapSeg),<br />
encouraging research on a series of historical atlases of<br />
Paris, France, drawn at 1/5000 scale between 1894 and 1937.<br />
The competition featured three tasks, awarded separately.<br />
Task~1 consists in detecting building blocks and was won by<br />
the L3IRIS team using a DenseNet-121 network trained in a<br />
weakly supervised fashion. This task is evaluated on 3<br />
large images containing hundreds of shapes to detect.<br />
Task~2 consists in segmenting map content from the larger<br />
map sheet, and was won by the UWB team using a U-Net-like<br />
FCN combined with a binarization method to increase<br />
detection edge accuracy. Task~3 consists in locating<br />
intersection points of geo-referencing lines, and was also<br />
won by the UWB team who used a dedicated pipeline combining<br />
binarization, line detection with Hough transform,<br />
candidate filtering, and template matching for intersection<br />
refinement. Tasks~2 and~3 are evaluated on 95 map sheets<br />
with complex content. Dataset, evaluation tools and results<br />
are available under permissive licensing at<br />
\url<nowiki>{</nowiki>https://icdar21-mapseg.github.io/<nowiki>}</nowiki>.<nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>10.1007/978-3-030-86337-1_46<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/chazalon.21.icdar.1Publications/chazalon.21.icdar.12021-05-21T15:44:05Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-05-17<br />
| title = Revisiting the Coco Panoptic Metric to Enable Visual and Qualitative Analysis of Historical Map Instance Segmentation<br />
| authors = Joseph Chazalon, Edwin Carlinet<br />
| booktitle = Proceedings of the 16th International Conference on Document Analysis and Recognition (ICDAR'21)<br />
| series = Lecture Notes in Computer Science<br />
| publisher = Springer, Cham<br />
| volume = 12824<br />
| pages = 367 to 382<br />
| address = Lausanne, Switzerland<br />
| abstract = Segmentation is an important task. It is so important that there exist tens of metrics trying to score and rank segmentation systems. It is so important that each topic has its own metric because their problem is too specific. Does it? What are the fundamental differences with the ZoneMap metric used for page segmentation, the COCO Panoptic metric used in computer vision and metrics used to rank hierarchical segmentations? In this paper, while assessing segmentation accuracy for historical maps, we explain, compare and demystify some the most used segmentation evaluation protocols. In particular, we focus on an alternative view of the COCO Panoptic metric as a classification evaluation; we show its soundness and propose extensions with more “shape-oriented” metrics. Beyond a quantitative metric, this paper aims also at providing qualitative measures through precision-recall maps that enable visualizing the success and the failures of a segmentation method.<br />
| lrdepaper = https://www.lrde.epita.fr/dload/papers/chazalon.21.icdar.1.pdf<br />
| lrdeposter = https://www.lrde.epita.fr/dload/papers/chazalon.21.icdar.1.poster.pdf<br />
| lrdeprojects = Olena<br />
| lrdekeywords = Image<br />
| lrdenewsdate = 2021-05-17<br />
| type = inproceedings<br />
| id = chazalon.21.icdar.1<br />
| identifier = doi:10.1007/978-3-030-86337-1_25<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> chazalon.21.icdar.1,<br />
title = <nowiki>{</nowiki>Revisiting the <nowiki>{</nowiki>C<nowiki>}</nowiki>oco Panoptic Metric to Enable Visual and<br />
Qualitative Analysis of Historical Map Instance<br />
Segmentation<nowiki>}</nowiki>,<br />
author = <nowiki>{</nowiki>Joseph Chazalon and Edwin Carlinet<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the 16th International Conference on<br />
Document Analysis and Recognition (ICDAR'21)<nowiki>}</nowiki>,<br />
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,<br />
month = sep,<br />
series = <nowiki>{</nowiki>Lecture Notes in Computer Science<nowiki>}</nowiki>,<br />
publisher = <nowiki>{</nowiki>Springer, Cham<nowiki>}</nowiki>,<br />
volume = <nowiki>{</nowiki>12824<nowiki>}</nowiki>,<br />
pages = <nowiki>{</nowiki>367--382<nowiki>}</nowiki>,<br />
address = <nowiki>{</nowiki>Lausanne, Switzerland<nowiki>}</nowiki>,<br />
abstract = <nowiki>{</nowiki>Segmentation is an important task. It is so important that<br />
there exist tens of metrics trying to score and rank<br />
segmentation systems. It is so important that each topic<br />
has its own metric because their problem is too specific.<br />
Does it? What are the fundamental differences with the<br />
ZoneMap metric used for page segmentation, the COCO<br />
Panoptic metric used in computer vision and metrics used to<br />
rank hierarchical segmentations? In this paper, while<br />
assessing segmentation accuracy for historical maps, we<br />
explain, compare and demystify some the most used<br />
segmentation evaluation protocols. In particular, we focus<br />
on an alternative view of the COCO Panoptic metric as a<br />
classification evaluation; we show its soundness and<br />
propose extensions with more ``shape-oriented'' metrics.<br />
Beyond a quantitative metric, this paper aims also at<br />
providing qualitative measures through<br />
\emph<nowiki>{</nowiki>precision-recall maps<nowiki>}</nowiki> that enable visualizing the<br />
success and the failures of a segmentation method.<nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>10.1007/978-3-030-86337-1_25<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/estopinan.21.eusipcoPublications/estopinan.21.eusipco2021-05-20T12:05:08Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-05-04<br />
| authors = Joaquim Estopinan, Guillaume Tochon, Lucas Drumetz<br />
| title = Learning Sentinel-2 Spectral Dynamics for Long-Run Predictions Using Residual Neural Networks<br />
| booktitle = Proceedings of the 29th European Signal Processing Conference (EUSIPCO)<br />
| address = Dublin, Ireland<br />
| lrdeprojects = Olena<br />
| abstract = Making the most of multispectral image time-series is a promising but still relatively under-explored research direction because of the complexity of jointly analyzing spatial, spectral and temporal information. Capturing and characterizing temporal dynamics is one of the important and challenging issues. Our new method paves the way to capture real data dynamics and should eventually benefit applications like unmixing or classification. Dealing with time-series dynamics classically requires the knowledge of a dynamical model and an observation model. The former may be incorrect or computationally hard to handle, thus motivating data-driven strategies aiming at learning dynamics directly from data. In this paper, we adapt neural network architectures to learn periodic dynamics of both simulated and real multispectral time-series. We emphasize the necessity of choosing the right state variable to capture periodic dynamics and show that our models can reproduce the average seasonal dynamics of vegetation using only one year of training data.<br />
| lrdekeywords = Image<br />
| lrdenewsdate = 2021-05-04<br />
| type = inproceedings<br />
| id = estopinan.21.eusipco<br />
| identifier = doi:10.23919/EUSIPCO54536.2021.9616304<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> estopinan.21.eusipco,<br />
author = <nowiki>{</nowiki>Joaquim Estopinan and Guillaume Tochon and Lucas Drumetz<nowiki>}</nowiki>,<br />
title = <nowiki>{</nowiki>Learning <nowiki>{</nowiki>Sentinel-2<nowiki>}</nowiki> Spectral Dynamics for Long-Run<br />
Predictions Using Residual Neural Networks<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the 29th European Signal Processing<br />
Conference (EUSIPCO)<nowiki>}</nowiki>,<br />
year = 2021,<br />
address = <nowiki>{</nowiki>Dublin, Ireland<nowiki>}</nowiki>,<br />
month = aug,<br />
abstract = <nowiki>{</nowiki>Making the most of multispectral image time-series is a<br />
promising but still relatively under-explored research<br />
direction because of the complexity of jointly analyzing<br />
spatial, spectral and temporal information. Capturing and<br />
characterizing temporal dynamics is one of the important<br />
and challenging issues. Our new method paves the way to<br />
capture real data dynamics and should eventually benefit<br />
applications like unmixing or classification. Dealing with<br />
time-series dynamics classically requires the knowledge of<br />
a dynamical model and an observation model. The former may<br />
be incorrect or computationally hard to handle, thus<br />
motivating data-driven strategies aiming at learning<br />
dynamics directly from data. In this paper, we adapt neural<br />
network architectures to learn periodic dynamics of both<br />
simulated and real multispectral time-series. We emphasize<br />
the necessity of choosing the right state variable to<br />
capture periodic dynamics and show that our models can<br />
reproduce the average seasonal dynamics of vegetation using<br />
only one year of training data.<nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>10.23919/EUSIPCO54536.2021.9616304<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/newton.21.elsPublications/newton.21.els2021-04-26T07:03:19Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2021-04-26<br />
| authors = Jim Newton, Adrien Pommellet<br />
| title = A Portable, Simple, Embeddable Type System<br />
| booktitle = Proceedings of the 14th European Lisp Symposium (ELS)<br />
| lrdekeywords = infinite alphabets, type systems, Common Lisp, ClojureScala<br />
| lrdenewsdate = 2021-04-26<br />
| lrdepaper = http://www.lrde.epita.fr/dload/papers/newton.21.els.pdf<br />
| lrdeprojects = Spot<br />
| address = Online<br />
| abstract = We present a simple type system inspired by that of Common Lisp. The type system is intended to be embedded into a host language and accepts certain fundamental types from that language as axiomatically given. The type calculus provided in the type system is capable of expressing union, intersection, and complement types, as well as membership, subtype, disjoint, and habitation (non-emptiness) checks. We present a theoretical foundation and two sample implementations, one in Clojure and one in Scala.<br />
| pages = 11 to 20<br />
| publisher = European Lisp Symposium<br />
| type = inproceedings<br />
| id = newton.21.els<br />
| identifier = doi:10.5281/zenodo.4709777<br />
| bibtex = <br />
@InProceedings<nowiki>{</nowiki> newton.21.els,<br />
author = <nowiki>{</nowiki>Jim Newton and Adrien Pommellet<nowiki>}</nowiki>,<br />
title = <nowiki>{</nowiki>A Portable, Simple, Embeddable Type System<nowiki>}</nowiki>,<br />
booktitle = <nowiki>{</nowiki>Proceedings of the 14th European Lisp Symposium (ELS)<nowiki>}</nowiki>,<br />
year = 2021,<br />
address = <nowiki>{</nowiki>Online<nowiki>}</nowiki>,<br />
month = may,<br />
abstract = <nowiki>{</nowiki> We present a simple type system inspired by that of<br />
Common Lisp. The type system is intended to be embedded<br />
into a host language and accepts certain fundamental types<br />
from that language as axiomatically given. The type<br />
calculus provided in the type system is capable of<br />
expressing union, intersection, and complement types, as<br />
well as membership, subtype, disjoint, and habitation<br />
(non-emptiness) checks. We present a theoretical foundation<br />
and two sample implementations, one in Clojure and one in<br />
Scala.<nowiki>}</nowiki>,<br />
pages = <nowiki>{</nowiki>11--20<nowiki>}</nowiki>,<br />
publisher = <nowiki>{</nowiki>European Lisp Symposium<nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>10.5281/zenodo.4709777<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Seminar/2021-05-12Seminar/2021-05-122021-04-25T23:53:50Z<p>Bot: </p>
<hr />
<div>{{SeminarHeader<br />
| id = 2021-05-12<br />
| date = Mercredi 12 mai 2021<br />
| schedule = 11h - 12h<br />
| location = https://meet.jit.si/SeminaireLRDE<br />
}}<br />
{{Talk<br />
| id = 2021-05-12<br />
| abstract = Topological Data Analysis (TDA) is a recent area of computer science that focuses on discovering intrinsic structures hidden in data. Based on solid mathematical tools such as Morse theory and Persistent Homology, TDA enables the robust extraction of the main features of a data set into stable, concise, and multi-scale descriptors that facilitate data analysis and visualization. In this talk, I will give an intuitive overview of the main tools used in TDA (persistence diagrams, Reeb graphs, Morse-Smale complexes, etc.) with applications to concrete use cases in computational fluid dynamics, medical imaging, quantum chemistry, and climate modeling. This talk will be illustrated with results produced with the "Topology ToolKit" (TTK), an open-source library (BSD license) that we develop with collaborators to showcase our research. Tutorials for re-producing these experiments are available on the TTK website.<br />
| duration = 1h<br />
| orator = Julien Tierny, Sorbonne Université<br />
| resume = Julien Tierny received his Ph.D. degree in Computer Science from the University of Lille in 2008 and<br />
the Habilitation degree (HDR) from Sorbonne University in 2016. Currently a CNRS permanent<br />
research scientist affiliated with Sorbonne University, his research expertise lies in topological methods<br />
for data analysis and visualization. Author on the topic and award winner for his research, he regularly<br />
serves as an international program committee member for the top venues in data visualization (IEEE VIS,<br />
EuroVis, etc.) and is an associate editor for IEEE Transactions on Visualization and Computer Graphics.<br />
Julien Tierny is also founder and lead developer of the Topology ToolKit (TTK), an open source library for<br />
topological data analysis.<br />
| schedule = 11h - 12h<br />
| title = An Introduction to Topological Data Analysis with the Topology ToolKit<br />
| urls = https://topology-tool-kit.github.io/<br />
}}</div>Bothttps://www.lrde.epita.fr/wiki/Publications/rivet.20.phdPublications/rivet.20.phd2021-04-13T07:15:55Z<p>Bot: </p>
<hr />
<div>{{Publication<br />
| published = true<br />
| date = 2020-07-17<br />
| authors = Julie Rivet<br />
| title = Non-iterative methods for image improvement in digital holography of the retina<br />
| school = Sorbonne Université<br />
| address = Paris, France<br />
| abstract = With the increase of the number of people with moderate to severe visual impairment, monitoring and treatment of vision disorders have become major issues in medicine today. At the Quinze-Vingts national ophthalmology hospital in Paris, two optical benches have been settled in recent years to develop two real-time digital holography techniques for the retina: holographic optical coherence tomography (OCT) and laser Doppler holography. The first reconstructs three-dimensional images, while the second allows visualization of blood flow in vessels. Besides problems inherent to the imaging system itself, optical devices are subject to external disturbance, bringing also difficulties in imaging and loss of accuracy. The main obstacles these technologies face are eye motion and eye aberrations. In this thesis, we have introduced several methods for image quality improvement in digital holography, and validated them experimentally. The resolution of holographic images has been improved by robust non-iterative methods: lateral and axial tracking and compensation of translation movements, and measurement and compensation of optical aberrations. This allows us to be optimistic that structures on holographic images of the retina will be more visible and sharper, which could ultimately provide very valuable information to clinicians.<br />
| lrdepaper = http://www.lrde.epita.fr/dload/papers/rivet.20.phd.pdf<br />
| lrdenewsdate = 2020-07-17<br />
| lrdeprojects = Olena<br />
| type = phdthesis<br />
| id = rivet.20.phd<br />
| identifier = doi:FIXME<br />
| bibtex = <br />
@PhDThesis<nowiki>{</nowiki> rivet.20.phd,<br />
author = <nowiki>{</nowiki>Julie Rivet<nowiki>}</nowiki>,<br />
title = <nowiki>{</nowiki>Non-iterative methods for image improvement in digital<br />
holography of the retina<nowiki>}</nowiki>,<br />
school = <nowiki>{</nowiki>Sorbonne Universit\'e<nowiki>}</nowiki>,<br />
year = 2020,<br />
address = <nowiki>{</nowiki>Paris, France<nowiki>}</nowiki>,<br />
month = jul,<br />
abstract = <nowiki>{</nowiki>With the increase of the number of people with moderate to<br />
severe visual impairment, monitoring and treatment of<br />
vision disorders have become major issues in medicine<br />
today. At the Quinze-Vingts national ophthalmology hospital<br />
in Paris, two optical benches have been settled in recent<br />
years to develop two real-time digital holography<br />
techniques for the retina: holographic optical coherence<br />
tomography (OCT) and laser Doppler holography. The first<br />
reconstructs three-dimensional images, while the second<br />
allows visualization of blood flow in vessels. Besides<br />
problems inherent to the imaging system itself, optical<br />
devices are subject to external disturbance, bringing also<br />
difficulties in imaging and loss of accuracy. The main<br />
obstacles these technologies face are eye motion and eye<br />
aberrations. In this thesis, we have introduced several<br />
methods for image quality improvement in digital<br />
holography, and validated them experimentally. The<br />
resolution of holographic images has been improved by<br />
robust non-iterative methods: lateral and axial tracking<br />
and compensation of translation movements, and measurement<br />
and compensation of optical aberrations. This allows us to<br />
be optimistic that structures on holographic images of the<br />
retina will be more visible and sharper, which could<br />
ultimately provide very valuable information to<br />
clinicians.<nowiki>}</nowiki>,<br />
doi = <nowiki>{</nowiki>FIXME<nowiki>}</nowiki><br />
<nowiki>}</nowiki><br />
<br />
}}</div>Bot