Publications/chen.21.icdar

From LRDE

 
{{Publication
| published = true
| date = 2021-05-17
| title = Vectorization of Historical Maps Using Deep Edge Filtering and Closed Shape Extraction
| authors = Yizi Chen, Edwin Carlinet, Joseph Chazalon, Clément Mallet, Bertrand Duménieu, Julien Perret
| booktitle = Proceedings of the 16th International Conference on Document Analysis and Recognition (ICDAR'21)
| pages = 510 to 525
| series = Lecture Notes in Computer Science
| publisher = Springer, Cham
| volume = 12824
| address = Lausanne, Switzerland
| abstract = Maps have been a unique source of knowledge for centuries. Such historical documents provide invaluable information for analyzing the complex spatial transformation of landscapes over important time frames. This is particularly true for urban areas that encompass multiple interleaved research domains (social sciences, economy, etc.). The large amount and significant diversity of map sources call for automatic image processing techniques in order to extract the relevant objects as vector shapes. The complexity of maps (text, noise, digitization artifacts, etc.) has hindered the development of versatile and efficient raster-to-vector approaches for decades. We propose a learnable, reproducible, and reusable solution for the automatic transformation of raster maps into vector objects (building blocks, streets, rivers). It is built upon the complementary strengths of mathematical morphology and convolutional neural networks through efficient edge filtering. Furthermore, we modify ConnNet and combine it with the deep edge filtering architecture to exploit pixel connectivity information and build an end-to-end system that requires no post-processing. In this paper, we focus on a comprehensive benchmark of various architectures on multiple datasets, coupled with a novel vectorization step. Our experiments on a new public dataset, evaluated with the COCO Panoptic metric, show very encouraging results, confirmed by a qualitative analysis of the success and failure cases of our approach. Code, dataset, results, and extra illustrations are freely available at https://github.com/soduco/ICDAR-2021-Vectorization.
| lrdepaper = http://www.lrde.epita.fr/dload/papers/chen.21.icdar.pdf
| lrdeprojects = Olena
| lrdekeywords = Image
| lrdenewsdate = 2021-05-17
| type = inproceedings
| id = chen.21.icdar
| identifier = doi:10.1007/978-3-030-86337-1_34
| bibtex =
 
@InProceedings<nowiki>{</nowiki> chen.21.icdar,
title = <nowiki>{</nowiki>Vectorization of Historical Maps Using Deep Edge Filtering
and Closed Shape Extraction<nowiki>}</nowiki>,
author = <nowiki>{</nowiki>Yizi Chen and Edwin Carlinet and Joseph Chazalon and
Cl\'ement Mallet and Bertrand Dum\'enieu and Julien Perret<nowiki>}</nowiki>,
booktitle = <nowiki>{</nowiki>Proceedings of the 16th International Conference on
Document Analysis and Recognition (ICDAR'21)<nowiki>}</nowiki>,
year = <nowiki>{</nowiki>2021<nowiki>}</nowiki>,
month = sep,
pages = <nowiki>{</nowiki>510--525<nowiki>}</nowiki>,
series = <nowiki>{</nowiki>Lecture Notes in Computer Science<nowiki>}</nowiki>,
publisher = <nowiki>{</nowiki>Springer, Cham<nowiki>}</nowiki>,
volume = <nowiki>{</nowiki>12824<nowiki>}</nowiki>,
address = <nowiki>{</nowiki>Lausanne, Switzerland<nowiki>}</nowiki>,
abstract = <nowiki>{</nowiki>Maps have been a unique source of knowledge for centuries.
Such historical documents provide invaluable information
for analyzing the complex spatial transformation of
landscapes over important time frames. This is particularly
true for urban areas that encompass multiple interleaved
research domains (social sciences, economy, etc.). The
large amount and significant diversity of map sources call
for automatic image processing techniques in order to
extract the relevant objects as vector shapes. The
complexity of maps (text, noise, digitization artifacts,
etc.) has hindered the development of versatile and
efficient raster-to-vector approaches for decades. We
propose a learnable, reproducible, and reusable solution
for the automatic transformation of raster maps into vector
objects (building blocks, streets, rivers). It is built
upon the complementary strengths of mathematical morphology
and convolutional neural networks through efficient edge
filtering. Furthermore, we modify ConnNet and combine it
with the deep edge filtering architecture to exploit pixel
connectivity information and build an end-to-end system
that requires no post-processing. In this paper, we focus
on a comprehensive benchmark of various architectures on
multiple datasets, coupled with a novel vectorization step.
Our experiments on a new public dataset, evaluated with the
COCO Panoptic metric, show very encouraging results,
confirmed by a qualitative analysis of the success and
failure cases of our approach. Code, dataset, results, and
extra illustrations are freely available at
\url<nowiki>{</nowiki>https://github.com/soduco/ICDAR-2021-Vectorization<nowiki>}</nowiki>.<nowiki>}</nowiki>,
doi = <nowiki>{</nowiki>10.1007/978-3-030-86337-1_34<nowiki>}</nowiki>
<nowiki>}</nowiki>
}}

Abstract

Maps have been a unique source of knowledge for centuries. Such historical documents provide invaluable information for analyzing the complex spatial transformation of landscapes over important time frames. This is particularly true for urban areas that encompass multiple interleaved research domains (social sciences, economy, etc.). The large amount and significant diversity of map sources call for automatic image processing techniques in order to extract the relevant objects as vector shapes. The complexity of maps (text, noise, digitization artifacts, etc.) has hindered the development of versatile and efficient raster-to-vector approaches for decades. We propose a learnable, reproducible, and reusable solution for the automatic transformation of raster maps into vector objects (building blocks, streets, rivers). It is built upon the complementary strengths of mathematical morphology and convolutional neural networks through efficient edge filtering. Furthermore, we modify ConnNet and combine it with the deep edge filtering architecture to exploit pixel connectivity information and build an end-to-end system that requires no post-processing. In this paper, we focus on a comprehensive benchmark of various architectures on multiple datasets, coupled with a novel vectorization step. Our experiments on a new public dataset, evaluated with the COCO Panoptic metric, show very encouraging results, confirmed by a qualitative analysis of the success and failure cases of our approach. Code, dataset, results, and extra illustrations are freely available at https://github.com/soduco/ICDAR-2021-Vectorization.
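
To make the closed-shape extraction step more concrete, the following minimal sketch goes from an edge-probability map (as produced by a deep edge filter) to labelled closed regions and traced polygons. It is only an illustration of the general idea, not the authors' pipeline: the library choice (scikit-image), the 0.5 threshold, and the helper name closed_shapes_from_edges are assumptions of this example; the reference implementation is in the repository linked above.

# Illustrative sketch only -- not the authors' implementation (their code is at
# https://github.com/soduco/ICDAR-2021-Vectorization). It assumes a deep edge
# filter has already produced an edge-probability map in [0, 1]; here a toy
# map is built by hand. Library choice (scikit-image) and the 0.5 threshold
# are assumptions of this example.
import numpy as np
from skimage.measure import find_contours, label

def closed_shapes_from_edges(edges, threshold=0.5):
    """Label regions enclosed by edges and trace one polygon per region."""
    boundary = edges >= threshold                 # binarize the edge map
    regions = label(~boundary, connectivity=1)    # non-edge connected components = closed shapes
    polygons = []
    for region_id in range(1, regions.max() + 1):
        mask = (regions == region_id).astype(float)
        contours = find_contours(mask, 0.5)       # outline of the region as a polyline
        if contours:
            polygons.append(max(contours, key=len))  # keep the outer (longest) contour
    return regions, polygons

if __name__ == "__main__":
    # Toy "map": a frame with one interior wall, i.e. two closed cells.
    edges = np.zeros((32, 32))
    edges[0, :] = edges[-1, :] = edges[:, 0] = edges[:, -1] = 1.0
    edges[:, 15] = 1.0
    regions, polygons = closed_shapes_from_edges(edges)
    print(regions.max(), "closed shapes,", len(polygons), "polygons traced")

On the evaluation side, the COCO Panoptic metric mentioned in the abstract is the standard panoptic quality score, PQ = (sum of IoU over matched segments) / (|TP| + |FP|/2 + |FN|/2), which combines segmentation and recognition quality in a single number.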

Documents

Bibtex (lrde.bib)

@InProceedings{	  chen.21.icdar,
  title		= {Vectorization of Historical Maps Using Deep Edge Filtering
		  and Closed Shape Extraction},
  author	= {Yizi Chen and Edwin Carlinet and Joseph Chazalon and
		  Cl\'ement Mallet and Bertrand Dum\'enieu and Julien Perret},
  booktitle	= {Proceedings of the 16th International Conference on
		  Document Analysis and Recognition (ICDAR'21)},
  year		= {2021},
  month		= sep,
  pages		= {510--525},
  series	= {Lecture Notes in Computer Science},
  publisher	= {Springer, Cham},
  volume	= {12824},
  address	= {Lausanne, Switzerland},
  abstract	= {Maps have been a unique source of knowledge for centuries.
		  Such historical documents provide invaluable information
		  for analyzing the complex spatial transformation of
		  landscapes over important time frames. This is particularly
		  true for urban areas that encompass multiple interleaved
		  research domains (social sciences, economy, etc.). The
		  large amount and significant diversity of map sources call
		  for automatic image processing techniques in order to
		  extract the relevant objects as vector shapes. The
		  complexity of maps (text, noise, digitization artifacts,
		  etc.) has hindered the development of versatile and
		  efficient raster-to-vector approaches for decades. We
		  propose a learnable, reproducible, and reusable solution
		  for the automatic transformation of raster maps into vector
		  objects (building blocks, streets, rivers). It is built
		  upon the complementary strengths of mathematical morphology
		  and convolutional neural networks through efficient edge
		  filtering. Furthermore, we modify ConnNet and combine it
		  with the deep edge filtering architecture to exploit pixel
		  connectivity information and build an end-to-end system
		  that requires no post-processing. In this paper, we focus
		  on a comprehensive benchmark of various architectures on
		  multiple datasets, coupled with a novel vectorization step.
		  Our experiments on a new public dataset, evaluated with the
		  COCO Panoptic metric, show very encouraging results,
		  confirmed by a qualitative analysis of the success and
		  failure cases of our approach. Code, dataset, results, and
		  extra illustrations are freely available at
		  \url{https://github.com/soduco/ICDAR-2021-Vectorization}.},
  doi		= {10.1007/978-3-030-86337-1_34}
}