Difference between revisions of "Publications/dangla.18.das"
From LRDE
(4 intermediate revisions by the same user not shown) | |||
Line 6: | Line 6: | ||
| booktitle = Proceedings of the IAPR International Workshop on Document Analysis Systems (DAS) |
| booktitle = Proceedings of the IAPR International Workshop on Document Analysis Systems (DAS) |
||
| address = Vienna, Austria |
| address = Vienna, Austria |
||
+ | | abstract = Text detection is an important topic in pattern recognition, but evaluating the reliability of such detection algorithms is challenging. While many evaluation protocols have been developed for that purpose, they often show dissimilar behaviors when applied in the same context. As a consequence, their usage may lead to misinterpretations, potentially yielding erroneous comparisons between detection algorithms or their incorrect parameters tuning. This paper is a first attempt to derive a methodology to perform the comparison of evaluation protocols. We then apply it on five state-of-the-art protocols, and exhibit that there indeed exist inconsistencies among their evaluation criteria. Our aim here is not to rank the investigated evaluation protocols, but rather raising awareness in the community that we should carefully reconsider them in order to converge to their optimal usage. |
||
− | | abstract = . |
||
| lrdeprojects = Olena |
| lrdeprojects = Olena |
||
| lrdekeywords = Image |
| lrdekeywords = Image |
||
+ | | lrdepaper = http://www.lrde.epita.fr/dload/papers/dangla.18.das.pdf |
||
+ | | lrdeposter = http://www.lrde.epita.fr/dload/papers/dangla.18.das.poster.pdf |
||
| lrdenewsdate = 2018-02-02 |
| lrdenewsdate = 2018-02-02 |
||
− | | note = Accepted |
||
| type = inproceedings |
| type = inproceedings |
||
| id = dangla.18.das |
| id = dangla.18.das |
||
+ | | identifier = doi:10.1109/DAS.2018.55 |
||
| bibtex = |
| bibtex = |
||
@InProceedings<nowiki>{</nowiki> dangla.18.das, |
@InProceedings<nowiki>{</nowiki> dangla.18.das, |
||
− | author = <nowiki>{</nowiki>Aliona Dangla and |
+ | author = <nowiki>{</nowiki>Aliona Dangla and \'Elodie Puybareau and Guillaume Tochon |
− | + | and Jonathan Fabrizio<nowiki>}</nowiki>, |
|
title = <nowiki>{</nowiki>A first step toward a fair comparison of evaluation |
title = <nowiki>{</nowiki>A first step toward a fair comparison of evaluation |
||
protocols for text detection algorithms<nowiki>}</nowiki>, |
protocols for text detection algorithms<nowiki>}</nowiki>, |
||
Line 24: | Line 26: | ||
month = apr, |
month = apr, |
||
address = <nowiki>{</nowiki>Vienna, Austria<nowiki>}</nowiki>, |
address = <nowiki>{</nowiki>Vienna, Austria<nowiki>}</nowiki>, |
||
− | abstract = <nowiki>{</nowiki> |
+ | abstract = <nowiki>{</nowiki>Text detection is an important topic in pattern |
+ | recognition, but evaluating the reliability of such |
||
⚫ | |||
+ | detection algorithms is challenging. While many evaluation |
||
+ | protocols have been developed for that purpose, they often |
||
+ | show dissimilar behaviors when applied in the same context. |
||
+ | As a consequence, their usage may lead to |
||
+ | misinterpretations, potentially yielding erroneous |
||
+ | comparisons between detection algorithms or their incorrect |
||
+ | parameters tuning. This paper is a first attempt to derive |
||
+ | a methodology to perform the comparison of evaluation |
||
+ | protocols. We then apply it on five state-of-the-art |
||
+ | protocols, and exhibit that there indeed exist |
||
+ | inconsistencies among their evaluation criteria. Our aim |
||
+ | here is not to rank the investigated evaluation protocols, |
||
+ | but rather raising awareness in the community that we |
||
+ | should carefully reconsider them in order to converge to |
||
+ | their optimal usage.<nowiki>}</nowiki>, |
||
⚫ | |||
<nowiki>}</nowiki> |
<nowiki>}</nowiki> |
||
Latest revision as of 17:00, 27 May 2021
- Authors
- Aliona Dangla, Élodie Puybareau, Guillaume Tochon, Jonathan Fabrizio
- Where
- Proceedings of the IAPR International Workshop on Document Analysis Systems (DAS)
- Place
- Vienna, Austria
- Type
- inproceedings
- Projects
- Olena
- Keywords
- Image
- Date
- 2018-02-02
Abstract
Text detection is an important topic in pattern recognition, but evaluating the reliability of such detection algorithms is challenging. While many evaluation protocols have been developed for that purpose, they often show dissimilar behaviors when applied in the same context. As a consequence, their usage may lead to misinterpretations, potentially yielding erroneous comparisons between detection algorithms or their incorrect parameters tuning. This paper is a first attempt to derive a methodology to perform the comparison of evaluation protocols. We then apply it on five state-of-the-art protocols, and exhibit that there indeed exist inconsistencies among their evaluation criteria. Our aim here is not to rank the investigated evaluation protocols, but rather raising awareness in the community that we should carefully reconsider them in order to converge to their optimal usage.
Documents
Bibtex (lrde.bib)
@InProceedings{ dangla.18.das, author = {Aliona Dangla and \'Elodie Puybareau and Guillaume Tochon and Jonathan Fabrizio}, title = {A first step toward a fair comparison of evaluation protocols for text detection algorithms}, booktitle = {Proceedings of the IAPR International Workshop on Document Analysis Systems (DAS)}, year = {2018}, month = apr, address = {Vienna, Austria}, abstract = {Text detection is an important topic in pattern recognition, but evaluating the reliability of such detection algorithms is challenging. While many evaluation protocols have been developed for that purpose, they often show dissimilar behaviors when applied in the same context. As a consequence, their usage may lead to misinterpretations, potentially yielding erroneous comparisons between detection algorithms or their incorrect parameters tuning. This paper is a first attempt to derive a methodology to perform the comparison of evaluation protocols. We then apply it on five state-of-the-art protocols, and exhibit that there indeed exist inconsistencies among their evaluation criteria. Our aim here is not to rank the investigated evaluation protocols, but rather raising awareness in the community that we should carefully reconsider them in order to converge to their optimal usage.}, doi = {10.1109/DAS.2018.55} }