Publications/veyrin-forrer.22.ijcai

From LRDE


Latest revision as of 17:19, 14 December 2022

Abstract

GNNs are efficient for classifying graphs, but their internal workings are opaque, which limits their field of application. Existing methods for explaining GNNs focus on disclosing the relationships between input graphs and the model's decision. In contrast, the method we propose isolates internal features, hidden in the network layers, which are automatically identified by the GNN to classify graphs. We show that this method makes it possible to know the parts of the input graphs used by the GNN with much less bias than the SOTA methods, and therefore to provide confidence in the decision process.
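
The abstract describes the idea only at a high level. As a rough illustration of what "internal GNN representations" refers to, the sketch below builds a toy two-layer message-passing network in PyTorch and uses a forward hook to capture the hidden node activations of its second layer. This is not the authors' method: the class names (TinyGNNLayer, TinyGNN), the mean-aggregation layer, and all dimensions are hypothetical, and the paper goes further by identifying which of these internal features the GNN actually relies on to classify graphs.

# Illustrative sketch only: a toy GNN whose hidden-layer activations are
# exposed with a forward hook. Names and dimensions are hypothetical and
# unrelated to the paper's implementation.
import torch
import torch.nn as nn


class TinyGNNLayer(nn.Module):
    """One simplified mean-aggregation message-passing layer."""

    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.lin = nn.Linear(in_dim, out_dim)

    def forward(self, x, adj):
        # adj: dense (n, n) adjacency with self-loops; x: (n, in_dim) node features.
        deg = adj.sum(dim=1, keepdim=True).clamp(min=1)
        return torch.relu(self.lin(adj @ x / deg))


class TinyGNN(nn.Module):
    def __init__(self, in_dim=4, hid_dim=8, n_classes=2):
        super().__init__()
        self.layer1 = TinyGNNLayer(in_dim, hid_dim)
        self.layer2 = TinyGNNLayer(hid_dim, hid_dim)
        self.readout = nn.Linear(hid_dim, n_classes)

    def forward(self, x, adj):
        h = self.layer1(x, adj)
        h = self.layer2(h, adj)
        # Mean pooling over nodes gives a graph-level embedding for classification.
        return self.readout(h.mean(dim=0))


if __name__ == "__main__":
    model = TinyGNN()
    captured = {}

    # Forward hook: store the hidden node representations produced by layer 2.
    def save_hidden(module, inputs, output):
        captured["layer2"] = output.detach()

    model.layer2.register_forward_hook(save_hidden)

    # Toy graph: 3 nodes forming a path 0-1-2, with self-loops.
    adj = torch.tensor([[1., 1., 0.],
                        [1., 1., 1.],
                        [0., 1., 1.]])
    x = torch.randn(3, 4)

    logits = model(x, adj)
    print("class scores:", logits)
    print("hidden node activations (layer 2):", captured["layer2"].shape)

Capturing activations with a hook rather than changing the model's return value keeps the network's interface untouched, which is convenient when probing an already trained classifier.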


Bibtex (lrde.bib)

@InProceedings{	  veyrin-forrer.22.ijcai,
  title		= {What Does my {GNN} Really Capture? {O}n Exploring Internal
		  GNN Representations},
  author	= {Luca Veyrin-Forrer and Ataollah Kamal and Stefan Duffner
		  and Marc Plantevit and C\'eline Robardet},
  booktitle	= {International Joint Conference on Artificial Intelligence
		  2022},
  year		= {2022},
  month		= jul,
  hal_id	= {hal-03700710},
  pages		= {747--752},
  abstract	= {GNNs are efficient for classifying graphs but their
		  internal workings is opaque which limits their field of
		  application. Existing methods for explaining GNN focus on
		  disclosing the relationships between input graphs and the
		  model's decision. In contrary, the method we propose
		  isolates internal features, hidden in the network layers,
		  which are automatically identified by the GNN to classify
		  graphs. We show that this method makes it possible to know
		  the parts of the input graphs used by GNN with much less
		  bias than the SOTA methods and therefore to provide
		  confidence in the decision process.},
  publisher	= {ijcai.org},
  doi		= {https://doi.org/10.24963/ijcai.2022/105}
}