Difference between revisions of "Publications/veyrin-forrer.22.ijcai"
From LRDE
Line 12: | Line 12: | ||
| type = inproceedings |
| type = inproceedings |
||
| id = veyrin-forrer.22.ijcai |
| id = veyrin-forrer.22.ijcai |
||
+ | | identifier = doi:https://doi.org/10.24963/ijcai.2022/105 |
||
| bibtex = |
| bibtex = |
||
@InProceedings<nowiki>{</nowiki> veyrin-forrer.22.ijcai, |
@InProceedings<nowiki>{</nowiki> veyrin-forrer.22.ijcai, |
||
Line 21: | Line 22: | ||
2022<nowiki>}</nowiki>, |
2022<nowiki>}</nowiki>, |
||
year = <nowiki>{</nowiki>2022<nowiki>}</nowiki>, |
year = <nowiki>{</nowiki>2022<nowiki>}</nowiki>, |
||
+ | month = jul, |
||
hal_id = <nowiki>{</nowiki>hal-03700710<nowiki>}</nowiki>, |
hal_id = <nowiki>{</nowiki>hal-03700710<nowiki>}</nowiki>, |
||
pages = <nowiki>{</nowiki>1-7<nowiki>}</nowiki>, |
pages = <nowiki>{</nowiki>1-7<nowiki>}</nowiki>, |
||
Line 33: | Line 35: | ||
the parts of the input graphs used by GNN with much less |
the parts of the input graphs used by GNN with much less |
||
bias than the SOTA methods and therefore to provide |
bias than the SOTA methods and therefore to provide |
||
− | confidence in the decision process.<nowiki>}</nowiki> |
+ | confidence in the decision process.<nowiki>}</nowiki>, |
+ | doi = <nowiki>{</nowiki>10.24963/ijcai.2022/105<nowiki>}</nowiki> |
||
<nowiki>}</nowiki> |
<nowiki>}</nowiki> |
||
Revision as of 13:03, 9 December 2022
- Authors
- Luca Veyrin-Forrer, Ataollah Kamal, Stefan Duffner, Marc Plantevit, Céline Robardet
- Where
- International Joint Conference on Artificial Intelligence 2022
- Type
- inproceedings
- Keywords
- IA
- Date
- 2022-07-23
Abstract
GNNs are efficient for classifying graphs but their internal workings is opaque which limits their field of application. Existing methods for explaining GNN focus on disclosing the relationships between input graphs and the model's decision. In contrary, the method we propose isolates internal features, hidden in the network layers, which are automatically identified by the GNN to classify graphs. We show that this method makes it possible to know the parts of the input graphs used by GNN with much less bias than the SOTA methods and therefore to provide confidence in the decision process.
Bibtex (lrde.bib)
@InProceedings{ veyrin-forrer.22.ijcai, title = {What Does my {GNN} Really Capture? {O}n Exploring Internal GNN Representations}, author = {Luca Veyrin-Forrer and Ataollah Kamal and Stefan Duffner and Marc Plantevit and C\'eline Robardet}, booktitle = {International Joint Conference on Artificial Intelligence 2022}, year = {2022}, month = jul, hal_id = {hal-03700710}, pages = {1-7}, abstract = {GNNs are efficient for classifying graphs but their internal workings is opaque which limits their field of application. Existing methods for explaining GNN focus on disclosing the relationships between input graphs and the model's decision. In contrary, the method we propose isolates internal features, hidden in the network layers, which are automatically identified by the GNN to classify graphs. We show that this method makes it possible to know the parts of the input graphs used by GNN with much less bias than the SOTA methods and therefore to provide confidence in the decision process.}, doi = {10.24963/ijcai.2022/105} }