Difference between revisions of "Publications/dehak.14.odyssey"
From LRDE
Line 13: | Line 13: | ||
| id = dehak.14.odyssey |
| id = dehak.14.odyssey |
||
| bibtex = |
| bibtex = |
||
+ | @InProceedings<nowiki>{</nowiki> dehak.14.odyssey, |
||
+ | author = <nowiki>{</nowiki>N. Dehak and O. Plchot and M.H. Bahari and L. Burget and |
||
+ | H. Van hamme and R. Dehak<nowiki>}</nowiki>, |
||
+ | title = <nowiki>{</nowiki>GMM Weights Adaptation Based on Subspace Approaches for |
||
+ | Speaker Verification<nowiki>}</nowiki>, |
||
+ | booktitle = <nowiki>{</nowiki>Odyssey 2014, The Speaker and Language Recognition |
||
+ | Workshop<nowiki>}</nowiki>, |
||
+ | year = 2014, |
||
+ | address = <nowiki>{</nowiki>Joensuu, Finland<nowiki>}</nowiki>, |
||
+ | month = jun, |
||
+ | project = <nowiki>{</nowiki>SpeakerId<nowiki>}</nowiki>, |
||
+ | abstract = <nowiki>{</nowiki>In this paper, we explored the use of Gaussian Mixture |
||
+ | Model (GMM) weights adaptation for speaker verification. |
||
+ | We compared two different subspace weight adaptation |
||
+ | approaches: Subspace Multinomial Model (SMM) and |
||
+ | Non-Negative factor Analysis (NFA). Both techniques |
||
+ | achieved similar results and seemed to outperform the |
||
+ | retraining maximum likelihood (ML) weight adaptation. |
||
+ | However, the training process for the NFA approach is |
||
+ | substantially faster than the SMM technique. The i-vector |
||
+ | fusion between each weight adaptation approach and the |
||
+ | classical i-vector yielded slight improvements on the |
||
+ | telephone part of the NIST 2010 Speaker Recognition |
||
+ | Evaluation dataset.<nowiki>}</nowiki>, |
||
+ | pages = <nowiki>{</nowiki>48--53<nowiki>}</nowiki> |
||
+ | <nowiki>}</nowiki> |
||
+ | |||
}} |
}} |
Revision as of 09:00, 4 July 2014
- Authors
- N Dehak, O Plchot, M H Bahari, L Burget, H Van hamme, Réda Dehak
- Where
- Odyssey 2014, The Speaker and Language Recognition Workshop
- Place
- Joensuu, Finland
- Type
- inproceedings
- Date
- 2014-06-16
Abstract
In this paper, we explored the use of Gaussian Mixture Model (GMM) weights adaptation for speaker verification. We compared two different subspace weight adaptation approaches: Subspace Multinomial Model (SMM) and Non-Negative factor Analysis (NFA). Both techniques achieved similar results and seemed to outperform the retraining maximum likelihood (ML) weight adaptation. However, the training process for the NFA approach is substantially faster than the SMM technique. The i-vector fusion between each weight adaptation approach and the classical i-vector yielded slight improvements on the telephone part of the NIST 2010 Speaker Recognition Evaluation dataset.
Bibtex (lrde.bib)
@InProceedings{ dehak.14.odyssey, author = {N. Dehak and O. Plchot and M.H. Bahari and L. Burget and H. Van hamme and R. Dehak}, title = {GMM Weights Adaptation Based on Subspace Approaches for Speaker Verification}, booktitle = {Odyssey 2014, The Speaker and Language Recognition Workshop}, year = 2014, address = {Joensuu, Finland}, month = jun, project = {SpeakerId}, abstract = {In this paper, we explored the use of Gaussian Mixture Model (GMM) weights adaptation for speaker verification. We compared two different subspace weight adaptation approaches: Subspace Multinomial Model (SMM) and Non-Negative factor Analysis (NFA). Both techniques achieved similar results and seemed to outperform the retraining maximum likelihood (ML) weight adaptation. However, the training process for the NFA approach is substantially faster than the SMM technique. The i-vector fusion between each weight adaptation approach and the classical i-vector yielded slight improvements on the telephone part of the NIST 2010 Speaker Recognition Evaluation dataset.}, pages = {48--53} }