@conference{magron;2018;interspeech,
    author = "Paul Magron and Konstantinos Drossos and Stylianos Ioannis Mimilakis and Tuomas Virtanen",
    abstract = "State-of-the-art methods for monaural singing voice separation consist of estimating the magnitude spectrum of the voice in the short-term Fourier transform (STFT) domain by means of deep neural networks (DNNs). The resulting magnitude estimate is then combined with the mixture's phase to retrieve the complex-valued STFT of the voice, which is further synthesized into a time-domain signal. However, when the sources overlap in time and frequency, the STFT phase of the voice differs from the mixture's phase, which results in interference and artifacts in the estimated signals. In this paper, we investigate recent phase recovery algorithms that tackle this issue and can further enhance the separation quality. These algorithms exploit phase constraints that originate from a sinusoidal model or from consistency, a property that is a direct consequence of the STFT redundancy. Experiments conducted on real music songs show that these algorithms are effective at reducing interference in the estimated voice compared to the baseline approach.",
    booktitle = "Interspeech",
    issn = "2308-457X",
    keywords = "monaural singing voice separation; voice recovery; deep neural networks; MaD TwinNet; Wiener Filtering",
    title = "{R}educing {I}nterference with {P}hase {R}ecovery in {DNN}-based {M}onaural {S}inging {V}oice {S}eparation",
    url = "https://hal.archives-ouvertes.fr/hal-01741278v2",
    year = "2018",
}