@conference{c7f1c25f09e14643af7ad59de5af2c70,
  author    = "Pasi Pertil{\"a}",
  abstract  = "The time-frequency mask and the magnitude spectrum are two common targets for deep learning-based speech enhancement. Both the ensemble and the neural network fusion of magnitude spectra obtained with these approaches have been shown to improve objective perceptual quality on synthetic mixtures. This work generalizes the ensemble approach by proposing neural network layers that predict time-frequency varying weights for combining the two magnitude spectra. In order to combine the best individual magnitude spectrum estimates, the weight prediction network is trained after the time-frequency mask and magnitude spectrum sub-networks have been separately trained for their corresponding objectives and their weights have been frozen. Using the publicly available CHiME-3 challenge data, which consists of both simulated and real speech recordings in everyday environments with noise and interference, the proposed approach achieves significantly higher noise suppression in terms of segmental source-to-distortion ratio than the alternative approaches. In addition, the approach achieves average objective instrumentally measured intelligibility scores comparable to the best achieved scores.",
  booktitle = "2019 IEEE 21st International Workshop on Multimedia Signal Processing (MMSP)",
  doi       = "10.1109/MMSP.2019.8901800",
  isbn      = "978-1-7281-1818-5",
  month     = "9",
  publisher = "IEEE",
  series    = "IEEE International Workshop on Multimedia Signal Processing",
  title     = "{D}ata-{D}ependent {E}nsemble of {M}agnitude {S}pectrum {P}redictions for {S}ingle {C}hannel {S}peech {E}nhancement",
  year      = "2019",
}