@inproceedings{5c7fa0a7e30349339abf6086b431f88a,
  author    = {Nicodemo, Niccol{\`o} and Naithani, Gaurav and Drossos, Konstantinos and Virtanen, Tuomas and Saletti, Roberto},
  title     = {Memory Requirement Reduction of Deep Neural Networks Using Low-bit Quantization of Parameters},
  booktitle = {28th European Signal Processing Conference},
  year      = {2020},
  abstract  = {Effective employment of deep neural networks (DNNs) in mobile devices and embedded systems is hampered by requirements for memory and computational power. This paper presents a non-uniform quantization approach which allows for dynamic quantization of DNN parameters for different layers and within the same layer. A virtual bit shift (VBS) scheme is also proposed to improve the accuracy of the proposed scheme. Our method reduces the memory requirements, preserving the performance of the network. The performance of our method is validated in a speech enhancement application, where a fully connected DNN is used to predict the clean speech spectrum from the input noisy speech spectrum. A DNN is optimized and its memory footprint and performance are evaluated using the short-time objective intelligibility, STOI, metric. The application of the low-bit quantization allows a 50\% reduction of the DNN memory footprint while the STOI performance drops only by 2.7\%.},
}