@inproceedings{436172fb9c8a4228adebf56cd668cf82,
  author    = {Bear, Helen L. and Heittola, Toni and Mesaros, Annamaria and Benetos, Emmanouil and Virtanen, Tuomas},
  title     = {{City} {Classification} from {Multiple} {Real-World} {Sound} {Scenes}},
  booktitle = {2019 {IEEE} Workshop on Applications of Signal Processing to Audio and Acoustics ({WASPAA})},
  series    = {{IEEE} Workshop on Applications of Signal Processing to Audio and Acoustics},
  publisher = {IEEE},
  month     = oct,
  year      = {2019},
  pages     = {11--15},
  doi       = {10.1109/WASPAA.2019.8937271},
  isbn      = {978-1-7281-1124-7},
  keywords  = {Acoustic scene classification; location identification; city classification; computational sound scene analysis},
  abstract  = {The majority of sound scene analysis work focuses on one of two clearly defined tasks: acoustic scene classification or sound event detection. Whilst this separation of tasks is useful for problem definition, they inherently ignore some subtleties of the real-world, in particular how humans vary in how they describe a scene. Some will describe the weather and features within it, others will use a holistic descriptor like `park', and others still will use unique identifiers such as cities or names. In this paper, we undertake the task of automatic city classification to ask whether we can recognize a city from a set of sound scenes? In this problem each city has recordings from multiple scenes. We test a series of methods for this novel task and show that a simple convolutional neural network (CNN) can achieve accuracy of 50\%. This is less than the acoustic scene classification task baseline in the DCASE 2018 ASC challenge on the same data. A simple adaptation to the class labels of pairing city labels with grouped scenes, accuracy increases to 52\%, closer to the simpler scene classification task. Finally we also formulate the problem in a multi-task learning framework and achieve an accuracy of 56\%, outperforming the aforementioned approaches.},
}