@inproceedings{30c4e2963d9b47309d7ce1329f933b8a,
title = "Acoustic Scene Classification Using Joint Time-Frequency Image-Based Feature Representations",
abstract = "The classification of acoustic scenes is important in emerging applications such as automatic audio surveillance, machine listening and multimedia content analysis. In this paper, we present an approach to acoustic scene classification that uses joint time-frequency image-based feature representations. In acoustic scene classification, the joint time-frequency representation (TFR) is shown to better capture important information across a wide range of low and middle frequencies in the audio signal. The audio signal is converted to Constant-Q Transform (CQT) and Mel-spectrum TFRs, and local binary patterns (LBP) are used to extract features from these TFRs. To ensure that localized spectral information is not lost, the TFRs are divided into a number of zones. We then perform score-level fusion to further improve classification accuracy. Our technique achieves competitive performance, with a classification accuracy of 83.4% on the DCASE 2016 development dataset, compared to the current state of the art.",
author = "Shamsiah Abidin and Roberto Togneri and Ferdous Sohel",
year = "2019",
month = feb,
day = "11",
doi = "10.1109/AVSS.2018.8639164",
language = "English",
series = "Proceedings of AVSS 2018 - 2018 15th IEEE International Conference on Advanced Video and Signal-Based Surveillance",
publisher = "Institute of Electrical and Electronics Engineers (IEEE)",
booktitle = "Proceedings of AVSS 2018 - 2018 15th IEEE International Conference on Advanced Video and Signal-Based Surveillance",
address = "United States",
note = "15th IEEE International Conference on Advanced Video and Signal-Based Surveillance, AVSS 2018; Conference date: 27-11-2018 through 30-11-2018",
}
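
The abstract above describes a pipeline of CQT and Mel-spectrum TFRs, zoned LBP feature extraction, and score-level fusion. The sketch below illustrates how such zoned LBP features could be computed in Python; the libraries (librosa, scikit-image), function names, and all parameter values (number of zones, LBP points/radius) are assumptions chosen for illustration, not the configuration used in the paper.

```python
# A minimal sketch, assuming librosa and scikit-image: CQT / Mel TFRs -> zoned
# LBP histograms, loosely following the pipeline described in the abstract.
import numpy as np
import librosa
from skimage.feature import local_binary_pattern

def zoned_lbp_features(y, sr, n_zones=4, lbp_points=8, lbp_radius=1):
    """Concatenate LBP histograms over frequency zones of CQT and Mel TFRs."""
    # Time-frequency representations as log-magnitude "images".
    cqt = librosa.amplitude_to_db(np.abs(librosa.cqt(y, sr=sr)), ref=np.max)
    mel = librosa.power_to_db(librosa.feature.melspectrogram(y=y, sr=sr), ref=np.max)

    features = []
    for tfr in (cqt, mel):
        # Uniform LBP codes computed on the TFR treated as a grey-level image.
        lbp = local_binary_pattern(tfr, P=lbp_points, R=lbp_radius, method="uniform")
        n_bins = lbp_points + 2  # number of distinct uniform LBP codes
        # Split along the frequency axis so localized spectral information is kept.
        for zone in np.array_split(lbp, n_zones, axis=0):
            hist, _ = np.histogram(zone, bins=n_bins, range=(0, n_bins), density=True)
            features.append(hist)
    return np.concatenate(features)
```

Following the abstract, separate classifiers would be trained on the CQT-based and Mel-based feature sets and their outputs combined by score-level fusion (for example, by averaging class scores); the specific classifier and fusion rule are not specified here and would be assumptions.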