@inproceedings{98e0a373d9ae4aad8176050d9f0ade96,
title = "Multimodal models for contextual affect assessment in real-time",
abstract = "Most affect classification schemes rely on near accurate single-cue models resulting in less than required accuracy under certain peculiar conditions. We investigate how the holism of a multimodal solution could be exploited for affect classification. This paper presents the design and implementation of a prototype, stand-alone, real-time multimodal affective state classification system. The presented system utilizes speech and facial muscle movements to create a holistic classifier. The system combines a facial expression classifier and a speech classifier that analyses speech through paralanguage and propositional content. The proposed classification scheme includes a Support Vector Machine (SVM) - paralanguage; a K-Nearest Neighbor (KNN) - propositional content and an InceptionV3 neural network - facial expressions of affective states. The SVM and Inception models boasted respective validation accuracies of 99.2% and 92.78%.",
keywords = "Affect assessment, Affective state classification, Multimodal classifier ensemble, Paralanguage and propositional content analyses, Speech features",
author = "Jordan Vice and Khan, {Masood Mehmood} and Svetlana Yanushkevich",
year = "2019",
month = dec,
doi = "10.1109/CogMI48466.2019.00020",
language = "English",
isbn = "978-1-7281-6738-1",
series = "Proceedings - 2019 IEEE 1st International Conference on Cognitive Machine Intelligence, CogMI 2019",
publisher = "IEEE, Institute of Electrical and Electronics Engineers",
pages = "87--92",
booktitle = "Proceedings - 2019 IEEE 1st International Conference on Cognitive Machine Intelligence, CogMI 2019",
address = "United States",
note = "1st IEEE International Conference on Cognitive Machine Intelligence, CogMI 2019 ; Conference date: 12-12-2019 Through 14-12-2019",
}
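
The abstract above describes a three-way late-fusion ensemble (an SVM for paralanguage, a KNN for propositional content, and an InceptionV3 network for facial expressions). The following Python sketch illustrates how such per-modality classifiers could be fused via averaged class probabilities; it is a minimal illustration under stated assumptions, not the authors' implementation. The feature shapes, fusion weights, and the logistic-regression stand-in for the InceptionV3 embedding head are all hypothetical.

# Minimal late-fusion sketch (hypothetical; not the paper's code).
# Three per-modality classifiers vote via a weighted average of
# their predicted class probabilities.
import numpy as np
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from sklearn.linear_model import LogisticRegression

rng = np.random.default_rng(0)
n, n_classes = 200, 3

# Placeholder features per modality; real inputs would be paralanguage
# descriptors, text/propositional features, and CNN face embeddings.
X_para = rng.normal(size=(n, 16))
X_prop = rng.normal(size=(n, 32))
X_face = rng.normal(size=(n, 64))   # stands in for InceptionV3 embeddings
y = rng.integers(0, n_classes, size=n)

svm = SVC(probability=True).fit(X_para, y)                 # paralanguage
knn = KNeighborsClassifier(n_neighbors=5).fit(X_prop, y)   # propositional content
face = LogisticRegression(max_iter=1000).fit(X_face, y)    # proxy for the CNN head

def fuse(xp, xt, xf, weights=(1/3, 1/3, 1/3)):
    """Weighted average of per-modality class probabilities (assumed fusion rule)."""
    probs = (weights[0] * svm.predict_proba(xp)
             + weights[1] * knn.predict_proba(xt)
             + weights[2] * face.predict_proba(xf))
    return probs.argmax(axis=1)

print(fuse(X_para[:5], X_prop[:5], X_face[:5]))

Equal fusion weights are an assumption; in practice they could be tuned on a validation set, or the fusion step replaced by a learned combiner.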