@article{smith2018predicting,
  title = {Predicting user confidence during visual decision making},
  author = {Smith, Jim and Legg, Phil and Matovis, Milos and Kinsey, Kris},
  journal = {ACM Transactions on Interactive Intelligent Systems},
  volume = {8},
  number = {2},
  year = {2018},
  publisher = {Association for Computing Machinery (ACM)},
  doi = {10.1145/3185524},
  issn = {2160-6455},
  eissn = {2160-6463},
  url = {https://uwe-repository.worktribe.com/output/867441},
  publicationstatus = {Published},
  keywords = {Computer Science Research Centre, human-centred machine learning, confidence},
  abstract = {People are not infallible consistent “oracles”: their confidence in decision-making may vary significantly between tasks and over time. We have previously reported the benefits of using an interface and algorithms that explicitly captured and exploited users’ confidence: error rates were reduced by up to 50\% for an industrial multi-class learning problem, and the number of interactions required in a design-optimisation context was reduced by 33\%. Having access to users’ confidence judgements could significantly benefit intelligent interactive systems in industry, in areas such as intelligent tutoring systems, and in health care. There are many reasons for wanting to capture information about confidence implicitly. Some are ergonomic, but others are more “social”—such as wishing to understand (and possibly take account of) users’ cognitive state without interrupting them. We investigate the hypothesis that users’ confidence can be accurately predicted from measurements of their behaviour. Eye-tracking systems were used to capture users’ gaze patterns as they undertook a series of visual decision tasks, after each of which they reported their confidence on a 5-point Likert scale. Subsequently, predictive models were built using “conventional” machine learning approaches for numerical summary features derived from users’ behaviour. We also investigate the extent to which the deep learning paradigm can reduce the need to design features specific to each application by creating “gaze maps”—visual representations of the trajectories and durations of users’ gaze fixations—and then training deep convolutional networks on these images. Treating the prediction of user confidence as a two-class problem (confident/not confident), we attained classification accuracy of 88\% for the scenario of new users on known tasks, and 87\% for known users on new tasks. Considering the confidence as an ordinal variable, we produced regression models with a mean absolute error of ≈0.7 in both cases. Capturing just a simple subset of non-task-specific numerical features gave slightly worse, but still quite high accuracy (e.g., MAE ≈ 1.0). Results obtained with gaze maps and convolutional networks are competitive, despite not having access to longer-term information about users and tasks, which was vital for the “summary” feature sets. This suggests that the gaze-map-based approach forms a viable, transferable alternative to handcrafting features for each different application. These results provide significant evidence to confirm our hypothesis, and offer a way of substantially improving many interactive artificial intelligence applications via the addition of cheap, non-intrusive hardware and computationally cheap prediction algorithms.}
}