@article{li2020incremental,
  author            = {Li, Jie and Zhong, Junpei and Yang, Jingfeng and Yang, Chenguang},
  title             = {An Incremental Learning Framework to Enhance Teaching by Demonstration Based on Multimodal Sensor Fusion},
  journal           = {Frontiers in Neurorobotics},
  volume            = {14},
  number            = {55},
  year              = {2020},
  doi               = {10.3389/fnbot.2020.00055},
  eissn             = {1662-5218},
  publisher         = {Frontiers Media},
  publicationstatus = {Published},
  url               = {https://uwe-repository.worktribe.com/output/6738241},
  keywords          = {Artificial Intelligence, Biomedical Engineering},
  abstract          = {Though a robot can reproduce the demonstration trajectory from a human demonstrator by teleoperation, there is a certain error between the reproduced trajectory and the desired trajectory. To minimize this error, we propose a multimodal incremental learning framework based on a teleoperation strategy that can enable the robot to reproduce the demonstration task accurately. The multimodal demonstration data are collected from two different kinds of sensors in the demonstration phase. Then, the Kalman filter (KF) and dynamic time warping (DTW) algorithms are used to preprocessing the data for the multiple sensor signals. The KF algorithm is mainly used to fuse sensor data of different modalities, and the DTW algorithm is used to align the data in the same timeline. The preprocessed demonstration data are further trained and learned by the incremental learning network and sent to a Baxter robot for reproducing the task demonstrated by the human. Comparative experiments have been performed to verify the effectiveness of the proposed framework.},
}