@comment{This file was generated by bib2html.pl version 0.94,
         written by Patrick Riley.}
@comment{This file came from Freek Stulp's publication pages at
         http://www-clmc.usc.edu/~stulp/publications}

@inproceedings{stulp11learning,
  author    = {Stulp, Freek and Theodorou, Evangelos and Buchli, Jonas and Schaal, Stefan},
  title     = {Learning to Grasp under Uncertainty},
  booktitle = {Proceedings of the {IEEE} International Conference on Robotics and Automation ({ICRA})},
  year      = {2011},
  abstract  = {We present an approach that enables robots to learn motion primitives that are robust towards state estimation uncertainties. During reaching and preshaping, the robot learns to use fine manipulation strategies to maneuver the object into a pose at which closing the hand to perform the grasp is more likely to succeed. In contrast, common assumptions in grasp planning and motion planning for reaching are that these tasks can be performed independently, and that the robot has perfect knowledge of the pose of the objects in the environment. We implement our approach using Dynamic Movement Primitives and the probabilistic model-free reinforcement learning algorithm Policy Improvement with Path Integrals (PI2). The cost function that PI2 optimizes is a simple boolean that penalizes failed grasps. The key to acquiring robust motion primitives is to sample the actual pose of the object from a distribution that represents the state estimation uncertainty. During learning, the robot will thus optimize the chance of grasping an object from this distribution, rather than at one specific pose. In our empirical evaluation, we demonstrate how the motion primitives become more robust when grasping simple cylindrical objects, as well as more complex, non-convex objects. We also investigate how well the learned motion primitives generalize towards new object positions and other state estimation uncertainty distributions.},
  bib2html_pubtype = {Refereed Conference Paper},
  bib2html_rescat  = {Reinforcement Learning of Robot Skills},
}