@article{Rietz1659937, author = {Rietz, Finn and Magg, Sven and Heintz, Fredrik and Stoyanov, Todor and Wermter, Stefan and Stork, Johannes A.}, institution = {Örebro University, School of Science and Technology; Department of Informatics, University of Hamburg, Hamburg, Germany; Hamburger Informatik Technologie-Center, Universität Hamburg, Hamburg, Germany; Department of Computer and Information Science, Linköping University, Linköping, Sweden}, journal = {Neural Computing \& Applications}, note = {Funding agencies: {\"O}rebro University; Wallenberg AI, Autonomous Systems and Software Program (WASP) - Knut and Alice Wallenberg Foundation; Federal Ministry for Economic Affairs and Climate FKZ 20X1905A-D}, number = {23}, pages = {16693--16704}, title = {Hierarchical goals contextualize local reward decomposition explanations}, volume = {35}, DOI = {10.1007/s00521-022-07280-8}, keywords = {Reinforcement learning, Explainable AI, Reward decomposition, Hierarchical goals, Local explanations}, abstract = {One-step reinforcement learning explanation methods account for individual actions but fail to consider the agent's future behavior, which can make their interpretation ambiguous. We propose to address this limitation by providing hierarchical goals as context for one-step explanations. By considering the current hierarchical goal as a context, one-step explanations can be interpreted with higher certainty, as the agent's future behavior is more predictable. We combine reward decomposition with hierarchical reinforcement learning into a novel explainable reinforcement learning framework, which yields more interpretable, goal-contextualized one-step explanations. 
With a qualitative analysis of one-step reward decomposition explanations, we first show that their interpretability is indeed limited in scenarios with multiple, different optimal policies-a characteristic shared by other one-step explanation methods. Then, we show that our framework retains high interpretability in such cases, as the hierarchical goal can be considered as context for the explanation. To the best of our knowledge, our work is the first to investigate hierarchical goals not as an explanation directly but as additional context for one-step reinforcement learning explanations. }, year = {2023} } @inproceedings{Yang1802120, author = {Yang, Quantao and Stork, Johannes A. and Stoyanov, Todor}, booktitle = {2023 IEEE 19th International Conference on Automation Science and Engineering (CASE) : }, institution = {Örebro University, School of Science and Technology}, title = {Learn from Robot : Transferring Skills for Diverse Manipulation via Cycle Generative Networks}, series = {IEEE International Conference on Automation Science and Engineering}, DOI = {10.1109/CASE56687.2023.10260484}, keywords = {Reinforcement Learning, Transfer Learning, Generative Models}, abstract = {Reinforcement learning (RL) has shown impressive results on a variety of robot tasks, but it requires a large amount of data for learning a single RL policy. However, in manufacturing there is a wide demand of reusing skills from different robots and it is hard to transfer the learned policy to different hardware due to diverse robot body morphology, kinematics, and dynamics. In this paper, we address the problem of transferring policies between different robot platforms. We learn a set of skills on each specific robot and represent them in a latent space. We propose to transfer the skills between different robots by mapping latent action spaces through a cycle generative network in a supervised learning manner. 
We extend the policy model learned on one robot with a pre-trained generative network to enable the robot to learn from the skill of another robot. We evaluate our method on several simulated experiments and demonstrate that our Learn from Robot (LfR) method accelerates new skill learning. }, ISBN = {9798350320701, 9798350320695}, year = {2023} } @article{Dominguez1706746, author = {Dominguez, David Caceres and Iannotta, Marco and Stork, Johannes Andreas and Schaffernicht, Erik and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, note = {Funding agencies: Industrial Graduate School Collaborative AI \& Robotics (CoAIRob); General Electric Dnr:20190128}, number = {4}, pages = {12110--12117}, title = {A Stack-of-Tasks Approach Combined With Behavior Trees : A New Framework for Robot Control}, volume = {7}, DOI = {10.1109/LRA.2022.3211481}, keywords = {Behavior-based systems, control architectures and programming}, abstract = {Stack-of-Tasks (SoT) control allows a robot to simultaneously fulfill a number of prioritized goals formulated in terms of (in)equality constraints in error space. Since this approach solves a sequence of Quadratic Programs (QP) at each time-step, without taking into account any temporal state evolution, it is suitable for dealing with local disturbances. However, its limitation lies in the handling of situations that require non-quadratic objectives to achieve a specific goal, as well as situations where countering the control disturbance would require a locally suboptimal action. Recent works address this shortcoming by exploiting Finite State Machines (FSMs) to compose the tasks in such a way that the robot does not get stuck in local minima. Nevertheless, the intrinsic trade-off between reactivity and modularity that characterizes FSMs makes them impractical for defining reactive behaviors in dynamic environments. 
In this letter, we combine the SoT control strategy with Behavior Trees (BTs), a task switching structure that addresses some of the limitations of the FSMs in terms of reactivity, modularity and re-usability. Experimental results on a Franka Emika Panda 7-DOF manipulator show the robustness of our framework, that allows the robot to benefit from the reactivity of both SoT and BTs. }, year = {2022} } @inproceedings{Hoang1648882, author = {Hoang, Dinh-Cuong and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2022 International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {ICT Department, FPT University, Hanoi, Vietnam}, pages = {1492--1498}, title = {Context-Aware Grasp Generation in Cluttered Scenes}, DOI = {10.1109/ICRA46639.2022.9811371}, abstract = {Conventional methods to autonomous grasping rely on a pre-computed database with known objects to synthesize grasps, which is not possible for novel objects. On the other hand, recently proposed deep learning-based approaches have demonstrated the ability to generalize grasp for unknown objects. However, grasp generation still remains a challenging problem, especially in cluttered environments under partial occlusion. In this work, we propose an end-to-end deep learning approach for generating 6-DOF collision-free grasps given a 3D scene point cloud. To build robustness to occlusion, the proposed model generates candidates by casting votes and accumulating evidence for feasible grasp configurations. We exploit contextual information by encoding the dependency of objects in the scene into features to boost the performance of grasp generation. The contextual information enables our model to increase the likelihood that the generated grasps are collision-free. 
Our experimental results confirm that the proposed system performs favorably in terms of predicting object grasps in cluttered environments in comparison to the current state of the art methods. }, ISBN = {9781728196824, 9781728196817}, year = {2022} } @inproceedings{Iannotta1724688, author = {Iannotta, Marco and Dominguez, David Caceres and Stork, Johannes Andreas and Schaffernicht, Erik and Stoyanov, Todor}, booktitle = {IROS 2022 Workshop on Mobile Manipulation and Embodied Intelligence (MOMA): Challenges and Opportunities : }, institution = {Örebro University, School of Science and Technology}, title = {Heterogeneous Full-body Control of a Mobile Manipulator with Behavior Trees}, DOI = {10.48550/arXiv.2210.08600}, abstract = {Integrating the heterogeneous controllers of a complex mechanical system, such as a mobile manipulator, within the same structure and in a modular way is still challenging. In this work we extend our framework based on Behavior Trees for the control of a redundant mechanical system to the problem of commanding more complex systems that involve multiple low-level controllers. This allows the integrated systems to achieve non-trivial goals that require coordination among the sub-systems. }, year = {2022} } @inproceedings{Yang1731600, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {3rd Workshop on Robotic Manipulation of Deformable Objects: Challenges in Perception, Planning and Control for Soft Interaction (ROMADO-SI), IROS 2022, Kyoto, Japan : }, institution = {Örebro University, School of Science and Technology; Department of Computing and Software, McMaster University, Canada}, title = {Learn to Predict Posterior Probability in Particle Filtering for Tracking Deformable Linear Objects}, abstract = {Tracking deformable linear objects (DLOs) is a key element for applications where robots manipulate DLOs. 
However, the lack of distinctive features or appearance on the DLO and the object’s high-dimensional state space make tracking challenging and still an open question in robotics. In this paper, we propose a method for tracking the state of a DLO by applying a particle filter approach, where the posterior probability of each sample is estimated by a learned predictor. Our method can achieve accurate tracking even with no prerequisite segmentation which many related works require. Due to the differentiability of the posterior probability predictor, our method can leverage the gradients of posterior probabilities with respect to the latent states to improve the motion model in the particle filter. The preliminary experiments suggest that the proposed method can provide robust tracking results and the estimated DLO state converges quickly to the true state if the initial state is unknown. }, URL = {https://romado-workshop.github.io/ROMADO2022/}, year = {2022} } @article{Yang1696745, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, eid = {104258}, title = {Learning differentiable dynamics models for shape control of deformable linear objects}, volume = {158}, DOI = {10.1016/j.robot.2022.104258}, keywords = {Deformable linear object, Model learning, Parameter identification, Model predictive control}, abstract = {Robots manipulating deformable linear objects (DLOs) – such as surgical sutures in medical robotics, or cables and hoses in industrial assembly – can benefit substantially from accurate and fast differentiable predictive models. However, the off-the-shelf analytic physics models fall short of differentiability. Recently, neural-network-based data-driven models have shown promising results in learning DLO dynamics. 
These models have additional advantages compared to analytic physics models, as they are differentiable and can be used in gradient-based trajectory planning. Still, the data-driven approaches demand a large amount of training data, which can be challenging for real-world applications. In this paper, we propose a framework for learning a differentiable data-driven model for DLO dynamics with a minimal set of real-world data. To learn DLO twisting and bending dynamics in a 3D environment, we first introduce a new suitable DLO representation. Next, we use a recurrent network module to propagate effects between different segments along a DLO, thereby addressing a critical limitation of current state-of-the-art methods. Then, we train a data-driven model on synthetic data generated in simulation, instead of foregoing the time-consuming and laborious data collection process for real-world applications. To achieve a good correspondence between real and simulated models, we choose a set of simulation model parameters through parameter identification with only a few trajectories of a real DLO required. We evaluate several optimization methods for parameter identification and demonstrate that the differential evolution algorithm is efficient and effective for parameter identification. In DLO shape control tasks with a model-based controller, the data-driven model trained on synthetic data generated by the resulting models performs on par with the ones trained with a comparable amount of real-world data which, however, would be intractable to collect. }, year = {2022} } @article{Yang1677520, author = {Yang, Quantao and Stork, Johannes A. 
and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing and Software, McMaster University, Canada}, journal = {IEEE Robotics and Automation Letters}, note = {Funding agency:Wallenberg AI, Autonomous Systems and Software Program (WASP) - Knut and Alice Wallenberg Foundation}, number = {3}, pages = {7652--7659}, title = {MPR-RL : Multi-Prior Regularized Reinforcement Learning for Knowledge Transfer}, volume = {7}, DOI = {10.1109/LRA.2022.3184805}, keywords = {Machine Learning for Robot Control, Reinforcement Learning, Transfer Learning}, abstract = {In manufacturing, assembly tasks have been a challenge for learning algorithms due to variant dynamics of different environments. Reinforcement learning (RL) is a promising framework to automatically learn these tasks, yet it is still not easy to apply a learned policy or skill, that is the ability of solving a task, to a similar environment even if the deployment conditions are only slightly different. In this letter, we address the challenge of transferring knowledge within a family of similar tasks by leveraging multiple skill priors. We propose to learn prior distribution over the specific skill required to accomplish each task and compose the family of skill priors to guide learning the policy for a new task by comparing the similarity between the target task and the prior ones. Our method learns a latent action space representing the skill embedding from demonstrated trajectories for each prior task. We have evaluated our method on a task in simulation and a set of peg-in-hole insertion tasks and demonstrate better generalization to new tasks that have never been encountered during training. Our Multi-Prior Regularized RL (MPR-RL) method is deployed directly on a real world Franka Panda arm, requiring only a set of demonstrated trajectories from similar, but crucially not identical, problem instances. 
}, year = {2022} } @article{Ivan1691786, author = {Ivan, Jean-Paul A. and Stoyanov, Todor and Stork, Johannes A.}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {4}, pages = {8996--9003}, title = {Online Distance Field Priors for Gaussian Process Implicit Surfaces}, volume = {7}, DOI = {10.1109/LRA.2022.3189434}, keywords = {Gaussian processes, machine learning, robot sensing systems, supervised learning}, abstract = {Gaussian process (GP) implicit surface models provide environment and object representations which elegantly address noise and uncertainty while remaining sufficiently flexible to capture complex geometry. However, GP models quickly become intractable as the size of the observation set grows-a trait which is difficult to reconcile with the rate at which modern range sensors produce data. Furthermore, naive applications of GPs to implicit surface models allocate model resources uniformly, thus using precious resources to capture simple geometry. In contrast to prior work addressing these challenges though model sparsification, spatial partitioning, or ad-hoc filtering, we propose introducing model bias online through the GP's mean function. We achieve more accurate distance fields using smaller models by creating a distance field prior from features which are easy to extract and have analytic distance fields. In particular, we demonstrate this approach using linear features. We show the proposed distance field halves model size in a 2D mapping task using data from a SICK S300 sensor. When applied to a single 3D scene from the TUM RGB-D SLAM dataset, we achieve a fivefold reduction in model size. Our proposed prior results in more accurate GP implicit surfaces, while allowing existing models to function in larger environments or with larger spatial partitions due to reduced model size. 
}, year = {2022} } @inproceedings{Yang1727549, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {4056--4062}, title = {Online Model Learning for Shape Control of Deformable Linear Objects}, DOI = {10.1109/IROS47612.2022.9981080}, abstract = {Traditional approaches to manipulating the state of deformable linear objects (DLOs) - i.e., cables, ropes - rely on model-based planning. However, constructing an accurate dynamic model of a DLO is challenging due to the complexity of interactions and a high number of degrees of freedom. This renders the task of achieving a desired DLO shape particularly difficult and motivates the use of model-free alternatives, which while maintaining generality suffer from a high sample complexity. In this paper, we bridge the gap between these fundamentally different approaches and propose a framework that learns dynamic models of DLOs through trial-and-error interaction. Akin to model-based reinforcement learning (RL), we interleave learning and exploration to solve a 3D shape control task for a DLO. Our approach requires only a fraction of the interaction samples of the current state-of-the-art model-free RL alternatives to achieve superior shape control performance. Unlike offline model learning, our approach does not require expert knowledge for data collection, retains the ability to explore, and automatically selects relevant experience. }, ISBN = {9781665479271, 9781665479288}, year = {2022} } @article{Yang1716908, author = {Yang, Yuxuan and Stork, Johannes A. 
and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {4}, pages = {12577--12584}, title = {Particle Filters in Latent Space for Robust Deformable Linear Object Tracking}, volume = {7}, DOI = {10.1109/LRA.2022.3216985}, keywords = {Deep learning for visual perception, perception for grasping and manipulation, RGB-D perception}, abstract = {Tracking of deformable linear objects (DLOs) is important for many robotic applications. However, achieving robust and accurate tracking is challenging due to the lack of distinctive features or appearance on the DLO, the object's high-dimensional state space, and the presence of occlusion. In this letter, we propose a method for tracking the state of a DLO by applying a particle filter approach within a lower-dimensional state embedding learned by an autoencoder. The dimensionality reduction preserves state variation, while simultaneously enabling a particle filter to accurately track DLO state evolution with a practically feasible number of particles. Compared to previous works, our method requires neither running a high-fidelity physics simulation, nor manual designs of constraints and regularization. Without the assumption of knowing the initial DLO state, our method can achieve accurate tracking even under complex DLO motions and in the presence of severe occlusions. }, year = {2022} } @inproceedings{Rietz1709450, author = {Rietz, Finn and Schaffernicht, Erik and Stoyanov, Todor and Stork, Johannes Andreas}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Towards Task-Prioritized Policy Composition}, abstract = {Combining learned policies in a prioritized, ordered manner is desirable because it allows for modular design and facilitates data reuse through knowledge transfer. 
In control theory, prioritized composition is realized by null-space control, where low-priority control actions are projected into the null-space of high-priority control actions. Such a method is currently unavailable for Reinforcement Learning. We propose a novel, task-prioritized composition framework for Reinforcement Learning, which involves a novel concept: The indifferent-space of Reinforcement Learning policies. Our framework has the potential to facilitate knowledge transfer and modular design while greatly increasing data efficiency and data reuse for Reinforcement Learning agents. Further, our approach can ensure high-priority constraint satisfaction, which makes it promising for learning in safety-critical domains like robotics. Unlike null-space control, our approach allows learning globally optimal policies for the compound task by online learning in the indifference-space of higher-level policies after initial compound policy construction.  }, year = {2022} } @inproceedings{Yang1708933, author = {Yang, Quantao and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Transferring Knowledge for Reinforcement Learning in Contact-Rich Manipulation}, abstract = {In manufacturing, assembly tasks have been a challenge for learning algorithms due to variant dynamics of different environments. Reinforcement learning (RL) is a promising framework to automatically learn these tasks, yet it is still not easy to apply a learned policy or skill, that is the ability of solving a task, to a similar environment even if the deployment conditions are only slightly different. In this paper, we address the challenge of transferring knowledge within a family of similar tasks by leveraging multiple skill priors. 
We propose to learn prior distribution over the specific skill required to accomplish each task and compose the family of skill priors to guide learning the policy for a new task by comparing the similarity between the target task and the prior ones. Our method learns a latent action space representing the skill embedding from demonstrated trajectories for each prior task. We have evaluated our method on a set of peg-in-hole insertion tasks and demonstrate better generalization to new tasks that have never been encountered during training.  }, URL = {https://arxiv.org/abs/2210.02891}, year = {2022} } @article{Yang1685070, author = {Yang, Quantao and D{\"u}rr, Alexander and Topp, Elin Anna and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, Faculty of Engineering (LTH), Lund University, Lund, Sweden}, institution = {Department of Computer Science, Faculty of Engineering (LTH), Lund University, Lund, Sweden}, institution = {Department of Computing and Software, McMaster University, Hamilton ON, Canada }, journal = {IEEE Robotics and Automation Letters}, number = {3}, pages = {8391--8398}, title = {Variable Impedance Skill Learning for Contact-Rich Manipulation}, volume = {7}, DOI = {10.1109/LRA.2022.3187276}, keywords = {Machine learning for robot control, reinforcement learning, variable impedance control}, abstract = {Contact-rich manipulation tasks remain a hard problem in robotics that requires interaction with unstructured environments. Reinforcement Learning (RL) is one potential solution to such problems, as it has been successfully demonstrated on complex continuous control tasks. Nevertheless, current state-of-the-art methods require policy training in simulation to prevent undesired behavior and later domain transfer even for simple skills involving contact. 
In this paper, we address the problem of learning contact-rich manipulation policies by extending an existing skill-based RL framework with a variable impedance action space. Our method leverages a small set of suboptimal demonstration trajectories and learns from both position, but also crucially impedance-space information. We evaluate our method on a number of peg-in-hole task variants with a Franka Panda arm and demonstrate that learning variable impedance actions for RL in Cartesian space can be deployed directly on the real robot, without resorting to learning in simulation. }, year = {2022} } @article{Guler1693298, author = {G{\"u}ler, P{\"u}ren and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {Örebro University, Örebro, Sweden}, journal = {Frontiers in Robotics and AI}, eid = {833173}, title = {Visual state estimation in unseen environments through domain adaptation and metric learning}, volume = {9}, DOI = {10.3389/frobt.2022.833173}, keywords = {articulated pose estimation, deep metric learning, domain augmentation, joint state estimation, triplet loss}, abstract = {In robotics, deep learning models are used in many visual perception applications, including the tracking, detection and pose estimation of robotic manipulators. The state of the art methods however are conditioned on the availability of annotated training data, which may in practice be costly or even impossible to collect. Domain augmentation is one popular method to improve generalization to out-of-domain data by extending the training data set with predefined sources of variation, unrelated to the primary task. 
While this typically results in better performance on the target domain, it is not always clear that the trained models are capable to accurately separate the signals relevant to solving the task (e.g., appearance of an object of interest) from those associated with differences between the domains (e.g., lighting conditions). In this work we propose to improve the generalization capabilities of models trained with domain augmentation by formulating a secondary structured metric-space learning objective. We concentrate on one particularly challenging domain transfer task-visual state estimation for an articulated underground mining machine-and demonstrate the benefits of imposing structure on the encoding space. Our results indicate that the proposed method has the potential to transfer feature embeddings learned on the source domain, through a suitably designed augmentation procedure, and on to an unseen target domain. }, year = {2022} } @article{Hoang1691597, author = {Hoang, Dinh-Cuong and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {CT Department, FPT University, Hanoi, Vietnam}, institution = {Department of Computing and Software, McMaster University, Hamilton ON, Canada}, journal = {IEEE Robotics and Automation Letters}, number = {4}, pages = {8980--8987}, title = {Voting and Attention-Based Pose Relation Learning for Object Pose Estimation From 3D Point Clouds}, volume = {7}, DOI = {10.1109/LRA.2022.3189158}, keywords = {6D object pose estimation, 3D point cloud, robot manipulation}, abstract = {Estimating the 6DOF pose of objects is an important function in many applications, such as robot manipulation or augmented reality. However, accurate and fast pose estimation from 3D point clouds is challenging, because of the complexity of object shapes, measurement noise, and presence of occlusions. 
We address this challenging task using an end-to-end learning approach for object pose estimation given a raw point cloud input. Our architecture pools geometric features together using a self-attention mechanism and adopts a deep Hough voting scheme for pose proposal generation. To build robustness to occlusion, the proposed network generates candidates by casting votes and accumulating evidence for object locations. Specifically, our model learns higher-level features by leveraging the dependency of object parts and object instances, thereby boosting the performance of object pose estimation. Our experiments show that our method outperforms state-of-the-art approaches in public benchmarks including the Sileane dataset [35] and the Fraunhofer IPA dataset [36]. We also deploy our proposed method to a real robot pick-and-place based on the estimated pose. }, year = {2022} } @inproceedings{Yang1620121, author = {Yang, Quantao and D{\"u}rr, Alexander and Topp, Elin Anna and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {NeurIPS 2021 Workshop on Deployable Decision Making in Embodied Systems (DDM) : }, institution = {Örebro University, School of Science and Technology; Department of Computer Science, Lund University, Sweden}, title = {Learning Impedance Actions for Safe Reinforcement Learning in Contact-Rich Tasks}, abstract = {Reinforcement Learning (RL) has the potential of solving complex continuous control tasks, with direct applications to robotics. Nevertheless, current state-of-the-art methods are generally unsafe to learn directly on a physical robot as exploration by trial-and-error can cause harm to the real world systems. In this paper, we leverage a framework for learning latent action spaces for RL agents from demonstrated trajectories. 
We extend this framework by connecting it to a variable impedance Cartesian space controller, allowing us to learn contact-rich tasks safely and efficiently. Our method learns from trajectories that incorporate both positional, but also crucially impedance-space information. We evaluate our method on a number of peg-in-hole task variants with a Franka Panda arm and demonstrate that learning variable impedance actions for RL in Cartesian space can be safely deployed on the real robot directly, without resorting to learning in simulation and a subsequent policy transfer. }, year = {2021} } @inproceedings{Yang1610216, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2021 IEEE International Conference on Robotics and Automation (ICRA) : IEEE International Conference on Robotics and Automation (ICRA 2021), Xi'an, China, May 30 - June 5, 2021}, institution = {Örebro University, School of Science and Technology}, pages = {1950--1957}, title = {Learning to Propagate Interaction Effects for Modeling Deformable Linear Objects Dynamics}, series = {2021 IEEE International Conference on Robotics and Automation (ICRA)}, DOI = {10.1109/ICRA48506.2021.9561636}, abstract = {Modeling dynamics of deformable linear objects (DLOs), such as cables, hoses, sutures, and catheters, is an important and challenging problem for many robotic manipulation applications. In this paper, we propose the first method to model and learn full 3D dynamics of DLOs from data. Our approach is capable of capturing the complex twisting and bending dynamics of DLOs and allows local effects to propagate globally. To this end, we adapt the interaction network (IN) dynamics learning method for capturing the interaction between neighboring segments in a DLO and augment it with a recurrent model for propagating interaction effects along the length of a DLO. 
For learning twisting and bending dynamics in 3D, we also introduce a new suitable representation of DLO segments and their relationships. Unlike the original IN method, our model learns to propagate the effects of local interaction between neighboring segments to each segment in the chain within a single time step, without the need for iterated propagation steps. Evaluation of our model with synthetic and newly collected real-world data shows better accuracy and generalization in short-term and long-term predictions than the current state of the art. We further integrate our learned model in a model predictive control scheme and use it to successfully control the shape of a DLO. Our implementation is available at https://gitsvn-nt.oru.se/ammlab-public/in-bilstm. }, ISBN = {9781728190778, 9781728190785}, year = {2021} } @inproceedings{Yang1605135, author = {Yang, Quantao and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2021 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, note = {Funding agency: Wallenberg Artificial Intelligence, Autonomous Systems and Software Program (WASP)}, title = {Null space based efficient reinforcement learning with hierarchical safety constraints}, DOI = {10.1109/ECMR50962.2021.9568848}, abstract = {Reinforcement learning is inherently unsafe for use in physical systems, as learning by trial-and-error can cause harm to the environment or the robot itself. One way to avoid unpredictable exploration is to add constraints in the action space to restrict the robot behavior. In this paper, we propose a null space based framework of integrating reinforcement learning methods in constrained continuous action spaces. We leverage a hierarchical control framework to decompose target robotic skills into higher ranked tasks (e.g., joint limits and obstacle avoidance) and lower ranked reinforcement learning task. 
Safe exploration is guaranteed by only learning policies in the null space of higher prioritized constraints. Meanwhile multiple constraint phases for different operational spaces are constructed to guide the robot exploration. Also, we add penalty loss for violating higher ranked constraints to accelerate the learning procedure. We have evaluated our method on different redundant robotic tasks in simulation and show that our null space based reinforcement learning method can explore and learn safely and efficiently. }, ISBN = {9781665412131}, year = {2021} } @article{Sun1369388, author = {Sun, Da and Kiselev, Andrey and Liao, Qianfang and Stoyanov, Todor and Loutfi, Amy}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on Human-Machine Systems}, number = {1}, pages = {55--67}, title = {A New Mixed Reality - based Teleoperation System for Telepresence and Maneuverability Enhancement}, volume = {50}, DOI = {10.1109/THMS.2019.2960676}, keywords = {Force control, motion regulation, telerobotics, virtual reality}, abstract = {Virtual Reality (VR) is regarded as a useful tool for teleoperation system that provides operators an immersive visual feedback on the robot and the environment. However, without any haptic feedback or physical constructions, VR-based teleoperation systems normally have poor maneuverability and may cause operational faults in some fine movements. In this paper, we employ Mixed Reality (MR), which combines real and virtual worlds, to develop a novel teleoperation system. New system design and control algorithms are proposed. For the system design, a MR interface is developed based on a virtual environment augmented with real-time data from the task space with a goal to enhance the operator’s visual perception. To allow the operator to be freely decoupled from the control loop and offload the operator’s burden, a new interaction proxy is proposed to control the robot. 
For the control algorithms, two control modes are introduced to improve long-distance movements and fine movements of the MR-based teleoperation. In addition, a set of fuzzy logic based methods are proposed to regulate the position, velocity and force of the robot in order to enhance the system maneuverability and deal with the potential operational faults. Barrier Lyapunov Function (BLF) and back-stepping methods are leveraged to design the control laws and simultaneously guarantee the system stability under state constraints.  Experiments conducted using a 6-Degree of Freedom (DoF) robotic arm prove the feasibility of the system. }, year = {2020} } @inproceedings{Stork1523713, author = {Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {IEEE International Conference on Robotics and Automation : }, institution = {Örebro University, School of Science and Technology}, pages = {10758--10764}, eid = {9196620}, title = {Ensemble of Sparse Gaussian Process Experts for Implicit Surface Mapping with Streaming Data}, DOI = {10.1109/ICRA40945.2020.9196620}, abstract = {Creating maps is an essential task in robotics and provides the basis for effective planning and navigation. In this paper, we learn a compact and continuous implicit surface map of an environment from a stream of range data with known poses. For this, we create and incrementally adjust an ensemble of approximate Gaussian process (GP) experts which are each responsible for a different part of the map. Instead of inserting all arriving data into the GP models, we greedily trade-off between model complexity and prediction error. Our algorithm therefore uses less resources on areas with few geometric features and more where the environment is rich in variety. We evaluate our approach on synthetic and real-world data sets and analyze sensitivity to parameters and measurement noise. 
The results show that we can learn compact and accurate implicit surface models under different conditions, with a performance … }, year = {2020} } @article{Hoang1513204, author = {Hoang, Dinh-Cuong and Lilienthal, Achim and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, eid = {103632}, title = {Object-RPE : Dense 3D Reconstruction and Pose Estimation with Convolutional Neural Networks}, volume = {133}, DOI = {10.1016/j.robot.2020.103632}, keywords = {Object pose estimation, 3D reconstruction, Semantic mapping, 3D registration}, abstract = {We present an approach for recognizing objects present in a scene and estimating their full pose by means of an accurate 3D instance-aware semantic reconstruction. Our framework couples convolutional neural networks (CNNs) and a state-of-the-art dense Simultaneous Localisation and Mapping(SLAM) system, ElasticFusion [1], to achieve both high-quality semantic reconstruction as well as robust 6D pose estimation for relevant objects. We leverage the pipeline of ElasticFusion as a back-bone and propose a joint geometric and photometric error function with per-pixel adaptive weights. While the main trend in CNN-based 6D pose estimation has been to infer an object’s position and orientation from single views of the scene, our approach explores performing pose estimation from multiple viewpoints, under the conjecture that combining multiple predictions can improve the robustness of an object detection system. The resulting system is capable of producing high-quality instance-aware semantic reconstructions of room-sized environments, as well as accurately detecting objects and their 6D poses. The developed method has been verified through extensive experiments on different datasets. Experimental results confirmed that the proposed system achieves improvements over state-of-the-art methods in terms of surface reconstruction and object pose prediction. 
Our code and video are available at https://sites.google.com/view/object-rpe. }, year = {2020} } @article{Hoang1427623, author = {Hoang, Dinh-Cuong and Lilienthal, Achim and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {2}, pages = {1962--1969}, title = {Panoptic 3D Mapping and Object Pose Estimation Using Adaptively Weighted Semantic Information}, volume = {5}, DOI = {10.1109/LRA.2020.2970682}, keywords = {RGB-D perception, object detection, segmen-tation and categorization, mapping}, abstract = {We present a system capable of reconstructing highly detailed object-level models and estimating the 6D pose of objects by means of an RGB-D camera. In this work, we integrate deep-learning-based semantic segmentation, instance segmentation, and 6D object pose estimation into a state of the art RGB-D mapping system. We leverage the pipeline of ElasticFusion as a backbone and propose modifications of the registration cost function to make full use of the semantic class labels in the process. The proposed objective function features tunable weights for the depth, appearance, and semantic information channels, which are learned from data. A fast semantic segmentation and registration weight prediction convolutional neural network (Fast-RGBD-SSWP) suited to efficient computation is introduced. In addition, our approach explores performing 6D object pose estimation from multiple viewpoints supported by the high-quality reconstruction system. The developed method has been verified through experimental validation on the YCB-Video dataset and a dataset of warehouse objects. Our results confirm that the proposed system performs favorably in terms of surface reconstruction, segmentation quality, and accurate object pose estimation in comparison to other state-of-the-art systems. Our code and video are available at https://sites.google.com/view/panoptic-mope. 
}, year = {2020} } @article{Sun1467594, author = {Sun, Da and Liao, Qianfang and Kiselev, Andrey and Stoyanov, Todor and Loutfi, Amy}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, note = {Funding Agencies:European Union (EU) Karlskoga Municipality  {\"O}rebro County }, eid = {103648}, title = {Shared mixed reality-bilateral telerobotic system}, volume = {134}, DOI = {10.1016/j.robot.2020.103648}, keywords = {Bilateral teleoperation, Shared control, Virtual reality}, abstract = {This study proposes a new shared mixed reality (MR)-bilateral telerobotic system. The main contribution of this study is to combine MR teleoperation and bilateral teleoperation, which takes advantage of the two types of teleoperation and compensates for each other's drawbacks. With this combination, the proposed system can address the asymmetry issues in bilateral teleoperation, such as kinematic redundancy and workspace inequality, and provide force feedback, which is lacking in MR teleoperation. In addition, this system effectively supports long-distance movements and fine movements. In this system, a new MR interface is developed to provide the operator with an immersive visual feedback of the workspace, in which a useful virtual controller known as an interaction proxy—is designed. Compared with previous virtual reality-based teleoperation systems, this interaction proxy can freely decouple the operator from the control loop, such that the operational burden can be substantially alleviated. Additionally, the force feedback provided by the bilateral teleoperation gives the operator an advanced perception about the remote workspace and can improve task performance. Experiments on multiple pick-and-place tasks are provided to demonstrate the feasibility and effectiveness of the proposed system. 
}, year = {2020} } @article{Sun1317799, author = {Sun, Da and Liao, Qianfang and Stoyanov, Todor and Kiselev, Andrey and Loutfi, Amy}, institution = {Örebro University, School of Science and Technology}, journal = {Automatica}, pages = {358--373}, title = {Bilateral telerobotic system using Type-2 fuzzy neural network based moving horizon estimation force observer for enhancement of environmental force compliance and human perception}, volume = {106}, DOI = {10.1016/j.automatica.2019.04.033}, keywords = {Force estimation and control, Type-2 fuzzy neural network, Moving horizon estimation, Bilateral teleoperation, Machine vision}, abstract = {This paper firstly develops a novel force observer using Type-2 Fuzzy Neural Network (T2FNN)-based Moving Horizon Estimation (MHE) to estimate external force/torque information and simultaneously filter out the system disturbances. Then, by using the proposed force observer, a new bilateral teleoperation system is proposed that allows the slave industrial robot to be more compliant to the environment and enhances the situational awareness of the human operator by providing multi-level force feedback. Compared with existing force observer algorithms that highly rely on knowing exact mathematical models, the proposed force estimation strategy can derive more accurate external force/torque information of the robots with complex mechanism and with unknown dynamics. Applying the estimated force information, an external-force-regulated Sliding Mode Control (SMC) strategy with the support of machine vision is proposed to enhance the adaptability of the slave robot and the perception of the operator about various scenarios by virtue of the detected location of the task object. The proposed control system is validated by the experiment platform consisting of a universal robot (UR10), a haptic device and an RGB-D sensor. 
}, year = {2019} } @inproceedings{Hoang1374210, author = {Hoang, Dinh-Cuong and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {2019 European Conference on Mobile Robots, ECMR 2019 : Proceedings}, institution = {Örebro University, School of Science and Technology}, eid = {152970}, title = {Object-RPE : Dense 3D Reconstruction and Pose Estimation with Convolutional Neural Networks for Warehouse Robots}, DOI = {10.1109/ECMR.2019.8870927}, abstract = {We present a system for accurate 3D instance-aware semantic reconstruction and 6D pose estimation, using an RGB-D camera. Our framework couples convolutional neural networks (CNNs) and a state-of-the-art dense Simultaneous Localisation and Mapping (SLAM) system, ElasticFusion, to achieve both high-quality semantic reconstruction as well as robust 6D pose estimation for relevant objects. The method presented in this paper extends a high-quality instance-aware semantic 3D Mapping system from previous work [1] by adding a 6D object pose estimator. While the main trend in CNN-based 6D pose estimation has been to infer object's position and orientation from single views of the scene, our approach explores performing pose estimation from multiple viewpoints, under the conjecture that combining multiple predictions can improve the robustness of an object detection system. The resulting system is capable of producing high-quality object-aware semantic reconstructions of room-sized environments, as well as accurately detecting objects and their 6D poses. The developed method has been verified through experimental validation on the YCB-Video dataset and a newly collected warehouse object dataset. Experimental results confirmed that the proposed system achieves improvements over state-of-the-art methods in terms of surface reconstruction and object pose prediction. Our code and video are available at https://sites.google.com/view/object-rpe. 
}, ISBN = {978-1-7281-3605-9}, year = {2019} } @article{Gabellieri1372196, author = {Gabellieri, Chiara and Palleschi, Alessandro and Mannucci, Anna and Pierallini, Michele and Stefanini, Elisa and Catalano, Manuel G. and Caporale, Danilo and Settimi, Alessandro and Stoyanov, Todor and Magnusson, Martin and Garabini, Manolo and Pallottino, Lucia}, institution = {Örebro University, School of Science and Technology}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Istituto Italiano di Tecnologia, Genova GE, Italy}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. 
Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Ministero dell' Istruzione, dell' Universita e della Ricerca (MIUR)}, number = {4}, pages = {4603--4610}, title = {Towards an Autonomous Unwrapping System for Intralogistics}, volume = {4}, DOI = {10.1109/LRA.2019.2934710}, keywords = {Pallets, Wrapping, Robots, Plastics, Task analysis, Impedance, Surface impedance, Logistics, compliance and impedance control, industrial robots, automatic unwrapping}, abstract = {Warehouse logistics is a rapidly growing market for robots. However, one key procedure that has not received much attention is the unwrapping of pallets to prepare them for objects picking. In fact, to prevent the goods from falling and to protect them, pallets are normally wrapped in plastic when they enter the warehouse. Currently, unwrapping is mainly performed by human operators, due to the complexity of its planning and control phases. Autonomous solutions exist, but usually they are designed for specific situations, require a large footprint and are characterized by low flexibility. In this work, we propose a novel integrated robotic solution for autonomous plastic film removal relying on an impedance-controlled robot. The main contribution is twofold: on one side, a strategy to plan Cartesian impedance and trajectory to execute the cut without damaging the goods is discussed; on the other side, we present a cutting device that we designed for this purpose. The proposed solution presents the characteristics of high versatility and the need for a reduced footprint, due to the adopted technologies and the integration with a mobile base. Experimental results are shown to validate the proposed approach. 
}, year = {2019} } @article{DellaCorte1291440, author = {Della Corte, Bartolomeo and Andreasson, Henrik and Stoyanov, Todor and Grisetti, Giorgio}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS) }, number = {2}, pages = {902--909}, title = {Unified Motion-Based Calibration of Mobile Multi-Sensor Platforms With Time Delay Estimation}, volume = {4}, DOI = {10.1109/LRA.2019.2892992}, keywords = {Calibration and Identification}, abstract = {The ability to maintain and continuously update geometric calibration parameters of a mobile platform is a key functionality for every robotic system. These parameters include the intrinsic kinematic parameters of the platform, the extrinsic parameters of the sensors mounted on it, and their time delays. In this letter, we present a unified pipeline for motion-based calibration of mobile platforms equipped with multiple heterogeneous sensors. We formulate a unified optimization problem to concurrently estimate the platform kinematic parameters, the sensors extrinsic parameters, and their time delays. We analyze the influence of the trajectory followed by the robot on the accuracy of the estimate. Our framework automatically selects appropriate trajectories to maximize the information gathered and to obtain a more accurate parameters estimate. In combination with that, our pipeline observes the parameters evolution in long-term operation to detect possible values change in the parameters set. 
The experiments conducted on real data show a smooth convergence along with the ability to detect changes in parameters value. We release an open-source version of our framework to the community. }, year = {2019} } @inproceedings{Canelhas1232362, author = {Canelhas, Daniel Ricão and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), : }, institution = {Örebro University, School of Science and Technology}, institution = {Univrses AB, Strängnäs, Sweden}, pages = {6337--6343}, title = {A Survey of Voxel Interpolation Methods and an Evaluation of Their Impact on Volumetric Map-Based Visual Odometry}, keywords = {Voxels, Compression, Interpolation, TSDF, Visual Odometry}, abstract = {Voxel volumes are simple to implement and lend themselves to many of the tools and algorithms available for 2D images. However, the additional dimension of voxels may be costly to manage in memory when mapping large spaces at high resolutions. While lowering the resolution and using interpolation is common work-around, in the literature we often find that authors either use trilinear interpolation or nearest neighbors and rarely any of the intermediate options. This paper presents a survey of geometric interpolation methods for voxel-based map representations. In particular we study the truncated signed distance field (TSDF) and the impact of using fewer than 8 samples to perform interpolation within a depth-camera pose tracking and mapping scenario. We find that lowering the number of samples fetched to perform the interpolation results in performance similar to the commonly used trilinear interpolation method, but leads to higher framerates. We also report that lower bit-depth generally leads to performance degradation, though not as much as may be expected, with voxels containing as few as 3 bits sometimes resulting in adequate estimation of camera trajectories. 
}, year = {2018} } @inproceedings{Stoyanov1277231, author = {Stoyanov, Todor and Krug, Robert and Kiselev, Andrey and Sun, Da and Loutfi, Amy}, booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, institution = {Robotics, Learning and Perception lab, Royal Institute of Technology, Stockholm, Sweden}, pages = {6640--6645}, title = {Assisted Telemanipulation : A Stack-Of-Tasks Approach to Remote Manipulator Control}, series = {IEEE International Conference on Intelligent Robots and Systems. Proceedings}, DOI = {10.1109/IROS.2018.8594457}, abstract = {This article presents an approach for assisted teleoperation of a robot arm, formulated within a real-time stack-of-tasks (SoT) whole-body motion control framework. The approach leverages the hierarchical nature of the SoT framework to integrate operator commands with assistive tasks, such as joint limit and obstacle avoidance or automatic gripper alignment. Thereby some aspects of the teleoperation problem are delegated to the controller and carried out autonomously. The key contributions of this work are two-fold: the first is a method for unobtrusive integration of autonomy in a telemanipulation system; and the second is a user study evaluation of the proposed system in the context of teleoperated pick-and-place tasks. The proposed approach of assistive control was found to result in higher grasp success rates and shorter trajectories than achieved through manual control, without incurring additional cognitive load to the operator. 
ISBN = {978-1-5386-8094-0, 978-1-5386-8095-7}, year = {2018} }
Reaching skills performed in simulation and grasping skills performed on a real robot validate the usefulness of the proposed approach. }, year = {2018} } @article{Canelhas1175909, author = {Canelhas, Daniel R. and Schaffernicht, Erik and Stoyanov, Todor and Lilienthal, Achim and Davison, Andrew J.}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing, Imperial College London, London, United Kingdom}, journal = {Robotics}, note = {Funding Agencies:European Commission  FP7-ICT-270350 H-ICT  732737 }, number = {3}, eid = {15}, publisher = {MDPI AG}, title = {Compressed Voxel-Based Mapping Using Unsupervised Learning}, volume = {6}, DOI = {10.3390/robotics6030015}, keywords = {3D mapping, TSDF, compression, dictionary learning, auto-encoder, denoising}, abstract = {In order to deal with the scaling problem of volumetric map representations, we propose spatially local methods for high-ratio compression of 3D maps, represented as truncated signed distance fields. We show that these compressed maps can be used as meaningful descriptors for selective decompression in scenarios relevant to robotic applications. As compression methods, we compare using PCA-derived low-dimensional bases to nonlinear auto-encoder networks. Selecting two application-oriented performance metrics, we evaluate the impact of different compression rates on reconstruction fidelity as well as to the task of map-aided ego-motion estimation. It is demonstrated that lossily reconstructed distance fields used as cost functions for ego-motion estimation can outperform the original maps in challenging scenarios from standard RGB-D (color plus depth) data sets due to the rejection of high-frequency noise content. 
}, year = {2017} } @inproceedings{Andreasson1159885, author = {Andreasson, Henrik and Adolfsson, Daniel and Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim}, booktitle = {2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {1389--1395}, title = {Incorporating Ego-motion Uncertainty Estimates in Range Data Registration}, series = {Proceedings of the ... IEEE/RSJ International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2017.8202318}, abstract = {Local scan registration approaches commonlyonly utilize ego-motion estimates (e.g. odometry) as aninitial pose guess in an iterative alignment procedure. Thispaper describes a new method to incorporate ego-motionestimates, including uncertainty, into the objective function of aregistration algorithm. The proposed approach is particularlysuited for feature-poor and self-similar environments,which typically present challenges to current state of theart registration algorithms. Experimental evaluation showssignificant improvements in accuracy when using data acquiredby Automatic Guided Vehicles (AGVs) in industrial productionand warehouse environments. 
ISBN = {978-1-5386-2682-5, 978-1-5386-2683-2}, year = {2017} }
Using these additional features, we train a support-vector machine classifier to discriminate between traversable and nondrivable areas of the NDT-TM maps. We evaluate classifier performance on a set of challenging outdoor environments and note improvements over previous purely geometrical traversability analysis approaches. }, year = {2017} } @article{Canelhas1044256, author = {Canelhas, Daniel R. and Stoyanov, Todor and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {2}, pages = {1148--1155}, title = {From Feature Detection in Truncated Signed Distance Fields to Sparse Stable Scene Graphs}, volume = {1}, DOI = {10.1109/LRA.2016.2523555}, keywords = {Mapping, recognition}, abstract = {With the increased availability of GPUs and multicore CPUs, volumetric map representations are an increasingly viable option for robotic applications. A particularly important representation is the truncated signed distance field (TSDF) that is at the core of recent advances in dense 3D mapping. However, there is relatively little literature exploring the characteristics of 3D feature detection in volumetric representations. In this paper we evaluate the performance of features extracted directly from a 3D TSDF representation. We compare the repeatability of Integral invariant features, specifically designed for volumetric images, to the 3D extensions of Harris and Shi & Tomasi corners. We also study the impact of different methods for obtaining gradients for their computation. We motivate our study with an example application for building sparse stable scene graphs, and present an efficient GPU-parallel algorithm to obtain the graphs, made possible by the combination of TSDF and 3D feature points. Our findings show that while the 3D extensions of 2D corner-detection perform as expected, integral invariants have shortcomings when applied to discrete TSDFs. 
We conclude with a discussion of the cause for these points of failure that sheds light on possible mitigation strategies. }, year = {2016} } @inproceedings{Stoyanov1044252, author = {Stoyanov, Todor and Krug, Robert and Muthusamy, Rajkumar and Kyrki, Ville}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, institution = {Aalto University, Esbo, Finland}, institution = {Aalto University, Esbo, Finland}, pages = {885--892}, title = {Grasp Envelopes : Extracting Constraints on Gripper Postures from Online Reconstructed 3D Models}, DOI = {10.1109/IROS.2016.7759155}, abstract = { Grasping systems that build upon meticulously planned hand postures rely on precise knowledge of object geometry, mass and frictional properties - assumptions which are often violated in practice. In this work, we propose an alternative solution to the problem of grasp acquisition in simple autonomous pick and place scenarios, by utilizing the concept of grasp envelopes: sets of constraints on gripper postures. We propose a fast method for extracting grasp envelopes for objects that fit within a known shape category, placed in an unknown environment. Our approach is based on grasp envelope primitives, which encode knowledge of human grasping strategies. We use environment models, reconstructed from noisy sensor observations, to refine the grasp envelope primitives and extract bounded envelopes of collision-free gripper postures. Also, we evaluate the envelope extraction procedure both in a stand alone fashion, as well as an integrated component of an autonomous picking system. }, ISBN = {978-1-5090-3762-9}, year = {2016} } @article{Stoyanov1044254, author = {Stoyanov, Todor and Vaskevicius, Narunas and Mueller, Christian Atanas and Fromm, Tobias and Krug, Robert and Tincani, Vinicio and Mojtahedzadeh, Rasoul and Kunaschk, Stefan and Ernits, R. 
Mortensen and Canelhas, Daniel R. and Bonilla, Manuell and Schwertfeger, Soeren and Bonini, Marco and Halfar, Harry and Pathak, Kaustubh and Rohde, Moritz and Fantoni, Gualtiero and Bicchi, Antonio and Birk, Andreas and Lilienthal, Achim J. and Echelmeyer, Wolfgang}, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {Bremer Institut für Produktion und Logistik (BIBA), Bremen, Germany}, institution = {Bremer Institut für Produktion und Logistik (BIBA), Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {ShanghaiTech University, Shanghai, China}, institution = {Reutlingen University, Reutlingen, Germany}, institution = {Reutlingen University, Reutlingen, Germany}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {Bremer Institut für Produktion und Logistik (BIBA), Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {Università di Pisa & Istituto Italiano di Tecnologia, Genova, Italy}, institution = {Jacobs University, Bremen, Germany}, institution = {Reutlingen University, Reutlingen, Germany}, journal = {IEEE robotics & automation magazine}, note = {Funding Agency:EU FP7 project ROBLOG ICT-270350}, number = {4}, pages = {94--106}, title = {No More Heavy Lifting : Robotic Solutions to the Container-Unloading Problem}, volume = {23}, DOI = {10.1109/MRA.2016.2535098}, year = {2016} } @article{Krug1044259, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = { University of Pisa, Pisa, Italy}, journal 
= {IEEE Robotics and Automation Letters}, number = {1}, pages = {546--553}, title = {The Next Step in Robot Commissioning : Autonomous Picking and Palletizing}, volume = {1}, DOI = {10.1109/LRA.2016.2519944}, keywords = {Logistics, grasping, autonomous vehicle navigation, robot safety, mobile manipulation}, abstract = {So far, autonomous order picking (commissioning) systems have not been able to meet the stringent demands regarding speed, safety, and accuracy of real-world warehouse automation, resulting in reliance on human workers. In this letter, we target the next step in autonomous robot commissioning: automatizing the currently manual order picking procedure. To this end, we investigate the use case of autonomous picking and palletizing with a dedicated research platform and discuss lessons learned during testing in simplified warehouse settings. The main theoretical contribution is a novel grasp representation scheme which allows for redundancy in the gripper pose placement. This redundancy is exploited by a local, prioritized kinematic controller which generates reactive manipulator motions on-the-fly. We validated our grasping approach by means of a large set of experiments, which yielded an average grasp acquisition time of 23.5 s at a success rate of 94.7%. Our system is able to autonomously carry out simple order picking tasks in a humansafe manner, and as such serves as an initial step toward future commercial-scale in-house logistics automation solutions. }, year = {2016} } @article{Andreasson807693, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and Cirillo, Marcello and Dimitrov, Dimitar Nikolaev and Driankov, Dimiter and Karlsson, Lars and Lilienthal, Achim J. 
and Pecora, Federico and Saarinen, Jari Pekka and Sherikov, Aleksander and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {INRIA - Grenoble, Meylan, France}, institution = {Aalto University, Espo, Finland }, institution = {Centre de recherche Grenoble Rhône-Alpes, Grenoble, France }, journal = {IEEE robotics & automation magazine}, number = {1}, pages = {64--75}, title = {Autonomous transport vehicles : where we are and what is missing}, volume = {22}, DOI = {10.1109/MRA.2014.2381357}, keywords = {Intelligent vehicles; Mobile robots; Resource management; Robot kinematics; Trajectory; Vehicle dynamics}, abstract = {In this article, we address the problem of realizing a complete efficient system for automated management of fleets of autonomous ground vehicles in industrial sites. We elicit from current industrial practice and the scientific state of the art the key challenges related to autonomous transport vehicles in industrial environments and relate them to enabling techniques in perception, task allocation, motion planning, coordination, collision prediction, and control. We propose a modular approach based on least commitment, which integrates all modules through a uniform constraint-based paradigm. We describe an instantiation of this system and present a summary of the results, showing evidence of increased flexibility at the control level to adapt to contingencies. 
}, year = {2015} } @inproceedings{Magnusson847086, author = {Magnusson, Martin and Vaskevicius, Narunas and Stoyanov, Todor and Pathak, Kaustubh and Birk, Andreas}, booktitle = {2015 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of EECS, Jacobs University, Bremen, Germany}, institution = {Department of EECS, Jacobs University, Bremen, Germany}, institution = {Department of EECS, Jacobs University, Bremen, Germany}, pages = {3631--3637}, publisher = {IEEE conference proceedings}, title = {Beyond points : Evaluating recent 3D scan-matching algorithms}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, number = {2015-June}, volume = {2015 June}, DOI = {10.1109/ICRA.2015.7139703}, keywords = {Normal distribution, robot vision, 3D scan registration algorithm, 3D scan-matching algorithm, ICP method, MUMC algorithm, NDT, benchmark protocol, iterative closest point method, large-scale investigation, local surface structure, minimally uncertain maximum consensus algorithm, normal distribution transform, robot, Benchmark testing, Gaussian distribution, Iterative closest point algorithm, Optimization, Protocols, Three-dimensional displays, Transforms}, abstract = {Given that 3D scan matching is such a central part of the perception pipeline for robots, thorough and large-scale investigations of scan matching performance are still surprisingly few. A crucial part of the scientific method is to perform experiments that can be replicated by other researchers in order to compare different results. In light of this fact, this paper presents a thorough comparison of 3D scan registration algorithms using a recently published benchmark protocol which makes use of a publicly available challenging data set that covers a wide range of environments. 
In particular, we evaluate two types of recent 3D registration algorithms - one local and one global. Both approaches take local surface structure into account, rather than matching individual points. After well over 100 000 individual tests, we conclude that algorithms using the normal distributions transform (NDT) provides accurate results compared to a modern implementation of the iterative closest point (ICP) method, when faced with scan data that has little overlap and weak geometric structure. We also demonstrate that the minimally uncertain maximum consensus (MUMC) algorithm provides accurate results in structured environments without needing an initial guess, and that it provides useful measures to detect whether it has succeeded or not. We also propose two amendments to the experimental protocol, in order to provide more valuable results in future implementations. }, ISBN = {978-1-4799-6923-4}, year = {2015} } @inproceedings{Andreasson894653, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA), 2015 : }, institution = {Örebro University, School of Science and Technology}, institution = {SCANIA AB, Södertälje, Sweden}, pages = {662--669}, title = {Fast, continuous state path smoothing to improve navigation accuracy}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2015.7139250}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. 
In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not been widely adopted in commercial AGV systems. The main contribution of this paper addresses this shortcoming by introducing a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. In real world tests presented in this paper we demonstrate that the proposed approach is fast enough for online use (it computes trajectories faster than they can be driven) and highly accurate. In 100 repetitions we achieve mean end-point pose errors below 0.01 meters in translation and 0.002 radians in orientation. Even the maximum errors are very small: only 0.02 meters in translation and 0.008 radians in orientation. }, ISBN = {9781479969234}, year = {2015} } @inproceedings{Krug842706, author = {Krug, Robert and Stoyanov, Todor and Lilienthal, Achim}, booktitle = {Robotics: Science and Systems Conference : Workshop on Bridging the Gap between Data-driven and Analytical Physics-based Grasping and Manipulation}, institution = {Örebro University, School of Science and Technology}, title = {Grasp Envelopes for Constraint-based Robot Motion Planning and Control}, keywords = {Grasping, Grasp Control, Motion Control}, abstract = {We suggest a grasp representation in form of a set of enveloping spatial constraints. Our representation transforms the grasp synthesis problem (i. 
e., the question of where to position the grasping device) from finding a suitable discrete manipulator wrist pose to finding a suitable pose manifold. Also the corresponding motion planning and execution problem is relaxed – instead of transitioning the wrist to a discrete pose, it is enough to move it anywhere within the grasp envelope which allows to exploit kinematic redundancy. }, year = {2015} } @inproceedings{Krug808145, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Bicchi, Antonio and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA) - Workshop on Robotic Hands, Grasping, and Manipulation : }, institution = {Örebro University, School of Science and Technology}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, title = {On Using Optimization-based Control instead of Path-Planning for Robot Grasp Motion Generation}, keywords = {Grasping, Motion Planning, Control}, year = {2015} } @inproceedings{Tincani900484, author = {Tincani, Vinicio and Catalano, Manuel and Grioli, Giorgio and Stoyanov, Todor and Krug, Robert and Lilienthal, Achim J. 
and Fantoni, Gualtiero and Bicchi, Antonio}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy; Department of Advanced Robotics, Istituto Italiano di Tecnologia, Genova, Italy}, pages = {2744--2750}, title = {Sensitive Active Surfaces on the Velvet II Dexterous Gripper}, URL = {https://www.ias.informatik.tu-darmstadt.de/uploads/Workshops/ICRA2015TactileForce/03_icra_ws_tactileforce.pdf}, year = {2015} } @inproceedings{Tincani900487, author = {Tincani, Vinicio and Stoyanov, Todor and Krug, Robert and Catalano, Manuel and Grioli, Giorgio and Lilienthal, Achim J. and Fantoni, Gualtiero and Bicchi, Antonio}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {Istituto Italiano di Tecnologia, Genova, Italy}, title = {The Grasp Acquisition Strategy of the Velvet II}, year = {2015} } @article{Andreasson780236, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics}, number = {4}, pages = {400--416}, publisher = {M D P I AG}, title = {Drive the Drive : From Discrete Motion Plans to Smooth Drivable Trajectories}, volume = {3}, DOI = {10.3390/robotics3040400}, keywords = {Motion planning, motion and path planning, autonomous navigation}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. 
One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not been widely adopted in commercial AGV systems. The main contribution of this paper is a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. The proposed approach is evaluated in several industrially relevant scenarios and found to be both fast (less than 2 s per vehicle trajectory) and accurate (end-point pose errors below 0.01 m in translation and 0.005 radians in orientation). }, year = {2014} } @inproceedings{Krug780127, author = {Krug, Robert and Stoyanov, Todor and Bonilla, Manuel and Tincani, Vinicio and Vaskevicius, Narunas and Fantoni, Gualtiero and Birk, Andreas and Lilienthal, Achim and Bicchi, Antonio}, booktitle = {Workshop on Autonomous Grasping and Manipulation : An Open Challenge}, institution = {Örebro University, School of Science and Technology}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. 
Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, title = {Improving Grasp Robustness via In-Hand Manipulation with Active Surfaces}, keywords = {Grasping, Grasp Control, Grasp Planning}, year = {2014} } @inproceedings{Vaskevicius772382, author = {Vaskevicius, N. and Mueller, C. A. and Bonilla, M. and Tincani, V. and Stoyanov, Todor and Fantoni, G. and Pathak, K. and Lilienthal, Achim J. and Bicchi, A. and Birk, A.}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University, Bremen, Germany}, institution = {Jacobs University, Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {Jacobs University, Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {Jacobs University, Bremen, Germany}, pages = {1270--1277}, title = {Object recognition and localization for robust grasping with a dexterous gripper in the context of container unloading}, DOI = {10.1109/CoASE.2014.6899490}, keywords = {containers;control engineering computing;dexterous manipulators;goods distribution;grippers;industrial robots;logistics;object recognition;autonomous shipping-container unloading;dexterous gripper;object recognition;perception system;pose estimation errors;table-top scenarios;Educational institutions;Grasping;Grippers;Robot sensing systems;Thumb}, abstract = {The work presented here is embedded in research on an industrial application scenario, namely autonomous shipping-container unloading, which has 
several challenging constraints: the scene is very cluttered, objects can be much larger than in common table-top scenarios; the perception must be highly robust, while being as fast as possible. These contradicting goals force a compromise between speed and accuracy. In this work, we investigate a state of the art perception system integrated with a dexterous gripper. In particular, we are interested in pose estimation errors from the recognition module and whether these errors can be handled by the abilities of the gripper. }, year = {2014} } @inproceedings{Bennetts1072051, author = {Bennetts, Victor Hernandez and Schaffernicht, Erik and Stoyanov, Todor and Lilienthal, Achim J. and Trincavelli, Marco}, booktitle = {2014 IEEE INTERNATIONAL CONFERENCE ON ROBOTICS AND AUTOMATION (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {6362--6367}, title = {Robot Assisted Gas Tomography - Localizing Methane Leaks in Outdoor Environments}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ICRA.2014.6907798}, abstract = {In this paper we present an inspection robot to produce gas distribution maps and localize gas sources in large outdoor environments. The robot is equipped with a 3D laser range finder and a remote gas sensor that returns integral concentration measurements. We apply principles of tomography to create a spatial gas distribution model from integral gas concentration measurements. The gas distribution algorithm is framed as a convex optimization problem and it models the mean distribution and the fluctuations of gases. This is important since gas dispersion is not an static phenomenon and furthermore, areas of high fluctuation can be correlated with the location of an emitting source. 
We use a compact surface representation created from the measurements of the 3D laser range finder with a state of the art mapping algorithm to get a very accurate localization and estimation of the path of the laser beams. In addition, a conic model for the beam of the remote gas sensor is introduced. We observe a substantial improvement in the gas source localization capabilities over previous state-of-the-art in our evaluation carried out in an open field environment. }, ISBN = {978-1-4799-3685-4}, year = {2014} } @inproceedings{HernandezBennetts748476, author = {Hernandez Bennetts, Victor and Schaffernicht, Erik and Stoyanov, Todor and Lilienthal, Achim J. and Trincavelli, Marco}, booktitle = {Workshop on Robot Monitoring : }, institution = {Örebro University, School of Science and Technology}, title = {Robot assisted gas tomography : an alternative approach for the detection of fugitive methane emissions}, abstract = {Methane (CH4) based combustibles, such as Natural Gas (NG) and BioGas (BG), are considered bridge fuels towards a decarbonized global energy system. NG emits less CO2 during combustion than other fossil fuels and BG can be produced from organic waste. However, at BG production sites, leaks are common and CH4 can escape through fissures in pipes and insulation layers. While by regulation BG producers shall issue monthly CH4 emission reports, measurements are sparsely collected, only at a few predefined locations. Due to the high global warming potential of CH4, efficient leakage detection systems are critical. We present a robotics approach to localize CH4 leaks. In Robot assisted Gas Tomography (RGT), a mobile robot is equipped with remote gas sensors to create gas distribution maps, which can be used to infer the location of emitting sources. 
Spectroscopy based remote gas sensors report integral concentrations, which means that the measurements are spatially unresolved, with neither information regarding the gas distribution over the optical path nor the length of the s beam. Thus, RGT fuses different sensing modalities, such as range sensors for robot localization and ray tracing, in order to infer plausible gas distribution models that explain the acquired integral concentration measurements. }, year = {2014} } @inproceedings{Krug696464, author = {Krug, Robert and Stoyanov, Todor and Bonilla, Manuel and Tincani, Vinicio and Vaskevicius, Narunas and Fantoni, Gualtiero and Birk, Andreas and Lilienthal, Achim J. and Bicchi, Antonio}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, institution = {Robotics Group, School of Engineering and Science, Jacobs University Bremen, Bremen, Germany}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, institution = {Robotics Group, School of Engineering and Science, Jacobs University Bremen, Bremen, Germany}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, pages = {3669--3675}, title = {Velvet fingers : grasp planning and execution for an underactuated gripper with active surfaces}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ICRA.2014.6907390}, keywords = {Grasp Planning, Grasp Control, Underactuation}, abstract = {In this work we tackle the problem of planning grasps for an underactuated gripper which enable it to retrieve target objects from a cluttered environment. 
Furthermore,we investigate how additional manipulation capabilities of the gripping device, provided by active surfaces on the inside of the fingers, can lead to performance improvement in the grasp execution process. To this end, we employ a simple strategy, in which the target object is ‘pulled-in’ towards the palm during grasping which results in firm enveloping grasps. We show the effectiveness of the suggested methods by means of experiments conducted in a real-world scenario. }, ISBN = {978-1-4799-3685-4}, year = {2014} } @article{Saarinen644380, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding agency:Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {14}, pages = {1627--1644}, title = {3D normal distributions transform occupancy maps : an efficient representation for mapping in dynamic environments}, volume = {32}, DOI = {10.1177/0278364913499415}, abstract = {In order to enable long-term operation of autonomous vehicles in industrial environments numerous challenges need to be addressed. A basic requirement for many applications is the creation and maintenance of consistent 3D world models. This article proposes a novel 3D spatial representation for online real-world mapping, building upon two known representations: normal distributions transform (NDT) maps and occupancy grid maps. The proposed normal distributions transform occupancy map (NDT-OM) combines the advantages of both representations; compactness of NDT maps and robustness of occupancy maps. One key contribution in this article is that we formulate an exact recursive updates for NDT-OMs. We show that the recursive update equations provide natural support for multi-resolution maps. 
Next, we describe a modification of the recursive update equations that allows adaptation in dynamic environments. As a second key contribution we introduce NDT-OMs and formulate the occupancy update equations that allow to build consistent maps in dynamic environments. The update of the occupancy values are based on an efficient probabilistic sensor model that is specially formulated for NDT-OMs. In several experiments with a total of 17 hours of data from a milk factory we demonstrate that NDT-OMs enable real-time performance in large-scale, long-term industrial setups. }, year = {2013} } @inproceedings{Mojtahedzadeh698571, author = {Mojtahedzadeh, Rasoul and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, pages = {313--318}, title = {Application Based 3D Sensor Evaluation : A Case Study in 3D Object Pose Estimation for Automated Unloading of Containers}, DOI = {10.1109/ECMR.2013.6698860}, abstract = {A fundamental task in the design process of a complex system that requires 3D visual perception is the choice of suitable 3D range sensors. Identifying the utility of 3D range sensors in an industrial application solely based on an evaluation of their distance accuracy and the noise level may lead to an inappropriate selection. To assess the actual effect on the performance of the system as a whole requires a more involved analysis. In this paper, we examine the problem of selecting a set of 3D range sensors when designing autonomous systems for specific industrial applications in a holistic manner. As an instance of this problem we present a case study with an experimental evaluation of the utility of four 3D range sensors for object pose estimation in the process of automation of unloading containers. 
}, year = {2013} } @article{Stoyanov618586, author = {Stoyanov, Todor and Mojtahedzadeh, Rasoul and Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, number = {10}, pages = {1094--1105}, title = {Comparative evaluation of range sensor accuracy for indoor mobile robotics and automated logistics applications}, volume = {61}, DOI = {10.1016/j.robot.2012.08.011}, abstract = {3D range sensing is an important topic in robotics, as it is a component in vital autonomous subsystems such as for collision avoidance, mapping and perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements, without using a precise ground truth environment model, is proposed. This article presents an extensive evaluation of three novel depth sensors — the Swiss Ranger SR-4000, Fotonic B70 and Microsoft Kinect. Tests are concentrated on the automated logistics scenario of container unloading. Six different setups of box-, cylinder-, and sack-shaped goods inside a mock-up container are used to collect range measurements. Comparisons are performed against hand-crafted ground truth data, as well as against a reference actuated Laser Range Finder (aLRF) system. Additional test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. 
}, year = {2013} } @article{Stoyanov618700, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Journal of Field Robotics}, number = {2}, pages = {216--236}, title = {Comparative evaluation of the consistency of three-dimensional spatial representations used in autonomous robot navigation}, volume = {30}, DOI = {10.1002/rob.21446}, abstract = {An increasing number of robots for outdoor applications rely on complex three-dimensional (3D) environmental models. In many cases, 3D maps are used for vital tasks, such as path planning and collision detection in challenging semistructured environments. Thus, acquiring accurate three-dimensional maps is an important research topic of high priority for autonomously navigating robots. This article proposes an evaluation method that is designed to compare the consistency with which different representations model the environment. In particular, the article examines several popular (probabilistic) spatial representations that are capable of predicting the occupancy of any point in space, given prior 3D range measurements. This work proposes to reformulate the obtained environmental models as probabilistic binary classifiers, thus allowing for the use of standard evaluation and comparison procedures. To avoid introducing localization errors, this article concentrates on evaluating models constructed from measurements acquired at fixed sensor poses. Using a cross-validation approach, the consistency of different representations, i.e., the likelihood of correctly predicting unseen measurements in the sensor field of view, can be evaluated. Simulated and real-world data sets are used to benchmark the precision of four spatial models—occupancy grid, triangle mesh, and two variations of the three-dimensional normal distributions transform (3D-NDT)—over various environments and sensor noise levels. 
Overall, the consistency of representation of the 3D-NDT is found to be the highest among the tested models, with a similar performance over varying input data. }, year = {2013} } @inproceedings{Saarinen644375, author = {Saarinen, Jari and Stoyanov, Todor and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4694--4701}, title = {Fast 3D mapping in highly dynamic environments using normal distributions transform occupancy maps}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697032}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Canelhas644372, author = {Canelhas, Daniel R. and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {3203--3209}, title = {Improved local shape feature stability through dense model tracking}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696811}, abstract = {In this work we propose a method to effectively remove noise from depth images obtained with a commodity structured light sensor. The proposed approach fuses data into a consistent frame of reference over time, thus utilizing prior depth measurements and viewpoint information in the noise removal process. The effectiveness of the approach is compared to two state of the art, single-frame denoising methods in the context of feature descriptor matching and keypoint detection stability. To make more general statements about the effect of noise removal in these applications, we extend a method for evaluating local image gradient feature descriptors to the domain of 3D shape descriptors. 
We perform a comparative study of three classes of such descriptors: Normal Aligned Radial Features, Fast Point Feature Histograms and Depth Kernel Descriptors; and evaluate their performance on a real-world industrial application data set. We demonstrate that noise removal enabled by the dense map representation results in major improvements in matching across all classes of descriptors as well as having a substantial positive impact on keypoint detection reliability }, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Almqvist644368, author = {Almqvist, H{\aa}kan and Magnusson, Martin and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {2013 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {733--738}, title = {Improving Point-Cloud Accuracy from a Moving Platform in Field Operations}, DOI = {10.1109/ICRA.2013.6630654}, abstract = {This paper presents a method for improving the quality of distorted 3D point clouds made from a vehicle equipped with a laser scanner moving over uneven terrain. Existing methods that use 3D point-cloud data (for tasks such as mapping, localisation, and object detection) typically assume that each point cloud is accurate. For autonomous robots moving in rough terrain, it is often the case that the vehicle moves a substantial amount during the acquisition of one point cloud, in which case the data will be distorted. The method proposed in this paper is capable of increasing the accuracy of 3D point clouds, without assuming any specific features of the environment (such as planar walls), without resorting to a "stop-scan-go" approach, and without relying on specialised and expensive hardware. Each new point cloud is matched to the previous using normal-distribution-transform (NDT) registration, after which a mini-loop closure is performed with a local, per-scan, graph-based SLAM method. 
The proposed method increases the accuracy of both the measured platform trajectory and the point cloud. The method is validated on both real-world and simulated data. }, ISBN = {978-1-4673-5641-1}, ISBN = {978-1-4673-5643-5}, year = {2013} } @inproceedings{Saarinen644376, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {382--389}, title = {Normal distributions transform monte-carlo localization (NDT-MCL)}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696380}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Stoyanov644379, author = {Stoyanov, Todor and Saarinen, Jari and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4702--4708}, title = {Normal distributions transform occupancy map fusion : simultaneous mapping and tracking in large scale dynamic environments}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697033}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Saarinen622633, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Ala-Luhtala, Juha and Lilienthal, Achim J.}, booktitle = {IEEE International Conference on Robotics and Automation : }, institution = {Örebro University, School of Science and Technology}, institution = {Aalto University of Technology, Aalto, Finland}, pages = {2233--2238}, title = {Normal distributions transform occupancy maps : application to large-scale online 3D mapping}, DOI = {10.1109/ICRA.2013.6630878}, abstract = 
{Autonomous vehicles operating in real-world industrial environments have to overcome numerous challenges, chief among which is the creation and maintenance of consistent 3D world models. This paper proposes to address the challenges of online real-world mapping by building upon previous work on compact spatial representation and formulating a novel 3D mapping approach — the Normal Distributions Transform Occupancy Map (NDT-OM). The presented algorithm enables accurate real-time 3D mapping in large-scale dynamic environments employing a recursive update strategy. In addition, the proposed approach can seamlessly provide maps at multiple resolutions allowing for fast utilization in high-level functions such as localization or path planning. Compared to previous approaches that use the NDT representation, the proposed NDT-OM formulates an exact and efficient recursive update formulation and models the full occupancy of the map. }, year = {2013} } @inproceedings{Canelhas644377, author = {Canelhas, Daniel R. and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {3671--3676}, title = {SDF tracker : a parallel algorithm for on-line pose estimation and scene reconstruction from depth images}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696880}, abstract = {Ego-motion estimation and environment mapping are two recurring problems in the field of robotics. In this work we propose a simple on-line method for tracking the pose of a depth camera in six degrees of freedom and simultaneously maintaining an updated 3D map, represented as a truncated signed distance function. The distance function representation implicitly encodes surfaces in 3D-space and is used directly to define a cost function for accurate registration of new data. 
The proposed algorithm is highly parallel and achieves good accuracy compared to state of the art methods. It is suitable for reconstructing single household items, workspace environments and small rooms at near real-time rates, making it practical for use on modern CPU hardware }, ISBN = {978-1-4673-6358-7}, year = {2013} } @article{Stoyanov618701, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding Agencies:European Union FP7 - 270350Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {12}, pages = {1377--1393}, title = {Fast and accurate scan registration through minimization of the distance between compact 3D NDT Representations}, volume = {31}, DOI = {10.1177/0278364912460895}, keywords = {point set registration; mapping; normal distributions transform}, abstract = {Registration of range sensor measurements is an important task in mobile robotics and has received a lot of attention. Several iterative optimization schemes have been proposed in order to align three-dimensional (3D) point scans. With the more widespread use of high-frame-rate 3D sensors and increasingly more challenging application scenarios for mobile robots, there is a need for fast and accurate registration methods that current state-of-the-art algorithms cannot always meet. This work proposes a novel algorithm that achieves accurate point cloud registration an order of a magnitude faster than the current state of the art. The speedup is achieved through the use of a compact spatial representation: the Three-Dimensional Normal Distributions Transform (3D-NDT). In addition, a fast, global-descriptor based on the 3D-NDT is defined and used to achieve reliable initial poses for the iterative algorithm. 
Finally, a closed-form expression for the covariance of the proposed method is also derived. The proposed algorithms are evaluated on two standard point cloud data sets, resulting in stable performance on a par with or better than the state of the art. The implementation is available as an open-source package for the Robot Operating system (ROS). }, year = {2012} } @inproceedings{Charusta543057, author = {Charusta, Krzysztof and Krug, Robert and Stoyanov, Todor and Dimitrov, Dimitar and Iliev, Boyko}, booktitle = {2012 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {1338--1344}, title = {Generation of independent contact regions on objects reconstructed from noisy real-world range data}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2012.6225046}, keywords = {cameras, image reconstruction, manipulators, prototypes, robot sensing systems, dexterous manipulators, filtering theory, grippers, image reconstruction}, abstract = {The synthesis and evaluation of multi-fingered grasps on complex objects is a challenging problem that has received much attention in the robotics community. Although several promising approaches have been developed, applications to real-world systems are limited to simple objects or gripper configurations. The paradigm of Independent Contact Regions (ICRs) has been proposed as a way to increase the tolerance to grasp positioning errors. This concept is well established, though only on precise geometric object models. This work is concerned with the application of the ICR paradigm to models reconstructed from real-world range data. We propose a method for increasing the robustness of grasp synthesis on uncertain geometric models. The sensitivity of the ICR algorithm to noisy data is evaluated and a filtering approach is proposed to improve the quality of the final result. 
}, ISBN = {9781467314053}, ISBN = {9781467314039}, year = {2012} } @inproceedings{Stoyanov524119, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J.}, booktitle = {2012 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, note = {Accepted for publication. Advance copy available at http://aass.oru.se/Research/Learning/publications/2012/Stoyanov_etal_2012-ICRA.pdf}, pages = {5196--5201}, title = {Point Set Registration through Minimization of the L-2 Distance between 3D-NDT Models}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2012.6224717}, abstract = {Point set registration — the task of finding the best fitting alignment between two sets of point samples, is an important problem in mobile robotics. This article proposes a novel registration algorithm, based on the distance between Three- Dimensional Normal Distributions Transforms. 3D-NDT models — a sub-class of Gaussian Mixture Models with uniformly weighted, largely disjoint components, can be quickly computed from range point data. The proposed algorithm constructs 3DNDT representations of the input point sets and then formulates an objective function based on the L2 distance between the considered models. Analytic first and second order derivatives of the objective function are computed and used in a standard Newton method optimization scheme, to obtain the best-fitting transformation. The proposed algorithm is evaluated and shown to be more accurate and faster, compared to a state of the art implementation of the Iterative Closest Point and 3D-NDT Point-to-Distribution algorithms. }, ISBN = {9781467314053}, ISBN = {9781467314039}, year = {2012} } @inproceedings{Andreasson618702, author = {Andreasson, Henrik and Stoyanov, Todor}, booktitle = {Proc. 
of International Conference on Robotics and Automation (ICRA) Workshop on Semantic Perception, Mapping and Exploration (SPME) : }, institution = {Örebro University, School of Science and Technology}, note = {The conference table of contents may be found on http://toc.proceedings.com/15154webtoc.pdf}, title = {Real time registration of RGB-D data using local visual features and 3D-NDT registration}, abstract = {Recent increased popularity of RGB-D capable sensors in robotics has resulted in a surge of related RGBD registration methods. This paper presents several RGB-D registration algorithms based on combinations between local visual feature and geometric registration. Fast and accurate transformation refinement is obtained by using a recently proposed geometric registration algorithm, based on the Three-Dimensional Normal Distributions Transform (3D-NDT). Results obtained on standard data sets have demonstrated mean translational errors on the order of 1 cm and rotational errors below 1 degree, at frame processing rates of about 15 Hz. 
}, ISBN = {9781467314039}, year = {2012} } @phdthesis{Stoyanov507812, author = {Stoyanov, Todor Dimitrov}, institution = {Örebro University, School of Science and Technology}, pages = {145}, publisher = {Örebro universitet}, school = {Örebro University, School of Science and Technology}, title = {Reliable autonomous navigation in semi-structured environments using the three-dimensional normal distributions transform (3D-NDT)}, series = {Örebro Studies in Technology}, ISSN = {1650-8580}, number = {54}, ISBN = {978-91-7668-861-8}, year = {2012} } @inproceedings{Stoyanov540987, author = {Stoyanov, Todor and Louloudi, Athanasia and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the 5th European Conference on Mobile Robots, ECMR 2011 : }, institution = {Örebro University, School of Science and Technology}, pages = {19--24}, title = {Comparative evaluation of range sensor accuracy in indoor environments}, abstract = {3D range sensing is one of the important topics in robotics, as it is often a component in vital autonomous subsystems like collision avoidance, mapping and semantic perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements is proposed. The approach is then used to compare the behavior of three integrated range sensing devices, to that of a standard actuated laser range sensor. Test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. 
}, year = {2011} } @inproceedings{Stoyanov524116, author = {Stoyanov, Todor and Magnusson, Martin and Almqvist, H{\aa}kan and Lilienthal, Achim J.}, booktitle = {2011 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, note = {Proceedings athttp://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5501116}, title = {On the Accuracy of the 3D Normal Distributions Transform as a Tool for Spatial Representation}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ICRA.2011.5979584}, abstract = {The Three-Dimensional Normal Distributions Transform (3D-NDT) is a spatial modeling technique with applications in point set registration, scan similarity comparison, change detection and path planning. This work concentrates on evaluating three common variations of the 3D-NDT in terms of accuracy of representing sampled semi-structured environments. In a novel approach to spatial representation quality measurement, the 3D geometrical modeling task is formulated as a classification problem and its accuracy is evaluated with standard machine learning performance metrics. In this manner the accuracy of the 3D-NDT variations is shown to be comparable to, and in some cases to outperform that of the standard occupancy grid mapping model. }, ISBN = {978-1-61284-385-8}, year = {2011} } @inproceedings{Ferri524121, author = {Ferri, Gabriele and Mondini, Alessio and Manzi, Alessandro and Mazzolai, Barbara and Laschi, Cecilia and Mattoli, Virgilio and Reggente, Matteo and Stoyanov, Todor and Lilienthal, Achim J. 
and Lettere, Marco and Dario, Paolo.}, booktitle = {Proceedings of ICRA Workshop on Networked and Mobile Robot Olfaction in Natural, Dynamic Environments : }, institution = {Örebro University, School of Science and Technology}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, note = {Conference url: http://icra2010.grasp.upenn.edu/?q=overview}, title = {DustCart, a Mobile Robot for Urban Environments : Experiments of Pollution Monitoring and Mapping during Autonomous Navigation in Urban Scenarios}, keywords = {mobile robots, urban robots, gas mapping, navigation}, abstract = {In the framework of DustBot European project, aimed at developing a new multi-robot system for urban hygiene management, we have developed a twowheeled robot: DustCart. DustCart aims at providing a solution to door-to-door garbage collection: the robot, called by a user, navigates autonomously to his/her house; collects the garbage from the user and discharges it in an apposite area. An additional feature of DustCart is the capability to monitor the air pollution by means of an on board Air Monitoring Module (AMM). The AMM integrates sensors to monitor several atmospheric pollutants, such as carbon monoxide (CO), particular matter (PM10), nitrogen dioxide (NO2), ozone (O3) plus temperature (T) and relative humidity (rHu). An Ambient Intelligence platform (AmI) manages the robots’ operations through a wireless connection. AmI is able to collect measurements taken by different robots and to process them to create a pollution distribution map. 
In this paper we describe the DustCart robot system, focusing on the AMM and on the process of creating the pollutant distribution maps. We report results of experiments of one DustCart robot moving in urban scenarios and producing gas distribution maps using the Kernel DM+V algorithm. These experiments can be considered as one of the first attempts to use robots as mobile monitoring devices that can complement the traditional fixed stations. }, year = {2010} } @inproceedings{Stoyanov445259, author = {Stoyanov, Todor and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {IEEE/RSJ 2010 International Conference on Intelligent Robots and Systems (IROS 2010) : }, institution = {Örebro University, School of Science and Technology}, pages = {3263--3268}, title = {Path planning in 3D environments using the normal distributions transform}, DOI = {10.1109/IROS.2010.5650789}, abstract = {Planning feasible paths in fully three-dimensional environments is a challenging problem. Application of existing algorithms typically requires the use of limited 3D representations that discard potentially useful information. This article proposes a novel approach to path planning that utilizes a full 3D representation directly: the Three-Dimensional Normal Distributions Transform (3D-NDT). The well known wavefront planner is modified to use 3D-NDT as a basis for map representation and evaluated using both indoor and outdoor data sets. The use of 3D-NDT for path planning is thus demonstrated to be a viable choice with good expressive capabilities. 
}, ISBN = {978-1-4244-6675-7}, year = {2010} } @inproceedings{Stoyanov524115, author = {Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE International Conference on Advanced Robotics (ICAR) : }, institution = {Örebro University, School of Science and Technology}, note = {Proceedings athttp://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5166725}, title = {Maximum Likelihood Point Cloud Acquisition from a Rotating Laser Scanner on a Moving Platform}, abstract = {This paper describes an approach to acquire locally consistent range data scans from a moving sensor platform. Data from a vertically mounted rotating laser scanner and odometry position estimates are fused and used to estimate maximum likelihood point clouds. An estimation algorithm is applied to reduce the accumulated error after a full rotation of the range finder. A configuration consisting of a SICK laser scanner mounted on a rotational actuator is described and used to evaluate the proposed approach. The data sets analyzed suggest a significant improvement in point cloud consistency, even over a short travel distance. }, URL = {https://ieeexplore.ieee.org/abstract/document/5174672}, year = {2009} } @inproceedings{Stoyanov274893, author = {Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {International conference on advanced robotics, ICAR 2009. : }, institution = {Örebro University, School of Science and Technology}, pages = {1--6}, title = {Maximum likelihood point cloud acquisition from a mobile platform}, abstract = {This paper describes an approach to acquire locally consistent range data scans from a moving sensor platform. Data from a vertically mounted rotating laser scanner and odometry position estimates are fused and used to estimate maximum likelihood point clouds. An estimation algorithm is applied to reduce the accumulated error after a full rotation of the range finder. 
A configuration consisting of a SICK laser scanner mounted on a rotational actuator is described and used to evaluate the proposed approach. The data sets analyzed suggest a significant improvement in point cloud consistency, even over a short travel distance. }, ISBN = {978-1-4244-4855-5}, year = {2009} } @inproceedings{Birk538841, author = {Birk, Andreas and Poppinga, Jann and Stoyanov, Todor and Nevatia, Yashodhan}, booktitle = {RoboCup 2008 : Robot Soccer World Cup XII}, institution = {Örebro University, School of Science and Technology}, note = {Proceedings details: Lecture Notes in Computer Science (LNCS), 2009, Volume 5399, Volume DOI:10.1007/978-3-642-02921-9, Sublibrary S7 - Lecture Notes in Artificial Intelligence, editors R. Goebel, J. Siekmann, and W.Wahlster. Conference paper DOI: 10.1007/978-3-642-02921-9_40}, pages = {463--472}, publisher = {Springer Berlin Heidelberg}, title = {Planetary Exploration in USARSim : A Case Study including Real World Data from Mars}, series = {Lecture Notes in Computer Science}, DOI = {10.1007/978-3-642-02921-9_40}, abstract = { Intelligent Mobile Robots are increasingly used in unstructured domains; one particularly challenging example for this is planetary exploration. The preparation of according missions is highly non-trivial, especially as it is difficult to carry out realistic experiments without very sophisticated infrastructures. In this paper, we argue that the Unified System for Automation and Robot Simulation (USARSim) offers interesting opportunities for research on planetary exploration by mobile robots. With the example of work on terrain classification, it is shown how synthetic as well as real world data from Mars can be used to test an algorithm's performance in USARSim. Concretely, experiments with an algorithm for the detection of negotiable ground on a planetary surface are presented. It is shown that the approach performs fast and robust on planetary surfaces. 
}, ISBN = {978-3-642-02920-2}, ISBN = {3-642-02920-5}, year = {2009} } @incollection{Pfingsthorn538840, author = {Pfingsthorn, Max and Nevatia, Yashodhan and Stoyanov, Todor and Rathnam, Ravi and Markov, Stefan and Birk, Andreas}, booktitle = {RoboCup 2008 : Robot Soccer World Cup XII Vol 5399}, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, pages = {225--234}, publisher = {Springer Berlin / Heidelberg}, title = {Towards Cooperative and Decentralized Mapping in the Jacobs Virtual Rescue Team}, series = {Lecture Notes in Computer Science}, volume = {5399}, DOI = {10.1007/978-3-642-02921-9_20}, abstract = {The task of mapping and exploring an unknown environment remains one of the fundamental problems of mobile robotics. It is a task that can intuitively benefit significantly from a multi-robot approach. In this paper, we describe the design of the multi-robot mapping system used in the Jacobs Virtual Rescue team. The team competed in the World Cup 2007 and won the second place. 
It is shown how the recently proposed pose graph map representation facilitates not only map merging but also allows transmitting map updates efficiently }, URL = {http://dx.doi.org/10.1007/978-3-642-02921-9_20}, year = {2009} } @inproceedings{Nevatia538842, author = {Nevatia, Yashodhan and Stoyanov, Todor and Rathnam, Ravi and Pfingsthorn, Max and Markov, Stefan and Ambrus, Rares and Birk, Andreas}, booktitle = {2008 IEEE/RSJ International Conference on Robots and Intelligent Systems, vols 1-3, conference proceedings : }, institution = {Örebro University, School of Science and Technology}, institution = {Univ Bremen, Dept EECS, Robot Lab, D-28725 Bremen, Germany}, institution = {Univ Bremen, Dept EECS, Robot Lab, D-28725 Bremen, Germany}, pages = {2103--2108}, title = {Augmented Autonomy : Improving human-robot team performance in Urban Search and Rescue}, DOI = {10.1109/IROS.2008.4651034}, abstract = {Exploration of unknown environments remains one of the fundamental problems of mobile robotics. It is also a prime example for a task that can benefit significantly from multi-robot teams. We present an integrated system for semi-autonomous cooperative exploration, augmented by an intuitive user interface for efficient human supervision and control. In this preliminary study we demonstrate the effectiveness of the system as a whole and the intuitive interface in particular. Congruent with previous findings, results confirm that having a human in the loop improves task performance, especially with larger numbers of robots. Specific to our interface, we find that even untrained operators can efficiently manage a decently sized team of robots. 
}, ISBN = {978-1-4244-2057-5}, ISBN = {978-1-4244-2058-2}, year = {2008} } @inproceedings{Birk538838, author = {Birk, Andreas and Stoyanov, Todor and Nevatia, Yashodhan and Ambrus, Rares and Poppinga, Jan and Pathak, Kaustubh}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Terrain Classification for Autonomous Robot Mobility : from Safety, Security Rescue Robotics to Planetary Exploration}, URL = {https://ewh.ieee.org/conf/icra/2008/workshops/PlanetaryRovers/}, year = {2008} } @inproceedings{Carpin538839, author = {Carpin, Stefano and Stoyanov, Todor and Nevatia, Yashodhan and Lewis, M. and Wang, J.}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Quantitative Assessments of USARSim Accuracy}, year = {2006} } @unpublished{Yang1797956, author = {Yang, Yuxuan and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, title = {Tracking Branched Deformable Linear Objects Using Particle Filtering on Depth Images}, } @article{Dominguez1706746, author = {Dominguez, David Caceres and Iannotta, Marco and Stork, Johannes Andreas and Schaffernicht, Erik and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, note = {Funding agencies:Industrial Graduate School Collaborative AI {\&}amp; Robotics (CoAIRob)General Electric Dnr:20190128}, number = {4}, pages = {12110--12117}, title = {A Stack-of-Tasks Approach Combined With Behavior Trees : A New Framework for Robot Control}, volume = {7}, DOI = {10.1109/LRA.2022.3211481}, keywords = {Behavior-based systems, control architectures and programming}, abstract = {Stack-of-Tasks (SoT) control allows a robot to simultaneously fulfill a number of prioritized goals formulated in terms of (in)equality constraints in error space. Since this approach solves a sequence of Quadratic Programs (QP) at each time-step, without taking into account any temporal state evolution, it is suitable for dealing with local disturbances. However, its limitation lies in the handling of situations that require non-quadratic objectives to achieve a specific goal, as well as situations where countering the control disturbance would require a locally suboptimal action. Recent works address this shortcoming by exploiting Finite State Machines (FSMs) to compose the tasks in such a way that the robot does not get stuck in local minima. Nevertheless, the intrinsic trade-off between reactivity and modularity that characterizes FSMs makes them impractical for defining reactive behaviors in dynamic environments. In this letter, we combine the SoT control strategy with Behavior Trees (BTs), a task switching structure that addresses some of the limitations of the FSMs in terms of reactivity, modularity and re-usability. 
Experimental results on a Franka Emika Panda 7-DOF manipulator show the robustness of our framework, that allows the robot to benefit from the reactivity of both SoT and BTs. }, year = {2022} } @inproceedings{Hoang1648882, author = {Hoang, Dinh-Cuong and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2022 International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {ICT Department, FPT University, Hanoi, Vietnam}, pages = {1492--1498}, title = {Context-Aware Grasp Generation in Cluttered Scenes}, DOI = {10.1109/ICRA46639.2022.9811371}, abstract = {Conventional methods to autonomous grasping rely on a pre-computed database with known objects to synthesize grasps, which is not possible for novel objects. On the other hand, recently proposed deep learning-based approaches have demonstrated the ability to generalize grasp for unknown objects. However, grasp generation still remains a challenging problem, especially in cluttered environments under partial occlusion. In this work, we propose an end-to-end deep learning approach for generating 6-DOF collision-free grasps given a 3D scene point cloud. To build robustness to occlusion, the proposed model generates candidates by casting votes and accumulating evidence for feasible grasp configurations. We exploit contextual information by encoding the dependency of objects in the scene into features to boost the performance of grasp generation. The contextual information enables our model to increase the likelihood that the generated grasps are collision-free. Our experimental results confirm that the proposed system performs favorably in terms of predicting object grasps in cluttered environments in comparison to the current state of the art methods. 
}, ISBN = {9781728196824}, ISBN = {9781728196817}, year = {2022} } @inproceedings{Iannotta1724688, author = {Iannotta, Marco and Dominguez, David Caceres and Stork, Johannes Andreas and Schaffernicht, Erik and Stoyanov, Todor}, booktitle = {IROS 2022 Workshop on Mobile Manipulation and Embodied Intelligence (MOMA): Challenges and  Opportunities : }, institution = {Örebro University, School of Science and Technology}, title = {Heterogeneous Full-body Control of a Mobile Manipulator with Behavior Trees}, DOI = {10.48550/arXiv.2210.08600}, abstract = {Integrating the heterogeneous controllers of a complex mechanical system, such as a mobile manipulator, within the same structure and in a modular way is still challenging. In this work we extend our framework based on Behavior Trees for the control of a redundant mechanical system to the problem of commanding more complex systems that involve multiple low-level controllers. This allows the integrated systems to achieve non-trivial goals that require coordination among the sub-systems. }, year = {2022} } @inproceedings{Yang1731600, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {3rd Workshop on Robotic Manipulation of Deformable Objects: Challenges in Perception, Planning and Control for Soft Interaction (ROMADO-SI), IROS 2022, Kyoto, Japan : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing and Software, McMaster University, Canada}, title = {Learn to Predict Posterior Probability in Particle Filtering for Tracking Deformable Linear Objects}, abstract = {Tracking deformable linear objects (DLOs) is a key element for applications where robots manipulate DLOs. However, the lack of distinctive features or appearance on the DLO and the object’s high-dimensional state space make tracking challenging and still an open question in robotics. 
In this paper, we propose a method for tracking the state of a DLO by applying a particle filter approach, where the posterior probability of each sample is estimated by a learned predictor. Our method can achieve accurate tracking even with no prerequisite segmentation which many related works require. Due to the differentiability of the posterior probability predictor, our method can leverage the gradients of posterior probabilities with respect to the latent states to improve the motion model in the particle filter. The preliminary experiments suggest that the proposed method can provide robust tracking results and the estimated DLO state converges quickly to the true state if the initial state is unknown. }, URL = {https://romado-workshop.github.io/ROMADO2022/}, year = {2022} } @article{Yang1696745, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, eid = {104258}, title = {Learning differentiable dynamics models for shape control of deformable linear objects}, volume = {158}, DOI = {10.1016/j.robot.2022.104258}, keywords = {Deformable linear object, Model learning, Parameter identification, Model predictive control}, abstract = {Robots manipulating deformable linear objects (DLOs) – such as surgical sutures in medical robotics, or cables and hoses in industrial assembly – can benefit substantially from accurate and fast differentiable predictive models. However, the off-the-shelf analytic physics models fall short of differentiability. Recently, neural-network-based data-driven models have shown promising results in learning DLO dynamics. These models have additional advantages compared to analytic physics models, as they are differentiable and can be used in gradient-based trajectory planning. Still, the data-driven approaches demand a large amount of training data, which can be challenging for real-world applications. 
In this paper, we propose a framework for learning a differentiable data-driven model for DLO dynamics with a minimal set of real-world data. To learn DLO twisting and bending dynamics in a 3D environment, we first introduce a new suitable DLO representation. Next, we use a recurrent network module to propagate effects between different segments along a DLO, thereby addressing a critical limitation of current state-of-the-art methods. Then, we train a data-driven model on synthetic data generated in simulation, instead of foregoing the time-consuming and laborious data collection process for real-world applications. To achieve a good correspondence between real and simulated models, we choose a set of simulation model parameters through parameter identification with only a few trajectories of a real DLO required. We evaluate several optimization methods for parameter identification and demonstrate that the differential evolution algorithm is efficient and effective for parameter identification. In DLO shape control tasks with a model-based controller, the data-driven model trained on synthetic data generated by the resulting models performs on par with the ones trained with a comparable amount of real-world data which, however, would be intractable to collect. }, year = {2022} } @article{Yang1677520, author = {Yang, Quantao and Stork, Johannes A. 
and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing and Software, McMaster University, Canada}, journal = {IEEE Robotics and Automation Letters}, note = {Funding agency:Wallenberg AI, Autonomous Systems and Software Program (WASP) - Knut and Alice Wallenberg Foundation}, number = {3}, pages = {7652--7659}, title = {MPR-RL : Multi-Prior Regularized Reinforcement Learning for Knowledge Transfer}, volume = {7}, DOI = {10.1109/LRA.2022.3184805}, keywords = {Machine Learning for Robot Control, Reinforcement Learning, Transfer Learning}, abstract = {In manufacturing, assembly tasks have been a challenge for learning algorithms due to variant dynamics of different environments. Reinforcement learning (RL) is a promising framework to automatically learn these tasks, yet it is still not easy to apply a learned policy or skill, that is the ability of solving a task, to a similar environment even if the deployment conditions are only slightly different. In this letter, we address the challenge of transferring knowledge within a family of similar tasks by leveraging multiple skill priors. We propose to learn prior distribution over the specific skill required to accomplish each task and compose the family of skill priors to guide learning the policy for a new task by comparing the similarity between the target task and the prior ones. Our method learns a latent action space representing the skill embedding from demonstrated trajectories for each prior task. We have evaluated our method on a task in simulation and a set of peg-in-hole insertion tasks and demonstrate better generalization to new tasks that have never been encountered during training. Our Multi-Prior Regularized RL (MPR-RL) method is deployed directly on a real world Franka Panda arm, requiring only a set of demonstrated trajectories from similar, but crucially not identical, problem instances. 
}, year = {2022} } @article{Ivan1691786, author = {Ivan, Jean-Paul A. and Stoyanov, Todor and Stork, Johannes A.}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {4}, pages = {8996--9003}, title = {Online Distance Field Priors for Gaussian Process Implicit Surfaces}, volume = {7}, DOI = {10.1109/LRA.2022.3189434}, keywords = {Gaussian processes, machine learning, robot sensing systems, supervised learning}, abstract = {Gaussian process (GP) implicit surface models provide environment and object representations which elegantly address noise and uncertainty while remaining sufficiently flexible to capture complex geometry. However, GP models quickly become intractable as the size of the observation set grows---a trait which is difficult to reconcile with the rate at which modern range sensors produce data. Furthermore, naive applications of GPs to implicit surface models allocate model resources uniformly, thus using precious resources to capture simple geometry. In contrast to prior work addressing these challenges through model sparsification, spatial partitioning, or ad-hoc filtering, we propose introducing model bias online through the GP's mean function. We achieve more accurate distance fields using smaller models by creating a distance field prior from features which are easy to extract and have analytic distance fields. In particular, we demonstrate this approach using linear features. We show the proposed distance field halves model size in a 2D mapping task using data from a SICK S300 sensor. When applied to a single 3D scene from the TUM RGB-D SLAM dataset, we achieve a fivefold reduction in model size. Our proposed prior results in more accurate GP implicit surfaces, while allowing existing models to function in larger environments or with larger spatial partitions due to reduced model size. 
}, year = {2022} } @inproceedings{Yang1727549, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {4056--4062}, title = {Online Model Learning for Shape Control of Deformable Linear Objects}, DOI = {10.1109/IROS47612.2022.9981080}, abstract = {Traditional approaches to manipulating the state of deformable linear objects (DLOs) - i.e., cables, ropes - rely on model-based planning. However, constructing an accurate dynamic model of a DLO is challenging due to the complexity of interactions and a high number of degrees of freedom. This renders the task of achieving a desired DLO shape particularly difficult and motivates the use of model-free alternatives, which while maintaining generality suffer from a high sample complexity. In this paper, we bridge the gap between these fundamentally different approaches and propose a framework that learns dynamic models of DLOs through trial-and-error interaction. Akin to model-based reinforcement learning (RL), we interleave learning and exploration to solve a 3D shape control task for a DLO. Our approach requires only a fraction of the interaction samples of the current state-of-the-art model-free RL alternatives to achieve superior shape control performance. Unlike offline model learning, our approach does not require expert knowledge for data collection, retains the ability to explore, and automatically selects relevant experience. }, ISBN = {9781665479271}, ISBN = {9781665479288}, year = {2022} } @article{Yang1716908, author = {Yang, Yuxuan and Stork, Johannes A. 
and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {4}, pages = {12577--12584}, title = {Particle Filters in Latent Space for Robust Deformable Linear Object Tracking}, volume = {7}, DOI = {10.1109/LRA.2022.3216985}, keywords = {Deep learning for visual perception, perception for grasping and manipulation, RGB-D perception}, abstract = {Tracking of deformable linear objects (DLOs) is important for many robotic applications. However, achieving robust and accurate tracking is challenging due to the lack of distinctive features or appearance on the DLO, the object's high-dimensional state space, and the presence of occlusion. In this letter, we propose a method for tracking the state of a DLO by applying a particle filter approach within a lower-dimensional state embedding learned by an autoencoder. The dimensionality reduction preserves state variation, while simultaneously enabling a particle filter to accurately track DLO state evolution with a practically feasible number of particles. Compared to previous works, our method requires neither running a high-fidelity physics simulation, nor manual designs of constraints and regularization. Without the assumption of knowing the initial DLO state, our method can achieve accurate tracking even under complex DLO motions and in the presence of severe occlusions. }, year = {2022} } @inproceedings{Rietz1709450, author = {Rietz, Finn and Schaffernicht, Erik and Stoyanov, Todor and Stork, Johannes Andreas}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Towards Task-Prioritized Policy Composition}, abstract = {Combining learned policies in a prioritized, ordered manner is desirable because it allows for modular design and facilitates data reuse through knowledge transfer. 
In control theory, prioritized composition is realized by null-space control, where low-priority control actions are projected into the null-space of high-priority control actions. Such a method is currently unavailable for Reinforcement Learning. We propose a novel, task-prioritized composition framework for Reinforcement Learning, which involves a novel concept: The indifferent-space of Reinforcement Learning policies. Our framework has the potential to facilitate knowledge transfer and modular design while greatly increasing data efficiency and data reuse for Reinforcement Learning agents. Further, our approach can ensure high-priority constraint satisfaction, which makes it promising for learning in safety-critical domains like robotics. Unlike null-space control, our approach allows learning globally optimal policies for the compound task by online learning in the indifference-space of higher-level policies after initial compound policy construction.  }, year = {2022} } @inproceedings{Yang1708933, author = {Yang, Quantao and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Transferring Knowledge for Reinforcement Learning in Contact-Rich Manipulation}, abstract = {In manufacturing, assembly tasks have been a challenge for learning algorithms due to variant dynamics of different environments. Reinforcement learning (RL) is a promising framework to automatically learn these tasks, yet it is still not easy to apply a learned policy or skill, that is the ability of solving a task, to a similar environment even if the deployment conditions are only slightly different. In this paper, we address the challenge of transferring knowledge within a family of similar tasks by leveraging multiple skill priors. 
We propose to learn prior distribution over the specific skill required to accomplish each task and compose the family of skill priors to guide learning the policy for a new task by comparing the similarity between the target task and the prior ones. Our method learns a latent action space representing the skill embedding from demonstrated trajectories for each prior task. We have evaluated our method on a set of peg-in-hole insertion tasks and demonstrate better generalization to new tasks that have never been encountered during training.  }, URL = {https://arxiv.org/abs/2210.02891}, year = {2022} } @article{Yang1685070, author = {Yang, Quantao and D{\"u}rr, Alexander and Topp, Elin Anna and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, Faculty of Engineering (LTH), Lund University, Lund, Sweden}, institution = {Department of Computer Science, Faculty of Engineering (LTH), Lund University, Lund, Sweden}, institution = {Department of Computing and Software, McMaster University, Hamilton ON, Canada }, journal = {IEEE Robotics and Automation Letters}, number = {3}, pages = {8391--8398}, title = {Variable Impedance Skill Learning for Contact-Rich Manipulation}, volume = {7}, DOI = {10.1109/LRA.2022.3187276}, keywords = {Machine learning for robot control, reinforcement learning, variable impedance control}, abstract = {Contact-rich manipulation tasks remain a hard problem in robotics that requires interaction with unstructured environments. Reinforcement Learning (RL) is one potential solution to such problems, as it has been successfully demonstrated on complex continuous control tasks. Nevertheless, current state-of-the-art methods require policy training in simulation to prevent undesired behavior and later domain transfer even for simple skills involving contact. 
In this paper, we address the problem of learning contact-rich manipulation policies by extending an existing skill-based RL framework with a variable impedance action space. Our method leverages a small set of suboptimal demonstration trajectories and learns from both position, but also crucially impedance-space information. We evaluate our method on a number of peg-in-hole task variants with a Franka Panda arm and demonstrate that learning variable impedance actions for RL in Cartesian space can be deployed directly on the real robot, without resorting to learning in simulation. }, year = {2022} } @article{Guler1693298, author = {G{\"u}ler, P{\"u}ren and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {Örebro University, Örebro, Sweden}, journal = {Frontiers in Robotics and AI}, eid = {833173}, title = {Visual state estimation in unseen environments through domain adaptation and metric learning}, volume = {9}, DOI = {10.3389/frobt.2022.833173}, keywords = {articulated pose estimation, deep metric learning, domain augmentation, joint state estimation, triplet loss}, abstract = {In robotics, deep learning models are used in many visual perception applications, including the tracking, detection and pose estimation of robotic manipulators. The state of the art methods however are conditioned on the availability of annotated training data, which may in practice be costly or even impossible to collect. Domain augmentation is one popular method to improve generalization to out-of-domain data by extending the training data set with predefined sources of variation, unrelated to the primary task. 
While this typically results in better performance on the target domain, it is not always clear that the trained models are capable to accurately separate the signals relevant to solving the task (e.g., appearance of an object of interest) from those associated with differences between the domains (e.g., lighting conditions). In this work we propose to improve the generalization capabilities of models trained with domain augmentation by formulating a secondary structured metric-space learning objective. We concentrate on one particularly challenging domain transfer task---visual state estimation for an articulated underground mining machine---and demonstrate the benefits of imposing structure on the encoding space. Our results indicate that the proposed method has the potential to transfer feature embeddings learned on the source domain, through a suitably designed augmentation procedure, and on to an unseen target domain. }, year = {2022} } @article{Hoang1691597, author = {Hoang, Dinh-Cuong and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {ICT Department, FPT University, Hanoi, Vietnam}, institution = {Department of Computing and Software, McMaster University, Hamilton ON, Canada}, journal = {IEEE Robotics and Automation Letters}, number = {4}, pages = {8980--8987}, title = {Voting and Attention-Based Pose Relation Learning for Object Pose Estimation From 3D Point Clouds}, volume = {7}, DOI = {10.1109/LRA.2022.3189158}, keywords = {6D object pose estimation, 3D point cloud, robot manipulation}, abstract = {Estimating the 6DOF pose of objects is an important function in many applications, such as robot manipulation or augmented reality. However, accurate and fast pose estimation from 3D point clouds is challenging, because of the complexity of object shapes, measurement noise, and presence of occlusions. 
We address this challenging task using an end-to-end learning approach for object pose estimation given a raw point cloud input. Our architecture pools geometric features together using a self-attention mechanism and adopts a deep Hough voting scheme for pose proposal generation. To build robustness to occlusion, the proposed network generates candidates by casting votes and accumulating evidence for object locations. Specifically, our model learns higher-level features by leveraging the dependency of object parts and object instances, thereby boosting the performance of object pose estimation. Our experiments show that our method outperforms state-of-the-art approaches in public benchmarks including the Sileane dataset [35] and the Fraunhofer IPA dataset [36]. We also deploy our proposed method to a real robot pick-and-place based on the estimated pose. }, year = {2022} } @inproceedings{Yang1620121, author = {Yang, Quantao and D{\"u}rr, Alexander and Topp, Elin Anna and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {NeurIPS 2021 Workshop on Deployable Decision Making in Embodied Systems (DDM) : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, Lund University, Sweden}, institution = {Department of Computer Science, Lund University, Sweden}, title = {Learning Impedance Actions for Safe Reinforcement Learning in Contact-Rich Tasks}, abstract = {Reinforcement Learning (RL) has the potential of solving complex continuous control tasks, with direct applications to robotics. Nevertheless, current state-of-the-art methods are generally unsafe to learn directly on a physical robot as exploration by trial-and-error can cause harm to the real world systems. In this paper, we leverage a framework for learning latent action spaces for RL agents from demonstrated trajectories. 
We extend this framework by connecting it to a variable impedance Cartesian space controller, allowing us to learn contact-rich tasks safely and efficiently. Our method learns from trajectories that incorporate both positional, but also crucially impedance-space information. We evaluate our method on a number of peg-in-hole task variants with a Franka Panda arm and demonstrate that learning variable impedance actions for RL in Cartesian space can be safely deployed on the real robot directly, without resorting to learning in simulation and a subsequent policy transfer. }, year = {2021} } @inproceedings{Yang1610216, author = {Yang, Yuxuan and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2021 IEEE International Conference on Robotics and Automation (ICRA) : IEEE International Conference on Robotics and Automation (ICRA 2021), Xi'an, China, May 30 - June 5, 2021}, institution = {Örebro University, School of Science and Technology}, pages = {1950--1957}, title = {Learning to Propagate Interaction Effects for Modeling Deformable Linear Objects Dynamics}, series = {2021 IEEE International Conference on Robotics and Automation (ICRA)}, DOI = {10.1109/ICRA48506.2021.9561636}, abstract = {Modeling dynamics of deformable linear objects (DLOs), such as cables, hoses, sutures, and catheters, is an important and challenging problem for many robotic manipulation applications. In this paper, we propose the first method to model and learn full 3D dynamics of DLOs from data. Our approach is capable of capturing the complex twisting and bending dynamics of DLOs and allows local effects to propagate globally. To this end, we adapt the interaction network (IN) dynamics learning method for capturing the interaction between neighboring segments in a DLO and augment it with a recurrent model for propagating interaction effects along the length of a DLO. 
For learning twisting and bending dynamics in 3D, we also introduce a new suitable representation of DLO segments and their relationships. Unlike the original IN method, our model learns to propagate the effects of local interaction between neighboring segments to each segment in the chain within a single time step, without the need for iterated propagation steps. Evaluation of our model with synthetic and newly collected real-world data shows better accuracy and generalization in short-term and long-term predictions than the current state of the art. We further integrate our learned model in a model predictive control scheme and use it to successfully control the shape of a DLO. Our implementation is available at https://gitsvn-nt.oru.se/ammlab-public/in-bilstm. }, ISBN = {9781728190778}, ISBN = {9781728190785}, year = {2021} } @inproceedings{Yang1605135, author = {Yang, Quantao and Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {2021 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, note = {Funding agency:Wallenberg Artificial Intelligence, Autonomous Systems and Software Program (WASP)}, title = {Null space based efficient reinforcement learning with hierarchical safety constraints}, DOI = {10.1109/ECMR50962.2021.9568848}, abstract = {Reinforcement learning is inherently unsafe for use in physical systems, as learning by trial-and-error can cause harm to the environment or the robot itself. One way to avoid unpredictable exploration is to add constraints in the action space to restrict the robot behavior. In this paper, we propose a null space based framework of integrating reinforcement learning methods in constrained continuous action spaces. We leverage a hierarchical control framework to decompose target robotic skills into higher ranked tasks (e.g., joint limits and obstacle avoidance) and lower ranked reinforcement learning task. 
Safe exploration is guaranteed by only learning policies in the null space of higher prioritized constraints. Meanwhile multiple constraint phases for different operational spaces are constructed to guide the robot exploration. Also, we add penalty loss for violating higher ranked constraints to accelerate the learning procedure. We have evaluated our method on different redundant robotic tasks in simulation and show that our null space based reinforcement learning method can explore and learn safely and efficiently. }, ISBN = {9781665412131}, year = {2021} } @article{Sun1369388, author = {Sun, Da and Kiselev, Andrey and Liao, Qianfang and Stoyanov, Todor and Loutfi, Amy}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on Human-Machine Systems}, number = {1}, pages = {55--67}, title = {A New Mixed Reality - based Teleoperation System for Telepresence and Maneuverability Enhancement}, volume = {50}, DOI = {10.1109/THMS.2019.2960676}, keywords = {Force control, motion regulation, telerobotics, virtual reality}, abstract = {Virtual Reality (VR) is regarded as a useful tool for teleoperation system that provides operators an immersive visual feedback on the robot and the environment. However, without any haptic feedback or physical constructions, VR-based teleoperation systems normally have poor maneuverability and may cause operational faults in some fine movements. In this paper, we employ Mixed Reality (MR), which combines real and virtual worlds, to develop a novel teleoperation system. New system design and control algorithms are proposed. For the system design, a MR interface is developed based on a virtual environment augmented with real-time data from the task space with a goal to enhance the operator’s visual perception. To allow the operator to be freely decoupled from the control loop and offload the operator’s burden, a new interaction proxy is proposed to control the robot. 
For the control algorithms, two control modes are introduced to improve long-distance movements and fine movements of the MR-based teleoperation. In addition, a set of fuzzy logic based methods are proposed to regulate the position, velocity and force of the robot in order to enhance the system maneuverability and deal with the potential operational faults. Barrier Lyapunov Function (BLF) and back-stepping methods are leveraged to design the control laws and simultaneously guarantee the system stability under state constraints.  Experiments conducted using a 6-Degree of Freedom (DoF) robotic arm prove the feasibility of the system. }, year = {2020} } @inproceedings{Stork1523713, author = {Stork, Johannes Andreas and Stoyanov, Todor}, booktitle = {IEEE International Conference on Robotics and Automation : }, institution = {Örebro University, School of Science and Technology}, pages = {10758--10764}, eid = {9196620}, title = {Ensemble of Sparse Gaussian Process Experts for Implicit Surface Mapping with Streaming Data}, DOI = {10.1109/ICRA40945.2020.9196620}, abstract = {Creating maps is an essential task in robotics and provides the basis for effective planning and navigation. In this paper, we learn a compact and continuous implicit surface map of an environment from a stream of range data with known poses. For this, we create and incrementally adjust an ensemble of approximate Gaussian process (GP) experts which are each responsible for a different part of the map. Instead of inserting all arriving data into the GP models, we greedily trade-off between model complexity and prediction error. Our algorithm therefore uses less resources on areas with few geometric features and more where the environment is rich in variety. We evaluate our approach on synthetic and real-world data sets and analyze sensitivity to parameters and measurement noise. 
The results show that we can learn compact and accurate implicit surface models under different conditions, with a performance … }, year = {2020} } @article{Hoang1513204, author = {Hoang, Dinh-Cuong and Lilienthal, Achim and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, eid = {103632}, title = {Object-RPE : Dense 3D Reconstruction and Pose Estimation with Convolutional Neural Networks}, volume = {133}, DOI = {10.1016/j.robot.2020.103632}, keywords = {Object pose estimation, 3D reconstruction, Semantic mapping, 3D registration}, abstract = {We present an approach for recognizing objects present in a scene and estimating their full pose by means of an accurate 3D instance-aware semantic reconstruction. Our framework couples convolutional neural networks (CNNs) and a state-of-the-art dense Simultaneous Localisation and Mapping(SLAM) system, ElasticFusion [1], to achieve both high-quality semantic reconstruction as well as robust 6D pose estimation for relevant objects. We leverage the pipeline of ElasticFusion as a back-bone and propose a joint geometric and photometric error function with per-pixel adaptive weights. While the main trend in CNN-based 6D pose estimation has been to infer an object’s position and orientation from single views of the scene, our approach explores performing pose estimation from multiple viewpoints, under the conjecture that combining multiple predictions can improve the robustness of an object detection system. The resulting system is capable of producing high-quality instance-aware semantic reconstructions of room-sized environments, as well as accurately detecting objects and their 6D poses. The developed method has been verified through extensive experiments on different datasets. Experimental results confirmed that the proposed system achieves improvements over state-of-the-art methods in terms of surface reconstruction and object pose prediction. 
Our code and video are available at https://sites.google.com/view/object-rpe. }, year = {2020} } @article{Hoang1427623, author = {Hoang, Dinh-Cuong and Lilienthal, Achim and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {2}, pages = {1962--1969}, title = {Panoptic 3D Mapping and Object Pose Estimation Using Adaptively Weighted Semantic Information}, volume = {5}, DOI = {10.1109/LRA.2020.2970682}, keywords = {RGB-D perception, object detection, segmentation and categorization, mapping}, abstract = {We present a system capable of reconstructing highly detailed object-level models and estimating the 6D pose of objects by means of an RGB-D camera. In this work, we integrate deep-learning-based semantic segmentation, instance segmentation, and 6D object pose estimation into a state of the art RGB-D mapping system. We leverage the pipeline of ElasticFusion as a backbone and propose modifications of the registration cost function to make full use of the semantic class labels in the process. The proposed objective function features tunable weights for the depth, appearance, and semantic information channels, which are learned from data. A fast semantic segmentation and registration weight prediction convolutional neural network (Fast-RGBD-SSWP) suited to efficient computation is introduced. In addition, our approach explores performing 6D object pose estimation from multiple viewpoints supported by the high-quality reconstruction system. The developed method has been verified through experimental validation on the YCB-Video dataset and a dataset of warehouse objects. Our results confirm that the proposed system performs favorably in terms of surface reconstruction, segmentation quality, and accurate object pose estimation in comparison to other state-of-the-art systems. Our code and video are available at https://sites.google.com/view/panoptic-mope. 
}, year = {2020} } @article{Sun1467594, author = {Sun, Da and Liao, Qianfang and Kiselev, Andrey and Stoyanov, Todor and Loutfi, Amy}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, note = {Funding Agencies:European Union (EU) Karlskoga Municipality  {\"O}rebro County }, eid = {103648}, title = {Shared mixed reality-bilateral telerobotic system}, volume = {134}, DOI = {10.1016/j.robot.2020.103648}, keywords = {Bilateral teleoperation, Shared control, Virtual reality}, abstract = {This study proposes a new shared mixed reality (MR)-bilateral telerobotic system. The main contribution of this study is to combine MR teleoperation and bilateral teleoperation, which takes advantage of the two types of teleoperation and compensates for each other's drawbacks. With this combination, the proposed system can address the asymmetry issues in bilateral teleoperation, such as kinematic redundancy and workspace inequality, and provide force feedback, which is lacking in MR teleoperation. In addition, this system effectively supports long-distance movements and fine movements. In this system, a new MR interface is developed to provide the operator with an immersive visual feedback of the workspace, in which a useful virtual controller known as an interaction proxy—is designed. Compared with previous virtual reality-based teleoperation systems, this interaction proxy can freely decouple the operator from the control loop, such that the operational burden can be substantially alleviated. Additionally, the force feedback provided by the bilateral teleoperation gives the operator an advanced perception about the remote workspace and can improve task performance. Experiments on multiple pick-and-place tasks are provided to demonstrate the feasibility and effectiveness of the proposed system. 
}, year = {2020} } @article{Sun1317799, author = {Sun, Da and Liao, Qianfang and Stoyanov, Todor and Kiselev, Andrey and Loutfi, Amy}, institution = {Örebro University, School of Science and Technology}, journal = {Automatica}, pages = {358--373}, title = {Bilateral telerobotic system using Type-2 fuzzy neural network based moving horizon estimation force observer for enhancement of environmental force compliance and human perception}, volume = {106}, DOI = {10.1016/j.automatica.2019.04.033}, keywords = {Force estimation and control, Type-2 fuzzy neural network, Moving horizon estimation, Bilateral teleoperation, Machine vision}, abstract = {This paper firstly develops a novel force observer using Type-2 Fuzzy Neural Network (T2FNN)-based Moving Horizon Estimation (MHE) to estimate external force/torque information and simultaneously filter out the system disturbances. Then, by using the proposed force observer, a new bilateral teleoperation system is proposed that allows the slave industrial robot to be more compliant to the environment and enhances the situational awareness of the human operator by providing multi-level force feedback. Compared with existing force observer algorithms that highly rely on knowing exact mathematical models, the proposed force estimation strategy can derive more accurate external force/torque information of the robots with complex mechanism and with unknown dynamics. Applying the estimated force information, an external-force-regulated Sliding Mode Control (SMC) strategy with the support of machine vision is proposed to enhance the adaptability of the slave robot and the perception of the operator about various scenarios by virtue of the detected location of the task object. The proposed control system is validated by the experiment platform consisting of a universal robot (UR10), a haptic device and an RGB-D sensor. 
}, year = {2019} } @inproceedings{Hoang1374210, author = {Hoang, Dinh-Cuong and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {2019 European Conference on Mobile Robots, ECMR 2019 : Proceedings}, institution = {Örebro University, School of Science and Technology}, eid = {152970}, title = {Object-RPE : Dense 3D Reconstruction and Pose Estimation with Convolutional Neural Networks for Warehouse Robots}, DOI = {10.1109/ECMR.2019.8870927}, abstract = {We present a system for accurate 3D instance-aware semantic reconstruction and 6D pose estimation, using an RGB-D camera. Our framework couples convolutional neural networks (CNNs) and a state-of-the-art dense Simultaneous Localisation and Mapping (SLAM) system, ElasticFusion, to achieve both high-quality semantic reconstruction as well as robust 6D pose estimation for relevant objects. The method presented in this paper extends a high-quality instance-aware semantic 3D Mapping system from previous work [1] by adding a 6D object pose estimator. While the main trend in CNN-based 6D pose estimation has been to infer object's position and orientation from single views of the scene, our approach explores performing pose estimation from multiple viewpoints, under the conjecture that combining multiple predictions can improve the robustness of an object detection system. The resulting system is capable of producing high-quality object-aware semantic reconstructions of room-sized environments, as well as accurately detecting objects and their 6D poses. The developed method has been verified through experimental validation on the YCB-Video dataset and a newly collected warehouse object dataset. Experimental results confirmed that the proposed system achieves improvements over state-of-the-art methods in terms of surface reconstruction and object pose prediction. Our code and video are available at https://sites.google.com/view/object-rpe. 
}, ISBN = {978-1-7281-3605-9}, year = {2019} } @article{Gabellieri1372196, author = {Gabellieri, Chiara and Palleschi, Alessandro and Mannucci, Anna and Pierallini, Michele and Stefanini, Elisa and Catalano, Manuel G. and Caporale, Danilo and Settimi, Alessandro and Stoyanov, Todor and Magnusson, Martin and Garabini, Manolo and Pallottino, Lucia}, institution = {Örebro University, School of Science and Technology}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Istituto Italiano di Tecnologia, Genova GE, Italy}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, institution = {Centro di Ricerca “E. 
Piaggio” e Departimento di Ingnegneria dell’Informazione, Università di Pisa, Pisa, Italia}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Ministero dell' Istruzione, dell' Universita e della Ricerca (MIUR)}, number = {4}, pages = {4603--4610}, title = {Towards an Autonomous Unwrapping System for Intralogistics}, volume = {4}, DOI = {10.1109/LRA.2019.2934710}, keywords = {Pallets, Wrapping, Robots, Plastics, Task analysis, Impedance, Surface impedance, Logistics, compliance and impedance control, industrial robots, automatic unwrapping}, abstract = {Warehouse logistics is a rapidly growing market for robots. However, one key procedure that has not received much attention is the unwrapping of pallets to prepare them for objects picking. In fact, to prevent the goods from falling and to protect them, pallets are normally wrapped in plastic when they enter the warehouse. Currently, unwrapping is mainly performed by human operators, due to the complexity of its planning and control phases. Autonomous solutions exist, but usually they are designed for specific situations, require a large footprint and are characterized by low flexibility. In this work, we propose a novel integrated robotic solution for autonomous plastic film removal relying on an impedance-controlled robot. The main contribution is twofold: on one side, a strategy to plan Cartesian impedance and trajectory to execute the cut without damaging the goods is discussed; on the other side, we present a cutting device that we designed for this purpose. The proposed solution presents the characteristics of high versatility and the need for a reduced footprint, due to the adopted technologies and the integration with a mobile base. Experimental results are shown to validate the proposed approach. 
}, year = {2019} } @article{DellaCorte1291440, author = {Della Corte, Bartolomeo and Andreasson, Henrik and Stoyanov, Todor and Grisetti, Giorgio}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS) }, number = {2}, pages = {902--909}, title = {Unified Motion-Based Calibration of Mobile Multi-Sensor Platforms With Time Delay Estimation}, volume = {4}, DOI = {10.1109/LRA.2019.2892992}, keywords = {Calibration and Identification}, abstract = {The ability to maintain and continuously update geometric calibration parameters of a mobile platform is a key functionality for every robotic system. These parameters include the intrinsic kinematic parameters of the platform, the extrinsic parameters of the sensors mounted on it, and their time delays. In this letter, we present a unified pipeline for motion-based calibration of mobile platforms equipped with multiple heterogeneous sensors. We formulate a unified optimization problem to concurrently estimate the platform kinematic parameters, the sensors extrinsic parameters, and their time delays. We analyze the influence of the trajectory followed by the robot on the accuracy of the estimate. Our framework automatically selects appropriate trajectories to maximize the information gathered and to obtain a more accurate parameters estimate. In combination with that, our pipeline observes the parameters evolution in long-term operation to detect possible values change in the parameters set. 
The experiments conducted on real data show a smooth convergence along with the ability to detect changes in parameters value. We release an open-source version of our framework to the community. }, year = {2019} } @inproceedings{Canelhas1232362, author = {Canelhas, Daniel Ricão and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA), : }, institution = {Örebro University, School of Science and Technology}, institution = {Univrses AB, Strängnäs, Sweden}, pages = {6337--6343}, title = {A Survey of Voxel Interpolation Methods and an Evaluation of Their Impact on Volumetric Map-Based Visual Odometry}, keywords = {Voxels, Compression, Interpolation, TSDF, Visual Odometry}, abstract = {Voxel volumes are simple to implement and lend themselves to many of the tools and algorithms available for 2D images. However, the additional dimension of voxels may be costly to manage in memory when mapping large spaces at high resolutions. While lowering the resolution and using interpolation is common work-around, in the literature we often find that authors either use trilinear interpolation or nearest neighbors and rarely any of the intermediate options. This paper presents a survey of geometric interpolation methods for voxel-based map representations. In particular we study the truncated signed distance field (TSDF) and the impact of using fewer than 8 samples to perform interpolation within a depth-camera pose tracking and mapping scenario. We find that lowering the number of samples fetched to perform the interpolation results in performance similar to the commonly used trilinear interpolation method, but leads to higher framerates. We also report that lower bit-depth generally leads to performance degradation, though not as much as may be expected, with voxels containing as few as 3 bits sometimes resulting in adequate estimation of camera trajectories. 
}, year = {2018} } @inproceedings{Stoyanov1277231, author = {Stoyanov, Todor and Krug, Robert and Kiselev, Andrey and Sun, Da and Loutfi, Amy}, booktitle = {2018 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, institution = {Robotics, Learning and Perception lab, Royal Institute of Technology, Stockholm, Sweden}, pages = {6640--6645}, title = {Assisted Telemanipulation : A Stack-Of-Tasks Approach to Remote Manipulator Control}, series = {IEEE International Conference on Intelligent Robots and Systems. Proceedings}, DOI = {10.1109/IROS.2018.8594457}, abstract = {This article presents an approach for assisted teleoperation of a robot arm, formulated within a real-time stack-of-tasks (SoT) whole-body motion control framework. The approach leverages the hierarchical nature of the SoT framework to integrate operator commands with assistive tasks, such as joint limit and obstacle avoidance or automatic gripper alignment. Thereby some aspects of the teleoperation problem are delegated to the controller and carried out autonomously. The key contributions of this work are two-fold: the first is a method for unobtrusive integration of autonomy in a telemanipulation system; and the second is a user study evaluation of the proposed system in the context of teleoperated pick-and-place tasks. The proposed approach of assistive control was found to result in higher grasp success rates and shorter trajectories than achieved through manual control, without incurring additional cognitive load to the operator. 
}, ISBN = {978-1-5386-8094-0}, ISBN = {978-1-5386-8095-7}, year = {2018} } @inproceedings{Lundell1277232, author = {Lundell, Jens and Krug, Robert and Schaffernicht, Erik and Stoyanov, Todor and Kyrki, Ville}, booktitle = {IEEE-RAS Conference on Humanoid Robots : }, institution = {Örebro University, School of Science and Technology}, institution = {Intelligent Robotics Group, Aalto University, Helsinki, Finland}, institution = {Royal Institute of Technology, Stockholm, Sweden}, institution = {Intelligent Robotics Group, Aalto University, Helsinki, Finland}, note = {Funding Agency:Academy of Finland  314180}, pages = {132--138}, title = {Safe-To-Explore State Spaces : Ensuring Safe Exploration in Policy Search with Hierarchical Task Optimization}, series = {IEEE-RAS International Conference on Humanoid Robots}, keywords = {Sensorimotor learning, Grasping and Manipulation, Concept and strategy learning}, abstract = {Policy search reinforcement learning allows robots to acquire skills by themselves. However, the learning procedure is inherently unsafe as the robot has no a-priori way to predict the consequences of the exploratory actions it takes. Therefore, exploration can lead to collisions with the potential to harm the robot and/or the environment. In this work we address the safety aspect by constraining the exploration to happen in safe-to-explore state spaces. These are formed by decomposing target skills (e.g., grasping) into higher ranked sub-tasks (e.g., collision avoidance, joint limit avoidance) and lower ranked movement tasks (e.g., reaching). Sub-tasks are defined as concurrent controllers (policies) in different operational spaces together with associated Jacobians representing their joint-space mapping. Safety is ensured by only learning policies corresponding to lower ranked sub-tasks in the redundant null space of higher ranked ones. As a side benefit, learning in sub-manifolds of the state-space also facilitates sample efficiency. 
Reaching skills performed in simulation and grasping skills performed on a real robot validate the usefulness of the proposed approach. }, year = {2018} } @article{Canelhas1175909, author = {Canelhas, Daniel R. and Schaffernicht, Erik and Stoyanov, Todor and Lilienthal, Achim and Davison, Andrew J.}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing, Imperial College London, London, United Kingdom}, journal = {Robotics}, note = {Funding Agencies:European Commission  FP7-ICT-270350 H-ICT  732737 }, number = {3}, eid = {15}, publisher = {MDPI AG}, title = {Compressed Voxel-Based Mapping Using Unsupervised Learning}, volume = {6}, DOI = {10.3390/robotics6030015}, keywords = {3D mapping, TSDF, compression, dictionary learning, auto-encoder, denoising}, abstract = {In order to deal with the scaling problem of volumetric map representations, we propose spatially local methods for high-ratio compression of 3D maps, represented as truncated signed distance fields. We show that these compressed maps can be used as meaningful descriptors for selective decompression in scenarios relevant to robotic applications. As compression methods, we compare using PCA-derived low-dimensional bases to nonlinear auto-encoder networks. Selecting two application-oriented performance metrics, we evaluate the impact of different compression rates on reconstruction fidelity as well as to the task of map-aided ego-motion estimation. It is demonstrated that lossily reconstructed distance fields used as cost functions for ego-motion estimation can outperform the original maps in challenging scenarios from standard RGB-D (color plus depth) data sets due to the rejection of high-frequency noise content. 
}, year = {2017} } @inproceedings{Andreasson1159885, author = {Andreasson, Henrik and Adolfsson, Daniel and Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim}, booktitle = {2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {1389--1395}, title = {Incorporating Ego-motion Uncertainty Estimates in Range Data Registration}, series = {Proceedings of the ... IEEE/RSJ International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2017.8202318}, abstract = {Local scan registration approaches commonly only utilize ego-motion estimates (e.g. odometry) as an initial pose guess in an iterative alignment procedure. This paper describes a new method to incorporate ego-motion estimates, including uncertainty, into the objective function of a registration algorithm. The proposed approach is particularly suited for feature-poor and self-similar environments, which typically present challenges to current state of the art registration algorithms. Experimental evaluation shows significant improvements in accuracy when using data acquired by Automatic Guided Vehicles (AGVs) in industrial production and warehouse environments. 
}, ISBN = {978-1-5386-2682-5}, ISBN = {978-1-5386-2683-2}, year = {2017} } @article{Ahtiainen1044255, author = {Ahtiainen, Juhana and Stoyanov, Todor and Saarinen, Jari}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Electrical Engineering and Automation, Aalto University, Espoo, Finland}, institution = {GIM Ltd., Espoo, Finland}, journal = {Journal of Field Robotics}, note = {Funding Agencies:Finnish Society of Automation  Finnish Funding Agency for Technology and Innovation (TEKES)  Forum for Intelligent Machines (FIMA)  Energy and Life Cycle Cost Efficient Machines (EFFIMA) research program }, number = {3}, pages = {600--621}, title = {Normal Distributions Transform Traversability Maps : LIDAR-Only Approach for Traversability Mapping in Outdoor Environments}, volume = {34}, DOI = {10.1002/rob.21657}, abstract = {Safe and reliable autonomous navigation in unstructured environments remains a challenge for field robots. In particular, operating on vegetated terrain is problematic, because simple purely geometric traversability analysis methods typically classify dense foliage as nontraversable. As traversing through vegetated terrain is often possible and even preferable in some cases (e.g., to avoid executing longer paths), more complex multimodal traversability analysis methods are necessary. In this article, we propose a three-dimensional (3D) traversability mapping algorithm for outdoor environments, able to classify sparsely vegetated areas as traversable, without compromising accuracy on other terrain types. The proposed normal distributions transform traversability mapping (NDT-TM) representation exploits 3D LIDAR sensor data to incrementally expand normal distributions transform occupancy (NDT-OM) maps. In addition to geometrical information, we propose to augment the NDT-OM representation with statistical data of the permeability and reflectivity of each cell. 
Using these additional features, we train a support-vector machine classifier to discriminate between traversable and nondrivable areas of the NDT-TM maps. We evaluate classifier performance on a set of challenging outdoor environments and note improvements over previous purely geometrical traversability analysis approaches. }, year = {2017} } @article{Canelhas1044256, author = {Canelhas, Daniel R. and Stoyanov, Todor and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {2}, pages = {1148--1155}, title = {From Feature Detection in Truncated Signed Distance Fields to Sparse Stable Scene Graphs}, volume = {1}, DOI = {10.1109/LRA.2016.2523555}, keywords = {Mapping, recognition}, abstract = {With the increased availability of GPUs and multicore CPUs, volumetric map representations are an increasingly viable option for robotic applications. A particularly important representation is the truncated signed distance field (TSDF) that is at the core of recent advances in dense 3D mapping. However, there is relatively little literature exploring the characteristics of 3D feature detection in volumetric representations. In this paper we evaluate the performance of features extracted directly from a 3D TSDF representation. We compare the repeatability of Integral invariant features, specifically designed for volumetric images, to the 3D extensions of Harris and Shi & Tomasi corners. We also study the impact of different methods for obtaining gradients for their computation. We motivate our study with an example application for building sparse stable scene graphs, and present an efficient GPU-parallel algorithm to obtain the graphs, made possible by the combination of TSDF and 3D feature points. Our findings show that while the 3D extensions of 2D corner-detection perform as expected, integral invariants have shortcomings when applied to discrete TSDFs. 
We conclude with a discussion of the cause for these points of failure that sheds light on possible mitigation strategies. }, year = {2016} } @inproceedings{Stoyanov1044252, author = {Stoyanov, Todor and Krug, Robert and Muthusamy, Rajkumar and Kyrki, Ville}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, institution = {Aalto University, Esbo, Finland}, institution = {Aalto University, Esbo, Finland}, pages = {885--892}, title = {Grasp Envelopes : Extracting Constraints on Gripper Postures from Online Reconstructed 3D Models}, DOI = {10.1109/IROS.2016.7759155}, abstract = { Grasping systems that build upon meticulously planned hand postures rely on precise knowledge of object geometry, mass and frictional properties - assumptions which are often violated in practice. In this work, we propose an alternative solution to the problem of grasp acquisition in simple autonomous pick and place scenarios, by utilizing the concept of grasp envelopes: sets of constraints on gripper postures. We propose a fast method for extracting grasp envelopes for objects that fit within a known shape category, placed in an unknown environment. Our approach is based on grasp envelope primitives, which encode knowledge of human grasping strategies. We use environment models, reconstructed from noisy sensor observations, to refine the grasp envelope primitives and extract bounded envelopes of collision-free gripper postures. Also, we evaluate the envelope extraction procedure both in a stand alone fashion, as well as an integrated component of an autonomous picking system. }, ISBN = {978-1-5090-3762-9}, year = {2016} } @article{Stoyanov1044254, author = {Stoyanov, Todor and Vaskevicius, Narunas and Mueller, Christian Atanas and Fromm, Tobias and Krug, Robert and Tincani, Vinicio and Mojtahedzadeh, Rasoul and Kunaschk, Stefan and Ernits, R. 
Mortensen and Canelhas, Daniel R. and Bonilla, Manuell and Schwertfeger, Soeren and Bonini, Marco and Halfar, Harry and Pathak, Kaustubh and Rohde, Moritz and Fantoni, Gualtiero and Bicchi, Antonio and Birk, Andreas and Lilienthal, Achim J. and Echelmeyer, Wolfgang}, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {Bremer Institut für Produktion und Logistik (BIBA), Bremen, Germany}, institution = {Bremer Institut für Produktion und Logistik (BIBA), Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {ShanghaiTech University, Shanghai, China}, institution = {Reutlingen University, Reutlingen, Germany}, institution = {Reutlingen University, Reutlingen, Germany}, institution = {Jacobs University Bremen, Bremen, Germany}, institution = {Bremer Institut für Produktion und Logistik (BIBA), Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {Università di Pisa & Istituto Italiano di Tecnologia, Genova, Italy}, institution = {Jacobs University, Bremen, Germany}, institution = {Reutlingen University, Reutlingen, Germany}, journal = {IEEE robotics & automation magazine}, note = {Funding Agency:EU FP7 project ROBLOG ICT-270350}, number = {4}, pages = {94--106}, title = {No More Heavy Lifting : Robotic Solutions to the Container-Unloading Problem}, volume = {23}, DOI = {10.1109/MRA.2016.2535098}, year = {2016} } @article{Krug1044259, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = { University of Pisa, Pisa, Italy}, journal 
= {IEEE Robotics and Automation Letters}, number = {1}, pages = {546--553}, title = {The Next Step in Robot Commissioning : Autonomous Picking and Palletizing}, volume = {1}, DOI = {10.1109/LRA.2016.2519944}, keywords = {Logistics, grasping, autonomous vehicle navigation, robot safety, mobile manipulation}, abstract = {So far, autonomous order picking (commissioning) systems have not been able to meet the stringent demands regarding speed, safety, and accuracy of real-world warehouse automation, resulting in reliance on human workers. In this letter, we target the next step in autonomous robot commissioning: automatizing the currently manual order picking procedure. To this end, we investigate the use case of autonomous picking and palletizing with a dedicated research platform and discuss lessons learned during testing in simplified warehouse settings. The main theoretical contribution is a novel grasp representation scheme which allows for redundancy in the gripper pose placement. This redundancy is exploited by a local, prioritized kinematic controller which generates reactive manipulator motions on-the-fly. We validated our grasping approach by means of a large set of experiments, which yielded an average grasp acquisition time of 23.5 s at a success rate of 94.7%. Our system is able to autonomously carry out simple order picking tasks in a humansafe manner, and as such serves as an initial step toward future commercial-scale in-house logistics automation solutions. }, year = {2016} } @article{Andreasson807693, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and Cirillo, Marcello and Dimitrov, Dimitar Nikolaev and Driankov, Dimiter and Karlsson, Lars and Lilienthal, Achim J. 
and Pecora, Federico and Saarinen, Jari Pekka and Sherikov, Aleksander and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {INRIA - Grenoble, Meylan, France}, institution = {Aalto University, Espoo, Finland}, institution = {Centre de recherche Grenoble Rhône-Alpes, Grenoble, France}, journal = {IEEE Robotics \& Automation Magazine}, number = {1}, pages = {64--75}, title = {Autonomous transport vehicles : where we are and what is missing}, volume = {22}, DOI = {10.1109/MRA.2014.2381357}, keywords = {Intelligent vehicles; Mobile robots; Resource management; Robot kinematics; Trajectory; Vehicle dynamics}, abstract = {In this article, we address the problem of realizing a complete efficient system for automated management of fleets of autonomous ground vehicles in industrial sites. We elicit from current industrial practice and the scientific state of the art the key challenges related to autonomous transport vehicles in industrial environments and relate them to enabling techniques in perception, task allocation, motion planning, coordination, collision prediction, and control. We propose a modular approach based on least commitment, which integrates all modules through a uniform constraint-based paradigm. We describe an instantiation of this system and present a summary of the results, showing evidence of increased flexibility at the control level to adapt to contingencies. 
}, year = {2015} } @inproceedings{Magnusson847086, author = {Magnusson, Martin and Vaskevicius, Narunas and Stoyanov, Todor and Pathak, Kaustubh and Birk, Andreas}, booktitle = {2015 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of EECS, Jacobs University, Bremen, Germany}, institution = {Department of EECS, Jacobs University, Bremen, Germany}, institution = {Department of EECS, Jacobs University, Bremen, Germany}, pages = {3631--3637}, publisher = {IEEE conference proceedings}, title = {Beyond points : Evaluating recent 3D scan-matching algorithms}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, number = {2015-June}, volume = {2015 June}, DOI = {10.1109/ICRA.2015.7139703}, keywords = {Normal distribution, robot vision, 3D scan registration algorithm, 3D scan-matching algorithm, ICP method, MUMC algorithm, NDT, benchmark protocol, iterative closest point method, large-scale investigation, local surface structure, minimally uncertain maximum consensus algorithm, normal distribution transform, robot, Benchmark testing, Gaussian distribution, Iterative closest point algorithm, Optimization, Protocols, Three-dimensional displays, Transforms}, abstract = {Given that 3D scan matching is such a central part of the perception pipeline for robots, thorough and large-scale investigations of scan matching performance are still surprisingly few. A crucial part of the scientific method is to perform experiments that can be replicated by other researchers in order to compare different results. In light of this fact, this paper presents a thorough comparison of 3D scan registration algorithms using a recently published benchmark protocol which makes use of a publicly available challenging data set that covers a wide range of environments.
In particular, we evaluate two types of recent 3D registration algorithms - one local and one global. Both approaches take local surface structure into account, rather than matching individual points. After well over 100 000 individual tests, we conclude that algorithms using the normal distributions transform (NDT) provides accurate results compared to a modern implementation of the iterative closest point (ICP) method, when faced with scan data that has little overlap and weak geometric structure. We also demonstrate that the minimally uncertain maximum consensus (MUMC) algorithm provides accurate results in structured environments without needing an initial guess, and that it provides useful measures to detect whether it has succeeded or not. We also propose two amendments to the experimental protocol, in order to provide more valuable results in future implementations. }, ISBN = {978-1-4799-6923-4}, year = {2015} } @inproceedings{Andreasson894653, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA), 2015 : }, institution = {Örebro University, School of Science and Technology}, institution = {SCANIA AB, Södertälje, Sweden}, pages = {662--669}, title = {Fast, continuous state path smoothing to improve navigation accuracy}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2015.7139250}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. 
In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not been widely adopted in commercial AGV systems. The main contribution of this paper addresses this shortcoming by introducing a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. In real world tests presented in this paper we demonstrate that the proposed approach is fast enough for online use (it computes trajectories faster than they can be driven) and highly accurate. In 100 repetitions we achieve mean end-point pose errors below 0.01 meters in translation and 0.002 radians in orientation. Even the maximum errors are very small: only 0.02 meters in translation and 0.008 radians in orientation. }, ISBN = {9781479969234}, year = {2015} } @inproceedings{Krug842706, author = {Krug, Robert and Stoyanov, Todor and Lilienthal, Achim}, booktitle = {Robotics: Science and Systems Conference : Workshop on Bridging the Gap between Data-driven and Analytical Physics-based Grasping and Manipulation}, institution = {Örebro University, School of Science and Technology}, title = {Grasp Envelopes for Constraint-based Robot Motion Planning and Control}, keywords = {Grasping, Grasp Control, Motion Control}, abstract = {We suggest a grasp representation in form of a set of enveloping spatial constraints. Our representation transforms the grasp synthesis problem (i.
e., the question of where to position the grasping device) from finding a suitable discrete manipulator wrist pose to finding a suitable pose manifold. Also the corresponding motion planning and execution problem is relaxed – instead of transitioning the wrist to a discrete pose, it is enough to move it anywhere within the grasp envelope which allows to exploit kinematic redundancy. }, year = {2015} } @inproceedings{Krug808145, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Bicchi, Antonio and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA) - Workshop on Robotic Hands, Grasping, and Manipulation : }, institution = {Örebro University, School of Science and Technology}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, title = {On Using Optimization-based Control instead of Path-Planning for Robot Grasp Motion Generation}, keywords = {Grasping, Motion Planning, Control}, year = {2015} } @inproceedings{Tincani900484, author = {Tincani, Vinicio and Catalano, Manuel and Grioli, Giorgio and Stoyanov, Todor and Krug, Robert and Lilienthal, Achim J.
and Fantoni, Gualtiero and Bicchi, Antonio}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy; Department of Advanced Robotics, Istituto Italiano di Tecnologia, Genova, Italy}, pages = {2744--2750}, title = {Sensitive Active Surfaces on the Velvet II Dexterous Gripper}, URL = {https://www.ias.informatik.tu-darmstadt.de/uploads/Workshops/ICRA2015TactileForce/03_icra_ws_tactileforce.pdf}, year = {2015} } @inproceedings{Tincani900487, author = {Tincani, Vinicio and Stoyanov, Todor and Krug, Robert and Catalano, Manuel and Grioli, Giorgio and Lilienthal, Achim J. and Fantoni, Gualtiero and Bicchi, Antonio}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {Istituto Italiano di Tecnologia, Genova, Italy}, title = {The Grasp Acquisition Strategy of the Velvet II}, year = {2015} } @article{Andreasson780236, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics}, number = {4}, pages = {400--416}, publisher = {M D P I AG}, title = {Drive the Drive : From Discrete Motion Plans to Smooth Drivable Trajectories}, volume = {3}, DOI = {10.3390/robotics3040400}, keywords = {Motion planning, motion and path planning, autonomous navigation}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. 
One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not been widely adopted in commercial AGV systems. The main contribution of this paper is a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. The proposed approach is evaluated in several industrially relevant scenarios and found to be both fast (less than 2 s per vehicle trajectory) and accurate (end-point pose errors below 0.01 m in translation and 0.005 radians in orientation). }, year = {2014} } @inproceedings{Krug780127, author = {Krug, Robert and Stoyanov, Todor and Bonilla, Manuel and Tincani, Vinicio and Vaskevicius, Narunas and Fantoni, Gualtiero and Birk, Andreas and Lilienthal, Achim and Bicchi, Antonio}, booktitle = {Workshop on Autonomous Grasping and Manipulation : An Open Challenge}, institution = {Örebro University, School of Science and Technology}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. 
Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, institution = {Faculty of Engineering, Interdepart. Research Center "Enrico Piaggio", University of Pisa, Pisa, Italy}, title = {Improving Grasp Robustness via In-Hand Manipulation with Active Surfaces}, keywords = {Grasping, Grasp Control, Grasp Planning}, year = {2014} } @inproceedings{Vaskevicius772382, author = {Vaskevicius, N. and Mueller, C. A. and Bonilla, M. and Tincani, V. and Stoyanov, Todor and Fantoni, G. and Pathak, K. and Lilienthal, Achim J. and Bicchi, A. and Birk, A.}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University, Bremen, Germany}, institution = {Jacobs University, Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {University of Pisa, Pisa, Italy}, institution = {Jacobs University, Bremen, Germany}, institution = {University of Pisa, Pisa, Italy}, institution = {Jacobs University, Bremen, Germany}, pages = {1270--1277}, title = {Object recognition and localization for robust grasping with a dexterous gripper in the context of container unloading}, DOI = {10.1109/CoASE.2014.6899490}, keywords = {containers;control engineering computing;dexterous manipulators;goods distribution;grippers;industrial robots;logistics;object recognition;autonomous shipping-container unloading;dexterous gripper;object recognition;perception system;pose estimation errors;table-top scenarios;Educational institutions;Grasping;Grippers;Robot sensing systems;Thumb}, abstract = {The work presented here is embedded in research on an industrial application scenario, namely autonomous shipping-container unloading, which has 
several challenging constraints: the scene is very cluttered, objects can be much larger than in common table-top scenarios; the perception must be highly robust, while being as fast as possible. These contradicting goals force a compromise between speed and accuracy. In this work, we investigate a state of the art perception system integrated with a dexterous gripper. In particular, we are interested in pose estimation errors from the recognition module and whether these errors can be handled by the abilities of the gripper. }, year = {2014} } @inproceedings{Bennetts1072051, author = {Bennetts, Victor Hernandez and Schaffernicht, Erik and Stoyanov, Todor and Lilienthal, Achim J. and Trincavelli, Marco}, booktitle = {2014 IEEE INTERNATIONAL CONFERENCE ON ROBOTICS AND AUTOMATION (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {6362--6367}, title = {Robot Assisted Gas Tomography - Localizing Methane Leaks in Outdoor Environments}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ICRA.2014.6907798}, abstract = {In this paper we present an inspection robot to produce gas distribution maps and localize gas sources in large outdoor environments. The robot is equipped with a 3D laser range finder and a remote gas sensor that returns integral concentration measurements. We apply principles of tomography to create a spatial gas distribution model from integral gas concentration measurements. The gas distribution algorithm is framed as a convex optimization problem and it models the mean distribution and the fluctuations of gases. This is important since gas dispersion is not an static phenomenon and furthermore, areas of high fluctuation can be correlated with the location of an emitting source. 
We use a compact surface representation created from the measurements of the 3D laser range finder with a state of the art mapping algorithm to get a very accurate localization and estimation of the path of the laser beams. In addition, a conic model for the beam of the remote gas sensor is introduced. We observe a substantial improvement in the gas source localization capabilities over previous state-of-the-art in our evaluation carried out in an open field environment. }, ISBN = {978-1-4799-3685-4}, year = {2014} } @inproceedings{HernandezBennetts748476, author = {Hernandez Bennetts, Victor and Schaffernicht, Erik and Stoyanov, Todor and Lilienthal, Achim J. and Trincavelli, Marco}, booktitle = {Workshop on Robot Monitoring : }, institution = {Örebro University, School of Science and Technology}, title = {Robot assisted gas tomography : an alternative approach for the detection of fugitive methane emissions}, abstract = {Methane (CH4) based combustibles, such as Natural Gas (NG) and BioGas (BG), are considered bridge fuels towards a decarbonized global energy system. NG emits less CO2 during combustion than other fossil fuels and BG can be produced from organic waste. However, at BG production sites, leaks are common and CH4 can escape through fissures in pipes and insulation layers. While by regulation BG producers shall issue monthly CH4 emission reports, measurements are sparsely collected, only at a few predefined locations. Due to the high global warming potential of CH4, efficient leakage detection systems are critical. We present a robotics approach to localize CH4 leaks. In Robot assisted Gas Tomography (RGT), a mobile robot is equipped with remote gas sensors to create gas distribution maps, which can be used to infer the location of emitting sources. 
Spectroscopy based remote gas sensors report integral concentrations, which means that the measurements are spatially unresolved, with neither information regarding the gas distribution over the optical path nor the length of the s beam. Thus, RGT fuses different sensing modalities, such as range sensors for robot localization and ray tracing, in order to infer plausible gas distribution models that explain the acquired integral concentration measurements. }, year = {2014} } @inproceedings{Krug696464, author = {Krug, Robert and Stoyanov, Todor and Bonilla, Manuel and Tincani, Vinicio and Vaskevicius, Narunas and Fantoni, Gualtiero and Birk, Andreas and Lilienthal, Achim J. and Bicchi, Antonio}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, institution = {Robotics Group, School of Engineering and Science, Jacobs University Bremen, Bremen, Germany}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, institution = {Robotics Group, School of Engineering and Science, Jacobs University Bremen, Bremen, Germany}, institution = {Interdepart. Research Center “E. Piaggio”, University of Pisa, Pisa, Italy}, pages = {3669--3675}, title = {Velvet fingers : grasp planning and execution for an underactuated gripper with active surfaces}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ICRA.2014.6907390}, keywords = {Grasp Planning, Grasp Control, Underactuation}, abstract = {In this work we tackle the problem of planning grasps for an underactuated gripper which enable it to retrieve target objects from a cluttered environment. 
Furthermore, we investigate how additional manipulation capabilities of the gripping device, provided by active surfaces on the inside of the fingers, can lead to performance improvement in the grasp execution process. To this end, we employ a simple strategy, in which the target object is ‘pulled-in’ towards the palm during grasping which results in firm enveloping grasps. We show the effectiveness of the suggested methods by means of experiments conducted in a real-world scenario. }, ISBN = {978-1-4799-3685-4}, year = {2014} } @article{Saarinen644380, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding agency:Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {14}, pages = {1627--1644}, title = {3D normal distributions transform occupancy maps : an efficient representation for mapping in dynamic environments}, volume = {32}, DOI = {10.1177/0278364913499415}, abstract = {In order to enable long-term operation of autonomous vehicles in industrial environments numerous challenges need to be addressed. A basic requirement for many applications is the creation and maintenance of consistent 3D world models. This article proposes a novel 3D spatial representation for online real-world mapping, building upon two known representations: normal distributions transform (NDT) maps and occupancy grid maps. The proposed normal distributions transform occupancy map (NDT-OM) combines the advantages of both representations; compactness of NDT maps and robustness of occupancy maps. One key contribution in this article is that we formulate exact recursive updates for NDT-OMs. We show that the recursive update equations provide natural support for multi-resolution maps.
Next, we describe a modification of the recursive update equations that allows adaptation in dynamic environments. As a second key contribution we introduce NDT-OMs and formulate the occupancy update equations that allow to build consistent maps in dynamic environments. The update of the occupancy values are based on an efficient probabilistic sensor model that is specially formulated for NDT-OMs. In several experiments with a total of 17 hours of data from a milk factory we demonstrate that NDT-OMs enable real-time performance in large-scale, long-term industrial setups. }, year = {2013} } @inproceedings{Mojtahedzadeh698571, author = {Mojtahedzadeh, Rasoul and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, pages = {313--318}, title = {Application Based 3D Sensor Evaluation : A Case Study in 3D Object Pose Estimation for Automated Unloading of Containers}, DOI = {10.1109/ECMR.2013.6698860}, abstract = {A fundamental task in the design process of a complex system that requires 3D visual perception is the choice of suitable 3D range sensors. Identifying the utility of 3D range sensors in an industrial application solely based on an evaluation of their distance accuracy and the noise level may lead to an inappropriate selection. To assess the actual effect on the performance of the system as a whole requires a more involved analysis. In this paper, we examine the problem of selecting a set of 3D range sensors when designing autonomous systems for specific industrial applications in a holistic manner. As an instance of this problem we present a case study with an experimental evaluation of the utility of four 3D range sensors for object pose estimation in the process of automation of unloading containers. 
}, year = {2013} } @article{Stoyanov618586, author = {Stoyanov, Todor and Mojtahedzadeh, Rasoul and Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, number = {10}, pages = {1094--1105}, title = {Comparative evaluation of range sensor accuracy for indoor mobile robotics and automated logistics applications}, volume = {61}, DOI = {10.1016/j.robot.2012.08.011}, abstract = {3D range sensing is an important topic in robotics, as it is a component in vital autonomous subsystems such as for collision avoidance, mapping and perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements, without using a precise ground truth environment model, is proposed. This article presents an extensive evaluation of three novel depth sensors — the Swiss Ranger SR-4000, Fotonic B70 and Microsoft Kinect. Tests are concentrated on the automated logistics scenario of container unloading. Six different setups of box-, cylinder-, and sack-shaped goods inside a mock-up container are used to collect range measurements. Comparisons are performed against hand-crafted ground truth data, as well as against a reference actuated Laser Range Finder (aLRF) system. Additional test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. 
}, year = {2013} } @article{Stoyanov618700, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Journal of Field Robotics}, number = {2}, pages = {216--236}, title = {Comparative evaluation of the consistency of three-dimensional spatial representations used in autonomous robot navigation}, volume = {30}, DOI = {10.1002/rob.21446}, abstract = {An increasing number of robots for outdoor applications rely on complex three-dimensional (3D) environmental models. In many cases, 3D maps are used for vital tasks, such as path planning and collision detection in challenging semistructured environments. Thus, acquiring accurate three-dimensional maps is an important research topic of high priority for autonomously navigating robots. This article proposes an evaluation method that is designed to compare the consistency with which different representations model the environment. In particular, the article examines several popular (probabilistic) spatial representations that are capable of predicting the occupancy of any point in space, given prior 3D range measurements. This work proposes to reformulate the obtained environmental models as probabilistic binary classifiers, thus allowing for the use of standard evaluation and comparison procedures. To avoid introducing localization errors, this article concentrates on evaluating models constructed from measurements acquired at fixed sensor poses. Using a cross-validation approach, the consistency of different representations, i.e., the likelihood of correctly predicting unseen measurements in the sensor field of view, can be evaluated. Simulated and real-world data sets are used to benchmark the precision of four spatial models—occupancy grid, triangle mesh, and two variations of the three-dimensional normal distributions transform (3D-NDT)—over various environments and sensor noise levels. 
Overall, the consistency of representation of the 3D-NDT is found to be the highest among the tested models, with a similar performance over varying input data. }, year = {2013} } @inproceedings{Saarinen644375, author = {Saarinen, Jari and Stoyanov, Todor and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4694--4701}, title = {Fast 3D mapping in highly dynamic environments using normal distributions transform occupancy maps}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697032}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Canelhas644372, author = {Canelhas, Daniel R. and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {3203--3209}, title = {Improved local shape feature stability through dense model tracking}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696811}, abstract = {In this work we propose a method to effectively remove noise from depth images obtained with a commodity structured light sensor. The proposed approach fuses data into a consistent frame of reference over time, thus utilizing prior depth measurements and viewpoint information in the noise removal process. The effectiveness of the approach is compared to two state of the art, single-frame denoising methods in the context of feature descriptor matching and keypoint detection stability. To make more general statements about the effect of noise removal in these applications, we extend a method for evaluating local image gradient feature descriptors to the domain of 3D shape descriptors. 
We perform a comparative study of three classes of such descriptors: Normal Aligned Radial Features, Fast Point Feature Histograms and Depth Kernel Descriptors; and evaluate their performance on a real-world industrial application data set. We demonstrate that noise removal enabled by the dense map representation results in major improvements in matching across all classes of descriptors as well as having a substantial positive impact on keypoint detection reliability }, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Almqvist644368, author = {Almqvist, H{\aa}kan and Magnusson, Martin and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {2013 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {733--738}, title = {Improving Point-Cloud Accuracy from a Moving Platform in Field Operations}, DOI = {10.1109/ICRA.2013.6630654}, abstract = {This paper presents a method for improving the quality of distorted 3D point clouds made from a vehicle equipped with a laser scanner moving over uneven terrain. Existing methods that use 3D point-cloud data (for tasks such as mapping, localisation, and object detection) typically assume that each point cloud is accurate. For autonomous robots moving in rough terrain, it is often the case that the vehicle moves a substantial amount during the acquisition of one point cloud, in which case the data will be distorted. The method proposed in this paper is capable of increasing the accuracy of 3D point clouds, without assuming any specific features of the environment (such as planar walls), without resorting to a "stop-scan-go" approach, and without relying on specialised and expensive hardware. Each new point cloud is matched to the previous using normal-distribution-transform (NDT) registration, after which a mini-loop closure is performed with a local, per-scan, graph-based SLAM method. 
The proposed method increases the accuracy of both the measured platform trajectory and the point cloud. The method is validated on both real-world and simulated data. }, ISBN = {978-1-4673-5641-1}, ISBN = {978-1-4673-5643-5}, year = {2013} } @inproceedings{Saarinen644376, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {382--389}, title = {Normal distributions transform monte-carlo localization (NDT-MCL)}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696380}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Stoyanov644379, author = {Stoyanov, Todor and Saarinen, Jari and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4702--4708}, title = {Normal distributions transform occupancy map fusion : simultaneous mapping and tracking in large scale dynamic environments}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697033}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Saarinen622633, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Ala-Luhtala, Juha and Lilienthal, Achim J.}, booktitle = {IEEE International Conference on Robotics and Automation : }, institution = {Örebro University, School of Science and Technology}, institution = {Aalto University of Technology, Aalto, Finland}, pages = {2233--2238}, title = {Normal distributions transform occupancy maps : application to large-scale online 3D mapping}, DOI = {10.1109/ICRA.2013.6630878}, abstract = 
{Autonomous vehicles operating in real-world industrial environments have to overcome numerous challenges, chief among which is the creation and maintenance of consistent 3D world models. This paper proposes to address the challenges of online real-world mapping by building upon previous work on compact spatial representation and formulating a novel 3D mapping approach — the Normal Distributions Transform Occupancy Map (NDT-OM). The presented algorithm enables accurate real-time 3D mapping in large-scale dynamic environments employing a recursive update strategy. In addition, the proposed approach can seamlessly provide maps at multiple resolutions allowing for fast utilization in high-level functions such as localization or path planning. Compared to previous approaches that use the NDT representation, the proposed NDT-OM formulates an exact and efficient recursive update formulation and models the full occupancy of the map. }, year = {2013} } @inproceedings{Canelhas644377, author = {Canelhas, Daniel R. and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {3671--3676}, title = {SDF tracker : a parallel algorithm for on-line pose estimation and scene reconstruction from depth images}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696880}, abstract = {Ego-motion estimation and environment mapping are two recurring problems in the field of robotics. In this work we propose a simple on-line method for tracking the pose of a depth camera in six degrees of freedom and simultaneously maintaining an updated 3D map, represented as a truncated signed distance function. The distance function representation implicitly encodes surfaces in 3D-space and is used directly to define a cost function for accurate registration of new data. 
The proposed algorithm is highly parallel and achieves good accuracy compared to state of the art methods. It is suitable for reconstructing single household items, workspace environments and small rooms at near real-time rates, making it practical for use on modern CPU hardware }, ISBN = {978-1-4673-6358-7}, year = {2013} } @article{Stoyanov618701, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding Agencies:European Union FP7 - 270350Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {12}, pages = {1377--1393}, title = {Fast and accurate scan registration through minimization of the distance between compact 3D NDT Representations}, volume = {31}, DOI = {10.1177/0278364912460895}, keywords = {point set registration; mapping; normal distributions transform}, abstract = {Registration of range sensor measurements is an important task in mobile robotics and has received a lot of attention. Several iterative optimization schemes have been proposed in order to align three-dimensional (3D) point scans. With the more widespread use of high-frame-rate 3D sensors and increasingly more challenging application scenarios for mobile robots, there is a need for fast and accurate registration methods that current state-of-the-art algorithms cannot always meet. This work proposes a novel algorithm that achieves accurate point cloud registration an order of a magnitude faster than the current state of the art. The speedup is achieved through the use of a compact spatial representation: the Three-Dimensional Normal Distributions Transform (3D-NDT). In addition, a fast, global-descriptor based on the 3D-NDT is defined and used to achieve reliable initial poses for the iterative algorithm. 
Finally, a closed-form expression for the covariance of the proposed method is also derived. The proposed algorithms are evaluated on two standard point cloud data sets, resulting in stable performance on a par with or better than the state of the art. The implementation is available as an open-source package for the Robot Operating system (ROS). }, year = {2012} } @inproceedings{Charusta543057, author = {Charusta, Krzysztof and Krug, Robert and Stoyanov, Todor and Dimitrov, Dimitar and Iliev, Boyko}, booktitle = {2012 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {1338--1344}, title = {Generation of independent contact regions on objects reconstructed from noisy real-world range data}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2012.6225046}, keywords = {cameras, image reconstruction, manipulators, prototypes, robot sensing systems, dexterous manipulators, filtering theory, grippers, image reconstruction}, abstract = {The synthesis and evaluation of multi-fingered grasps on complex objects is a challenging problem that has received much attention in the robotics community. Although several promising approaches have been developed, applications to real-world systems are limited to simple objects or gripper configurations. The paradigm of Independent Contact Regions (ICRs) has been proposed as a way to increase the tolerance to grasp positioning errors. This concept is well established, though only on precise geometric object models. This work is concerned with the application of the ICR paradigm to models reconstructed from real-world range data. We propose a method for increasing the robustness of grasp synthesis on uncertain geometric models. The sensitivity of the ICR algorithm to noisy data is evaluated and a filtering approach is proposed to improve the quality of the final result. 
}, ISBN = {9781467314053}, ISBN = {9781467314039}, year = {2012} } @inproceedings{Stoyanov524119, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J.}, booktitle = {2012 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, note = {Accepted for publication. Advance copy available at http://aass.oru.se/Research/Learning/publications/2012/Stoyanov_etal_2012-ICRA.pdf}, pages = {5196--5201}, title = {Point Set Registration through Minimization of the L-2 Distance between 3D-NDT Models}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2012.6224717}, abstract = {Point set registration — the task of finding the best fitting alignment between two sets of point samples, is an important problem in mobile robotics. This article proposes a novel registration algorithm, based on the distance between Three- Dimensional Normal Distributions Transforms. 3D-NDT models — a sub-class of Gaussian Mixture Models with uniformly weighted, largely disjoint components, can be quickly computed from range point data. The proposed algorithm constructs 3DNDT representations of the input point sets and then formulates an objective function based on the L2 distance between the considered models. Analytic first and second order derivatives of the objective function are computed and used in a standard Newton method optimization scheme, to obtain the best-fitting transformation. The proposed algorithm is evaluated and shown to be more accurate and faster, compared to a state of the art implementation of the Iterative Closest Point and 3D-NDT Point-to-Distribution algorithms. }, ISBN = {9781467314053}, ISBN = {9781467314039}, year = {2012} } @inproceedings{Andreasson618702, author = {Andreasson, Henrik and Stoyanov, Todor}, booktitle = {Proc. 
of International Conference on Robotics and Automation (ICRA) Workshop on Semantic Perception, Mapping and Exploration (SPME) : }, institution = {Örebro University, School of Science and Technology}, note = {The conference table of contents may be found on http://toc.proceedings.com/15154webtoc.pdf}, title = {Real time registration of RGB-D data using local visual features and 3D-NDT registration}, abstract = {Recent increased popularity of RGB-D capable sensors in robotics has resulted in a surge of related RGBD registration methods. This paper presents several RGB-D registration algorithms based on combinations between local visual feature and geometric registration. Fast and accurate transformation refinement is obtained by using a recently proposed geometric registration algorithm, based on the Three-Dimensional Normal Distributions Transform (3D-NDT). Results obtained on standard data sets have demonstrated mean translational errors on the order of 1 cm and rotational errors below 1 degree, at frame processing rates of about 15 Hz. 
}, ISBN = {9781467314039}, year = {2012} } @phdthesis{Stoyanov507812, author = {Stoyanov, Todor Dimitrov}, institution = {Örebro University, School of Science and Technology}, pages = {145}, publisher = {Örebro universitet}, school = {Örebro University, School of Science and Technology}, title = {Reliable autonomous navigation in semi-structured environments using the three-dimensional normal distributions transform (3D-NDT)}, series = {Örebro Studies in Technology}, ISSN = {1650-8580}, number = {54}, ISBN = {978-91-7668-861-8}, year = {2012} } @inproceedings{Stoyanov540987, author = {Stoyanov, Todor and Louloudi, Athanasia and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the 5th European Conference on Mobile Robots, ECMR 2011 : }, institution = {Örebro University, School of Science and Technology}, pages = {19--24}, title = {Comparative evaluation of range sensor accuracy in indoor environments}, abstract = {3D range sensing is one of the important topics in robotics, as it is often a component in vital autonomous subsystems like collision avoidance, mapping and semantic perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements is proposed. The approach is then used to compare the behavior of three integrated range sensing devices, to that of a standard actuated laser range sensor. Test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. 
}, year = {2011} } @inproceedings{Stoyanov524116, author = {Stoyanov, Todor and Magnusson, Martin and Almqvist, H{\aa}kan and Lilienthal, Achim J.}, booktitle = {2011 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, note = {Proceedings at http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5501116}, title = {On the Accuracy of the 3D Normal Distributions Transform as a Tool for Spatial Representation}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ICRA.2011.5979584}, abstract = {The Three-Dimensional Normal Distributions Transform (3D-NDT) is a spatial modeling technique with applications in point set registration, scan similarity comparison, change detection and path planning. This work concentrates on evaluating three common variations of the 3D-NDT in terms of accuracy of representing sampled semi-structured environments. In a novel approach to spatial representation quality measurement, the 3D geometrical modeling task is formulated as a classification problem and its accuracy is evaluated with standard machine learning performance metrics. In this manner the accuracy of the 3D-NDT variations is shown to be comparable to, and in some cases to outperform that of the standard occupancy grid mapping model. }, ISBN = {978-1-61284-385-8}, year = {2011} } @inproceedings{Ferri524121, author = {Ferri, Gabriele and Mondini, Alessio and Manzi, Alessandro and Mazzolai, Barbara and Laschi, Cecilia and Mattoli, Virgilio and Reggente, Matteo and Stoyanov, Todor and Lilienthal, Achim J. 
and Lettere, Marco and Dario, Paolo}, booktitle = {Proceedings of ICRA Workshop on Networked and Mobile Robot Olfaction in Natural, Dynamic Environments : }, institution = {Örebro University, School of Science and Technology}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, institution = {Scuola Superiore Sant'Anna, Pisa, Italy}, note = {Conference url: http://icra2010.grasp.upenn.edu/?q=overview}, title = {DustCart, a Mobile Robot for Urban Environments : Experiments of Pollution Monitoring and Mapping during Autonomous Navigation in Urban Scenarios}, keywords = {mobile robots, urban robots, gas mapping, navigation}, abstract = {In the framework of DustBot European project, aimed at developing a new multi-robot system for urban hygiene management, we have developed a two-wheeled robot: DustCart. DustCart aims at providing a solution to door-to-door garbage collection: the robot, called by a user, navigates autonomously to his/her house; collects the garbage from the user and discharges it in an apposite area. An additional feature of DustCart is the capability to monitor the air pollution by means of an on board Air Monitoring Module (AMM). The AMM integrates sensors to monitor several atmospheric pollutants, such as carbon monoxide (CO), particulate matter (PM10), nitrogen dioxide (NO2), ozone (O3) plus temperature (T) and relative humidity (rHu). An Ambient Intelligence platform (AmI) manages the robots’ operations through a wireless connection. AmI is able to collect measurements taken by different robots and to process them to create a pollution distribution map. 
In this paper we describe the DustCart robot system, focusing on the AMM and on the process of creating the pollutant distribution maps. We report results of experiments of one DustCart robot moving in urban scenarios and producing gas distribution maps using the Kernel DM+V algorithm. These experiments can be considered as one of the first attempts to use robots as mobile monitoring devices that can complement the traditional fixed stations. }, year = {2010} } @inproceedings{Stoyanov445259, author = {Stoyanov, Todor and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {IEEE/RSJ 2010 International Conference on Intelligent Robots and Systems (IROS 2010) : }, institution = {Örebro University, School of Science and Technology}, pages = {3263--3268}, title = {Path planning in 3D environments using the normal distributions transform}, DOI = {10.1109/IROS.2010.5650789}, abstract = {Planning feasible paths in fully three-dimensional environments is a challenging problem. Application of existing algorithms typically requires the use of limited 3D representations that discard potentially useful information. This article proposes a novel approach to path planning that utilizes a full 3D representation directly: the Three-Dimensional Normal Distributions Transform (3D-NDT). The well known wavefront planner is modified to use 3D-NDT as a basis for map representation and evaluated using both indoor and outdoor data sets. The use of 3D-NDT for path planning is thus demonstrated to be a viable choice with good expressive capabilities. 
}, ISBN = {978-1-4244-6675-7}, year = {2010} } @inproceedings{Stoyanov524115, author = {Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE International Conference on Advanced Robotics (ICAR) : }, institution = {Örebro University, School of Science and Technology}, note = {Proceedings at http://ieeexplore.ieee.org/xpl/mostRecentIssue.jsp?punumber=5166725}, title = {Maximum Likelihood Point Cloud Acquisition from a Rotating Laser Scanner on a Moving Platform}, abstract = {This paper describes an approach to acquire locally consistent range data scans from a moving sensor platform. Data from a vertically mounted rotating laser scanner and odometry position estimates are fused and used to estimate maximum likelihood point clouds. An estimation algorithm is applied to reduce the accumulated error after a full rotation of the range finder. A configuration consisting of a SICK laser scanner mounted on a rotational actuator is described and used to evaluate the proposed approach. The data sets analyzed suggest a significant improvement in point cloud consistency, even over a short travel distance. }, URL = {https://ieeexplore.ieee.org/abstract/document/5174672}, year = {2009} } @inproceedings{Stoyanov274893, author = {Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {International conference on advanced robotics, ICAR 2009. : }, institution = {Örebro University, School of Science and Technology}, pages = {1--6}, title = {Maximum likelihood point cloud acquisition from a mobile platform}, abstract = {This paper describes an approach to acquire locally consistent range data scans from a moving sensor platform. Data from a vertically mounted rotating laser scanner and odometry position estimates are fused and used to estimate maximum likelihood point clouds. An estimation algorithm is applied to reduce the accumulated error after a full rotation of the range finder. 
A configuration consisting of a SICK laser scanner mounted on a rotational actuator is described and used to evaluate the proposed approach. The data sets analyzed suggest a significant improvement in point cloud consistency, even over a short travel distance. }, ISBN = {978-1-4244-4855-5}, year = {2009} } @inproceedings{Birk538841, author = {Birk, Andreas and Poppinga, Jann and Stoyanov, Todor and Nevatia, Yashodhan}, booktitle = {RoboCup 2008 : Robot Soccer World Cup XII}, institution = {Örebro University, School of Science and Technology}, note = {Proceedings details: Lecture Notes in Computer Science (LNCS), 2009, Volume 5399, Volume DOI:10.1007/978-3-642-02921-9, Sublibrary S7 - Lecture Notes in Artificial Intelligence, editors R. Goebel, J. Siekmann, and W.Wahlster. Conference paper DOI: 10.1007/978-3-642-02921-9_40}, pages = {463--472}, publisher = {Springer Berlin Heidelberg}, title = {Planetary Exploration in USARSim : A Case Study including Real World Data from Mars}, series = {Lecture Notes in Computer Science}, DOI = {10.1007/978-3-642-02921-9_40}, abstract = { Intelligent Mobile Robots are increasingly used in unstructured domains; one particularly challenging example for this is planetary exploration. The preparation of according missions is highly non-trivial, especially as it is difficult to carry out realistic experiments without very sophisticated infrastructures. In this paper, we argue that the Unified System for Automation and Robot Simulation (USARSim) offers interesting opportunities for research on planetary exploration by mobile robots. With the example of work on terrain classification, it is shown how synthetic as well as real world data from Mars can be used to test an algorithm's performance in USARSim. Concretely, experiments with an algorithm for the detection of negotiable ground on a planetary surface are presented. It is shown that the approach performs fast and robust on planetary surfaces. 
}, ISBN = {978-3-642-02920-2}, ISBN = {3-642-02920-5}, year = {2009} } @incollection{Pfingsthorn538840, author = {Pfingsthorn, Max and Nevatia, Yashodhan and Stoyanov, Todor and Rathnam, Ravi and Markov, Stefan and Birk, Andreas}, booktitle = {RoboCup 2008 : Robot Soccer World Cup XII Vol 5399}, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, institution = {Jacobs University Bremen, Campus Ring 1, 28759 Bremen, Germany}, pages = {225--234}, publisher = {Springer Berlin / Heidelberg}, title = {Towards Cooperative and Decentralized Mapping in the Jacobs Virtual Rescue Team}, series = {Lecture Notes in Computer Science}, volume = {5399}, DOI = {10.1007/978-3-642-02921-9_20}, abstract = {The task of mapping and exploring an unknown environment remains one of the fundamental problems of mobile robotics. It is a task that can intuitively benefit significantly from a multi-robot approach. In this paper, we describe the design of the multi-robot mapping system used in the Jacobs Virtual Rescue team. The team competed in the World Cup 2007 and won the second place. 
It is shown how the recently proposed pose graph map representation facilitates not only map merging but also allows transmitting map updates efficiently }, URL = {http://dx.doi.org/10.1007/978-3-642-02921-9_20}, year = {2009} } @inproceedings{Nevatia538842, author = {Nevatia, Yashodhan and Stoyanov, Todor and Rathnam, Ravi and Pfingsthorn, Max and Markov, Stefan and Ambrus, Rares and Birk, Andreas}, booktitle = {2008 IEEE/RSJ International Conference on Robots and Intelligent Systems, vols 1-3, conference proceedings : }, institution = {Örebro University, School of Science and Technology}, institution = {Univ Bremen, Dept EECS, Robot Lab, D-28725 Bremen, Germany}, institution = {Univ Bremen, Dept EECS, Robot Lab, D-28725 Bremen, Germany}, pages = {2103--2108}, title = {Augmented Autonomy : Improving human-robot team performance in Urban Search and Rescue}, DOI = {10.1109/IROS.2008.4651034}, abstract = {Exploration of unknown environments remains one of the fundamental problems of mobile robotics. It is also a prime example for a task that can benefit significantly from multi-robot teams. We present an integrated system for semi-autonomous cooperative exploration, augmented by an intuitive user interface for efficient human supervision and control. In this preliminary study we demonstrate the effectiveness of the system as a whole and the intuitive interface in particular. Congruent with previous findings, results confirm that having a human in the loop improves task performance, especially with larger numbers of robots. Specific to our interface, we find that even untrained operators can efficiently manage a decently sized team of robots. 
}, ISBN = {978-1-4244-2057-5}, ISBN = {978-1-4244-2058-2}, year = {2008} } @inproceedings{Birk538838, author = {Birk, Andreas and Stoyanov, Todor and Nevatia, Yashodhan and Ambrus, Rares and Poppinga, Jan and Pathak, Kaustubh}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Terrain Classification for Autonomous Robot Mobility : from Safety, Security Rescue Robotics to Planetary Exploration}, URL = {https://ewh.ieee.org/conf/icra/2008/workshops/PlanetaryRovers/}, year = {2008} } @inproceedings{Carpin538839, author = {Carpin, Stefano and Stoyanov, Todor and Nevatia, Yashodhan and Lewis, M. and Wang, J.}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Quantitative Assessments of USARSim Accuracy}, year = {2006} } @unpublished{Yang1797956, author = {Yang, Yuxuan and Stork, Johannes A. and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, title = {Tracking Branched Deformable Linear Objects Using Particle Filtering on Depth Images}, note = {Unpublished manuscript} }