@article{Liao1719864, author = {Liao, Qianfang and Sun, Da and Zhang, Shiyu and Loutfi, Amy and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on Image Processing}, pages = {550--564}, title = {Fuzzy Cluster-based Group-wise Point Set Registration with Quality Assessment}, volume = {32}, DOI = {10.1109/TIP.2022.3231132}, keywords = {Quality assessment, Measurement, Three-dimensional displays, Registers, Probability distribution, Point cloud compression, Optimization, Group-wise registration, registration quality assessment, joint alignment, fuzzy clusters, 3D point sets}, abstract = {This article studies group-wise point set registration and makes the following contributions: "FuzzyGReg", which is a new fuzzy cluster-based method to register multiple point sets jointly, and "FuzzyQA", which is the associated quality assessment to check registration accuracy automatically. Given a group of point sets, FuzzyGReg creates a model of fuzzy clusters and equally treats all the point sets as the elements of the fuzzy clusters. Then, the group-wise registration is turned into a fuzzy clustering problem. To resolve this problem, FuzzyGReg applies a fuzzy clustering algorithm to identify the parameters of the fuzzy clusters while jointly transforming all the point sets to achieve an alignment. Next, based on the identified fuzzy clusters, FuzzyQA calculates the spatial properties of the transformed point sets and then checks the alignment accuracy by comparing the similarity degrees of the spatial properties of the point sets. When a local misalignment is detected, a local re-alignment is performed to improve accuracy. The proposed method is cost-efficient and convenient to be implemented. In addition, it provides reliable quality assessments in the absence of ground truth and user intervention. In the experiments, different point sets are used to test the proposed method and make comparisons with state-of-the-art registration techniques. The experimental results demonstrate the effectiveness of our method.The code is available at https://gitsvn-nt.oru.se/qianfang.liao/FuzzyGRegWithQA }, year = {2023} } @article{Adolfsson1727222, author = {Adolfsson, Daniel and Magnusson, Martin and Alhashimi, Anas and Lilienthal, Achim and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, institution = {Örebro University, Örebro, Sweden; Computer Engineering Department, University of Baghdad, Baghdad, Iraq}, journal = {IEEE Transactions on robotics}, number = {2}, pages = {1476--1495}, title = {Lidar-Level Localization With Radar? The CFEAR Approach to Accurate, Fast, and Robust Large-Scale Radar Odometry in Diverse Environments}, volume = {39}, DOI = {10.1109/tro.2022.3221302}, keywords = {Radar, Sensors, Spinning, Azimuth, Simultaneous localization and mapping, Estimation, Location awareness, Localization, radar odometry, range sensing, SLAM}, abstract = {This article presents an accurate, highly efficient, and learning-free method for large-scale odometry estimation using spinning radar, empirically found to generalize well across very diverse environments—outdoors, from urban to woodland, and indoors in warehouses and mines—without changing parameters. Our method integrates motion compensation within a sweep with one-to-many scan registration that minimizes distances between nearby oriented surface points and mitigates outliers with a robust loss function. Extending our previous approach conservative filtering for efficient and accurate radar odometry (CFEAR), we present an in-depth investigation on a wider range of datasets, quantifying the importance of filtering, resolution, registration cost and loss functions, keyframe history, and motion compensation. We present a new solving strategy and configuration that overcomes previous issues with sparsity and bias, and improves our state-of-the-art by 38%, thus, surprisingly, outperforming radar simultaneous localization and mapping (SLAM) and approaching lidar SLAM. The most accurate configuration achieves 1.09% error at 5 Hz on the Oxford benchmark, and the fastest achieves 1.79% error at 160 Hz. }, URL = {https://doi.org/10.48550/arXiv.2211.02445}, year = {2023} } @article{Gupta1761421, author = {Gupta, Himanshu and Lilienthal, Achim and Andreasson, Henrik and Kurtser, Polina}, institution = {Örebro University, School of Science and Technology}, institution = {Perception for Intelligent Systems, TechnicalUniversity of Munich, Munich, Germany}, institution = {Centre for Applied Autonomous SensorSystems, Institutionen för naturvetenskap &teknik, Örebro University, Örebro, Sweden; Department of Radiation Science, RadiationPhysics, Umeå University, Umeå, Sweden}, journal = {Journal of Field Robotics}, number = {6}, pages = {1603--1619}, title = {NDT-6D for color registration in agri-robotic applications}, volume = {40}, DOI = {10.1002/rob.22194}, keywords = {agricultural robotics, color pointcloud, in-field sensing, machine perception, RGB-D registration, stereo IR, vineyard}, abstract = {Registration of point cloud data containing both depth and color information is critical for a variety of applications, including in-field robotic plant manipulation, crop growth modeling, and autonomous navigation. However, current state-of-the-art registration methods often fail in challenging agricultural field conditions due to factors such as occlusions, plant density, and variable illumination. To address these issues, we propose the NDT-6D registration method, which is a color-based variation of the Normal Distribution Transform (NDT) registration approach for point clouds. Our method computes correspondences between pointclouds using both geometric and color information and minimizes the distance between these correspondences using only the three-dimensional (3D) geometric dimensions. We evaluate the method using the GRAPES3D data set collected with a commercial-grade RGB-D sensor mounted on a mobile platform in a vineyard. Results show that registration methods that only rely on depth information fail to provide quality registration for the tested data set. The proposed color-based variation outperforms state-of-the-art methods with a root mean square error (RMSE) of 1.1-1.6 cm for NDT-6D compared with 1.1-2.3 cm for other color-information-based methods and 1.2-13.7 cm for noncolor-information-based methods. The proposed method is shown to be robust against noises using the TUM RGBD data set by artificially adding noise present in an outdoor scenario. The relative pose error (RPE) increased similar to 14% for our method compared to an increase of similar to 75% for the best-performing registration method. The obtained average accuracy suggests that the NDT-6D registration methods can be used for in-field precision agriculture applications, for example, crop detection, size-based maturity estimation, and growth modeling. }, year = {2023} } @inproceedings{Gupta1812049, author = {Gupta, Himanshu and Andreasson, Henrik and Magnusson, Martin and Julier, Simon and Lilienthal, Achim J.}, booktitle = {2023 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, University College London, London, England}, institution = {Perception for Intelligent Systems, Technical University of Munich, Germany }, pages = {43--48}, publisher = {IEEE}, title = {Revisiting Distribution-Based Registration Methods}, series = {European Conference on Mobile Robots}, DOI = {10.1109/ECMR59166.2023.10256416}, abstract = {Normal Distribution Transformation (NDT) registration is a fast, learning-free point cloud registration algorithm that works well in diverse environments. It uses the compact NDT representation to represent point clouds or maps as a spatial probability function that models the occupancy likelihood in an environment. However, because of the grid discretization in NDT maps, the global minima of the registration cost function do not always correlate to ground truth, particularly for rotational alignment. In this study, we examined the NDT registration cost function in-depth. We evaluated three modifications (Student-t likelihood function, inflated covariance/heavily broadened likelihood curve, and overlapping grid cells) that aim to reduce the negative impact of discretization in classical NDT registration. The first NDT modification improves likelihood estimates for matching the distributions of small population sizes; the second modification reduces discretization artifacts by broadening the likelihood tails through covariance inflation; and the third modification achieves continuity by creating the NDT representations with overlapping grid cells (without increasing the total number of cells). We used the Pomerleau Dataset evaluation protocol for our experiments and found significant improvements compared to the classic NDT D2D registration approach (27.7% success rate) using the registration cost functions "heavily broadened likelihood NDT" (HBL-NDT) (34.7% success rate) and "overlapping grid cells NDT" (OGC-NDT) (33.5% success rate). However, we could not observe a consistent improvement using the Student-t likelihood-based registration cost function (22.2% success rate) over the NDT P2D registration cost function (23.7% success rate). A comparative analysis with other state-of-art registration algorithms is also presented in this work. We found that HBL-NDT worked best for easy initial pose difficulties scenarios making it suitable for consecutive point cloud registration in SLAM application. }, ISBN = {9798350307047}, ISBN = {9798350307054}, year = {2023} } @article{Gupta1770024, author = {Gupta, Himanshu and Andreasson, Henrik and Lilienthal, Achim J. and Kurtser, Polina}, institution = {Örebro University, School of Science and Technology}, institution = {Perception for Intelligent Systems, Technical University of Munich, Munich, Germany}, institution = {Centre for Applied Autonomous Sensor Systems, Örebro University, Örebro, Sweden; Department of Radiation Science, Radiation Physics, Umeå University, Umeå, Sweden}, journal = {Sensors}, number = {10}, eid = {4736}, title = {Robust Scan Registration for Navigation in Forest Environment Using Low-Resolution LiDAR Sensors}, volume = {23}, DOI = {10.3390/s23104736}, keywords = {tree segmentation, LiDAR mapping, forest inventory, SLAM, forestry robotics, scan registration}, abstract = {Automated forest machines are becoming important due to human operators' complex and dangerous working conditions, leading to a labor shortage. This study proposes a new method for robust SLAM and tree mapping using low-resolution LiDAR sensors in forestry conditions. Our method relies on tree detection to perform scan registration and pose correction using only low-resolution LiDAR sensors (16Ch, 32Ch) or narrow field of view Solid State LiDARs without additional sensory modalities like GPS or IMU. We evaluate our approach on three datasets, including two private and one public dataset, and demonstrate improved navigation accuracy, scan registration, tree localization, and tree diameter estimation compared to current approaches in forestry machine automation. Our results show that the proposed method yields robust scan registration using detected trees, outperforming generalized feature-based registration algorithms like Fast Point Feature Histogram, with an above 3 m reduction in RMSE for the 16Chanel LiDAR sensor. For Solid-State LiDAR the algorithm achieves a similar RMSE of 3.7 m. Additionally, our adaptive pre-processing and heuristic approach to tree detection increased the number of detected trees by 13% compared to the current approach of using fixed radius search parameters for pre-processing. Our automated tree trunk diameter estimation method yields a mean absolute error of 4.3 cm (RSME = 6.5 cm) for the local map and complete trajectory maps. }, year = {2023} } @article{Adolfsson1766598, author = {Adolfsson, Daniel and Karlsson, Mattias and Kubelka, Vladimír and Magnusson, Martin and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, institution = {MRO Lab of the AASS Research Centre, Örebro University, Örebro, Sweden}, journal = {IEEE Robotics and Automation Letters}, number = {6}, pages = {3613--3620}, title = {TBV Radar SLAM - Trust but Verify Loop Candidates}, volume = {8}, DOI = {10.1109/LRA.2023.3268040}, keywords = {SLAM, localization, radar, introspection}, abstract = {Robust SLAM in large-scale environments requires fault resilience and awareness at multiple stages, from sensing and odometry estimation to loop closure. In this work, we present TBV (Trust But Verify) Radar SLAM, a method for radar SLAM that introspectively verifies loop closure candidates. TBV Radar SLAM achieves a high correct-loop-retrieval rate by combining multiple place-recognition techniques: tightly coupled place similarity and odometry uncertainty search, creating loop descriptors from origin-shifted scans, and delaying loop selection until after verification. Robustness to false constraints is achieved by carefully verifying and selecting the most likely ones from multiple loop constraints. Importantly, the verification and selection are carried out after registration when additional sources of loop evidence can easily be computed. We integrate our loop retrieval and verification method with a robust odometry pipeline within a pose graph framework. By evaluation on public benchmarks we found that TBV Radar SLAM achieves 65% lower error than the previous state of the art. We also show that it generalizes across environments without needing to change any parameters. We provide the open-source implementation at https://github.com/dan11003/tbv_slam_public }, year = {2023} } @article{Molina1797296, author = {Molina, Sergi and Mannucci, Anna and Magnusson, Martin and Adolfsson, Daniel and Andreasson, Henrik and Hamad, Mazin and Abdolshah, Saeed and Chadalavada, Ravi Teja and Palmieri, Luigi and Linder, Timm and Swaminathan, Chittaranjan Srinivas and Kucner, Tomasz Piotr and Hanheide, Marc and Fernandez-Carmona, Manuel and Cielniak, Grzegorz and Duckett, Tom and Pecora, Federico and Bokesand, Simon and Arras, Kai O. and Haddadin, Sami and Lilienthal, Achim J}, institution = {Örebro University, School of Science and Technology}, institution = {University of Lincoln, Lincoln, U.K}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Technical University of Munich, Munich, Germany}, institution = {Technical University of Munich, Munich, Germany}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Aalto University, Aalto, Finland}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {Kollmorgen Automation AB, Mölndal, Sweden}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Technical University of Munich, Munich, Germany}, journal = {IEEE robotics & automation magazine}, title = {The ILIAD Safety Stack : Human-Aware Infrastructure-Free Navigation of Industrial Mobile Robots}, DOI = {10.1109/MRA.2023.3296983}, keywords = {Robots, Safety, Navigation, Mobile robots, Human-robot interaction, Hidden Markov models, Trajectory}, abstract = {Current intralogistics services require keeping up with e-commerce demands, reducing delivery times and waste, and increasing overall flexibility. As a consequence, the use of automated guided vehicles (AGVs) and, more recently, autonomous mobile robots (AMRs) for logistics operations is steadily increasing. }, year = {2023} } @article{Andreasson1650509, author = {Andreasson, Henrik and Larsson, Jonas and Lowry, Stephanie}, institution = {Örebro University, School of Science and Technology}, institution = {ABB Corporate Research, Västerås, Sweden}, journal = {Sensors}, number = {7}, eid = {2588}, title = {A Local Planner for Accurate Positioning for a Multiple Steer-and-Drive Unit Vehicle Using Non-Linear Optimization}, volume = {22}, DOI = {10.3390/s22072588}, keywords = {local planning, optimal control, obstacle avoidance}, abstract = {This paper presents a local planning approach that is targeted for pseudo-omnidirectional vehicles: that is, vehicles that can drive sideways and rotate on the spot. This local planner—MSDU–is based on optimal control and formulates a non-linear optimization problem formulation that exploits the omni-motion capabilities of the vehicle to drive the vehicle to the goal in a smooth and efficient manner while avoiding obstacles and singularities. MSDU is designed for a real platform for mobile manipulation where one key function is the capability to drive in narrow and confined areas. The real-world evaluations show that MSDU planned paths that were smoother and more accurate than a comparable local path planner Timed Elastic Band (TEB), with a mean (translational, angular) error for MSDU of (0.0028 m, 0.0010 rad) compared to (0.0033 m, 0.0038 rad) for TEB. MSDU also generated paths that were consistently shorter than TEB, with a mean (translational, angular) distance traveled of (0.6026 m, 1.6130 rad) for MSDU compared to (0.7346 m, 3.7598 rad) for TEB. }, year = {2022} } @article{Seeburger1704400, author = {Seeburger, P. and Herdenstam, Anders P. F. and Kurtser, Polina and Arunachalam, A. and Castro Alves, Victor and Hy{\"o}tyl{\"a}inen, Tuulia and Andreasson, Henrik}, institution = {Örebro University, School of Hospitality, Culinary Arts & Meal Science}, institution = {Örebro University, School of Science and Technology}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden}, institution = {Department of Radiation Sciences, Radiation Physics, Umeå University, Umeå, Sweden}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden; Department of Radiation Sciences, Radiation Physics, Umeå University, Umeå, Sweden}, journal = {Food Chemistry}, note = {Funding agency:German Academic Exchange Service (Deutscher Akademischer Austauschdienst, DAAD)}, number = {Pt A}, eid = {134545}, title = {Controlled mechanical stimuli reveal novel associations between basil metabolism and sensory quality}, volume = {404}, DOI = {10.1016/j.foodchem.2022.134545}, keywords = {Agricultural robotics, Linalool glucoside, Network analysis, Plant metabolomics, Sensomics, Sensory analysis}, abstract = {There is an increasing interest in the use of automation in plant production settings. Here, we employed a robotic platform to induce controlled mechanical stimuli (CMS) aiming to improve basil quality. Semi-targeted UHPLC-qToF-MS analysis of organic acids, amino acids, phenolic acids, and phenylpropanoids revealed changes in basil secondary metabolism under CMS, which appear to be associated with changes in taste, as revealed by different means of sensory evaluation (overall liking, check-all-that-apply, and just-about-right analysis). Further network analysis combining metabolomics and sensory data revealed novel links between plant metabolism and sensory quality. Amino acids and organic acids including maleic acid were negatively associated with basil quality, while increased levels of secondary metabolites, particularly linalool glucoside, were associated with improved basil taste. In summary, by combining metabolomics and sensory analysis we reveal the potential of automated CMS on crop production, while also providing new associations between plant metabolism and sensory quality. }, year = {2022} } @article{Adolfsson1689786, author = {Adolfsson, Daniel and Castellano-Quero, Manuel and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, eid = {104136}, title = {CorAl : Introspection for robust radar and lidar perception in diverse environments using differential entropy}, volume = {155}, DOI = {10.1016/j.robot.2022.104136}, keywords = {Radar, Introspection, Localization}, abstract = {Robust perception is an essential component to enable long-term operation of mobile robots. It depends on failure resilience through reliable sensor data and pre-processing, as well as failure awareness through introspection, for example the ability to self-assess localization performance. This paper presents CorAl: a principled, intuitive, and generalizable method to measure the quality of alignment between pairs of point clouds, which learns to detect alignment errors in a self-supervised manner. CorAl compares the differential entropy in the point clouds separately with the entropy in their union to account for entropy inherent to the scene. By making use of dual entropy measurements, we obtain a quality metric that is highly sensitive to small alignment errors and still generalizes well to unseen environments. In this work, we extend our previous work on lidar-only CorAl to radar data by proposing a two-step filtering technique that produces high-quality point clouds from noisy radar scans. Thus, we target robust perception in two ways: by introducing a method that introspectively assesses alignment quality, and by applying it to an inherently robust sensor modality. We show that our filtering technique combined with CorAl can be applied to the problem of alignment classification, and that it detects small alignment errors in urban settings with up to 98% accuracy, and with up to 96% if trained only in a different environment. Our lidar and radar experiments demonstrate that CorAl outperforms previous methods both on the ETH lidar benchmark, which includes several indoor and outdoor environments, and the large-scale Oxford and MulRan radar data sets for urban traffic scenarios. The results also demonstrate that CorAl generalizes very well across substantially different environments without the need of retraining. }, year = {2022} } @article{Liao1606320, author = {Liao, Qianfang and Sun, Da and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on robotics}, number = {4}, pages = {2632--2651}, title = {FuzzyPSReg : Strategies of Fuzzy Cluster-based Point Set Registration}, volume = {38}, DOI = {10.1109/TRO.2021.3123898}, keywords = {point set registration, fuzzy clusters, registration quality assessment, 3D point clouds, object pose estimation.}, abstract = {This paper studies the fuzzy cluster-based point set registration (FuzzyPSReg). First, we propose a new metric based on Gustafson-Kessel (GK) fuzzy clustering to measure the alignment of two point clouds.  Unlike the metric based on fuzzy c-means (FCM) clustering in our previous work, the GK-based metric includes orientation properties of the point clouds, thereby providing more information for registration. We then develop the registration quality assessment of the GK-based metric, which is more sensitive to small misalignments than that of the FCM-based metric. Next, by effectively combining the two metrics, we design two FuzzyPSReg strategies with global optimization: i). \textit{FuzzyPSReg-SS}, which extends our previous work and aligns two similar-sized point clouds with greatly improved efficiency; ii). \textit{FuzzyPSReg-O2S}, which aligns two point clouds with a relatively large difference in size and can be used to estimate the pose of an object in a scene. In the experiment, we use different point clouds to test and compare the proposed method with state-of-the-art registration approaches. The results demonstrate the advantages and effectiveness of our method. }, year = {2022} } @article{Arunachalam1635462, author = {Arunachalam, Ajay and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Journal of Mobile Multimedia}, number = {3}, pages = {723--742}, publisher = {River Publishers}, title = {MSI-RPi : Affordable, Portable, and Modular Multispectral Imaging Prototype Suited to Operate in UV, Visible and Mid-Infrared Regions}, volume = {18}, DOI = {10.13052/jmm1550-4646.18312}, keywords = {imaging technology, low-cost, spectral, phenotype, plant science, vision, imaging sensors, agriculture, image analysis}, abstract = {Digital plant inventory provides critical growth insights, given the associated data quality is good. Stable & high-quality image acquisition is critical for further examination. In this work, we showcase an affordable, portable, and modular spectral camera prototype, designed with open hardware’s and open-source software’s. The image sensors used were color, and infrared Pi micro-camera. The designed prototype presents the advantage as being low-cost and modular with respect to other general commercial market available spectral devices. The micro-size connected sensors make it a compact instrument that can be used for any general spectral acquisition purposes, along with the provision of custom selection of the bands, making the presented prototype design a Plug-nd-Play (PnP) setup that can be used in different wide application areas. The images acquired from our custom-built prototype were back-tested by performing image analysis and qualitative assessments. The image acquisition software, and processing algorithm has been programmed, which is bundled with our developed system. Further, an end-to-end automation script is integrated for the users to readily leverage the services on-demand. The design files, schematics, and all the related materials of the spectral block design is open-sourced with open-hardware license & is made available at https://github.com/ajayarunachalam/Multi-Spectral-Imaging-RaspberryPi-Design. The automated data acquisition scripts & the spectral image analysis done is made available at https://github.com/ajayarunachalam/SI-RPi. }, year = {2022} } @article{Arunachalam1523503, author = {Arunachalam, Ajay and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Internet Technology Letters}, number = {1}, eid = {e272}, title = {RaspberryPi‐Arduino (RPA) powered smart mirrored and reconfigurable IoT facility for plant science research}, volume = {5}, DOI = {10.1002/itl2.272}, keywords = {Open-source, hardware, software, remote monitoring, IoT, Raspberry Pi, Arduino, sensor, database, agriculture, Plant Science, MOX, Mirroring, Reconfigurable Mircoservice, plant growth, automation, gas sensors}, abstract = {Continuous monitoring of crops is critical for the sustainability of agriculture. The effects of changes in temperature, light intensity, humidity, pH, soil moisture, gas intensities, etc. have an overall impact on the plant growth. Growth chambers are environmental controlled facilities which needs to be monitored round-the-clock. To improve both the reproducibility, and maintenance of such facilities, remote monitoring plays a very pivotal role. An automated re-configurable & persistent mirrored storage-based remote monitoring system is developed with low-cost open source hardwares and softwares. The system automates sensors deployment, storage (database, logs), and provides an elegant dashboard to visualize the real-time continuous data stream. We propose a new smart AGRO IoT system with robust data acquisition mechanism, and also propose two software component nodes, (i.e., Mirroring and Reconfiguration) running as an instance of the whole IoT facility. The former one is aimed to minimize/avoid the downtime, while the latter one is aimed to leverage the available cores, and better utilization of the computational resources. Our system can be easily deployed in growth chambers, greenhouses, CNC farming test-bed setup, cultivation plots, and further can be also extended to support large-farms with either using multiple individual standalone setup as heterogeneous instances of this facility, or by extending it as master-slave cluster configuration for communication as a single homogeneous instance. Our RaspberryPi-Arduino (RPA) powered solution is scalable, and provides stability for monitoring any environment continuously at ease. }, year = {2022} } @inproceedings{Machado1633897, author = {Machado, Tyrone and Fassbender, David and Taheri, Abdolreza and Eriksson, Daniel and Gupta, Himanshu and Molaei, Amirmasoud and Forte, Paolo and Rai, Prashant and Ghabcheloo, Reza and M{\"a}kinen, Saku and Lilienthal, Achim and Andreasson, Henrik and Geimer, Marcus}, booktitle = {Proceedings of the IEEE ICTE Leading Digital Transformation in Business and Society Conference : }, institution = {Örebro University, School of Science and Technology}, institution = {Bosch Rexroth AG, Elchingen, Germany}, institution = {Bosch Rexroth AG, Elchingen, Germany}, institution = {Faculty of Engineering and Natural Sciences, Tampere University, Tampere, Finland}, institution = {R&D Wheel Loader-Emerging Technologies, Liebherr-Werk Bischofshofen GmbH, Bischofshofen, Austria}, institution = {Institute of Vehicle System Technology, Karlsruhe Institute of Technology, Karlsruhe, Germany}, institution = {Faculty of Engineering and Natural Sciences, Tampere University, Tampere, Finland}, institution = {Faculty of Engineering and Natural Sciences, Tampere University, Tampere, Finland}, institution = {Faculty of Management and Business, Tampere University, Tampere, Finland}, institution = {Institute of Vehicle System, Technology Karlsruhe Institute of Technology, Karlsruhe, Germany}, title = {Autonomous Heavy-Duty Mobile Machinery : A Multidisciplinary Collaborative Challenge}, DOI = {10.1109/ICTE51655.2021.9584498}, keywords = {automation, augmentation, autonomous, collaboration, mobile machinery, transaction cost economics}, abstract = {Heavy-duty mobile machines (HDMMs), are a wide range of off-road machinery used in diverse and critical application areas which are currently facing several issues like skilled labor shortage, safety requirements, and harsh work environments in general. Consequently, efforts are underway to increase automation in HDMMs for increased productivity and safety, eventually transitioning to operator-less autonomous HDMMs to address skilled labor shortages. However, HDMM are complex machines requiring continuous physical and cognitive inputs from human operators. Thus, developing autonomous HDMM is a huge challenge, with current research and developments being performed in several independent research domains. Through this study, we use the bounded rationality concept to propose multidisciplinary collaborations for new autonomous HDMMs and apply the transaction cost economics framework to suggest future implications in the HDMM industry. Furthermore, we introduce and provide a conceptual understanding of the autonomous HDMM industry collaborations as a unified approach, while highlighting the practical implications and challenges of the complex nature of such multidisciplinary collaborations. The collaborative challenges and potentials are mapped out between the following topics: mechanical systems, AI methods, software systems, sensors, data and connectivity, simulations and process optimization, business cases, organization theories, and finally, regulatory frameworks. }, ISBN = {9781665438957}, ISBN = {9781665445986}, year = {2021} } @inproceedings{Alhashimi1803369, author = {Alhashimi, Anas and Adolfsson, Daniel and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden; Computer Engineering Department, University of Baghdad, Baghdad, Iraq}, title = {BFAR – Bounded False Alarm Rate detector for improved radar odometry estimation}, abstract = {This paper presents a new detector for filtering noise from true detections in radar data, which improves the state of the art in radar odometry. Scanning Frequency-Modulated Continuous Wave (FMCW) radars can be useful for localisation and mapping in low visibility, but return a lot of noise compared to (more commonly used) lidar, which makes the detection task more challenging. Our Bounded False-Alarm Rate (BFAR) detector is different from the classical Constant False-Alarm Rate (CFAR) detector in that it applies an affine transformation on the estimated noise level after which the parameters that minimize the estimation error can be learned. BFAR is an optimized combination between CFAR and fixed-level thresholding. Only a single parameter needs to be learned from a training dataset. We apply BFAR tothe use case of radar odometry, and adapt a state-of-the-art odometry pipeline (CFEAR), replacing its original conservative filtering with BFAR. In this way we reduce the state-of-the-art translation/rotation odometry errors from 1.76%/0.5◦/100 m to 1.55%/0.46◦/100 m; an improvement of 12.5%. }, URL = {https://doi.org/10.48550/arXiv.2109.09669}, year = {2021} } @inproceedings{Adolfsson1595903, author = {Adolfsson, Daniel and Magnusson, Martin and Alhashimi, Anas and Lilienthal, Achim and Andreasson, Henrik}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2021) : }, institution = {Örebro University, School of Science and Technology}, pages = {5462--5469}, title = {CFEAR Radarodometry - Conservative Filtering for Efficient and Accurate Radar Odometry}, series = {IEEE International Conference on Intelligent Robots and Systems. Proceedings}, DOI = {10.1109/IROS51168.2021.9636253}, keywords = {Localization SLAM Mapping Radar}, abstract = {This paper presents the accurate, highly efficient, and learning-free method CFEAR Radarodometry for large-scale radar odometry estimation. By using a filtering technique that keeps the k strongest returns per azimuth and by additionally filtering the radar data in Cartesian space, we are able to compute a sparse set of oriented surface points for efficient and accurate scan matching. Registration is carried out by minimizing a point-to-line metric and robustness to outliers is achieved using a Huber loss. We were able to additionally reduce drift by jointly registering the latest scan to a history of keyframes and found that our odometry method generalizes to different sensor models and datasets without changing a single parameter. We evaluate our method in three widely different environments and demonstrate an improvement over spatially cross-validated state-of-the-art with an overall translation error of 1.76% in a public urban radar odometry benchmark, running at 55Hz merely on a single laptop CPU thread. }, URL = {https://doi.org/10.48550/arXiv.2105.01457}, ISBN = {9781665417143}, ISBN = {9781665417150}, year = {2021} } @inproceedings{Forte1584120, author = {Forte, Paolo and Mannucci, Anna and Andreasson, Henrik and Pecora, Federico}, booktitle = {Proceedings of the 9th ICAPS Workshop on Planning and Robotics (PlanRob) : }, institution = {Örebro University, School of Science and Technology}, title = {Construction Site Automation : Open Challenges for Planning and Robotics}, year = {2021} } @inproceedings{Adolfsson1596301, author = {Adolfsson, Daniel and Magnusson, Martin and Liao, Qianfang and Lilienthal, Achim and Andreasson, Henrik}, booktitle = {10th European Conference on Mobile Robots (ECMR 2021) : }, institution = {Örebro University, School of Science and Technology}, title = {CorAl – Are the point clouds Correctly Aligned?}, volume = {10}, DOI = {10.1109/ECMR50962.2021.9568846}, abstract = {In robotics perception, numerous tasks rely on point cloud registration. However, currently there is no method that can automatically detect misaligned point clouds reliably and without environment-specific parameters. We propose "CorAl", an alignment quality measure and alignment classifier for point cloud pairs, which facilitates the ability to introspectively assess the performance of registration. CorAl compares the joint and the separate entropy of the two point clouds. The separate entropy provides a measure of the entropy that can be expected to be inherent to the environment. The joint entropy should therefore not be substantially higher if the point clouds are properly aligned. Computing the expected entropy makes the method sensitive also to small alignment errors, which are particularly hard to detect, and applicable in a range of different environments. We found that CorAl is able to detect small alignment errors in previously unseen environments with an accuracy of 95% and achieve a substantial improvement to previous methods. }, URL = {https://doi.org/10.48550/arXiv.2109.09820}, year = {2021} } @article{Kurtser1620211, author = {Kurtser, Polina and Castro Alves, Victor and Arunachalam, Ajay and Sj{\"o}berg, Viktor and Hanell, Ulf and Hy{\"o}tyl{\"a}inen, Tuulia and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Scientific Reports}, note = {Funding agency:{\"O}rebro University}, number = {1}, eid = {23876}, title = {Development of novel robotic platforms for mechanical stress induction, and their effects on plant morphology, elements, and metabolism}, volume = {11}, DOI = {10.1038/s41598-021-02581-9}, abstract = {This research evaluates the effect on herbal crops of mechanical stress induced by two specially developed robotic platforms. The changes in plant morphology, metabolite profiles, and element content are evaluated in a series of three empirical experiments, conducted in greenhouse and CNC growing bed conditions, for the case of basil plant growth. Results show significant changes in morphological features, including shortening of overall stem length by up to 40% and inter-node distances by up to 80%, for plants treated with a robotic mechanical stress-induction protocol, compared to control groups. Treated plants showed a significant increase in element absorption, by 20-250% compared to controls, and changes in the metabolite profiles suggested an improvement in plants' nutritional profiles. These results suggest that repetitive, robotic, mechanical stimuli could be potentially beneficial for plants' nutritional and taste properties, and could be performed with no human intervention (and therefore labor cost). The changes in morphological aspects of the plant could potentially replace practices involving chemical treatment of the plants, leading to more sustainable crop production. }, year = {2021} } @article{Paul1548974, author = {Paul, Satyam and Arunachalam, Ajay and Khodadad, Davood and Andreasson, Henrik and Rubanenko, Olena}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Engineering Design and Mathematics, University of the West of England, Bristol, United Kingdom}, institution = {Department of Applied Physics and Electronics, Umeå University, Umeå, Sweden}, institution = {Regional Innovational Center for Electrical Engineering, Faculty of Electrical Engineering, University of West Bohemia, Pilsen, Czech Republic}, journal = {International Journal of Automation and Computing}, number = {4}, pages = {568--580}, publisher = {Chinese Academy of Sciences}, title = {Fuzzy Tuned PID Controller for Envisioned Agricultural Manipulator}, volume = {18}, DOI = {10.1007/s11633-021-1280-5}, keywords = {Proportional-integral-differential (PID) controller, fuzzy logic, precision agriculture, vibration control, stability analysis, modular manipulator, agricultural robot, computer numerical control (CNC) farming}, abstract = {The implementation of image-based phenotyping systems has become an important aspect of crop and plant science research which has shown tremendous growth over the years. Accurate determination of features using images requires stable imaging and very precise processing. By installing a camera on a mechanical arm driven by motor, the maintenance of accuracy and stability becomes non-trivial. As per the state-of-the-art, the issue of external camera shake incurred due to vibration is a great concern in capturing accurate images, which may be induced by the driving motor of the manipulator. So, there is a requirement for a stable active controller for sufficient vibration attenuation of the manipulator. However, there are very few reports in agricultural practices which use control algorithms. Although, many control strategies have been utilized to control the vibration in manipulators associated to various applications, no control strategy with validated stability has been provided to control the vibration in such envisioned agricultural manipulator with simple low-cost hardware devices with the compensation of non-linearities. So, in this work, the combination of proportional-integral-differential (PID) control with type-2 fuzzy logic (T2-F-PID) is implemented for vibration control. The validation of the controller stability using Lyapunov analysis is established. A torsional actuator (TA) is applied for mitigating torsional vibration, which is a new contribution in the area of agricultural manipulators. Also, to prove the effectiveness of the controller, the vibration attenuation results with T2-F-PID is compared with conventional PD/PID controllers, and a type-1 fuzzy PID (T1-F-PID) controller.  }, year = {2021} } @article{Forte1538478, author = {Forte, Paolo and Mannucci, Anna and Andreasson, Henrik and Pecora, Federico}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {3}, pages = {4584--4591}, title = {Online Task Assignment and Coordination in Multi-Robot Fleets}, volume = {6}, DOI = {10.1109/LRA.2021.3068918}, keywords = {Planning, scheduling and coordination, task and motion planning, multi-robot systems}, abstract = {We propose a loosely-coupled framework for integrated task assignment, motion planning, coordination and contro of heterogeneous fleets of robots subject to non-cooperative tasks. The approach accounts for the important real-world requiremen that tasks can be posted asynchronously. We exploit systematic search for optimal task assignment, where interference is considered as a cost and estimated with knowledge of the kinodynamic models and current state of the robots. Safety is guaranteed by an online coordination algorithm, where the absence of collisions is treated as a hard constraint. The relation between the weight of interference cost in task assignment and computational overhead is analyzed empirically, and the approach is compared against alternative realizations using local search algorithms for task assignment. }, year = {2021} } @inproceedings{Adolfsson1803356, author = {Adolfsson, Daniel and Magnusson, Martin and Alhashimi, Anas and Lilienthal, Achim and Andreasson, Henrik}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden}, title = {Oriented surface points for efficient and accurate radar odometry}, abstract = {This paper presents an efficient and accurate radar odometry pipeline for large-scale localization. We propose a radar filter that keeps only the strongest reflections per-azimuth that exceeds the expected noise level. The filtered radar data is used to incrementally estimate odometry by registering the current scan with a nearby keyframe. By modeling local surfaces, we were able to register scans by minimizing a point-to-line metric and accurately estimate odometry from sparse point sets, hence improving efficiency. Specifically, we found that a point-to-line metric yields significant improvements compared to a point-to-point metric when matching sparse sets of surface points. Preliminary results from an urban odometry benchmark show that our odometry pipeline is accurate and efficient compared to existing methods with an overall translation error of 2.05%, down from 2.78% from the previously best published method, running at 12.5ms per frame without need of environmental specific training.  }, URL = {https://doi.org/10.48550/arXiv.2109.09994}, year = {2021} } @article{Liao1396976, author = {Liao, Qianfang and Sun, Da and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, note = {Funding agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS)}, number = {9}, pages = {3229--3246}, title = {Point Set Registration for 3D Range Scans Using Fuzzy Cluster-based Metric and Efficient Global Optimization}, volume = {43}, DOI = {10.1109/TPAMI.2020.2978477}, keywords = {Point Set Registration, Computer Vision, fuzzy clusters, registration quality assessment, 3D range scans, branch-and-bound}, abstract = {This study presents a new point set registration method to align 3D range scans. In our method, fuzzy clusters are utilized to represent a scan, and the registration of two given scans is realized by minimizing a fuzzy weighted sum of the distances between their fuzzy cluster centers. This fuzzy cluster-based metric has a broad basin of convergence and is robust to noise. Moreover, this metric provides analytic gradients, allowing standard gradient-based algorithms to be applied for optimization. Based on this metric, the outlier issues are addressed. In addition, for the first time in rigid point set registration, a registration quality assessment in the absence of ground truth is provided. Furthermore, given specified rotation and translation spaces, we derive the upper and lower bounds of the fuzzy cluster-based metric and develop a branch-and-bound (BnB)-based optimization scheme, which can globally minimize the metric regardless of the initialization. This optimization scheme is performed in an efficient coarse-to-fine fashion: First, fuzzy clustering is applied to describe each of the two given scans by a small number of fuzzy clusters. Then, a global search, which integrates BnB and gradient-based algorithms, is implemented to achieve a coarse alignment for the two scans. During the global search, the registration quality assessment offers a beneficial stop criterion to detect whether a good result is obtained. Afterwards, a relatively large number of points of the two scans are directly taken as the fuzzy cluster centers, and then, the coarse solution is refined to be an exact alignment using the gradient-based local convergence. Compared to existing counterparts, this optimization scheme makes a large improvementin terms of robustness and efficiency by virtue of the fuzzy cluster-based metric and the registration quality assessment. In the experiments, the registration results of several 3D range scan pairs demonstrate the accuracy and effectiveness of the proposed method, as well as its superiority to state-of-the-art registration approaches. }, year = {2021} } @article{Arunachalam1548959, author = {Arunachalam, Ajay and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Computers & electrical engineering}, eid = {107098}, title = {Real-time plant phenomics under robotic farming setup : A vision-based platform for complex plant phenotyping tasks}, volume = {92}, DOI = {10.1016/j.compeleceng.2021.107098}, keywords = {Automation, Computer vision, Image processing, Object localization, Pattern recognition, Perception, Phenotype, Plant science, Precision agriculture, Robotics, Spectral}, abstract = {Plant phenotyping in general refers to quantitative estimation of the plant's anatomical, ontogenetical, physiological and biochemical properties. Analyzing big data is challenging, and non-trivial given the different complexities involved. Efficient processing and analysis pipelines are the need of the hour with the increasing popularity of phenotyping technologies and sensors. Through this work, we largely address the overlapping object segmentation & localization problem. Further, we dwell upon multi-plant pipelines that pose challenges as detection and multi-object tracking becomes critical for single frame/set of frames aimed towards uniform tagging & visual features extraction. A plant phenotyping tool named RTPP (Real-Time Plant Phenotyping) is presented that can aid in the detection of single/multi plant traits, modeling, and visualization for agricultural settings. We compare our system with the plantCV platform. The relationship of the digital estimations, and the measured plant traits are discussed that plays a vital roadmap towards precision farming and/or plant breeding. }, year = {2021} } @article{Chadalavada1374911, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Schindler, Maike and Palm, Rainer and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {Faculty of Human Sciences, University of Cologne, Germany}, journal = {Robotics and Computer-Integrated Manufacturing}, note = {Funding Agencies:KKS SIDUS project AIR: "Action and Intention Recognition in Human Interaction with Autonomous Systems"  20140220H2020 project ILIAD: "Intra-Logistics with Integrated Automatic Deployment: Safe and Scalable Fleets in Shared Spaces"  732737}, eid = {101830}, title = {Bi-directional navigation intent communication using spatial augmented reality and eye-tracking glasses for improved safety in human-robot interaction}, volume = {61}, DOI = {10.1016/j.rcim.2019.101830}, keywords = {Human-robot interaction (HRI), Mobile robots, Intention communication, Eye-tracking, Intention recognition, Spatial augmented reality, Stimulated recall interview, Obstacle avoidance, Safety, Logistics}, abstract = {Safety, legibility and efficiency are essential for autonomous mobile robots that interact with humans. A key factor in this respect is bi-directional communication of navigation intent, which we focus on in this article with a particular view on industrial logistic applications. In the direction robot-to-human, we study how a robot can communicate its navigation intent using Spatial Augmented Reality (SAR) such that humans can intuitively understand the robot's intention and feel safe in the vicinity of robots. We conducted experiments with an autonomous forklift that projects various patterns on the shared floor space to convey its navigation intentions. We analyzed trajectories and eye gaze patterns of humans while interacting with an autonomous forklift and carried out stimulated recall interviews (SRI) in order to identify desirable features for projection of robot intentions. In the direction human-to-robot, we argue that robots in human co-habited environments need human-aware task and motion planning to support safety and efficiency, ideally responding to people's motion intentions as soon as they can be inferred from human cues. Eye gaze can convey information about intentions beyond what can be inferred from the trajectory and head pose of a person. Hence, we propose eye-tracking glasses as safety equipment in industrial environments shared by humans and robots. In this work, we investigate the possibility of human-to-robot implicit intention transference solely from eye gaze data and evaluate how the observed eye gaze patterns of the participants relate to their navigation decisions. We again analyzed trajectories and eye gaze patterns of humans while interacting with an autonomous forklift for clues that could reveal direction intent. Our analysis shows that people primarily gazed on that side of the robot they ultimately decided to pass by. We discuss implications of these results and relate to a control approach that uses human gaze for early obstacle avoidance. }, year = {2020} } @inproceedings{Sun1524100, author = {Sun, L. and Adolfsson, Daniel and Magnusson, Martin and Andreasson, Henrik and Posner, I. and Duckett, T.}, booktitle = {2020 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Lincoln Centre for Autonomous Systems (L-CAS), University of Lincoln, UK}, institution = {University of Oxford, Oxford, UK}, institution = {Lincoln Centre for Autonomous Systems (L-CAS), University of Lincoln, UK}, note = {Funding agency:UK Research {\&}amp; Innovation (UKRI)Engineering {\&}amp; Physical Sciences Research Council (EPSRC) EP/M019918/1}, pages = {4386--4392}, title = {Localising Faster : Efficient and precise lidar-based robot localisation in large-scale environments}, series = {IEEE International Conference on Robotics and Automation (ICRA)}, DOI = {10.1109/ICRA40945.2020.9196708}, keywords = {Gaussian processes, learning (artificial intelligence), mobile robots, Monte Carlo methods, neural nets, optical radar, path planning, recursive estimation, robot vision, SLAM (robots), precise lidar-based robot localisation, large-scale environments, global localisation, Monte Carlo Localisation, MCL, fast localisation system, deep-probabilistic model, Gaussian process regression, deep kernel, precise recursive estimator, Gaussian method, deep probabilistic localisation, large-scale localisation, largescale environment, time 0.8 s, size 0.75 m, Robots, Neural networks, Three-dimensional displays, Laser radar, Kernel}, abstract = {This paper proposes a novel approach for global localisation of mobile robots in large-scale environments. Our method leverages learning-based localisation and filtering-based localisation, to localise the robot efficiently and precisely through seeding Monte Carlo Localisation (MCL) with a deeplearned distribution. In particular, a fast localisation system rapidly estimates the 6-DOF pose through a deep-probabilistic model (Gaussian Process Regression with a deep kernel), then a precise recursive estimator refines the estimated robot pose according to the geometric alignment. More importantly, the Gaussian method (i.e. deep probabilistic localisation) and nonGaussian method (i.e. MCL) can be integrated naturally via importance sampling. Consequently, the two systems can be integrated seamlessly and mutually benefit from each other. To verify the proposed framework, we provide a case study in large-scale localisation with a 3D lidar sensor. Our experiments on the Michigan NCLT long-term dataset show that the proposed method is able to localise the robot in 1.94 s on average (median of 0.8 s) with precision 0.75 m in a largescale environment of approximately 0.5 km 2. }, ISBN = {978-1-7281-7396-2}, ISBN = {978-1-7281-7395-5}, year = {2020} } @inproceedings{Kurtser1414586, author = {Kurtser, Polina and Ringdahl, Ola and Rotstein, Nati and Andreasson, Henrik}, booktitle = {Proceedings of the Northern Lights Deep Learning Workshop : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing science, Umeå University, Umeå, Sweden}, institution = {Department of Industrial Engineering and Management, Ben-Gurion University of the Negev, Beer Sheva, Israel}, institution = {Centre for Applied Autonomous Sensor Systems}, pages = {1--6}, publisher = {NLDL}, title = {PointNet and geometric reasoning for detection of grape vines from single frame RGB-D data in outdoor conditions}, volume = {1}, DOI = {10.7557/18.5155}, keywords = {RGBD, Deep-learning, Agricultural robotics, outdoor vision, grape}, abstract = {In this paper we present the usage of PointNet, a deep neural network that consumes raw un-ordered point clouds, for detection of grape vine clusters in outdoor conditions. We investigate the added value of feeding the detection network with both RGB and depth, contradictory to common practice in agricultural robotics of relying on RGB only. A total of 5057 pointclouds (1033 manually annotated and 4024 annotated using geometric reasoning) were collected in a field experiment conducted in outdoor conditions on 9 grape vines and 5 plants. The detection results show overall accuracy of 91% (average class accuracy of 74%, precision 53% recall 48%) for RGBXYZ data and a significant drop in recall for RGB or XYZ data only. These results suggest the usage of depth cameras for vision in agricultural robotics is crucial for crops where the color contrast between the crop and the background is complex. The results also suggest geometric reasoning can be used for increased training set size, a major bottleneck in the development of agricultural vision systems. }, year = {2020} } @inproceedings{Kurtser1521090, author = {Kurtser, Polina and Hanell, Ulf and Andreasson, Henrik}, booktitle = {2020 IEEE 16th International Conference on Automation Science and Engineering (CASE) : }, institution = {Örebro University, School of Science and Technology}, pages = {1558--1565}, title = {Robotic Platform for Precise Mechanical Stress Induction in Greenhouses Cultivation}, series = {IEEE International Conference on Automation Science and Engineering}, DOI = {10.1109/CASE48305.2020.9249229}, keywords = {Robotics in Agriculture and Forestry, Agricultural Automation, Industrial Robots}, abstract = {This paper presents an autonomous robotic platform for research of mechanically induced stress in plants growing in controlled greenhouse conditions. The platform provides a range of possibilities for mechanical stimuli including motion type, frequency, speed, and torque. The motions can be tailored for a single pot, making study of mechanical plant stress versatile, rapid and precise. We evaluate the performance of the platform for a use-case of basil plant cultivation. An eight week experiment was performed in greenhouse conditions on 220 basil plants. We show that the induction of mechanical stress by the platform significantly affects plant morphology, such as shortening stem length by 30 % -40 % and inter-node length by 50 % -80 %, while preserving leaf weight which is the main part of the basil plant used for culinary purposes. Results also show that variations in types of mechanical stimuli motions provides significant differences in the effect on plant morphology. Finally we show that decreasing the mechanical stimuli frequency to rates feasible to be performed manually significantly reduces the effect, stressing the need for autonomous systems capable of providing continuous stimuli during day and night. These results validate previously published findings in research of mechanical stress induction, and therefore implies the platform can be used for research of this phenomena. }, ISBN = {978-1-7281-6905-7}, ISBN = {978-1-7281-6904-0}, year = {2020} } @inproceedings{Adolfsson1391182, author = {Adolfsson, Daniel and Lowry, Stephanie and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, booktitle = {2019 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, title = {A Submap per Perspective : Selecting Subsets for SuPer Mapping that Afford Superior Localization Quality}, DOI = {10.1109/ECMR.2019.8870941}, abstract = {This paper targets high-precision robot localization. We address a general problem for voxel-based map representations that the expressiveness of the map is fundamentally limited by the resolution since integration of measurements taken from different perspectives introduces imprecisions, and thus reduces localization accuracy.We propose SuPer maps that contain one Submap per Perspective representing a particular view of the environment. For localization, a robot then selects the submap that best explains the environment from its perspective. We propose SuPer mapping as an offline refinement step between initial SLAM and deploying autonomous robots for navigation. We evaluate the proposed method on simulated and real-world data that represent an important use case of an industrial scenario with high accuracy requirements in an repetitive environment. Our results demonstrate a significantly improved localization accuracy, up to 46% better compared to localization in global maps, and up to 25% better compared to alternative submapping approaches. }, ISBN = {978-1-7281-3605-9}, year = {2019} } @inproceedings{Chadalavada1391172, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Schindler, Maike and Lilienthal, Achim J.}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {Faculty of Human Sciences, University of Cologne, Germany, Cologne, Gemany}, title = {Implicit intention transference using eye-tracking glasses for improved safety in human-robot interaction}, keywords = {Human-robot interaction, intention communication, eye tracking, spatial augmented reality, electrodermal activity, stress, cognitive load.}, abstract = {Eye gaze can convey information about intentions beyond what can beinferred from the trajectory and head pose of a person. We propose eye-trackingglasses as safety equipment in industrial environments shared by humans androbots. In this work, an implicit intention transference system was developed and implemented. Robot was given access to human eye gaze data, and it responds tothe eye gaze data through spatial augmented reality projections on the sharedfloor space in real-time and the robot could also adapt its path. This allows proactivesafety approaches in HRI for example by attempting to get the human'sattention when they are in the vicinity of a moving robot. A study was conductedwith workers at an industrial warehouse. The time taken to understand the behaviorof the system was recorded. Electrodermal activity and pupil diameter wererecorded to measure the increase in stress and cognitive load while interactingwith an autonomous system, using these measurements as a proxy to quantifytrust in autonomous systems. }, year = {2019} } @article{DellaCorte1291440, author = {Della Corte, Bartolomeo and Andreasson, Henrik and Stoyanov, Todor and Grisetti, Giorgio}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS) }, number = {2}, pages = {902--909}, title = {Unified Motion-Based Calibration of Mobile Multi-Sensor Platforms With Time Delay Estimation}, volume = {4}, DOI = {10.1109/LRA.2019.2892992}, keywords = {Calibration and Identification}, abstract = {The ability to maintain and continuously update geometric calibration parameters of a mobile platform is a key functionality for every robotic system. These parameters include the intrinsic kinematic parameters of the platform, the extrinsic parameters of the sensors mounted on it, and their time delays. In this letter, we present a unified pipeline for motion-based calibration of mobile platforms equipped with multiple heterogeneous sensors. We formulate a unified optimization problem to concurrently estimate the platform kinematic parameters, the sensors extrinsic parameters, and their time delays. We analyze the influence of the trajectory followed by the robot on the accuracy of the estimate. Our framework automatically selects appropriate trajectories to maximize the information gathered and to obtain a more accurate parameters estimate. In combination with that, our pipeline observes the parameters evolution in long-term operation to detect possible values change in the parameters set. The experiments conducted on real data show a smooth convergence along with the ability to detect changes in parameters value. We release an open-source version of our framework to the community. }, year = {2019} } @inproceedings{Pecora1178913, author = {Pecora, Federico and Andreasson, Henrik and Mansouri, Masoumeh and Petkov, Vilian}, booktitle = {Proceedings of the International Conference on Automated Planning and Scheduling : }, institution = {Örebro University, School of Science and Technology}, institution = {Technical University of Varna, Varna, Bulgaria}, pages = {485--493}, eid = {139850}, title = {A Loosely-Coupled Approach for Multi-Robot Coordination, Motion Planning and Control, ICAPS}, volume = {2018-June}, abstract = {Deploying fleets of autonomous robots in real-world applications requires addressing three problems: motion planning, coordination, and control. Application-specific features of the environment and robots often narrow down the possible motion planning and control methods that can be used. This paper proposes a lightweight coordination method that implements a high-level controller for a fleet of potentially heterogeneous robots. Very few assumptions are made on robot controllers, which are required only to be able to accept set point updates and to report their current state. The approach can be used with any motion planning method for computing kinematically-feasible paths. Coordination uses heuristics to update priorities while robots are in motion, and a simple model of robot dynamics to guarantee dynamic feasibility. The approach avoids a priori discretization of the environment or of robot paths, allowing robots to “follow each other” through critical sections. We validate the method formally and experimentally with different motion planners and robot controllers, in simulation and with real robots. }, year = {2018} } @inproceedings{Chadalavada1270176, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Schindler, Maike and Palm, Rainer and Lilienthal, Achim}, booktitle = {Advances in Manufacturing Technology XXXII : Proceedings of the 16th International Conference on Manufacturing Research, incorporating the 33rd National Conference on Manufacturing Research, September 11–13, 2018, University of Skövde, Sweden}, institution = {Örebro University, School of Science and Technology}, pages = {253--258}, title = {Accessing your navigation plans! Human-Robot Intention Transfer using Eye-Tracking Glasses}, series = {Advances in Transdisciplinary Engineering}, number = {8}, DOI = {10.3233/978-1-61499-902-7-253}, keywords = {Human-Robot Interaction (HRI), Eye-tracking, Eye-Tracking Glasses, Navigation Intent, Implicit Intention Transference, Obstacle avoidance.}, abstract = {Robots in human co-habited environments need human-aware task and motion planning, ideally responding to people’s motion intentions as soon as they can be inferred from human cues. Eye gaze can convey information about intentions beyond trajectory and head pose of a person. Hence, we propose eye-tracking glasses as safety equipment in industrial environments shared by humans and robots. This paper investigates the possibility of human-to-robot implicit intention transference solely from eye gaze data.  We present experiments in which humans wearing eye-tracking glasses encountered a small forklift truck under various conditions. We evaluate how the observed eye gaze patterns of the participants related to their navigation decisions. Our analysis shows that people primarily gazed on that side of the robot they ultimately decided to pass by. We discuss implications of these results and relate to a control approach that uses human eye gaze for early obstacle avoidance. }, ISBN = {978-1-61499-901-0}, ISBN = {978-1-61499-902-7}, year = {2018} } @inproceedings{Adolfsson1282987, author = {Adolfsson, Daniel and Lowry, Stephanie and Andreasson, Henrik}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Improving Localisation Accuracy using Submaps in warehouses}, abstract = {This paper presents a method for localisation in hybrid metric-topological maps built using only local information that is, only measurements that were captured by the robot when it was in a nearby location. The motivation is that observations are typically range and viewpoint dependent and that a map a discrete map representation might not be able to explain the full structure within a voxel. The localisation system uses a method to select submap based on how frequently and where from each submap was updated. This allow the system to select the most descriptive submap, thereby improving the localisation and increasing performance by up to 40%. }, year = {2018} } @article{Lowry1178647, author = {Lowry, Stephanie and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation}, number = {2}, pages = {957--964}, title = {Lightweight, Viewpoint-Invariant Visual Place Recognition in Changing Environments}, volume = {3}, DOI = {10.1109/LRA.2018.2793308}, keywords = {Visual-based navigation, recognition, localization}, abstract = {This paper presents a viewpoint-invariant place recognition algorithm which is robust to changing environments while requiring only a small memory footprint. It demonstrates that condition-invariant local features can be combined with Vectors of Locally Aggregated Descriptors (VLAD) to reduce high-dimensional representations of images to compact binary signatures while retaining place matching capability across visually dissimilar conditions. This system provides a speed-up of two orders of magnitude over direct feature matching, and outperforms a bag-of-visual-words approach with near-identical computation speed and memory footprint. The experimental results show that single-image place matching from non-aligned images can be achieved in visually changing environments with as few as 256 bits (32 bytes) per image. }, year = {2018} } @inproceedings{Lowry1238392, author = {Lowry, Stephanie and Andreasson, Henrik}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS)}, pages = {7262--7269}, title = {LOGOS : Local geometric support for high-outlier spatial verification}, abstract = {This paper presents LOGOS, a method of spatial verification for visual localization that is robust in the presence of a high proportion of outliers. LOGOS uses scale and orientation information from local neighbourhoods of features to determine which points are likely to be inliers. The inlier points can be used for secondary localization verification and pose estimation. LOGOS is demonstrated on a number of benchmark localization datasets and outperforms RANSAC as a method of outlier removal and localization verification in scenarios that require robustness to many outliers. }, year = {2018} } @inproceedings{Andreasson1159885, author = {Andreasson, Henrik and Adolfsson, Daniel and Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim}, booktitle = {2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {1389--1395}, title = {Incorporating Ego-motion Uncertainty Estimates in Range Data Registration}, series = {Proceedings of the ... IEEE/RSJ International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2017.8202318}, abstract = {Local scan registration approaches commonlyonly utilize ego-motion estimates (e.g. odometry) as aninitial pose guess in an iterative alignment procedure. Thispaper describes a new method to incorporate ego-motionestimates, including uncertainty, into the objective function of aregistration algorithm. The proposed approach is particularlysuited for feature-poor and self-similar environments,which typically present challenges to current state of theart registration algorithms. Experimental evaluation showssignificant improvements in accuracy when using data acquiredby Automatic Guided Vehicles (AGVs) in industrial productionand warehouse environments. }, ISBN = {978-1-5386-2682-5}, ISBN = {978-1-5386-2683-2}, year = {2017} } @inproceedings{Magnusson1151027, author = {Magnusson, Martin and Kucner, Tomasz Piotr and Gholami Shahbandi, Saeed and Andreasson, Henrik and Lilienthal, Achim}, booktitle = {2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, institution = {IS lab, Halmstad University, Halmstad, Sweden}, note = {Iliad Project: http://iliad-project.eu}, pages = {620--625}, title = {Semi-Supervised 3D Place Categorisation by Descriptor Clustering}, series = {Proceedings of the ... IEEE/RSJ International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2017.8202216}, abstract = {Place categorisation; i. e., learning to group perception data into categories based on appearance; typically uses supervised learning and either visual or 2D range data. This paper shows place categorisation from 3D data without any training phase. We show that, by leveraging the NDT histogram descriptor to compactly encode 3D point cloud appearance, in combination with standard clustering techniques, it is possible to classify public indoor data sets with accuracy comparable to, and sometimes better than, previous supervised training methods. We also demonstrate the effectiveness of this approach to outdoor data, with an added benefit of being able to hierarchically categorise places into sub-categories based on a user-selected threshold. This technique relieves users of providing relevant training data, and only requires them to adjust the sensitivity to the number of place categories, and provide a semantic label to each category after the process is completed. }, ISBN = {978-1-5386-2682-5}, ISBN = {978-1-5386-2683-2}, year = {2017} } @inproceedings{Mielle1155435, author = {Mielle, Malcolm and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {2017 IEEE International Symposium on Safety, Security and Rescue Robotics (SSRR) : }, institution = {Örebro University, School of Science and Technology}, note = {Funding Agency:EU  ICT-23-2014645101}, pages = {35--40}, eid = {8088137}, title = {SLAM auto-complete : completing a robot map using an emergency map}, DOI = {10.1109/SSRR.2017.8088137}, keywords = {SLAM, robotics, graph, graph SLAM, emergency map, rescue, exploration, auto complete, SLAM, robotics, graph, graph SLAM, plan de secours, sauvetage, exploration, auto complete}, abstract = {In search and rescue missions, time is an important factor; fast navigation and quickly acquiring situation awareness might be matters of life and death. Hence, the use of robots in such scenarios has been restricted by the time needed to explore and build a map. One way to speed up exploration and mapping is to reason about unknown parts of the environment using prior information. While previous research on using external priors for robot mapping mainly focused on accurate maps or aerial images, such data are not always possible to get, especially indoor. We focus on emergency maps as priors for robot mapping since they are easy to get and already extensively used by firemen in rescue missions. However, those maps can be outdated, information might be missing, and the scales of rooms are typically not consistent. We have developed a formulation of graph-based SLAM that incorporates information from an emergency map. The graph-SLAM is optimized using a combination of robust kernels, fusing the emergency map and the robot map into one map, even when faced with scale inaccuracies and inexact start poses. We typically have more than 50% of wrong correspondences in the settings studied in this paper, and the method we propose correctly handles them. Experiments in an office environment show that we can handle up to 70% of wrong correspondences and still get the expected result. The robot can navigate and explore while taking into account places it has not yet seen. We demonstrate this in a test scenario and also show that the emergency map is enhanced by adding information not represented such as closed doors or new walls. }, ISBN = {978-1-5386-3923-8}, ISBN = {978-1-5386-3924-5}, year = {2017} } @inproceedings{Mielle1151040, author = {Mielle, Malcolm and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Using emergency maps to add not yet explored places into SLAM}, keywords = {Search and Rescue Robots, SLAM, Mapping}, abstract = {While using robots in search and rescue missions would help ensure the safety of first responders, a key issue is the time needed by the robot to operate. Even though SLAM is faster and faster, it might still be too slow to enable the use of robots in critical situations. One way to speed up operation time is to use prior information. We aim at integrating emergency-maps into SLAM to complete the SLAM map with information about not yet explored part of the environment. By integrating prior information, we can speed up exploration time or provide valuable prior information for navigation, for example, in case of sensor blackout/failure. However, while extensively used by firemen in their operations, emergency maps are not easy to integrate in SLAM since they are often not up to date or with non consistent scales. The main challenge we are tackling is in dealing with the imperfect scale of the rough emergency maps and integrate it with the online SLAM map in addition to challenges due to incorrect matches between these two types of map. We developed a formulation of graph-based SLAM incorporating information from an emergency map into SLAM, and propose a novel optimization process adapted to this formulation. We extract corners from the emergency map and the SLAM map, in between which we find correspondences using a distance measure. We then build a graph representation associating information from the emergency map and the SLAM map. Corners in the emergency map, corners in the robot map, and robot poses are added as nodes in the graph, while odometry, corner observations, walls in the emergency map, and corner associations are added as edges. To conserve the topology of the emergency map, but correct its possible errors in scale, edges representing the emergency map's walls are given a covariance so that they are easy to extend or shrink but hard to rotate. Correspondences between corners represent a zero transformation for the optimization to match them as close as possible. The graph optimization is done by using a combination robust kernels. We first use the Huber kernel, to converge toward a good solution, followed by Dynamic Covariance Scaling, to handle the remaining errors. We demonstrate our system in an office environment. We run the SLAM online during the exploration. Using the map enhanced by information from the emergency map, the robot was able to plan the shortest path toward a place it has not yet explored. This capability can be a real asset in complex buildings where exploration can take up a long time. It can also reduce exploration time by avoiding exploration of dead-ends, or search of specific places since the robot knows where it is in the emergency map. }, year = {2017} } @article{Rituerto931985, author = {Rituerto, Alejandro and Andreasson, Henrik and Murillo, Ana C. and Lilienthal, Achim and Jesus Guerrero, Jose}, institution = {Örebro University, School of Science and Technology}, institution = {Instituto de Investigación en Ingeniería de Aragón, Deptartmento de Informática e Ingeniería de Sistemas, University of Zaragoza, Zaragoza, Spain}, institution = {Instituto de Investigación en Ingeniería de Aragón, Deptartmento de Informática e Ingeniería de Sistemas, University of Zaragoza, Zaragoza, Spain}, institution = {Instituto de Investigación en Ingeniería de Aragón, Deptartmento de Informática e Ingeniería de Sistemas, University of Zaragoza, Zaragoza, Spain}, journal = {Sensors}, note = {Funding Agencies:Spanish Government European Union DPI2015-65962-R}, number = {4}, eid = {493}, publisher = {MDPI AG}, title = {Building an Enhanced Vocabulary of the Robot Environment with a Ceiling Pointing Camera}, volume = {16}, DOI = {10.3390/s16040493}, keywords = {visual vocabulary, computer vision, bag of words, robotics, place recognition, environment description}, abstract = {Mobile robots are of great help for automatic monitoring tasks in different environments. One of the first tasks that needs to be addressed when creating these kinds of robotic systems is modeling the robot environment. This work proposes a pipeline to build an enhanced visual model of a robot environment indoors. Vision based recognition approaches frequently use quantized feature spaces, commonly known as Bag of Words (BoW) or vocabulary representations. A drawback using standard BoW approaches is that semantic information is not considered as a criteria to create the visual words. To solve this challenging task, this paper studies how to leverage the standard vocabulary construction process to obtain a more meaningful visual vocabulary of the robot work environment using image sequences. We take advantage of spatio-temporal constraints and prior knowledge about the position of the camera. The key contribution of our work is the definition of a new pipeline to create a model of the environment. This pipeline incorporates (1) tracking information to the process of vocabulary construction and (2) geometric cues to the appearance descriptors. Motivated by long term robotic applications, such as the aforementioned monitoring tasks, we focus on a configuration where the robot camera points to the ceiling, which captures more stable regions of the environment. The experimental validation shows how our vocabulary models the environment in more detail than standard vocabulary approaches, without loss of recognition performance. We show different robotic tasks that could benefit of the use of our visual vocabulary approach, such as place recognition or object discovery. For this validation, we use our publicly available data-set. }, year = {2016} } @inproceedings{Chadalavada1070994, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Krug, Robert and Lilienthal, Achim}, booktitle = {Proceedings of RSS Workshop "Social Trust in Autonomous Robots 2016" : }, institution = {Örebro University, School of Science and Technology}, title = {Empirical evaluation of human trust in an expressive mobile robot}, keywords = {Human robot interaction, hri, mobile robot, trust, evaluation}, abstract = {A mobile robot communicating its intentions using Spatial Augmented Reality (SAR) on the shared floor space makes humans feel safer and more comfortable around the robot. Our previous work [1] and several other works established this fact. We built upon that work by adding an adaptable information and control to the SAR module. An empirical study about how a mobile robot builds trust in humans by communicating its intentions was conducted. A novel way of evaluating that trust is presented and experimentally shown that adaption in SAR module lead to natural interaction and the new evaluation system helped us discover that the comfort levels between human-robot interactions approached those of human-human interactions. }, year = {2016} } @misc{Andreasson914493, author = {Andreasson, Henrik and Berglund, Sara and Waller, Anna}, institution = {Örebro University, Örebro University School of Business}, pages = {37}, school = {Örebro University, Örebro University School of Business}, school = {Örebro University, Örebro University School of Business}, school = {Örebro University, Örebro University School of Business}, title = {En r{\aa}dgivares f{\"o}rtroendeskapande En studie om hur finansiella r{\aa}dgivare skapar f{\"o}rtroende hos kunden i kundm{\"o}tet}, year = {2016} } @article{Mansouri941273, author = {Mansouri, Masoumeh and Andreasson, Henrik and Pecora, Federico}, institution = {Örebro University, School of Science and Technology}, journal = {Acta Polytechnica}, number = {1}, pages = {47--56}, publisher = {Czech Technical University in Prague}, title = {Hybrid Reasoning for Multi-robot Drill Planning in Open-pit Mines}, volume = {56}, DOI = {10.14311/APP.2016.56.0047}, keywords = {robot planning, multi-robot coordination, on-line reasoning}, abstract = {Fleet automation often involves solving several strongly correlated sub-problems, including task allocation, motion planning, and coordination. Solutions need to account for very specific, domaindependent constraints. In addition, several aspects of the overall fleet management problem become known only online. We propose a method for solving the fleet-management problem grounded on a heuristically-guided search in the space of mutually feasible solutions to sub-problems. We focus on a mining application which requires online contingency handling and accommodating many domainspecific constraints. As contingencies occur, efficient reasoning is performed to adjust the plan online for the entire fleet. }, year = {2016} } @inproceedings{Mosberger1057245, author = {Mosberger, Rafael and Schaffernicht, Erik and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {4131--4136}, title = {Inferring human body posture information from reflective patterns of protective work garments}, DOI = {10.1109/IROS.2016.7759608}, keywords = {Computer Vision, Human Detection, Reflective Clothing, Image Segmentation, Active Illumination, Infrared Vision}, abstract = {We address the problem of extracting human body posture labels, upper body orientation and the spatial location of individual body parts from near-infrared (NIR) images depicting patterns of retro-reflective markers. The analyzed patterns originate from the observation of humans equipped with protective high-visibility garments that represent common safety equipment in the industrial sector. Exploiting the shape of the observed reflectors we adopt shape matching based on the chamfer distance and infer one of seven discrete body posture labels as well as the approximate upper body orientation with respect to the camera. We then proceed to analyze the NIR images on a pixel scale and estimate a figure-ground segmentation together with human body part labels using classification of densely extracted local image patches. Our results indicate a body posture classification accuracy of 80% and figure-ground segmentations with 87% accuracy. }, ISBN = {978-1-5090-3762-9}, year = {2016} } @inproceedings{Bunz1071024, author = {Bunz, Elsa and Chadalavada, Ravi Teja and Andreasson, Henrik and Krug, Robert and Schindler, Maike and Lilienthal, Achim}, booktitle = {Proceedings of RO-MAN 2016 Workshop : Workshop on Communicating Intentions in Human-Robot Interaction}, institution = {Örebro University, School of Science and Technology}, institution = {Örebro University, Örebro, Sweden}, title = {Spatial Augmented Reality and Eye Tracking for Evaluating Human Robot Interaction}, abstract = {Freely moving autonomous mobile robots may leadto anxiety when operating in workspaces shared with humans.Previous works have given evidence that communicating in-tentions using Spatial Augmented Reality (SAR) in the sharedworkspace will make humans more comfortable in the vicinity ofrobots. In this work, we conducted experiments with the robotprojecting various patterns in order to convey its movementintentions during encounters with humans. In these experiments,the trajectories of both humans and robot were recorded witha laser scanner. Human test subjects were also equipped withan eye tracker. We analyzed the eye gaze patterns and thelaser scan tracking data in order to understand how the robot’sintention communication affects the human movement behavior.Furthermore, we used retrospective recall interviews to aid inidentifying the reasons that lead to behavior changes. }, year = {2016} } @article{Krug1044259, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = { University of Pisa, Pisa, Italy}, journal = {IEEE Robotics and Automation Letters}, number = {1}, pages = {546--553}, title = {The Next Step in Robot Commissioning : Autonomous Picking and Palletizing}, volume = {1}, DOI = {10.1109/LRA.2016.2519944}, keywords = {Logistics, grasping, autonomous vehicle navigation, robot safety, mobile manipulation}, abstract = {So far, autonomous order picking (commissioning) systems have not been able to meet the stringent demands regarding speed, safety, and accuracy of real-world warehouse automation, resulting in reliance on human workers. In this letter, we target the next step in autonomous robot commissioning: automatizing the currently manual order picking procedure. To this end, we investigate the use case of autonomous picking and palletizing with a dedicated research platform and discuss lessons learned during testing in simplified warehouse settings. The main theoretical contribution is a novel grasp representation scheme which allows for redundancy in the gripper pose placement. This redundancy is exploited by a local, prioritized kinematic controller which generates reactive manipulator motions on-the-fly. We validated our grasping approach by means of a large set of experiments, which yielded an average grasp acquisition time of 23.5 s at a success rate of 94.7%. Our system is able to autonomously carry out simple order picking tasks in a humansafe manner, and as such serves as an initial step toward future commercial-scale in-house logistics automation solutions. }, year = {2016} } @inproceedings{Siddiqui945980, author = {Siddiqui, J. Rafid and Andreasson, Henrik and Driankov, Dimiter and Lilienthal, Achim J.}, booktitle = {2016 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {5766--5773}, eid = {7487800}, title = {Towards visual mapping in industrial environments : a heterogeneous task-specific and saliency driven approach}, series = {IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2016.7487800}, keywords = {Image color analysis, Object detection, Robot sensing systems, Service robots, Training, Visualization}, abstract = {The highly percipient nature of human mind in avoiding sensory overload is a crucial factor which gives human vision an advantage over machine vision, the latter has otherwise powerful computational resources at its disposal given today’s technology. This stresses the need to focus on methods which extract a concise representation of the environment inorder to approach a complex problem such as visual mapping. This article is an attempt of creating a mapping system, which proposes an architecture that combines task-specific and saliency driven approaches. The proposed method is implemented on a warehouse robot. The proposed solution provide a priority framework which enables an industrial robot to build a concise visual representation of the environment. The method is evaluated on data collected by a RGBD sensor mounted on a fork-lift robot and shows promise for addressing visual mapping problems in industrial environments. }, ISBN = {978-146738026-3}, year = {2016} } @inproceedings{Lowry1079851, author = {Lowry, Stephanie and Andreasson, Henrik}, booktitle = {Visual Place Recognition: What is it Good For? workshop, Robotics : Science and Systems (RSS) 2016}, institution = {Örebro University, School of Science and Technology}, title = {Visual place recognition techniques for pose estimation in changing environments}, abstract = {This paper investigates whether visual place recognition techniques can be used to provide pose estimation information for a visual SLAM system operating long-term in an environment where the appearance may change a great deal. It demonstrates that a combination of a conventional SURF feature detector and a condition-invariant feature descriptor such as HOG or conv3 can provide a method of determining the relative transformation between two images, even when there is both appearance change and rotation or viewpoint change. }, year = {2016} } @article{Andreasson807693, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and Cirillo, Marcello and Dimitrov, Dimitar Nikolaev and Driankov, Dimiter and Karlsson, Lars and Lilienthal, Achim J. and Pecora, Federico and Saarinen, Jari Pekka and Sherikov, Aleksander and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {INRIA - Grenoble, Meylan, France}, institution = {Aalto University, Espo, Finland }, institution = {Centre de recherche Grenoble Rhône-Alpes, Grenoble, France }, journal = {IEEE robotics & automation magazine}, number = {1}, pages = {64--75}, title = {Autonomous transport vehicles : where we are and what is missing}, volume = {22}, DOI = {10.1109/MRA.2014.2381357}, keywords = {Intelligent vehicles; Mobile robots; Resource management; Robot kinematics; Trajectory; Vehicle dynamics}, abstract = {In this article, we address the problem of realizing a complete efficient system for automated management of fleets of autonomous ground vehicles in industrial sites. We elicit from current industrial practice and the scientific state of the art the key challenges related to autonomous transport vehicles in industrial environments and relate them to enabling techniques in perception, task allocation, motion planning, coordination, collision prediction, and control. We propose a modular approach based on least commitment, which integrates all modules through a uniform constraint-based paradigm. We describe an instantiation of this system and present a summary of the results, showing evidence of increased flexibility at the control level to adapt to contingencies. }, year = {2015} } @inproceedings{Andreasson894653, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA), 2015 : }, institution = {Örebro University, School of Science and Technology}, institution = {SCANIA AB, Södertälje, Sweden}, pages = {662--669}, title = {Fast, continuous state path smoothing to improve navigation accuracy}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2015.7139250}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not be widely adopted in commercial AGV systems. The main contribution of this paper addresses this shortcoming by introducing a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. In real world tests presented in this paper we demonstrate that the proposed approach is fast enough for online use (it computes trajectories faster than they can be driven) and highly accurate. In 100 repetitions we achieve mean end-point pose errors below 0.01 meters in translation and 0.002 radians in orientation. Even the maximum errors are very small: only 0.02 meters in translation and 0.008 radians in orientation. }, ISBN = {9781479969234}, year = {2015} } @inproceedings{Mosberger891476, author = {Mosberger, Rafael and Leibe, Bastian and Andreasson, Henrik and Lilienthal, Achim}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Aachen University, Aachen, Germany}, pages = {697--703}, title = {Multi-band Hough Forests for detecting humans with Reflective Safety Clothing from mobile machinery}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2015.7139255}, keywords = {Human Detection, Robot Vision, Industrial Safety}, abstract = {We address the problem of human detection from heavy mobile machinery and robotic equipment operating at industrial working sites. Exploiting the fact that workers are typically obliged to wear high-visibility clothing with reflective markers, we propose a new recognition algorithm that specifically incorporates the highly discriminative features of the safety garments in the detection process. Termed Multi-band Hough Forest, our detector fuses the input from active near-infrared (NIR) and RGB color vision to learn a human appearance model that not only allows us to detect and localize industrial workers, but also to estimate their body orientation. We further propose an efficient pipeline for automated generation of training data with high-quality body part annotations that are used in training to increase detector performance. We report a thorough experimental evaluation on challenging image sequences from a real-world production environment, where persons appear in a variety of upright and non-upright body positions. }, ISBN = {978-1-4799-6923-4}, year = {2015} } @inproceedings{Krug808145, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Bicchi, Antonio and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA) - Workshop on Robotic Hands, Grasping, and Manipulation : }, institution = {Örebro University, School of Science and Technology}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, title = {On Using Optimization-based Control instead of Path-Planning for Robot Grasp Motion Generation}, keywords = {Grasping, Motion Planning, Control}, year = {2015} } @inproceedings{Chadalavada900532, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Krug, Robert and Lilienthal, Achim}, booktitle = {2015 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, publisher = {IEEE conference proceedings}, title = {That’s on my Mind! : Robot to Human Intention Communication through on-board Projection on Shared Floor Space}, DOI = {10.1109/ECMR.2015.7403771}, keywords = {Human Robot Interaction, Intention Communication, Shared spaces}, abstract = {The upcoming new generation of autonomous vehicles for transporting materials in industrial environments will be more versatile, flexible and efficient than traditional AGVs, which simply follow pre-defined paths. However, freely navigating vehicles can appear unpredictable to human workers and thus cause stress and render joint use of the available space inefficient. Here we address this issue and propose on-board intention projection on the shared floor space for communication from robot to human. We present a research prototype of a robotic fork-lift equipped with a LED projector to visualize internal state information and intents. We describe the projector system and discuss calibration issues. The robot’s ability to communicate its intentions is evaluated in realistic situations where test subjects meet the robotic forklift. The results show that already adding simple information, such as the trajectory and the space to be occupied by the robot in the near future, is able to effectively improve human response to the robot. }, ISBN = {978-1-4673-9163-4}, year = {2015} } @inproceedings{Mansouri900492, author = {Mansouri, Masoumeh and Andreasson, Henrik and Pecora, Frederico}, booktitle = {24th International Joint Conference on Artificial Intelligence, Workshop on Hybrid Reasoning : }, institution = {Örebro University, School of Science and Technology}, title = {Towards Hybrid Reasoning for Automated Industrial Fleet Management}, abstract = {More and more industrial applications require fleets of autonomous ground vehicles. Today's solutions to the management of these fleets still largely rely on fixed set-ups of the system, manually specified ad-hoc rules. Our aim is to replace current practice with autonomous fleets and fleet management systems that are easily adaptable to new set-ups and environments, can accommodate human-intelligible rules, and guarantee feasible and meaningful behavior of the fleet. We propose to cast the problem of autonomous fleet management to a meta-CSP that integrates task allocation, coordination and motion planning. We discuss design choices of the approach, and how it caters to the need for hybrid reasoning in terms of symbolic, metric, temporal and spatial constraints. We also comment on a preliminary realization of the system. }, ISBN = {978-1-57735-738-4}, year = {2015} } @article{Mosberger772165, author = {Mosberger, Rafael and Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Sensors}, number = {10}, pages = {17952--17980}, title = {A customized vision system for tracking humans wearing reflective safety clothing from industrial vehicles and machinery}, volume = {14}, DOI = {10.3390/s141017952}, keywords = {infrared vision, human detection, industrial safety, high-visibility clothing}, abstract = {This article presents a novel approach for vision-based detection and tracking of humans wearing high-visibility clothing with retro-reflective markers. Addressing industrial applications where heavy vehicles operate in the vicinity of humans, we deploy a customized stereo camera setup with active illumination that allows for efficient detection of the reflective patterns created by the worker's safety garments. After segmenting reflective objects from the image background, the interest regions are described with local image feature descriptors and classified in order to discriminate safety garments from other reflective objects in the scene. In a final step, the trajectories of the detected humans are estimated in 3D space relative to the camera. We evaluate our tracking system in two industrial real-world work environments on several challenging video sequences. The experimental results indicate accurate tracking performance and good robustness towards partial occlusions, body pose variation, and a wide range of different illumination conditions. }, year = {2014} } @article{Andreasson780236, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics}, number = {4}, pages = {400--416}, publisher = {M D P I AG}, title = {Drive the Drive : From Discrete Motion Plans to Smooth Drivable Trajectories}, volume = {3}, DOI = {10.3390/robotics3040400}, keywords = {Motion planning, motion and path planning, autonomous navigation}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not been widely adopted in commercial AGV systems. The main contribution of this paper is a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. The proposed approach is evaluated in several industrially relevant scenarios and found to be both fast (less than 2 s per vehicle trajectory) and accurate (end-point pose errors below 0.01 m in translation and 0.005 radians in orientation). }, year = {2014} } @incollection{Mosberger780355, author = {Mosberger, Rafael and Andreasson, Henrik}, booktitle = {Field and Service Robotics : Results of the 8th International Conference}, institution = {Örebro University, School of Science and Technology}, pages = {143--157}, title = {Estimating the 3D Position of Humans Wearing a Reflective Vest Using a Single Camera System}, series = {Springer Tracts in Advanced Robotics}, number = {92}, DOI = {10.1007/978-3-642-40686-7_10}, keywords = {People Detection, Industrial Safety, Reflective Vest Detection}, abstract = {This chapter presents a novel possible solution for people detection and estimation of their 3D position in challenging shared environments. Addressing safety critical applications in industrial environments, we make the basic assumption that people wear reflective vests. In order to detect these vests and to discriminate them from other reflective material, we propose an approach based on a single camera equipped with an IR flash. The camera acquires pairs of images, one with and one without IR flash, in short succession. The images forming a pair are then related to each other through feature tracking, which allows to discard features for which the relative intensity difference is small and which are thus not believed to belong to a reflective vest. Next, the local neighbourhood of the remaining features is further analysed. First, a Random Forest classifier is used to discriminate between features caused by a reflective vest and features caused by some other reflective materials. Second, the distance between the camera and the vest features is estimated using a Random Forest regressor. The proposed system was evaluated in one indoor and two challenging outdoor scenarios. Our results indicate very good classification performance and remarkably accurate distance estimation especially in combination with the SURF descriptor, even under direct exposure to sunlight. }, ISBN = {978-3-642-40685-0}, ISBN = {978-3-642-40686-7}, year = {2014} } @incollection{Andreasson780294, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {Field and Service Robotics : Results of the 8th International Conference}, institution = {Örebro University, School of Science and Technology}, institution = {CAISR Centrum för tillämpade intelligenta system (IS-lab), Högskolan i Halmstad, Halmstad, Sweden}, institution = {CAISR Centrum för tillämpade intelligenta system (IS-lab), Högskolan i Halmstad, Halmstad, Sweden}, pages = {585--598}, title = {Gold-Fish SLAM : An Application of SLAM to Localize AGVs}, series = {Springer Tracts in Advanced Robotics}, number = {92}, DOI = {10.1007/978-3-642-40686-7_39}, abstract = {The main focus of this paper is to present a case study of a SLAM solution for Automated Guided Vehicles (AGVs) operating in real-world industrial environments. The studied solution, called Gold-fish SLAM, was implemented to provide localization estimates in dynamic industrial environments, where there are static landmarks that are only rarely perceived by the AGVs. The main idea of Gold-fish SLAM is to consider the goods that enter and leave the environment as temporary landmarks that can be used in combination with the rarely seen static landmarks to compute online estimates of AGV poses. The solution is tested and verified in a factory of paper using an eight ton diesel-truck retrofitted with an AGV control system running at speeds up to 3 m/s. The paper includes also a general discussion on how SLAM can be used in industrial applications with AGVs }, URL = {http://dx.doi.org/10.1007/978-3-642-40686-7_39}, ISBN = {978-3-642-40685-0}, ISBN = {978-3-642-40686-7}, year = {2014} } @inproceedings{Cirillo755510, author = {Cirillo, Marcello and Pecora, Federico and Andreasson, Henrik and Uras, Tansel and Koenig, Sven}, booktitle = {Proceedings of the 24th International Conference on Automated Planning and Scheduling : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, University of Southern California, Los Angeles, USA}, institution = {Department of Computer Science, University of Southern California, Los Angeles, USA}, title = {Integrated Motion Planning and Coordination for Industrial Vehicles}, keywords = {multi-robot coordination, non-holonomic motion planning, scheduling}, abstract = {A growing interest in the industrial sector for autonomous ground vehicles has prompted significant investment in fleet management systems. Such systems need to accommodate on-line externally imposed temporal and spatial requirements, and to adhere to them even in the presence of contingencies. Moreover, a fleet management system should ensure correctness, i.e., refuse to commit to requirements that cannot be satisfied. We present an approach to obtain sets of alternative execution patterns (called trajectory envelopes) which provide these guarantees. The approach relies on a constraint-based representation shared among multiple solvers, each of which progressively refines trajectory envelopes following a least commitment principle. }, URL = {http://idm-lab.org/bib/abstracts/papers/icaps14a.pdf}, ISBN = {978-1-57735-660-8}, year = {2014} } @inproceedings{Valencia780074, author = {Valencia, Rafael and Saarinen, Jari and Andreasson, Henrik and Vallv{\’e;}, Joan and Andrade-Cetto, Juan and Lilienthal, Achim J.}, booktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {CSIC-UPC, Barcelona,Spain}, institution = {CSIC-UPC, Barcelona, Spain}, note = {Institut de Robòtica i Informàtica industrial - UPC, Joint Research Center of the Technical University of Catalonia (UPC) and the Spanish Council for Scientific Research (CSIC) focused on robotics research}, pages = {3956--3962}, title = {Localization in highly dynamic environments using dual-timescale NDT-MCL}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2014.6907433}, keywords = {Localization, Monte Carlo Localization, Intra Logistics, Mapping}, abstract = {Industrial environments are rarely static and oftentheir configuration is continuously changing due to the materialtransfer flow. This is a major challenge for infrastructure freelocalization systems. In this paper we address this challengeby introducing a localization approach that uses a dualtimescaleapproach. The proposed approach - Dual-TimescaleNormal Distributions Transform Monte Carlo Localization (DTNDT-MCL) - is a particle filter based localization method,which simultaneously keeps track of the pose using an aprioriknown static map and a short-term map. The short-termmap is continuously updated and uses Normal DistributionsTransform Occupancy maps to maintain the current state ofthe environment. A key novelty of this approach is that it doesnot have to select an entire timescale map but rather use thebest timescale locally. The approach has real-time performanceand is evaluated using three datasets with increasing levels ofdynamics. We compare our approach against previously proposedNDT-MCL and commonly used SLAM algorithms andshow that DT-NDT-MCL outperforms competing algorithmswith regards to accuracy in all three test cases. }, year = {2014} } @article{Saarinen644380, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding agency:Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {14}, pages = {1627--1644}, title = {3D normal distributions transform occupancy maps : an efficient representation for mapping in dynamic environments}, volume = {32}, DOI = {10.1177/0278364913499415}, abstract = {In order to enable long-term operation of autonomous vehicles in industrial environments numerous challenges need to be addressed. A basic requirement for many applications is the creation and maintenance of consistent 3D world models. This article proposes a novel 3D spatial representation for online real-world mapping, building upon two known representations: normal distributions transform (NDT) maps and occupancy grid maps. The proposed normal distributions transform occupancy map (NDT-OM) combines the advantages of both representations; compactness of NDT maps and robustness of occupancy maps. One key contribution in this article is that we formulate an exact recursive updates for NDT-OMs. We show that the recursive update equations provide natural support for multi-resolution maps. Next, we describe a modification of the recursive update equations that allows adaptation in dynamic environments. As a second key contribution we introduce NDT-OMs and formulate the occupancy update equations that allow to build consistent maps in dynamic environments. The update of the occupancy values are based on an efficient probabilistic sensor model that is specially formulated for NDT-OMs. In several experiments with a total of 17 hours of data from a milk factory we demonstrate that NDT-OMs enable real-time performance in large-scale, long-term industrial setups. }, year = {2013} } @inproceedings{Mosberger647365, author = {Mosberger, Rafael and Andreasson, Henrik}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {5850--5857}, title = {An Inexpensive Monocular Vision System for Tracking Humans in Industrial Environments}, series = {Robotics and Automation (ICRA), 2013 IEEE International Conference on}, DOI = {10.1109/ICRA.2013.6631419}, keywords = {Human Detection, Robot Vision, Industrial Safety}, abstract = {We report on a novel vision-based method for reliable human detection from vehicles operating in industrial environments in the vicinity of workers. By exploiting the fact that reflective vests represent a standard safety equipment on most industrial worksites, we use a single camera system and active IR illumination to detect humans by identifying the reflective vest markers. Adopting a sparse feature based approach, we classify vest markers against other reflective material and perform supervised learning of the object distance based on local image descriptors. The integration of the resulting per-feature 3D position estimates in a particle filter finally allows to perform human tracking in conditions ranging from broad daylight to complete darkness. }, ISBN = {978-1-4673-5641-1}, year = {2013} } @article{Stoyanov618586, author = {Stoyanov, Todor and Mojtahedzadeh, Rasoul and Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, number = {10}, pages = {1094--1105}, title = {Comparative evaluation of range sensor accuracy for indoor mobile robotics and automated logistics applications}, volume = {61}, DOI = {10.1016/j.robot.2012.08.011}, abstract = {3D range sensing is an important topic in robotics, as it is a component in vital autonomous subsystems such as for collision avoidance, mapping and perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements, without using a precise ground truth environment model, is proposed. This article presents an extensive evaluation of three novel depth sensors — the Swiss Ranger SR-4000, Fotonic B70 and Microsoft Kinect. Tests are concentrated on the automated logistics scenario of container unloading. Six different setups of box-, cylinder-, and sack-shaped goods inside a mock-up container are used to collect range measurements. Comparisons are performed against hand-crafted ground truth data, as well as against a reference actuated Laser Range Finder (aLRF) system. Additional test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. }, year = {2013} } @inproceedings{Saarinen644375, author = {Saarinen, Jari and Stoyanov, Todor and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4694--4701}, title = {Fast 3D mapping in highly dynamic environments using normal distributions transform occupancy maps}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697032}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Mosberger684470, author = {Mosberger, Rafael and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {638--644}, title = {Multi-human Tracking using High-visibility Clothing for Industrial Safety}, series = {Intelligent Robots and Systems (IROS), 2013 IEEE/RSJ International Conference on}, DOI = {10.1109/IROS.2013.6696418}, keywords = {Human Detection, Robot Vision, Industrial Safety}, abstract = {We propose and evaluate a system for detecting and tracking multiple humans wearing high-visibility clothing from vehicles operating in industrial work environments. We use a customized stereo camera setup equipped with IR flash and IR filter to detect the reflective material on the worker's garments and estimate their trajectories in 3D space. An evaluation in two distinct industrial environments with different degrees of complexity demonstrates the approach to be robust and accurate for tracking workers in arbitrary body poses, under occlusion, and under a wide range of different illumination settings. }, year = {2013} } @inproceedings{Saarinen644376, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {382--389}, title = {Normal distributions transform monte-carlo localization (NDT-MCL)}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696380}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Stoyanov644379, author = {Stoyanov, Todor and Saarinen, Jari and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4702--4708}, title = {Normal distributions transform occupancy map fusion : simultaneous mapping and tracking in large scale dynamic environments}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697033}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Saarinen622633, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Ala-Luhtala, Juha and Lilienthal, Achim J.}, booktitle = {IEEE International Conference on Robotics and Automation : }, institution = {Örebro University, School of Science and Technology}, institution = {Aalto University of Technology, Aalto, Finland}, pages = {2233--2238}, title = {Normal distributions transform occupancy maps : application to large-scale online 3D mapping}, DOI = {10.1109/ICRA.2013.6630878}, abstract = {Autonomous vehicles operating in real-world industrial environments have to overcome numerous challenges, chief among which is the creation and maintenance of consistent 3D world models. This paper proposes to address the challenges of online real-world mapping by building upon previous work on compact spatial representation and formulating a novel 3D mapping approach — the Normal Distributions Transform Occupancy Map (NDT-OM). The presented algorithm enables accurate real-time 3D mapping in large-scale dynamic nvironments employing a recursive update strategy. In addition, the proposed approach can seamlessly provide maps at multiple resolutions allowing for fast utilization in high-level functions such as localization or path planning. Compared to previous approaches that use the NDT representation, the proposed NDT-OM formulates an exact and efficient recursive update formulation and models the full occupancy of the map. }, year = {2013} } @inproceedings{Mosberger619101, author = {Mosberger, Rafael and Andreasson, Henrik}, booktitle = {Proceedings of the International Conference on Field and Service Robotics (FSR) : }, institution = {Örebro University, School of Science and Technology}, title = {Estimating the 3d position of humans wearing a reflective vest using a single camera system}, series = {Springer Tracts in Advanced Robotics}, abstract = {This paper presents a novel possible solution for people detection and estimation of their 3D position in challenging shared environments. Addressing safety critical applications in industrial environments, we make the basic assumption that people wear reflective vests. In order to detect these vests and to discriminate them from other reflective material, we propose an approach based on a single camera equipped with an IR flash. The camera acquires pairs of images, one with and one without IR flash, in short succession. The images forming a pair are then related to each other through feature tracking, which allows to discard features for which the relative intensity difference is small and which are thus not believed to belong to a reflective vest. Next, the local neighbourhood of the remaining features is further analysed. First, a Random Forest classifier is used to discriminate between features caused by a reflective vest and features caused by some other reflective materials. Second, the distance between the camera and the vest features is estimated using a Random Forest regressor. The proposed system was evaluated in one indoor and two challenging outdoor scenarios. Our results indicate very good classification performance and remarkably accurate distance estimation especially in combination with the SURF descriptor, even under direct exposure to sunlight. }, year = {2012} } @article{Stoyanov618701, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding Agencies:European Union FP7 - 270350Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {12}, pages = {1377--1393}, title = {Fast and accurate scan registration through minimization of the distance between compact 3D NDT Representations}, volume = {31}, DOI = {10.1177/0278364912460895}, keywords = {point set registration; mapping; normal distributions transform}, abstract = {Registration of range sensor measurements is an important task in mobile robotics and has received a lot of attention. Several iterative optimization schemes have been proposed in order to align three-dimensional (3D) point scans. With the more widespread use of high-frame-rate 3D sensors and increasingly more challenging application scenarios for mobile robots, there is a need for fast and accurate registration methods that current state-of-the-art algorithms cannot always meet. This work proposes a novel algorithm that achieves accurate point cloud registration an order of a magnitude faster than the current state of the art. The speedup is achieved through the use of a compact spatial representation: the Three-Dimensional Normal Distributions Transform (3D-NDT). In addition, a fast, global-descriptor based on the 3D-NDT is defined and used to achieve reliable initial poses for the iterative algorithm. Finally, a closed-form expression for the covariance of the proposed method is also derived. The proposed algorithms are evaluated on two standard point cloud data sets, resulting in stable performance on a par with or better than the state of the art. The implementation is available as an open-source package for the Robot Operating system (ROS). }, year = {2012} } @inproceedings{Andreasson311038, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {Proceedings of the International Conference on Field and Service Robotics (FSR), July 2012. : }, institution = {Örebro University, School of Science and Technology}, title = {Gold-fish SLAM : an application of SLAM to localize AGVs}, keywords = {Mobile robotics, AGV localization}, abstract = {The main focus of this paper is to present a case study of a SLAM solution for Automated Guided Vehicles (AGVs) operating in real-world industrial environ- ments. The studied solution, called Gold-fish SLAM, was implemented to provide localization estimates in dynamic industrial environments, where there are static landmarks that are only rarely perceived by the AGVs. The main idea of Gold-fish SLAM is to consider the goods that enter and leave the environment as temporary landmarks that can be used in combination with the rarely seen static landmarks to compute online estimates of AGV poses. The solution is tested and verified in a factory of paper using an eight ton diesel-truck retrofitted with an AGV control sys- tem running at speeds up to 3 meters per second. The paper includes also a general discussion on how SLAM can be used in industrial applications with AGVs. }, year = {2012} } @inproceedings{Saarinen1190203, author = {Saarinen, Jari and Andreasson, Henrik and Lilienthal, Achim}, booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Automation and Systems Technology, Aalto University, Alto, Finland}, pages = {3489--3495}, title = {Independent Markov Chain Occupancy Grid Maps for Representation of Dynamic Environments}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2012.6385629}, keywords = {Markov chain, Poisson process, model of dynamics}, abstract = {In this paper we propose a new grid based approach to model a dynamic environment. Each grid cell is assumed to be an independent Markov chain (iMac) with two states. The state transition parameters are learned online and modeled as two Poisson processes. As a result, our representation not only encodes the expected occupancy of the cell, but also models the expected dynamics within the cell. The paper also presents a strategy based on recency weighting to learn the model parameters from observations that is able to deal with non-stationary cell dynamics. Moreover, an interpretation of the model parameters with discussion about the convergence rates of the cells is presented. The proposed model is experimentally validated using offline data recorded with a Laser Guided Vehicle (LGV) system running in production use. }, ISBN = {978-1-4673-1736-8}, ISBN = {978-1-4673-1737-5}, ISBN = {978-1-4673-1735-1}, year = {2012} } @inproceedings{Andreasson618702, author = {Andreasson, Henrik and Stoyanov, Todor}, booktitle = {Proc. of International Conference on Robotics and Automation (ICRA) Workshop on Semantic Perception, Mapping and Exploration (SPME) : }, institution = {Örebro University, School of Science and Technology}, note = {The conference table of contents may be found on http://toc.proceedings.com/15154webtoc.pdf}, title = {Real time registration of RGB-D data using local visual features and 3D-NDT registration}, abstract = {Recent increased popularity of RGB-D capable sensors in robotics has resulted in a surge of related RGBD registration methods. This paper presents several RGB-D registration algorithms based on combinations between local visual feature and geometric registration. Fast and accurate transformation refinement is obtained by using a recently proposed geometric registration algorithm, based on the Three-Dimensional Normal Distributions Transform (3D-NDT). Results obtained on standard data sets have demonstrated mean translational errors on the order of 1 cm and rotational errors bellow 1 degree, at frame processing rates of about 15 Hz. }, ISBN = {9781467314039}, year = {2012} } @inproceedings{Stoyanov540987, author = {Stoyanov, Todor and Louloudi, Athanasia and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the 5th European Conference on Mobile Robots, ECMR 2011 : }, institution = {Örebro University, School of Science and Technology}, pages = {19--24}, title = {Comparative evaluation of range sensor accuracy in indoor environments}, abstract = {3D range sensing is one of the important topics in robotics, as it is often a component in vital autonomous subsystems like collision avoidance, mapping and semantic perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements is proposed. The approach is then used to compare the behavior of three integrated range sensing devices, to that of a standard actuated laser range sensor. Test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. }, year = {2011} } @article{Andreasson274835, author = {Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, note = {Selected papers from the 2007 European Conference on Mobile Robots (ECMR ’07)}, number = {2}, pages = {157--165}, title = {6D scan registration using depth-interpolated local image features}, volume = {58}, DOI = {10.1016/j.robot.2009.09.011}, keywords = {Registration, Vision, Laser Range Finder, SLAM}, abstract = {This paper describes a novel registration approach that is based on a combination of visual and 3D range information.To identify correspondences, local visual features obtained from images of a standard color camera are compared and the depth of matching features (and their position covariance) is determined from the range measurements of a 3D laserscanner. The matched depth-interpolated image features allows to apply registration with known correspondences.We compare several ICP variants in this paper and suggest an extension that considers the spatial distance betweenmatching features to eliminate false correspondences. Experimental results are presented in both outdoor and indoor environments. In addition to pair-wise registration, we also propose a global registration method that registers allscan poses simultaneously. }, year = {2010} } @inproceedings{Stoyanov445259, author = {Stoyanov, Todor and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {IEEE/RSJ 2010 International Conference on Intelligent Robots and Systems (IROS 2010) : }, institution = {Örebro University, School of Science and Technology}, pages = {3263--3268}, title = {Path planning in 3D environments using the normal distributions transform}, DOI = {10.1109/IROS.2010.5650789}, abstract = {Planning feasible paths in fully three-dimensional environments is a challenging problem. Application of existing algorithms typically requires the use of limited 3D representations that discard potentially useful information. This article proposes a novel approach to path planning that utilizes a full 3D representation directly: the Three-Dimensional Normal Distributions Transform (3D-NDT). The well known wavefront planner is modified to use 3D-NDT as a basis for map representation and evaluated using both indoor and outdoor data sets. The use of 3D-NDT for path planning is thus demonstrated to be a viable choice with good expressive capabilities. }, ISBN = {978-1-4244-6675-7}, year = {2010} } @inproceedings{Astrand274865, author = {{\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn and Bouguerra, Abdelbaki and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the 4th Swedish Workshop on Autonomous Robotics (SWAR)}, institution = {Örebro University, School of Science and Technology}, institution = {Halmstad University}, institution = {Halmstad University}, pages = {56--57}, title = {An Autonomous Robotic System for Load Transportation}, year = {2009} } @inproceedings{Bouguerra274885, author = {Bouguerra, Abdelbaki and Andreasson, Henrik and Lilienthal, Achim J. and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {2009 IEEE Conference on Emerging Technologies & Factory Automation (EFTA 2009) : }, institution = {Örebro University, School of Science and Technology}, institution = {Halmstad University, Halmstad, Sweden}, institution = {Halmstad University, Halmstad, Sweden}, pages = {1563--1566}, title = {An autonomous robotic system for load transportation}, series = {IEEE International Conference on Emerging Technologies and Factory Automation-ETFA}, DOI = {10.1109/ETFA.2009.5347247}, keywords = {AGV system; Autonomous robotic systems; Dynamic environments; Material handling; Object Detection; Runtimes}, abstract = {This paper presents an overview of an autonomous robotic material handling system. The goal of the system is to extend the functionalities of traditional AGVs to operate in highly dynamic environments. Traditionally, the reliable functioning of AGVs relies on the availability of adequate infrastructure to support navigation. In the target environments of our system, such infrastructure is difficult to setup in an efficient way. Additionally, the location of objects to handle are unknown, which requires that the system be able to detect and track object positions at runtime. Another requirement of the system is to be able to generate trajectories dynamically, which is uncommon in industrial AGV systems. }, ISBN = {978-1-4244-2727-7}, ISBN = {978-1-4244-2728-4}, year = {2009} } @inproceedings{Magnusson391763, author = {Magnusson, Martin and Andreasson, Henrik and N{\"u}chter, A. and Lilienthal, Achim J.}, booktitle = {IEEE International Conference on Robotics and Automation 2009 (ICRA '09) : }, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen, Bremen, Germany}, note = {Funding Agency:Atlas Copco Rock Drills }, pages = {23--28}, title = {Appearance-based loop detection from 3D laser data using the normal distributions transform}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ROBOT.2009.5152712}, abstract = {We propose a new approach to appearance based loop detection from metric 3D maps, exploiting the NDT surface representation. Locations are described with feature histograms based on surface orientation and smoothness, and loop closure can be detected by matching feature histograms. We also present a quantitative performance evaluation using two realworld data sets, showing that the proposed method works well in different environments.© 2009 IEEE. }, ISBN = {9781424427888}, ISBN = {9781424427895}, year = {2009} } @article{Magnusson274842, author = {Magnusson, Martin and Andreasson, Henrik and N{\"u}chter, Andreas and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen}, journal = {Journal of Field Robotics}, number = {11-12}, pages = {892--914}, title = {Automatic appearance-based loop detection from three-dimensional laser data using the normal distributions transform}, volume = {26}, DOI = {10.1002/rob.20314}, abstract = {We propose a new approach to appearance-based loop detection for mobile robots, usingthree-dimensional (3D) laser scans. Loop detection is an important problem in the simultaneouslocalization and mapping (SLAM) domain, and, because it can be seen as theproblem of recognizing previously visited places, it is an example of the data associationproblem. Without a flat-floor assumption, two-dimensional laser-based approaches arebound to fail in many cases. Two of the problems with 3D approaches that we address inthis paper are how to handle the greatly increased amount of data and how to efficientlyobtain invariance to 3D rotations.We present a compact representation of 3D point cloudsthat is still discriminative enough to detect loop closures without false positives (i.e.,detecting loop closure where there is none). A low false-positive rate is very important becausewrong data association could have disastrous consequences in a SLAM algorithm.Our approach uses only the appearance of 3D point clouds to detect loops and requires nopose information. We exploit the normal distributions transform surface representationto create feature histograms based on surface orientation and smoothness. The surfaceshape histograms compress the input data by two to three orders of magnitude. Becauseof the high compression rate, the histograms can be matched efficiently to compare theappearance of two scans. Rotation invariance is achieved by aligning scans with respectto dominant surface orientations. We also propose to use expectation maximization to fit a gamma mixture model to the output similarity measures in order to automatically determinethe threshold that separates scans at loop closures from nonoverlapping ones.Wediscuss the problem of determining ground truth in the context of loop detection and thedifficulties in comparing the results of the few available methods based on range information.Furthermore, we present quantitative performance evaluations using three realworlddata sets, one of which is highly self-similar, showing that the proposed methodachieves high recall rates (percentage of correctly identified loop closures) at low falsepositiverates in environments with different characteristics. }, year = {2009} } @book{Andreasson306494, author = {Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, pages = {206}, title = {Camera based navigation by mobile robots : local visual feature based localisation and mapping}, abstract = {The most important property of a mobile robot is the fact that it is mobile. How to give a robot the skills required to navigate around its environment is therefore an important topic in mobile robotics. Navigation, both for robots and humans, typically involves a map. The map can be used, for example, to estimate a pose based on observations (localisation) or determine a suitable path between to locations. Maps are available nowadays for us humans with few exceptions, however, maps suitable for mobile robots rarely exists. In addition, to relate sensor readings to a map requires that the map content and the observation is compatible, i.e. different robots may require different maps for the same area. This book addresses some of the fundamental problems related to mobile robot navigation (registration, localisation and mapping) using cameras as the primary sensor input. Small salient regions (local visual features) are extracted from each camera image, where each region can be seen as a fingerprint. Many fingerprint matches implicates a high likelihood that they corresponding images originate from a similar location, which is a central property utilised in this work. }, ISBN = {978-3-639-12452-1}, year = {2009} } @inproceedings{Bouguerra274878, author = {Bouguerra, Abdelbaki and Andreasson, Henrik and Lilienthal, Achim J. and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {Proceedings of the 4th European conference on mobile robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, institution = {Halmstad University}, institution = {Halmstad University, Sweden}, pages = {93--98}, title = {MALTA : a system of multiple autonomous trucks for load transportation}, keywords = {Autonomous Vehicles, Load Handling, AGVs}, abstract = {This paper presents an overview of an autonomousrobotic material handling system. The goal of the system is toextend the functionalities of traditional AGVs to operate in highlydynamic environments. Traditionally, the reliable functioning ofAGVs relies on the availability of adequate infrastructure tosupport navigation. In the target environments of our system,such infrastructure is difficult to setup in an efficient way.Additionally, the location of objects to handle are unknown,which requires that the system be able to detect and track objectpositions at runtime. Another requirement of the system is to beable to generate trajectories dynamically, which is uncommon inindustrial AGV systems. }, ISBN = {978-953-6037-54-4}, year = {2009} } @article{Andreasson158115, author = {Andreasson, Henrik and Duckett, Tom and Lilienthal, Achim J.}, institution = {Örebro University, Department of Technology}, institution = {University of Lincoln, University of Lincoln, UK}, journal = {IEEE Transactions on Robotics}, number = {5}, pages = {991--1001}, title = {A Minimalistic Approach to Appearance-Based Visual SLAM}, volume = {24}, DOI = {10.1109/TRO.2008.2004642}, keywords = {Omnidirectional vision, simultaneous localization and mapping (SLAM)}, abstract = {This paper presents a vision-based approach to SLAM in indoor / outdoor environments with minimalistic sensing and computational requirements. The approach is based on a graph representation of robot poses, using a relaxation algorithm to obtain a globally consistent map. Each link corresponds to a relative measurement of the spatial relation between the two nodes it connects. The links describe the likelihood distribution of the relative pose as a Gaussian distribution. To estimate the covariance matrix for links obtained from an omni-directional vision sensor, a novel method is introduced based on the relative similarity of neighbouring images. This new method does not require determining distances to image features using multiple view geometry, for example. Combined indoor and outdoor experiments demonstrate that the approach can handle qualitatively different environments (without modification of the parameters), that it can cope with violations of the “flat floor assumption” to some degree, and that it scales well with increasing size of the environment, producing topologically correct and geometrically accurate maps at low computational cost. Further experiments demonstrate that the approach is also suitable for combining multiple overlapping maps, e.g. for solving the multi-robot SLAM problem with unknown initial poses. }, year = {2008} } @phdthesis{Andreasson136254, author = {Andreasson, Henrik}, institution = {Örebro University, Department of Technology}, pages = {204}, publisher = {Örebro universitet}, school = {Örebro University, Department of Technology}, title = {Local visual feature based localisation and mapping by mobile robots}, series = {Örebro Studies in Technology}, ISSN = {1650-8580}, number = {31}, keywords = {mobile robotics, registration, localisation, SLAM, mapping, omnidirectional vision, 3D vision, appearance based}, abstract = {This thesis addresses the problems of registration, localisation and simultaneous localisation and mapping (SLAM), relying particularly on local visual features extracted from camera images. These fundamental problems in mobile robot navigation are tightly coupled. Localisation requires a representation of the environment (a map) and registration methods to estimate the pose of the robot relative to the map given the robot’s sensory readings. To create a map, sensor data must be accumulated into a consistent representation and therefore the pose of the robot needs to be estimated, which is again the problem of localisation. The major contributions of this thesis are new methods proposed to address the registration, localisation and SLAM problems, considering two different sensor configurations. The first part of the thesis concerns a sensor configuration consisting of an omni-directional camera and odometry, while the second part assumes a standard camera together with a 3D laser range scanner. The main difference is that the former configuration allows for a very inexpensive set-up and (considering the possibility to include visual odometry) the realisation of purely visual navigation approaches. By contrast, the second configuration was chosen to study the usefulness of colour or intensity information in connection with 3D point clouds (“coloured point clouds”), both for improved 3D resolution (“super resolution”) and approaches to the fundamental problems of navigation that exploit the complementary strengths of visual and range information. Considering the omni-directional camera/odometry setup, the first part introduces a new registration method based on a measure of image similarity. This registration method is then used to develop a localisation method, which is robust to the changes in dynamic environments, and a visual approach to metric SLAM, which does not require position estimation of local image features and thus provides a very efficient approach. The second part, which considers a standard camera together with a 3D laser range scanner, starts with the proposal and evaluation of non-iterative interpolation methods. These methods use colour information from the camera to obtain range information at the resolution of the camera image, or even with sub-pixel accuracy, from the low resolution range information provided by the range scanner. Based on the ability to determine depth values for local visual features, a new registration method is then introduced, which combines the depth of local image features and variance estimates obtained from the 3D laser range scanner to realise a vision-aided 6D registration method, which does not require an initial pose estimate. This is possible because of the discriminative power of the local image features used to determine point correspondences (data association). The vision-aided registration method is further developed into a 6D SLAM approach where the optimisation constraint is based on distances of paired local visual features. Finally, the methods introduced in the second part are combined with a novel adaptive normal distribution transform (NDT) representation of coloured 3D point clouds into a robotic difference detection system. }, ISBN = {978-91-7668-614-0}, year = {2008} } @inproceedings{Andreasson138559, author = {Andreasson, Henrik and Magnusson, Martin and Lilienthal, Achim}, booktitle = {2007 IEEE/RSJ international conference on intelligent robots and systems : }, institution = {Örebro University, Department of Technology}, institution = {Örebro University, Department of Natural Sciences}, pages = {3429--3435}, eid = {4399381}, title = {Has something changed here? : Autonomous difference detection for security patrol robots}, DOI = {10.1109/IROS.2007.4399381}, abstract = {This paper presents a system for autonomous change detection with a security patrol robot. In an initial step a reference model of the environment is created and changes are then detected with respect to the reference model as differences in coloured 3D point clouds, which are obtained from a 3D laser range scanner and a CCD camera. The suggested approach introduces several novel aspects, including a registration method that utilizes local visual features to determine point correspondences (thus essentially working without an initial pose estimate) and the 3D-NDT representation with adaptive cell size to efficiently represent both the spatial and colour aspects of the reference model. Apart from a detailed description of the individual parts of the difference detection system, a qualitative experimental evaluation in an indoor lab environment is presented, which demonstrates that the suggested system is able register and detect changes in spatial 3D data and also to detect changes that occur in colour space and are not observable using range values only. }, ISBN = {978-1-4244-0912-9}, year = {2007} } @inproceedings{Andreasson138560, author = {Andreasson, Henrik and Duckett, Tom and Lilienthal, Achim J.}, booktitle = {2007 IEEE international conference on robotics and automation (ICRA) : }, institution = {Örebro University, Department of Technology}, institution = {Dept. of Computing & Informatics, University of Lincoln, Lincoln, United Kingdom}, pages = {4096--4101}, eid = {4209726}, title = {Mini-SLAM : minimalistic visual SLAM in large-scale environments based on a new interpretation of image similarity}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ROBOT.2007.364108}, abstract = {This paper presents a vision-based approach to SLAM in large-scale environments with minimal sensing and computational requirements. The approach is based on a graphical representation of robot poses and links between the poses. Links between the robot poses are established based on odometry and image similarity, then a relaxation algorithm is used to generate a globally consistent map. To estimate the covariance matrix for links obtained from the vision sensor, a novel method is introduced based on the relative similarity of neighbouring images, without requiring distances to image features or multiple view geometry. Indoor and outdoor experiments demonstrate that the approach scales well to large-scale environments, producing topologically correct and geometrically accurate maps at minimal computational cost. Mini-SLAM was found to produce consistent maps in an unstructured, large-scale environment (the total path length was 1.4 km) containing indoor and outdoor passages. }, ISBN = {978-1-4244-0601-2}, year = {2007} } @inproceedings{Andreasson138558, author = {Andreasson, Henrik and Triebel, Rudolph and Lilienthal, Achim J.}, booktitle = {Autonomos Agents and Robots : }, institution = {Örebro University, Department of Technology}, institution = {Department of Computer Science, University of Freiburg, Freiburg, Germany}, pages = {83--90}, eid = {4399381}, publisher = {Springer}, title = {Non-iterative Vision-based Interpolation of 3D Laser Scans}, series = {Studies in Computational Intelligence}, number = {76}, volume = {76}, DOI = {10.1007/978-3-540-73424-6_10}, keywords = {3D range sensor, laser range scanner, vision-based depth interpolation, 3D vision}, abstract = {3D range sensors, particularly 3D laser range scanners, enjoy a rising popularity and are used nowadays for many different applications. The resolution 3D range sensors provide in the image plane is typically much lower than the resolution of a modern colour camera. In this chapter we focus on methods to derive a highresolution depth image from a low-resolution 3D range sensor and a colour image. The main idea is to use colour similarity as an indication of depth similarity, based on the observation that depth discontinuities in the scene often correspond to colour or brightness changes in the camera image. We present five interpolation methods and compare them with an independently proposed method based on Markov random fields. The proposed algorithms are non-iterative and include a parameter-free vision-based interpolation method. In contrast to previous work, we present ground truth evaluation with real world data and analyse both indoor and outdoor data. }, ISBN = {978-3-540-73423-9}, year = {2007} } @article{Andreasson158113, author = {Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom}, institution = {Örebro University, Department of Technology}, institution = {University of Tübingen}, journal = {Robotics and Autonomous Systems}, number = {7}, pages = {541--551}, publisher = {Elsevier}, title = {Self-localization in non-stationary environments using omni-directional vision}, volume = {55}, DOI = {10.1016/j.robot.2007.02.002}, abstract = {This paper presents an image-based approach for localization in non-static environments using local feature descriptors, and its experimental evaluation in a large, dynamic, populated environment where the time interval between the collected data sets is up to two months. By using local features together with panoramic images, robustness and invariance to large changes in the environment can be handled. Results from global place recognition with no evidence accumulation and a Monte Carlo localization method are shown. To test the approach even further, experiments were conducted with up to 90% virtual occlusion in addition to the dynamic changes in the environment }, year = {2007} } @inproceedings{Andreasson138563, author = {Andreasson, Henrik and Lilienthal, Achim}, booktitle = {ECMR 2007 : Proceedings of the European Conference on Mobile Robots}, institution = {Örebro University, Department of Technology}, institution = {Örebro University, Department of Natural Sciences}, institution = {aass}, pages = {192--197}, title = {Vision aided 3D laser scanner based registration}, keywords = {Registration, Vision}, abstract = {This paper describes a vision and 3D laser based registration approach which utilizes visual features to identify correspondences. Visual features are obtained from the images of a standard color camera and the depth of these features is determined by interpolating between the scanning points of a 3D laser range scanner, taking into consideration the visual information in the neighbourhood of the respective visual feature. The 3D laser scanner is also used to determine a position covariance estimate of the visual feature. To exploit these covariance estimates, an ICP algorithm based on the Mahalanobis distance is applied. Initial experimental results are presented in a real world indoor laboratory environment }, URL = {http://ecmr07.informatik.uni-freiburg.de/proceedings/ECMR07_0059.pdf}, year = {2007} } @article{Tamimi137728, author = {Tamimi, Hashem and Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom and Zell, Andreas}, institution = {Örebro University, Department of Technology}, institution = {University of Tubingen}, institution = {University of Tubingen}, institution = {University of Lincoln}, institution = {University of Tubingen}, journal = {Robotics and Autonomous Systems}, note = {Selected papers from the 2nd European Conference on Mobile Robots (ECMR ’05)}, number = {9}, pages = {758--765}, title = {Localization of mobile robots with omnidirectional vision using Particle Filter and iterative SIFT}, volume = {54}, DOI = {10.1016/j.robot.2006.04.018}, keywords = {Robot localization, Scale Invariant Feature Transform, Omnidirectional vision, Particle Filter}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot localization. Still, the number of features extracted with this approach is immense, especially when dealing with omnidirectional vision. In this work, we propose a new approach that reduces the number of features generated by SIFT as well as their extraction and matching time. With the help of a Particle Filter, we demonstrate that we can still localize the mobile robot accurately with a lower number of features }, year = {2006} } @inproceedings{Andreasson138254, author = {Andreasson, Henrik and Lilienthal, Achim J. and Triebel, Rudolph}, booktitle = {Proceedings of the Third International Conference on Autonomous Robots and Agents : }, institution = {Örebro University, Department of Technology}, institution = {Department of Computer Science, University of Freiburg, Germany}, pages = {455--460}, title = {Vision based interpolation of 3D laser scans}, keywords = {3D range sensor, laser range scanner, vision-based depth interpolation, 3D vision}, abstract = {3D range sensors, particularly 3D laser range scanners, enjoy a rising popularity and are used nowadays for many different applications. The resolution 3D range sensors provide in the image plane is typically much lower than the resolution of a modern color camera. In this paper we focus on methods to derive a high-resolution depth image from a low-resolution 3D range sensor and a color image. The main idea is to use color similarity as an indication of depth similarity, based on the observation that depth discontinuities in the scene often correspond to color or brightness changes in the camera image. We present five interpolation methods and compare them with an independently proposed method based on Markov Random Fields. The algorithms proposed in this paper are non-iterative and include a parameter-free vision-based interpolation method. In contrast to previous work, we present ground truth evaluation with real world data and analyse both indoor and outdoor data. Further, we suggest and evaluate four methods to determine a confidence measure for the accuracy of interpolated range values. }, year = {2006} } @inproceedings{Andreasson138260, author = {Andreasson, Henrik and Triebel, Rudolph and Burgard, Wolfram}, booktitle = {2005 IEEE/RSJ International Conference on Intelligent Robots and Systems, 2005. (IROS 2005) : IROS 2005 IEEE/RSJ}, institution = {Örebro University, Department of Technology}, institution = {University of Friburg}, institution = {University of Friburg}, pages = {2656--2661}, title = {Improving plane extraction from 3D data by fusing laser data and vision}, abstract = {The problem of extracting three-dimensional structures from data acquired with mobile robots has received considerable attention over the past years. Robots that are able to perceive their three-dimensional environment are envisioned to more robustly perform tasks like navigation, rescue, and manipulation. In this paper we present an approach that simultaneously uses color and range information to cluster 3d points into planar structures. Our current system also is able to calibrate the camera and the laser based on the remission values provided by the range scanner and the brightness of the pixels in the image. It has been implemented on a mobile robot equipped with a manipulator that carries a range scanner and a camera for acquiring colored range scans. Several experiments carried out on real data and in simulations demonstrate that our approach yields highly accurate results also in comparison with previous approaches }, year = {2005} } @inproceedings{Andreasson158109, author = {Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom}, booktitle = {Proceedings of the 2005 IEEE International Converence on Robotics and Automation : ICRA - 2005}, institution = {Örebro University, Department of Technology}, institution = {University of Tübingen}, pages = {3348--3353}, title = {Localization for mobile robots using panoramic vision, local features and particle filter}, DOI = {10.1109/ROBOT.2005.1570627}, abstract = {In this paper we present a vision-based approach to self-localization that uses a novel scheme to integrate featurebased matching of panoramic images with Monte Carlo localization. A specially modified version of Lowe’s SIFT algorithm is used to match features extracted from local interest points in the image, rather than using global features calculated from the whole image. Experiments conducted in a large, populated indoor environment (up to 5 persons visible) over a period of several months demonstrate the robustness of the approach, including kidnapping and occlusion of up to 90% of the robot’s field of view. }, year = {2005} } @inproceedings{Tamimi138267, author = {Tamimi, Hashem and Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom and Zell, Andreas}, booktitle = { : }, institution = {Örebro University, Department of Technology}, institution = {University of Tubingen}, institution = {University of Tubingen}, institution = {University of Tubingen}, title = {Localization of mobile robots with omnidirectional vision using particle filter and iterative SIFT}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot localization. Still, the number of features extracted with this approach is immense, especially when dealing with omnidirectional vision. In this work, we propose a new approach that reduces the number of features generated by SIFT as well as their extraction and matching time. With the help of a particle filter, we demonstrate that we can still localize the mobile robot accurately with a lower number of features }, year = {2005} } @inproceedings{Fleck158110, author = {Fleck, Sven and Busch, Florian and Biber, Peter and Strasser, Wolfgang and Andreasson, Henrik}, booktitle = {Proceedings of the 2005 IEEE International Converence on Robotics and Automation : ICRA - 2005}, institution = {Örebro University, Department of Technology}, institution = {University of Tübingen}, institution = {University of Tübingen}, institution = {University of Tübingen}, institution = {University of Tübingen}, pages = {1748--1754}, title = {Omnidirectional 3D modeling on a mobile robot using graph cuts}, DOI = {10.1109/ROBOT.2005.1570366}, abstract = {For a mobile robot it is a natural task to build a 3D model of its environment. Such a model is not only useful for planning robot actions but also to provide a remote human surveillant a realistic visualization of the robot’s state with respect to the environment. Acquiring 3D models of environments is also an important task on its own with many possible applications like creating virtual interactive walkthroughs or as basis for 3D-TV. In this paper we present our method to acquire a 3D model using a mobile robot that is equipped with a laser scanner and a panoramic camera. The method is based on calculating dense depth maps for panoramic images using pairs of panoramic images taken from different positions using stereo matching. Traditional 2D-SLAM using laser-scan-matching is used to determine the needed camera poses. To receive high-quality results we use a high-quality stereo matching algorithm – the graph cut method. We describe the necessary modifications to handle panoramic images and specialized post-processing methods. }, year = {2005} } @article{Liao1719864, author = {Liao, Qianfang and Sun, Da and Zhang, Shiyu and Loutfi, Amy and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on Image Processing}, pages = {550--564}, title = {Fuzzy Cluster-based Group-wise Point Set Registration with Quality Assessment}, volume = {32}, DOI = {10.1109/TIP.2022.3231132}, keywords = {Quality assessment, Measurement, Three-dimensional displays, Registers, Probability distribution, Point cloud compression, Optimization, Group-wise registration, registration quality assessment, joint alignment, fuzzy clusters, 3D point sets}, abstract = {This article studies group-wise point set registration and makes the following contributions: "FuzzyGReg", which is a new fuzzy cluster-based method to register multiple point sets jointly, and "FuzzyQA", which is the associated quality assessment to check registration accuracy automatically. Given a group of point sets, FuzzyGReg creates a model of fuzzy clusters and equally treats all the point sets as the elements of the fuzzy clusters. Then, the group-wise registration is turned into a fuzzy clustering problem. To resolve this problem, FuzzyGReg applies a fuzzy clustering algorithm to identify the parameters of the fuzzy clusters while jointly transforming all the point sets to achieve an alignment. Next, based on the identified fuzzy clusters, FuzzyQA calculates the spatial properties of the transformed point sets and then checks the alignment accuracy by comparing the similarity degrees of the spatial properties of the point sets. When a local misalignment is detected, a local re-alignment is performed to improve accuracy. The proposed method is cost-efficient and convenient to be implemented. In addition, it provides reliable quality assessments in the absence of ground truth and user intervention. In the experiments, different point sets are used to test the proposed method and make comparisons with state-of-the-art registration techniques. The experimental results demonstrate the effectiveness of our method.The code is available at https://gitsvn-nt.oru.se/qianfang.liao/FuzzyGRegWithQA }, year = {2023} } @article{Adolfsson1727222, author = {Adolfsson, Daniel and Magnusson, Martin and Alhashimi, Anas and Lilienthal, Achim and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, institution = {Örebro University, Örebro, Sweden; Computer Engineering Department, University of Baghdad, Baghdad, Iraq}, journal = {IEEE Transactions on robotics}, number = {2}, pages = {1476--1495}, title = {Lidar-Level Localization With Radar? The CFEAR Approach to Accurate, Fast, and Robust Large-Scale Radar Odometry in Diverse Environments}, volume = {39}, DOI = {10.1109/tro.2022.3221302}, keywords = {Radar, Sensors, Spinning, Azimuth, Simultaneous localization and mapping, Estimation, Location awareness, Localization, radar odometry, range sensing, SLAM}, abstract = {This article presents an accurate, highly efficient, and learning-free method for large-scale odometry estimation using spinning radar, empirically found to generalize well across very diverse environments—outdoors, from urban to woodland, and indoors in warehouses and mines—without changing parameters. Our method integrates motion compensation within a sweep with one-to-many scan registration that minimizes distances between nearby oriented surface points and mitigates outliers with a robust loss function. Extending our previous approach conservative filtering for efficient and accurate radar odometry (CFEAR), we present an in-depth investigation on a wider range of datasets, quantifying the importance of filtering, resolution, registration cost and loss functions, keyframe history, and motion compensation. We present a new solving strategy and configuration that overcomes previous issues with sparsity and bias, and improves our state-of-the-art by 38%, thus, surprisingly, outperforming radar simultaneous localization and mapping (SLAM) and approaching lidar SLAM. The most accurate configuration achieves 1.09% error at 5 Hz on the Oxford benchmark, and the fastest achieves 1.79% error at 160 Hz. }, URL = {https://doi.org/10.48550/arXiv.2211.02445}, year = {2023} } @article{Gupta1761421, author = {Gupta, Himanshu and Lilienthal, Achim and Andreasson, Henrik and Kurtser, Polina}, institution = {Örebro University, School of Science and Technology}, institution = {Perception for Intelligent Systems, TechnicalUniversity of Munich, Munich, Germany}, institution = {Centre for Applied Autonomous SensorSystems, Institutionen för naturvetenskap &teknik, Örebro University, Örebro, Sweden; Department of Radiation Science, RadiationPhysics, Umeå University, Umeå, Sweden}, journal = {Journal of Field Robotics}, number = {6}, pages = {1603--1619}, title = {NDT-6D for color registration in agri-robotic applications}, volume = {40}, DOI = {10.1002/rob.22194}, keywords = {agricultural robotics, color pointcloud, in-field sensing, machine perception, RGB-D registration, stereo IR, vineyard}, abstract = {Registration of point cloud data containing both depth and color information is critical for a variety of applications, including in-field robotic plant manipulation, crop growth modeling, and autonomous navigation. However, current state-of-the-art registration methods often fail in challenging agricultural field conditions due to factors such as occlusions, plant density, and variable illumination. To address these issues, we propose the NDT-6D registration method, which is a color-based variation of the Normal Distribution Transform (NDT) registration approach for point clouds. Our method computes correspondences between pointclouds using both geometric and color information and minimizes the distance between these correspondences using only the three-dimensional (3D) geometric dimensions. We evaluate the method using the GRAPES3D data set collected with a commercial-grade RGB-D sensor mounted on a mobile platform in a vineyard. Results show that registration methods that only rely on depth information fail to provide quality registration for the tested data set. The proposed color-based variation outperforms state-of-the-art methods with a root mean square error (RMSE) of 1.1-1.6 cm for NDT-6D compared with 1.1-2.3 cm for other color-information-based methods and 1.2-13.7 cm for noncolor-information-based methods. The proposed method is shown to be robust against noises using the TUM RGBD data set by artificially adding noise present in an outdoor scenario. The relative pose error (RPE) increased similar to 14% for our method compared to an increase of similar to 75% for the best-performing registration method. The obtained average accuracy suggests that the NDT-6D registration methods can be used for in-field precision agriculture applications, for example, crop detection, size-based maturity estimation, and growth modeling. }, year = {2023} } @inproceedings{Gupta1812049, author = {Gupta, Himanshu and Andreasson, Henrik and Magnusson, Martin and Julier, Simon and Lilienthal, Achim J.}, booktitle = {2023 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, University College London, London, England}, institution = {Perception for Intelligent Systems, Technical University of Munich, Germany }, pages = {43--48}, publisher = {IEEE}, title = {Revisiting Distribution-Based Registration Methods}, series = {European Conference on Mobile Robots}, DOI = {10.1109/ECMR59166.2023.10256416}, abstract = {Normal Distribution Transformation (NDT) registration is a fast, learning-free point cloud registration algorithm that works well in diverse environments. It uses the compact NDT representation to represent point clouds or maps as a spatial probability function that models the occupancy likelihood in an environment. However, because of the grid discretization in NDT maps, the global minima of the registration cost function do not always correlate to ground truth, particularly for rotational alignment. In this study, we examined the NDT registration cost function in-depth. We evaluated three modifications (Student-t likelihood function, inflated covariance/heavily broadened likelihood curve, and overlapping grid cells) that aim to reduce the negative impact of discretization in classical NDT registration. The first NDT modification improves likelihood estimates for matching the distributions of small population sizes; the second modification reduces discretization artifacts by broadening the likelihood tails through covariance inflation; and the third modification achieves continuity by creating the NDT representations with overlapping grid cells (without increasing the total number of cells). We used the Pomerleau Dataset evaluation protocol for our experiments and found significant improvements compared to the classic NDT D2D registration approach (27.7% success rate) using the registration cost functions "heavily broadened likelihood NDT" (HBL-NDT) (34.7% success rate) and "overlapping grid cells NDT" (OGC-NDT) (33.5% success rate). However, we could not observe a consistent improvement using the Student-t likelihood-based registration cost function (22.2% success rate) over the NDT P2D registration cost function (23.7% success rate). A comparative analysis with other state-of-art registration algorithms is also presented in this work. We found that HBL-NDT worked best for easy initial pose difficulties scenarios making it suitable for consecutive point cloud registration in SLAM application. }, ISBN = {9798350307047}, ISBN = {9798350307054}, year = {2023} } @article{Gupta1770024, author = {Gupta, Himanshu and Andreasson, Henrik and Lilienthal, Achim J. and Kurtser, Polina}, institution = {Örebro University, School of Science and Technology}, institution = {Perception for Intelligent Systems, Technical University of Munich, Munich, Germany}, institution = {Centre for Applied Autonomous Sensor Systems, Örebro University, Örebro, Sweden; Department of Radiation Science, Radiation Physics, Umeå University, Umeå, Sweden}, journal = {Sensors}, number = {10}, eid = {4736}, title = {Robust Scan Registration for Navigation in Forest Environment Using Low-Resolution LiDAR Sensors}, volume = {23}, DOI = {10.3390/s23104736}, keywords = {tree segmentation, LiDAR mapping, forest inventory, SLAM, forestry robotics, scan registration}, abstract = {Automated forest machines are becoming important due to human operators' complex and dangerous working conditions, leading to a labor shortage. This study proposes a new method for robust SLAM and tree mapping using low-resolution LiDAR sensors in forestry conditions. Our method relies on tree detection to perform scan registration and pose correction using only low-resolution LiDAR sensors (16Ch, 32Ch) or narrow field of view Solid State LiDARs without additional sensory modalities like GPS or IMU. We evaluate our approach on three datasets, including two private and one public dataset, and demonstrate improved navigation accuracy, scan registration, tree localization, and tree diameter estimation compared to current approaches in forestry machine automation. Our results show that the proposed method yields robust scan registration using detected trees, outperforming generalized feature-based registration algorithms like Fast Point Feature Histogram, with an above 3 m reduction in RMSE for the 16Chanel LiDAR sensor. For Solid-State LiDAR the algorithm achieves a similar RMSE of 3.7 m. Additionally, our adaptive pre-processing and heuristic approach to tree detection increased the number of detected trees by 13% compared to the current approach of using fixed radius search parameters for pre-processing. Our automated tree trunk diameter estimation method yields a mean absolute error of 4.3 cm (RSME = 6.5 cm) for the local map and complete trajectory maps. }, year = {2023} } @article{Adolfsson1766598, author = {Adolfsson, Daniel and Karlsson, Mattias and Kubelka, Vladimír and Magnusson, Martin and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, institution = {MRO Lab of the AASS Research Centre, Örebro University, Örebro, Sweden}, journal = {IEEE Robotics and Automation Letters}, number = {6}, pages = {3613--3620}, title = {TBV Radar SLAM - Trust but Verify Loop Candidates}, volume = {8}, DOI = {10.1109/LRA.2023.3268040}, keywords = {SLAM, localization, radar, introspection}, abstract = {Robust SLAM in large-scale environments requires fault resilience and awareness at multiple stages, from sensing and odometry estimation to loop closure. In this work, we present TBV (Trust But Verify) Radar SLAM, a method for radar SLAM that introspectively verifies loop closure candidates. TBV Radar SLAM achieves a high correct-loop-retrieval rate by combining multiple place-recognition techniques: tightly coupled place similarity and odometry uncertainty search, creating loop descriptors from origin-shifted scans, and delaying loop selection until after verification. Robustness to false constraints is achieved by carefully verifying and selecting the most likely ones from multiple loop constraints. Importantly, the verification and selection are carried out after registration when additional sources of loop evidence can easily be computed. We integrate our loop retrieval and verification method with a robust odometry pipeline within a pose graph framework. By evaluation on public benchmarks we found that TBV Radar SLAM achieves 65% lower error than the previous state of the art. We also show that it generalizes across environments without needing to change any parameters. We provide the open-source implementation at https://github.com/dan11003/tbv_slam_public }, year = {2023} } @article{Molina1797296, author = {Molina, Sergi and Mannucci, Anna and Magnusson, Martin and Adolfsson, Daniel and Andreasson, Henrik and Hamad, Mazin and Abdolshah, Saeed and Chadalavada, Ravi Teja and Palmieri, Luigi and Linder, Timm and Swaminathan, Chittaranjan Srinivas and Kucner, Tomasz Piotr and Hanheide, Marc and Fernandez-Carmona, Manuel and Cielniak, Grzegorz and Duckett, Tom and Pecora, Federico and Bokesand, Simon and Arras, Kai O. and Haddadin, Sami and Lilienthal, Achim J}, institution = {Örebro University, School of Science and Technology}, institution = {University of Lincoln, Lincoln, U.K}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Technical University of Munich, Munich, Germany}, institution = {Technical University of Munich, Munich, Germany}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Aalto University, Aalto, Finland}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {University of Lincoln, Lincoln, U.K.}, institution = {Kollmorgen Automation AB, Mölndal, Sweden}, institution = {Robert Bosch GmbH, Renningen, Germany}, institution = {Technical University of Munich, Munich, Germany}, journal = {IEEE robotics & automation magazine}, title = {The ILIAD Safety Stack : Human-Aware Infrastructure-Free Navigation of Industrial Mobile Robots}, DOI = {10.1109/MRA.2023.3296983}, keywords = {Robots, Safety, Navigation, Mobile robots, Human-robot interaction, Hidden Markov models, Trajectory}, abstract = {Current intralogistics services require keeping up with e-commerce demands, reducing delivery times and waste, and increasing overall flexibility. As a consequence, the use of automated guided vehicles (AGVs) and, more recently, autonomous mobile robots (AMRs) for logistics operations is steadily increasing. }, year = {2023} } @article{Andreasson1650509, author = {Andreasson, Henrik and Larsson, Jonas and Lowry, Stephanie}, institution = {Örebro University, School of Science and Technology}, institution = {ABB Corporate Research, Västerås, Sweden}, journal = {Sensors}, number = {7}, eid = {2588}, title = {A Local Planner for Accurate Positioning for a Multiple Steer-and-Drive Unit Vehicle Using Non-Linear Optimization}, volume = {22}, DOI = {10.3390/s22072588}, keywords = {local planning, optimal control, obstacle avoidance}, abstract = {This paper presents a local planning approach that is targeted for pseudo-omnidirectional vehicles: that is, vehicles that can drive sideways and rotate on the spot. This local planner—MSDU–is based on optimal control and formulates a non-linear optimization problem formulation that exploits the omni-motion capabilities of the vehicle to drive the vehicle to the goal in a smooth and efficient manner while avoiding obstacles and singularities. MSDU is designed for a real platform for mobile manipulation where one key function is the capability to drive in narrow and confined areas. The real-world evaluations show that MSDU planned paths that were smoother and more accurate than a comparable local path planner Timed Elastic Band (TEB), with a mean (translational, angular) error for MSDU of (0.0028 m, 0.0010 rad) compared to (0.0033 m, 0.0038 rad) for TEB. MSDU also generated paths that were consistently shorter than TEB, with a mean (translational, angular) distance traveled of (0.6026 m, 1.6130 rad) for MSDU compared to (0.7346 m, 3.7598 rad) for TEB. }, year = {2022} } @article{Seeburger1704400, author = {Seeburger, P. and Herdenstam, Anders P. F. and Kurtser, Polina and Arunachalam, A. and Castro Alves, Victor and Hy{\"o}tyl{\"a}inen, Tuulia and Andreasson, Henrik}, institution = {Örebro University, School of Hospitality, Culinary Arts & Meal Science}, institution = {Örebro University, School of Science and Technology}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden}, institution = {Department of Radiation Sciences, Radiation Physics, Umeå University, Umeå, Sweden}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden; Department of Radiation Sciences, Radiation Physics, Umeå University, Umeå, Sweden}, journal = {Food Chemistry}, note = {Funding agency:German Academic Exchange Service (Deutscher Akademischer Austauschdienst, DAAD)}, number = {Pt A}, eid = {134545}, title = {Controlled mechanical stimuli reveal novel associations between basil metabolism and sensory quality}, volume = {404}, DOI = {10.1016/j.foodchem.2022.134545}, keywords = {Agricultural robotics, Linalool glucoside, Network analysis, Plant metabolomics, Sensomics, Sensory analysis}, abstract = {There is an increasing interest in the use of automation in plant production settings. Here, we employed a robotic platform to induce controlled mechanical stimuli (CMS) aiming to improve basil quality. Semi-targeted UHPLC-qToF-MS analysis of organic acids, amino acids, phenolic acids, and phenylpropanoids revealed changes in basil secondary metabolism under CMS, which appear to be associated with changes in taste, as revealed by different means of sensory evaluation (overall liking, check-all-that-apply, and just-about-right analysis). Further network analysis combining metabolomics and sensory data revealed novel links between plant metabolism and sensory quality. Amino acids and organic acids including maleic acid were negatively associated with basil quality, while increased levels of secondary metabolites, particularly linalool glucoside, were associated with improved basil taste. In summary, by combining metabolomics and sensory analysis we reveal the potential of automated CMS on crop production, while also providing new associations between plant metabolism and sensory quality. }, year = {2022} } @article{Adolfsson1689786, author = {Adolfsson, Daniel and Castellano-Quero, Manuel and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, eid = {104136}, title = {CorAl : Introspection for robust radar and lidar perception in diverse environments using differential entropy}, volume = {155}, DOI = {10.1016/j.robot.2022.104136}, keywords = {Radar, Introspection, Localization}, abstract = {Robust perception is an essential component to enable long-term operation of mobile robots. It depends on failure resilience through reliable sensor data and pre-processing, as well as failure awareness through introspection, for example the ability to self-assess localization performance. This paper presents CorAl: a principled, intuitive, and generalizable method to measure the quality of alignment between pairs of point clouds, which learns to detect alignment errors in a self-supervised manner. CorAl compares the differential entropy in the point clouds separately with the entropy in their union to account for entropy inherent to the scene. By making use of dual entropy measurements, we obtain a quality metric that is highly sensitive to small alignment errors and still generalizes well to unseen environments. In this work, we extend our previous work on lidar-only CorAl to radar data by proposing a two-step filtering technique that produces high-quality point clouds from noisy radar scans. Thus, we target robust perception in two ways: by introducing a method that introspectively assesses alignment quality, and by applying it to an inherently robust sensor modality. We show that our filtering technique combined with CorAl can be applied to the problem of alignment classification, and that it detects small alignment errors in urban settings with up to 98% accuracy, and with up to 96% if trained only in a different environment. Our lidar and radar experiments demonstrate that CorAl outperforms previous methods both on the ETH lidar benchmark, which includes several indoor and outdoor environments, and the large-scale Oxford and MulRan radar data sets for urban traffic scenarios. The results also demonstrate that CorAl generalizes very well across substantially different environments without the need of retraining. }, year = {2022} } @article{Liao1606320, author = {Liao, Qianfang and Sun, Da and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on robotics}, number = {4}, pages = {2632--2651}, title = {FuzzyPSReg : Strategies of Fuzzy Cluster-based Point Set Registration}, volume = {38}, DOI = {10.1109/TRO.2021.3123898}, keywords = {point set registration, fuzzy clusters, registration quality assessment, 3D point clouds, object pose estimation.}, abstract = {This paper studies the fuzzy cluster-based point set registration (FuzzyPSReg). First, we propose a new metric based on Gustafson-Kessel (GK) fuzzy clustering to measure the alignment of two point clouds.  Unlike the metric based on fuzzy c-means (FCM) clustering in our previous work, the GK-based metric includes orientation properties of the point clouds, thereby providing more information for registration. We then develop the registration quality assessment of the GK-based metric, which is more sensitive to small misalignments than that of the FCM-based metric. Next, by effectively combining the two metrics, we design two FuzzyPSReg strategies with global optimization: i). \textit{FuzzyPSReg-SS}, which extends our previous work and aligns two similar-sized point clouds with greatly improved efficiency; ii). \textit{FuzzyPSReg-O2S}, which aligns two point clouds with a relatively large difference in size and can be used to estimate the pose of an object in a scene. In the experiment, we use different point clouds to test and compare the proposed method with state-of-the-art registration approaches. The results demonstrate the advantages and effectiveness of our method. }, year = {2022} } @article{Arunachalam1635462, author = {Arunachalam, Ajay and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Journal of Mobile Multimedia}, number = {3}, pages = {723--742}, publisher = {River Publishers}, title = {MSI-RPi : Affordable, Portable, and Modular Multispectral Imaging Prototype Suited to Operate in UV, Visible and Mid-Infrared Regions}, volume = {18}, DOI = {10.13052/jmm1550-4646.18312}, keywords = {imaging technology, low-cost, spectral, phenotype, plant science, vision, imaging sensors, agriculture, image analysis}, abstract = {Digital plant inventory provides critical growth insights, given the associated data quality is good. Stable & high-quality image acquisition is critical for further examination. In this work, we showcase an affordable, portable, and modular spectral camera prototype, designed with open hardware’s and open-source software’s. The image sensors used were color, and infrared Pi micro-camera. The designed prototype presents the advantage as being low-cost and modular with respect to other general commercial market available spectral devices. The micro-size connected sensors make it a compact instrument that can be used for any general spectral acquisition purposes, along with the provision of custom selection of the bands, making the presented prototype design a Plug-nd-Play (PnP) setup that can be used in different wide application areas. The images acquired from our custom-built prototype were back-tested by performing image analysis and qualitative assessments. The image acquisition software, and processing algorithm has been programmed, which is bundled with our developed system. Further, an end-to-end automation script is integrated for the users to readily leverage the services on-demand. The design files, schematics, and all the related materials of the spectral block design is open-sourced with open-hardware license & is made available at https://github.com/ajayarunachalam/Multi-Spectral-Imaging-RaspberryPi-Design. The automated data acquisition scripts & the spectral image analysis done is made available at https://github.com/ajayarunachalam/SI-RPi. }, year = {2022} } @article{Arunachalam1523503, author = {Arunachalam, Ajay and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Internet Technology Letters}, number = {1}, eid = {e272}, title = {RaspberryPi‐Arduino (RPA) powered smart mirrored and reconfigurable IoT facility for plant science research}, volume = {5}, DOI = {10.1002/itl2.272}, keywords = {Open-source, hardware, software, remote monitoring, IoT, Raspberry Pi, Arduino, sensor, database, agriculture, Plant Science, MOX, Mirroring, Reconfigurable Mircoservice, plant growth, automation, gas sensors}, abstract = {Continuous monitoring of crops is critical for the sustainability of agriculture. The effects of changes in temperature, light intensity, humidity, pH, soil moisture, gas intensities, etc. have an overall impact on the plant growth. Growth chambers are environmental controlled facilities which needs to be monitored round-the-clock. To improve both the reproducibility, and maintenance of such facilities, remote monitoring plays a very pivotal role. An automated re-configurable & persistent mirrored storage-based remote monitoring system is developed with low-cost open source hardwares and softwares. The system automates sensors deployment, storage (database, logs), and provides an elegant dashboard to visualize the real-time continuous data stream. We propose a new smart AGRO IoT system with robust data acquisition mechanism, and also propose two software component nodes, (i.e., Mirroring and Reconfiguration) running as an instance of the whole IoT facility. The former one is aimed to minimize/avoid the downtime, while the latter one is aimed to leverage the available cores, and better utilization of the computational resources. Our system can be easily deployed in growth chambers, greenhouses, CNC farming test-bed setup, cultivation plots, and further can be also extended to support large-farms with either using multiple individual standalone setup as heterogeneous instances of this facility, or by extending it as master-slave cluster configuration for communication as a single homogeneous instance. Our RaspberryPi-Arduino (RPA) powered solution is scalable, and provides stability for monitoring any environment continuously at ease. }, year = {2022} } @inproceedings{Machado1633897, author = {Machado, Tyrone and Fassbender, David and Taheri, Abdolreza and Eriksson, Daniel and Gupta, Himanshu and Molaei, Amirmasoud and Forte, Paolo and Rai, Prashant and Ghabcheloo, Reza and M{\"a}kinen, Saku and Lilienthal, Achim and Andreasson, Henrik and Geimer, Marcus}, booktitle = {Proceedings of the IEEE ICTE Leading Digital Transformation in Business and Society Conference : }, institution = {Örebro University, School of Science and Technology}, institution = {Bosch Rexroth AG, Elchingen, Germany}, institution = {Bosch Rexroth AG, Elchingen, Germany}, institution = {Faculty of Engineering and Natural Sciences, Tampere University, Tampere, Finland}, institution = {R&D Wheel Loader-Emerging Technologies, Liebherr-Werk Bischofshofen GmbH, Bischofshofen, Austria}, institution = {Institute of Vehicle System Technology, Karlsruhe Institute of Technology, Karlsruhe, Germany}, institution = {Faculty of Engineering and Natural Sciences, Tampere University, Tampere, Finland}, institution = {Faculty of Engineering and Natural Sciences, Tampere University, Tampere, Finland}, institution = {Faculty of Management and Business, Tampere University, Tampere, Finland}, institution = {Institute of Vehicle System, Technology Karlsruhe Institute of Technology, Karlsruhe, Germany}, title = {Autonomous Heavy-Duty Mobile Machinery : A Multidisciplinary Collaborative Challenge}, DOI = {10.1109/ICTE51655.2021.9584498}, keywords = {automation, augmentation, autonomous, collaboration, mobile machinery, transaction cost economics}, abstract = {Heavy-duty mobile machines (HDMMs), are a wide range of off-road machinery used in diverse and critical application areas which are currently facing several issues like skilled labor shortage, safety requirements, and harsh work environments in general. Consequently, efforts are underway to increase automation in HDMMs for increased productivity and safety, eventually transitioning to operator-less autonomous HDMMs to address skilled labor shortages. However, HDMM are complex machines requiring continuous physical and cognitive inputs from human operators. Thus, developing autonomous HDMM is a huge challenge, with current research and developments being performed in several independent research domains. Through this study, we use the bounded rationality concept to propose multidisciplinary collaborations for new autonomous HDMMs and apply the transaction cost economics framework to suggest future implications in the HDMM industry. Furthermore, we introduce and provide a conceptual understanding of the autonomous HDMM industry collaborations as a unified approach, while highlighting the practical implications and challenges of the complex nature of such multidisciplinary collaborations. The collaborative challenges and potentials are mapped out between the following topics: mechanical systems, AI methods, software systems, sensors, data and connectivity, simulations and process optimization, business cases, organization theories, and finally, regulatory frameworks. }, ISBN = {9781665438957}, ISBN = {9781665445986}, year = {2021} } @inproceedings{Alhashimi1803369, author = {Alhashimi, Anas and Adolfsson, Daniel and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden; Computer Engineering Department, University of Baghdad, Baghdad, Iraq}, title = {BFAR – Bounded False Alarm Rate detector for improved radar odometry estimation}, abstract = {This paper presents a new detector for filtering noise from true detections in radar data, which improves the state of the art in radar odometry. Scanning Frequency-Modulated Continuous Wave (FMCW) radars can be useful for localisation and mapping in low visibility, but return a lot of noise compared to (more commonly used) lidar, which makes the detection task more challenging. Our Bounded False-Alarm Rate (BFAR) detector is different from the classical Constant False-Alarm Rate (CFAR) detector in that it applies an affine transformation on the estimated noise level after which the parameters that minimize the estimation error can be learned. BFAR is an optimized combination between CFAR and fixed-level thresholding. Only a single parameter needs to be learned from a training dataset. We apply BFAR tothe use case of radar odometry, and adapt a state-of-the-art odometry pipeline (CFEAR), replacing its original conservative filtering with BFAR. In this way we reduce the state-of-the-art translation/rotation odometry errors from 1.76%/0.5◦/100 m to 1.55%/0.46◦/100 m; an improvement of 12.5%. }, URL = {https://doi.org/10.48550/arXiv.2109.09669}, year = {2021} } @inproceedings{Adolfsson1595903, author = {Adolfsson, Daniel and Magnusson, Martin and Alhashimi, Anas and Lilienthal, Achim and Andreasson, Henrik}, booktitle = {IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS 2021) : }, institution = {Örebro University, School of Science and Technology}, pages = {5462--5469}, title = {CFEAR Radarodometry - Conservative Filtering for Efficient and Accurate Radar Odometry}, series = {IEEE International Conference on Intelligent Robots and Systems. Proceedings}, DOI = {10.1109/IROS51168.2021.9636253}, keywords = {Localization SLAM Mapping Radar}, abstract = {This paper presents the accurate, highly efficient, and learning-free method CFEAR Radarodometry for large-scale radar odometry estimation. By using a filtering technique that keeps the k strongest returns per azimuth and by additionally filtering the radar data in Cartesian space, we are able to compute a sparse set of oriented surface points for efficient and accurate scan matching. Registration is carried out by minimizing a point-to-line metric and robustness to outliers is achieved using a Huber loss. We were able to additionally reduce drift by jointly registering the latest scan to a history of keyframes and found that our odometry method generalizes to different sensor models and datasets without changing a single parameter. We evaluate our method in three widely different environments and demonstrate an improvement over spatially cross-validated state-of-the-art with an overall translation error of 1.76% in a public urban radar odometry benchmark, running at 55Hz merely on a single laptop CPU thread. }, URL = {https://doi.org/10.48550/arXiv.2105.01457}, ISBN = {9781665417143}, ISBN = {9781665417150}, year = {2021} } @inproceedings{Forte1584120, author = {Forte, Paolo and Mannucci, Anna and Andreasson, Henrik and Pecora, Federico}, booktitle = {Proceedings of the 9th ICAPS Workshop on Planning and Robotics (PlanRob) : }, institution = {Örebro University, School of Science and Technology}, title = {Construction Site Automation : Open Challenges for Planning and Robotics}, year = {2021} } @inproceedings{Adolfsson1596301, author = {Adolfsson, Daniel and Magnusson, Martin and Liao, Qianfang and Lilienthal, Achim and Andreasson, Henrik}, booktitle = {10th European Conference on Mobile Robots (ECMR 2021) : }, institution = {Örebro University, School of Science and Technology}, title = {CorAl – Are the point clouds Correctly Aligned?}, volume = {10}, DOI = {10.1109/ECMR50962.2021.9568846}, abstract = {In robotics perception, numerous tasks rely on point cloud registration. However, currently there is no method that can automatically detect misaligned point clouds reliably and without environment-specific parameters. We propose "CorAl", an alignment quality measure and alignment classifier for point cloud pairs, which facilitates the ability to introspectively assess the performance of registration. CorAl compares the joint and the separate entropy of the two point clouds. The separate entropy provides a measure of the entropy that can be expected to be inherent to the environment. The joint entropy should therefore not be substantially higher if the point clouds are properly aligned. Computing the expected entropy makes the method sensitive also to small alignment errors, which are particularly hard to detect, and applicable in a range of different environments. We found that CorAl is able to detect small alignment errors in previously unseen environments with an accuracy of 95% and achieve a substantial improvement to previous methods. }, URL = {https://doi.org/10.48550/arXiv.2109.09820}, year = {2021} } @article{Kurtser1620211, author = {Kurtser, Polina and Castro Alves, Victor and Arunachalam, Ajay and Sj{\"o}berg, Viktor and Hanell, Ulf and Hy{\"o}tyl{\"a}inen, Tuulia and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Scientific Reports}, note = {Funding agency:{\"O}rebro University}, number = {1}, eid = {23876}, title = {Development of novel robotic platforms for mechanical stress induction, and their effects on plant morphology, elements, and metabolism}, volume = {11}, DOI = {10.1038/s41598-021-02581-9}, abstract = {This research evaluates the effect on herbal crops of mechanical stress induced by two specially developed robotic platforms. The changes in plant morphology, metabolite profiles, and element content are evaluated in a series of three empirical experiments, conducted in greenhouse and CNC growing bed conditions, for the case of basil plant growth. Results show significant changes in morphological features, including shortening of overall stem length by up to 40% and inter-node distances by up to 80%, for plants treated with a robotic mechanical stress-induction protocol, compared to control groups. Treated plants showed a significant increase in element absorption, by 20-250% compared to controls, and changes in the metabolite profiles suggested an improvement in plants' nutritional profiles. These results suggest that repetitive, robotic, mechanical stimuli could be potentially beneficial for plants' nutritional and taste properties, and could be performed with no human intervention (and therefore labor cost). The changes in morphological aspects of the plant could potentially replace practices involving chemical treatment of the plants, leading to more sustainable crop production. }, year = {2021} } @article{Paul1548974, author = {Paul, Satyam and Arunachalam, Ajay and Khodadad, Davood and Andreasson, Henrik and Rubanenko, Olena}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Engineering Design and Mathematics, University of the West of England, Bristol, United Kingdom}, institution = {Department of Applied Physics and Electronics, Umeå University, Umeå, Sweden}, institution = {Regional Innovational Center for Electrical Engineering, Faculty of Electrical Engineering, University of West Bohemia, Pilsen, Czech Republic}, journal = {International Journal of Automation and Computing}, number = {4}, pages = {568--580}, publisher = {Chinese Academy of Sciences}, title = {Fuzzy Tuned PID Controller for Envisioned Agricultural Manipulator}, volume = {18}, DOI = {10.1007/s11633-021-1280-5}, keywords = {Proportional-integral-differential (PID) controller, fuzzy logic, precision agriculture, vibration control, stability analysis, modular manipulator, agricultural robot, computer numerical control (CNC) farming}, abstract = {The implementation of image-based phenotyping systems has become an important aspect of crop and plant science research which has shown tremendous growth over the years. Accurate determination of features using images requires stable imaging and very precise processing. By installing a camera on a mechanical arm driven by motor, the maintenance of accuracy and stability becomes non-trivial. As per the state-of-the-art, the issue of external camera shake incurred due to vibration is a great concern in capturing accurate images, which may be induced by the driving motor of the manipulator. So, there is a requirement for a stable active controller for sufficient vibration attenuation of the manipulator. However, there are very few reports in agricultural practices which use control algorithms. Although, many control strategies have been utilized to control the vibration in manipulators associated to various applications, no control strategy with validated stability has been provided to control the vibration in such envisioned agricultural manipulator with simple low-cost hardware devices with the compensation of non-linearities. So, in this work, the combination of proportional-integral-differential (PID) control with type-2 fuzzy logic (T2-F-PID) is implemented for vibration control. The validation of the controller stability using Lyapunov analysis is established. A torsional actuator (TA) is applied for mitigating torsional vibration, which is a new contribution in the area of agricultural manipulators. Also, to prove the effectiveness of the controller, the vibration attenuation results with T2-F-PID is compared with conventional PD/PID controllers, and a type-1 fuzzy PID (T1-F-PID) controller.  }, year = {2021} } @article{Forte1538478, author = {Forte, Paolo and Mannucci, Anna and Andreasson, Henrik and Pecora, Federico}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, number = {3}, pages = {4584--4591}, title = {Online Task Assignment and Coordination in Multi-Robot Fleets}, volume = {6}, DOI = {10.1109/LRA.2021.3068918}, keywords = {Planning, scheduling and coordination, task and motion planning, multi-robot systems}, abstract = {We propose a loosely-coupled framework for integrated task assignment, motion planning, coordination and contro of heterogeneous fleets of robots subject to non-cooperative tasks. The approach accounts for the important real-world requiremen that tasks can be posted asynchronously. We exploit systematic search for optimal task assignment, where interference is considered as a cost and estimated with knowledge of the kinodynamic models and current state of the robots. Safety is guaranteed by an online coordination algorithm, where the absence of collisions is treated as a hard constraint. The relation between the weight of interference cost in task assignment and computational overhead is analyzed empirically, and the approach is compared against alternative realizations using local search algorithms for task assignment. }, year = {2021} } @inproceedings{Adolfsson1803356, author = {Adolfsson, Daniel and Magnusson, Martin and Alhashimi, Anas and Lilienthal, Achim and Andreasson, Henrik}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {School of Science and Technology, Örebro University, Örebro, Sweden}, title = {Oriented surface points for efficient and accurate radar odometry}, abstract = {This paper presents an efficient and accurate radar odometry pipeline for large-scale localization. We propose a radar filter that keeps only the strongest reflections per-azimuth that exceeds the expected noise level. The filtered radar data is used to incrementally estimate odometry by registering the current scan with a nearby keyframe. By modeling local surfaces, we were able to register scans by minimizing a point-to-line metric and accurately estimate odometry from sparse point sets, hence improving efficiency. Specifically, we found that a point-to-line metric yields significant improvements compared to a point-to-point metric when matching sparse sets of surface points. Preliminary results from an urban odometry benchmark show that our odometry pipeline is accurate and efficient compared to existing methods with an overall translation error of 2.05%, down from 2.78% from the previously best published method, running at 12.5ms per frame without need of environmental specific training.  }, URL = {https://doi.org/10.48550/arXiv.2109.09994}, year = {2021} } @article{Liao1396976, author = {Liao, Qianfang and Sun, Da and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Transactions on Pattern Analysis and Machine Intelligence}, note = {Funding agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS)}, number = {9}, pages = {3229--3246}, title = {Point Set Registration for 3D Range Scans Using Fuzzy Cluster-based Metric and Efficient Global Optimization}, volume = {43}, DOI = {10.1109/TPAMI.2020.2978477}, keywords = {Point Set Registration, Computer Vision, fuzzy clusters, registration quality assessment, 3D range scans, branch-and-bound}, abstract = {This study presents a new point set registration method to align 3D range scans. In our method, fuzzy clusters are utilized to represent a scan, and the registration of two given scans is realized by minimizing a fuzzy weighted sum of the distances between their fuzzy cluster centers. This fuzzy cluster-based metric has a broad basin of convergence and is robust to noise. Moreover, this metric provides analytic gradients, allowing standard gradient-based algorithms to be applied for optimization. Based on this metric, the outlier issues are addressed. In addition, for the first time in rigid point set registration, a registration quality assessment in the absence of ground truth is provided. Furthermore, given specified rotation and translation spaces, we derive the upper and lower bounds of the fuzzy cluster-based metric and develop a branch-and-bound (BnB)-based optimization scheme, which can globally minimize the metric regardless of the initialization. This optimization scheme is performed in an efficient coarse-to-fine fashion: First, fuzzy clustering is applied to describe each of the two given scans by a small number of fuzzy clusters. Then, a global search, which integrates BnB and gradient-based algorithms, is implemented to achieve a coarse alignment for the two scans. During the global search, the registration quality assessment offers a beneficial stop criterion to detect whether a good result is obtained. Afterwards, a relatively large number of points of the two scans are directly taken as the fuzzy cluster centers, and then, the coarse solution is refined to be an exact alignment using the gradient-based local convergence. Compared to existing counterparts, this optimization scheme makes a large improvementin terms of robustness and efficiency by virtue of the fuzzy cluster-based metric and the registration quality assessment. In the experiments, the registration results of several 3D range scan pairs demonstrate the accuracy and effectiveness of the proposed method, as well as its superiority to state-of-the-art registration approaches. }, year = {2021} } @article{Arunachalam1548959, author = {Arunachalam, Ajay and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {Computers & electrical engineering}, eid = {107098}, title = {Real-time plant phenomics under robotic farming setup : A vision-based platform for complex plant phenotyping tasks}, volume = {92}, DOI = {10.1016/j.compeleceng.2021.107098}, keywords = {Automation, Computer vision, Image processing, Object localization, Pattern recognition, Perception, Phenotype, Plant science, Precision agriculture, Robotics, Spectral}, abstract = {Plant phenotyping in general refers to quantitative estimation of the plant's anatomical, ontogenetical, physiological and biochemical properties. Analyzing big data is challenging, and non-trivial given the different complexities involved. Efficient processing and analysis pipelines are the need of the hour with the increasing popularity of phenotyping technologies and sensors. Through this work, we largely address the overlapping object segmentation & localization problem. Further, we dwell upon multi-plant pipelines that pose challenges as detection and multi-object tracking becomes critical for single frame/set of frames aimed towards uniform tagging & visual features extraction. A plant phenotyping tool named RTPP (Real-Time Plant Phenotyping) is presented that can aid in the detection of single/multi plant traits, modeling, and visualization for agricultural settings. We compare our system with the plantCV platform. The relationship of the digital estimations, and the measured plant traits are discussed that plays a vital roadmap towards precision farming and/or plant breeding. }, year = {2021} } @article{Chadalavada1374911, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Schindler, Maike and Palm, Rainer and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {Faculty of Human Sciences, University of Cologne, Germany}, journal = {Robotics and Computer-Integrated Manufacturing}, note = {Funding Agencies:KKS SIDUS project AIR: "Action and Intention Recognition in Human Interaction with Autonomous Systems"  20140220H2020 project ILIAD: "Intra-Logistics with Integrated Automatic Deployment: Safe and Scalable Fleets in Shared Spaces"  732737}, eid = {101830}, title = {Bi-directional navigation intent communication using spatial augmented reality and eye-tracking glasses for improved safety in human-robot interaction}, volume = {61}, DOI = {10.1016/j.rcim.2019.101830}, keywords = {Human-robot interaction (HRI), Mobile robots, Intention communication, Eye-tracking, Intention recognition, Spatial augmented reality, Stimulated recall interview, Obstacle avoidance, Safety, Logistics}, abstract = {Safety, legibility and efficiency are essential for autonomous mobile robots that interact with humans. A key factor in this respect is bi-directional communication of navigation intent, which we focus on in this article with a particular view on industrial logistic applications. In the direction robot-to-human, we study how a robot can communicate its navigation intent using Spatial Augmented Reality (SAR) such that humans can intuitively understand the robot's intention and feel safe in the vicinity of robots. We conducted experiments with an autonomous forklift that projects various patterns on the shared floor space to convey its navigation intentions. We analyzed trajectories and eye gaze patterns of humans while interacting with an autonomous forklift and carried out stimulated recall interviews (SRI) in order to identify desirable features for projection of robot intentions. In the direction human-to-robot, we argue that robots in human co-habited environments need human-aware task and motion planning to support safety and efficiency, ideally responding to people's motion intentions as soon as they can be inferred from human cues. Eye gaze can convey information about intentions beyond what can be inferred from the trajectory and head pose of a person. Hence, we propose eye-tracking glasses as safety equipment in industrial environments shared by humans and robots. In this work, we investigate the possibility of human-to-robot implicit intention transference solely from eye gaze data and evaluate how the observed eye gaze patterns of the participants relate to their navigation decisions. We again analyzed trajectories and eye gaze patterns of humans while interacting with an autonomous forklift for clues that could reveal direction intent. Our analysis shows that people primarily gazed on that side of the robot they ultimately decided to pass by. We discuss implications of these results and relate to a control approach that uses human gaze for early obstacle avoidance. }, year = {2020} } @inproceedings{Sun1524100, author = {Sun, L. and Adolfsson, Daniel and Magnusson, Martin and Andreasson, Henrik and Posner, I. and Duckett, T.}, booktitle = {2020 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Lincoln Centre for Autonomous Systems (L-CAS), University of Lincoln, UK}, institution = {University of Oxford, Oxford, UK}, institution = {Lincoln Centre for Autonomous Systems (L-CAS), University of Lincoln, UK}, note = {Funding agency:UK Research {\&}amp; Innovation (UKRI)Engineering {\&}amp; Physical Sciences Research Council (EPSRC) EP/M019918/1}, pages = {4386--4392}, title = {Localising Faster : Efficient and precise lidar-based robot localisation in large-scale environments}, series = {IEEE International Conference on Robotics and Automation (ICRA)}, DOI = {10.1109/ICRA40945.2020.9196708}, keywords = {Gaussian processes, learning (artificial intelligence), mobile robots, Monte Carlo methods, neural nets, optical radar, path planning, recursive estimation, robot vision, SLAM (robots), precise lidar-based robot localisation, large-scale environments, global localisation, Monte Carlo Localisation, MCL, fast localisation system, deep-probabilistic model, Gaussian process regression, deep kernel, precise recursive estimator, Gaussian method, deep probabilistic localisation, large-scale localisation, largescale environment, time 0.8 s, size 0.75 m, Robots, Neural networks, Three-dimensional displays, Laser radar, Kernel}, abstract = {This paper proposes a novel approach for global localisation of mobile robots in large-scale environments. Our method leverages learning-based localisation and filtering-based localisation, to localise the robot efficiently and precisely through seeding Monte Carlo Localisation (MCL) with a deeplearned distribution. In particular, a fast localisation system rapidly estimates the 6-DOF pose through a deep-probabilistic model (Gaussian Process Regression with a deep kernel), then a precise recursive estimator refines the estimated robot pose according to the geometric alignment. More importantly, the Gaussian method (i.e. deep probabilistic localisation) and nonGaussian method (i.e. MCL) can be integrated naturally via importance sampling. Consequently, the two systems can be integrated seamlessly and mutually benefit from each other. To verify the proposed framework, we provide a case study in large-scale localisation with a 3D lidar sensor. Our experiments on the Michigan NCLT long-term dataset show that the proposed method is able to localise the robot in 1.94 s on average (median of 0.8 s) with precision 0.75 m in a largescale environment of approximately 0.5 km 2. }, ISBN = {978-1-7281-7396-2}, ISBN = {978-1-7281-7395-5}, year = {2020} } @inproceedings{Kurtser1414586, author = {Kurtser, Polina and Ringdahl, Ola and Rotstein, Nati and Andreasson, Henrik}, booktitle = {Proceedings of the Northern Lights Deep Learning Workshop : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computing science, Umeå University, Umeå, Sweden}, institution = {Department of Industrial Engineering and Management, Ben-Gurion University of the Negev, Beer Sheva, Israel}, institution = {Centre for Applied Autonomous Sensor Systems}, pages = {1--6}, publisher = {NLDL}, title = {PointNet and geometric reasoning for detection of grape vines from single frame RGB-D data in outdoor conditions}, volume = {1}, DOI = {10.7557/18.5155}, keywords = {RGBD, Deep-learning, Agricultural robotics, outdoor vision, grape}, abstract = {In this paper we present the usage of PointNet, a deep neural network that consumes raw un-ordered point clouds, for detection of grape vine clusters in outdoor conditions. We investigate the added value of feeding the detection network with both RGB and depth, contradictory to common practice in agricultural robotics of relying on RGB only. A total of 5057 pointclouds (1033 manually annotated and 4024 annotated using geometric reasoning) were collected in a field experiment conducted in outdoor conditions on 9 grape vines and 5 plants. The detection results show overall accuracy of 91% (average class accuracy of 74%, precision 53% recall 48%) for RGBXYZ data and a significant drop in recall for RGB or XYZ data only. These results suggest the usage of depth cameras for vision in agricultural robotics is crucial for crops where the color contrast between the crop and the background is complex. The results also suggest geometric reasoning can be used for increased training set size, a major bottleneck in the development of agricultural vision systems. }, year = {2020} } @inproceedings{Kurtser1521090, author = {Kurtser, Polina and Hanell, Ulf and Andreasson, Henrik}, booktitle = {2020 IEEE 16th International Conference on Automation Science and Engineering (CASE) : }, institution = {Örebro University, School of Science and Technology}, pages = {1558--1565}, title = {Robotic Platform for Precise Mechanical Stress Induction in Greenhouses Cultivation}, series = {IEEE International Conference on Automation Science and Engineering}, DOI = {10.1109/CASE48305.2020.9249229}, keywords = {Robotics in Agriculture and Forestry, Agricultural Automation, Industrial Robots}, abstract = {This paper presents an autonomous robotic platform for research of mechanically induced stress in plants growing in controlled greenhouse conditions. The platform provides a range of possibilities for mechanical stimuli including motion type, frequency, speed, and torque. The motions can be tailored for a single pot, making study of mechanical plant stress versatile, rapid and precise. We evaluate the performance of the platform for a use-case of basil plant cultivation. An eight week experiment was performed in greenhouse conditions on 220 basil plants. We show that the induction of mechanical stress by the platform significantly affects plant morphology, such as shortening stem length by 30 % -40 % and inter-node length by 50 % -80 %, while preserving leaf weight which is the main part of the basil plant used for culinary purposes. Results also show that variations in types of mechanical stimuli motions provides significant differences in the effect on plant morphology. Finally we show that decreasing the mechanical stimuli frequency to rates feasible to be performed manually significantly reduces the effect, stressing the need for autonomous systems capable of providing continuous stimuli during day and night. These results validate previously published findings in research of mechanical stress induction, and therefore implies the platform can be used for research of this phenomena. }, ISBN = {978-1-7281-6905-7}, ISBN = {978-1-7281-6904-0}, year = {2020} } @inproceedings{Adolfsson1391182, author = {Adolfsson, Daniel and Lowry, Stephanie and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, booktitle = {2019 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, title = {A Submap per Perspective : Selecting Subsets for SuPer Mapping that Afford Superior Localization Quality}, DOI = {10.1109/ECMR.2019.8870941}, abstract = {This paper targets high-precision robot localization. We address a general problem for voxel-based map representations that the expressiveness of the map is fundamentally limited by the resolution since integration of measurements taken from different perspectives introduces imprecisions, and thus reduces localization accuracy.We propose SuPer maps that contain one Submap per Perspective representing a particular view of the environment. For localization, a robot then selects the submap that best explains the environment from its perspective. We propose SuPer mapping as an offline refinement step between initial SLAM and deploying autonomous robots for navigation. We evaluate the proposed method on simulated and real-world data that represent an important use case of an industrial scenario with high accuracy requirements in an repetitive environment. Our results demonstrate a significantly improved localization accuracy, up to 46% better compared to localization in global maps, and up to 25% better compared to alternative submapping approaches. }, ISBN = {978-1-7281-3605-9}, year = {2019} } @inproceedings{Chadalavada1391172, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Schindler, Maike and Lilienthal, Achim J.}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, institution = {Faculty of Human Sciences, University of Cologne, Germany, Cologne, Gemany}, title = {Implicit intention transference using eye-tracking glasses for improved safety in human-robot interaction}, keywords = {Human-robot interaction, intention communication, eye tracking, spatial augmented reality, electrodermal activity, stress, cognitive load.}, abstract = {Eye gaze can convey information about intentions beyond what can beinferred from the trajectory and head pose of a person. We propose eye-trackingglasses as safety equipment in industrial environments shared by humans androbots. In this work, an implicit intention transference system was developed and implemented. Robot was given access to human eye gaze data, and it responds tothe eye gaze data through spatial augmented reality projections on the sharedfloor space in real-time and the robot could also adapt its path. This allows proactivesafety approaches in HRI for example by attempting to get the human'sattention when they are in the vicinity of a moving robot. A study was conductedwith workers at an industrial warehouse. The time taken to understand the behaviorof the system was recorded. Electrodermal activity and pupil diameter wererecorded to measure the increase in stress and cognitive load while interactingwith an autonomous system, using these measurements as a proxy to quantifytrust in autonomous systems. }, year = {2019} } @article{DellaCorte1291440, author = {Della Corte, Bartolomeo and Andreasson, Henrik and Stoyanov, Todor and Grisetti, Giorgio}, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, institution = {Department of Computer, Control, and Management Engineering “Antonio Ruberti” Sapienza, University of Rome, Rome, Italy}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS) }, number = {2}, pages = {902--909}, title = {Unified Motion-Based Calibration of Mobile Multi-Sensor Platforms With Time Delay Estimation}, volume = {4}, DOI = {10.1109/LRA.2019.2892992}, keywords = {Calibration and Identification}, abstract = {The ability to maintain and continuously update geometric calibration parameters of a mobile platform is a key functionality for every robotic system. These parameters include the intrinsic kinematic parameters of the platform, the extrinsic parameters of the sensors mounted on it, and their time delays. In this letter, we present a unified pipeline for motion-based calibration of mobile platforms equipped with multiple heterogeneous sensors. We formulate a unified optimization problem to concurrently estimate the platform kinematic parameters, the sensors extrinsic parameters, and their time delays. We analyze the influence of the trajectory followed by the robot on the accuracy of the estimate. Our framework automatically selects appropriate trajectories to maximize the information gathered and to obtain a more accurate parameters estimate. In combination with that, our pipeline observes the parameters evolution in long-term operation to detect possible values change in the parameters set. The experiments conducted on real data show a smooth convergence along with the ability to detect changes in parameters value. We release an open-source version of our framework to the community. }, year = {2019} } @inproceedings{Pecora1178913, author = {Pecora, Federico and Andreasson, Henrik and Mansouri, Masoumeh and Petkov, Vilian}, booktitle = {Proceedings of the International Conference on Automated Planning and Scheduling : }, institution = {Örebro University, School of Science and Technology}, institution = {Technical University of Varna, Varna, Bulgaria}, pages = {485--493}, eid = {139850}, title = {A Loosely-Coupled Approach for Multi-Robot Coordination, Motion Planning and Control, ICAPS}, volume = {2018-June}, abstract = {Deploying fleets of autonomous robots in real-world applications requires addressing three problems: motion planning, coordination, and control. Application-specific features of the environment and robots often narrow down the possible motion planning and control methods that can be used. This paper proposes a lightweight coordination method that implements a high-level controller for a fleet of potentially heterogeneous robots. Very few assumptions are made on robot controllers, which are required only to be able to accept set point updates and to report their current state. The approach can be used with any motion planning method for computing kinematically-feasible paths. Coordination uses heuristics to update priorities while robots are in motion, and a simple model of robot dynamics to guarantee dynamic feasibility. The approach avoids a priori discretization of the environment or of robot paths, allowing robots to “follow each other” through critical sections. We validate the method formally and experimentally with different motion planners and robot controllers, in simulation and with real robots. }, year = {2018} } @inproceedings{Chadalavada1270176, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Schindler, Maike and Palm, Rainer and Lilienthal, Achim}, booktitle = {Advances in Manufacturing Technology XXXII : Proceedings of the 16th International Conference on Manufacturing Research, incorporating the 33rd National Conference on Manufacturing Research, September 11–13, 2018, University of Skövde, Sweden}, institution = {Örebro University, School of Science and Technology}, pages = {253--258}, title = {Accessing your navigation plans! Human-Robot Intention Transfer using Eye-Tracking Glasses}, series = {Advances in Transdisciplinary Engineering}, number = {8}, DOI = {10.3233/978-1-61499-902-7-253}, keywords = {Human-Robot Interaction (HRI), Eye-tracking, Eye-Tracking Glasses, Navigation Intent, Implicit Intention Transference, Obstacle avoidance.}, abstract = {Robots in human co-habited environments need human-aware task and motion planning, ideally responding to people’s motion intentions as soon as they can be inferred from human cues. Eye gaze can convey information about intentions beyond trajectory and head pose of a person. Hence, we propose eye-tracking glasses as safety equipment in industrial environments shared by humans and robots. This paper investigates the possibility of human-to-robot implicit intention transference solely from eye gaze data.  We present experiments in which humans wearing eye-tracking glasses encountered a small forklift truck under various conditions. We evaluate how the observed eye gaze patterns of the participants related to their navigation decisions. Our analysis shows that people primarily gazed on that side of the robot they ultimately decided to pass by. We discuss implications of these results and relate to a control approach that uses human eye gaze for early obstacle avoidance. }, ISBN = {978-1-61499-901-0}, ISBN = {978-1-61499-902-7}, year = {2018} } @inproceedings{Adolfsson1282987, author = {Adolfsson, Daniel and Lowry, Stephanie and Andreasson, Henrik}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Improving Localisation Accuracy using Submaps in warehouses}, abstract = {This paper presents a method for localisation in hybrid metric-topological maps built using only local information that is, only measurements that were captured by the robot when it was in a nearby location. The motivation is that observations are typically range and viewpoint dependent and that a map a discrete map representation might not be able to explain the full structure within a voxel. The localisation system uses a method to select submap based on how frequently and where from each submap was updated. This allow the system to select the most descriptive submap, thereby improving the localisation and increasing performance by up to 40%. }, year = {2018} } @article{Lowry1178647, author = {Lowry, Stephanie and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {IEEE Robotics and Automation Letters}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation}, number = {2}, pages = {957--964}, title = {Lightweight, Viewpoint-Invariant Visual Place Recognition in Changing Environments}, volume = {3}, DOI = {10.1109/LRA.2018.2793308}, keywords = {Visual-based navigation, recognition, localization}, abstract = {This paper presents a viewpoint-invariant place recognition algorithm which is robust to changing environments while requiring only a small memory footprint. It demonstrates that condition-invariant local features can be combined with Vectors of Locally Aggregated Descriptors (VLAD) to reduce high-dimensional representations of images to compact binary signatures while retaining place matching capability across visually dissimilar conditions. This system provides a speed-up of two orders of magnitude over direct feature matching, and outperforms a bag-of-visual-words approach with near-identical computation speed and memory footprint. The experimental results show that single-image place matching from non-aligned images can be achieved in visually changing environments with as few as 256 bits (32 bytes) per image. }, year = {2018} } @inproceedings{Lowry1238392, author = {Lowry, Stephanie and Andreasson, Henrik}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, note = {Funding Agency:Semantic Robots Research Profile - Swedish Knowledge Foundation (KKS)}, pages = {7262--7269}, title = {LOGOS : Local geometric support for high-outlier spatial verification}, abstract = {This paper presents LOGOS, a method of spatial verification for visual localization that is robust in the presence of a high proportion of outliers. LOGOS uses scale and orientation information from local neighbourhoods of features to determine which points are likely to be inliers. The inlier points can be used for secondary localization verification and pose estimation. LOGOS is demonstrated on a number of benchmark localization datasets and outperforms RANSAC as a method of outlier removal and localization verification in scenarios that require robustness to many outliers. }, year = {2018} } @inproceedings{Andreasson1159885, author = {Andreasson, Henrik and Adolfsson, Daniel and Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim}, booktitle = {2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {1389--1395}, title = {Incorporating Ego-motion Uncertainty Estimates in Range Data Registration}, series = {Proceedings of the ... IEEE/RSJ International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2017.8202318}, abstract = {Local scan registration approaches commonlyonly utilize ego-motion estimates (e.g. odometry) as aninitial pose guess in an iterative alignment procedure. Thispaper describes a new method to incorporate ego-motionestimates, including uncertainty, into the objective function of aregistration algorithm. The proposed approach is particularlysuited for feature-poor and self-similar environments,which typically present challenges to current state of theart registration algorithms. Experimental evaluation showssignificant improvements in accuracy when using data acquiredby Automatic Guided Vehicles (AGVs) in industrial productionand warehouse environments. }, ISBN = {978-1-5386-2682-5}, ISBN = {978-1-5386-2683-2}, year = {2017} } @inproceedings{Magnusson1151027, author = {Magnusson, Martin and Kucner, Tomasz Piotr and Gholami Shahbandi, Saeed and Andreasson, Henrik and Lilienthal, Achim}, booktitle = {2017 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, institution = {IS lab, Halmstad University, Halmstad, Sweden}, note = {Iliad Project: http://iliad-project.eu}, pages = {620--625}, title = {Semi-Supervised 3D Place Categorisation by Descriptor Clustering}, series = {Proceedings of the ... IEEE/RSJ International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2017.8202216}, abstract = {Place categorisation; i. e., learning to group perception data into categories based on appearance; typically uses supervised learning and either visual or 2D range data. This paper shows place categorisation from 3D data without any training phase. We show that, by leveraging the NDT histogram descriptor to compactly encode 3D point cloud appearance, in combination with standard clustering techniques, it is possible to classify public indoor data sets with accuracy comparable to, and sometimes better than, previous supervised training methods. We also demonstrate the effectiveness of this approach to outdoor data, with an added benefit of being able to hierarchically categorise places into sub-categories based on a user-selected threshold. This technique relieves users of providing relevant training data, and only requires them to adjust the sensitivity to the number of place categories, and provide a semantic label to each category after the process is completed. }, ISBN = {978-1-5386-2682-5}, ISBN = {978-1-5386-2683-2}, year = {2017} } @inproceedings{Mielle1155435, author = {Mielle, Malcolm and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {2017 IEEE International Symposium on Safety, Security and Rescue Robotics (SSRR) : }, institution = {Örebro University, School of Science and Technology}, note = {Funding Agency:EU  ICT-23-2014645101}, pages = {35--40}, eid = {8088137}, title = {SLAM auto-complete : completing a robot map using an emergency map}, DOI = {10.1109/SSRR.2017.8088137}, keywords = {SLAM, robotics, graph, graph SLAM, emergency map, rescue, exploration, auto complete, SLAM, robotics, graph, graph SLAM, plan de secours, sauvetage, exploration, auto complete}, abstract = {In search and rescue missions, time is an important factor; fast navigation and quickly acquiring situation awareness might be matters of life and death. Hence, the use of robots in such scenarios has been restricted by the time needed to explore and build a map. One way to speed up exploration and mapping is to reason about unknown parts of the environment using prior information. While previous research on using external priors for robot mapping mainly focused on accurate maps or aerial images, such data are not always possible to get, especially indoor. We focus on emergency maps as priors for robot mapping since they are easy to get and already extensively used by firemen in rescue missions. However, those maps can be outdated, information might be missing, and the scales of rooms are typically not consistent. We have developed a formulation of graph-based SLAM that incorporates information from an emergency map. The graph-SLAM is optimized using a combination of robust kernels, fusing the emergency map and the robot map into one map, even when faced with scale inaccuracies and inexact start poses. We typically have more than 50% of wrong correspondences in the settings studied in this paper, and the method we propose correctly handles them. Experiments in an office environment show that we can handle up to 70% of wrong correspondences and still get the expected result. The robot can navigate and explore while taking into account places it has not yet seen. We demonstrate this in a test scenario and also show that the emergency map is enhanced by adding information not represented such as closed doors or new walls. }, ISBN = {978-1-5386-3923-8}, ISBN = {978-1-5386-3924-5}, year = {2017} } @inproceedings{Mielle1151040, author = {Mielle, Malcolm and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim}, booktitle = { : }, institution = {Örebro University, School of Science and Technology}, title = {Using emergency maps to add not yet explored places into SLAM}, keywords = {Search and Rescue Robots, SLAM, Mapping}, abstract = {While using robots in search and rescue missions would help ensure the safety of first responders, a key issue is the time needed by the robot to operate. Even though SLAM is faster and faster, it might still be too slow to enable the use of robots in critical situations. One way to speed up operation time is to use prior information. We aim at integrating emergency-maps into SLAM to complete the SLAM map with information about not yet explored part of the environment. By integrating prior information, we can speed up exploration time or provide valuable prior information for navigation, for example, in case of sensor blackout/failure. However, while extensively used by firemen in their operations, emergency maps are not easy to integrate in SLAM since they are often not up to date or with non consistent scales. The main challenge we are tackling is in dealing with the imperfect scale of the rough emergency maps and integrate it with the online SLAM map in addition to challenges due to incorrect matches between these two types of map. We developed a formulation of graph-based SLAM incorporating information from an emergency map into SLAM, and propose a novel optimization process adapted to this formulation. We extract corners from the emergency map and the SLAM map, in between which we find correspondences using a distance measure. We then build a graph representation associating information from the emergency map and the SLAM map. Corners in the emergency map, corners in the robot map, and robot poses are added as nodes in the graph, while odometry, corner observations, walls in the emergency map, and corner associations are added as edges. To conserve the topology of the emergency map, but correct its possible errors in scale, edges representing the emergency map's walls are given a covariance so that they are easy to extend or shrink but hard to rotate. Correspondences between corners represent a zero transformation for the optimization to match them as close as possible. The graph optimization is done by using a combination robust kernels. We first use the Huber kernel, to converge toward a good solution, followed by Dynamic Covariance Scaling, to handle the remaining errors. We demonstrate our system in an office environment. We run the SLAM online during the exploration. Using the map enhanced by information from the emergency map, the robot was able to plan the shortest path toward a place it has not yet explored. This capability can be a real asset in complex buildings where exploration can take up a long time. It can also reduce exploration time by avoiding exploration of dead-ends, or search of specific places since the robot knows where it is in the emergency map. }, year = {2017} } @article{Rituerto931985, author = {Rituerto, Alejandro and Andreasson, Henrik and Murillo, Ana C. and Lilienthal, Achim and Jesus Guerrero, Jose}, institution = {Örebro University, School of Science and Technology}, institution = {Instituto de Investigación en Ingeniería de Aragón, Deptartmento de Informática e Ingeniería de Sistemas, University of Zaragoza, Zaragoza, Spain}, institution = {Instituto de Investigación en Ingeniería de Aragón, Deptartmento de Informática e Ingeniería de Sistemas, University of Zaragoza, Zaragoza, Spain}, institution = {Instituto de Investigación en Ingeniería de Aragón, Deptartmento de Informática e Ingeniería de Sistemas, University of Zaragoza, Zaragoza, Spain}, journal = {Sensors}, note = {Funding Agencies:Spanish Government European Union DPI2015-65962-R}, number = {4}, eid = {493}, publisher = {MDPI AG}, title = {Building an Enhanced Vocabulary of the Robot Environment with a Ceiling Pointing Camera}, volume = {16}, DOI = {10.3390/s16040493}, keywords = {visual vocabulary, computer vision, bag of words, robotics, place recognition, environment description}, abstract = {Mobile robots are of great help for automatic monitoring tasks in different environments. One of the first tasks that needs to be addressed when creating these kinds of robotic systems is modeling the robot environment. This work proposes a pipeline to build an enhanced visual model of a robot environment indoors. Vision based recognition approaches frequently use quantized feature spaces, commonly known as Bag of Words (BoW) or vocabulary representations. A drawback using standard BoW approaches is that semantic information is not considered as a criteria to create the visual words. To solve this challenging task, this paper studies how to leverage the standard vocabulary construction process to obtain a more meaningful visual vocabulary of the robot work environment using image sequences. We take advantage of spatio-temporal constraints and prior knowledge about the position of the camera. The key contribution of our work is the definition of a new pipeline to create a model of the environment. This pipeline incorporates (1) tracking information to the process of vocabulary construction and (2) geometric cues to the appearance descriptors. Motivated by long term robotic applications, such as the aforementioned monitoring tasks, we focus on a configuration where the robot camera points to the ceiling, which captures more stable regions of the environment. The experimental validation shows how our vocabulary models the environment in more detail than standard vocabulary approaches, without loss of recognition performance. We show different robotic tasks that could benefit of the use of our visual vocabulary approach, such as place recognition or object discovery. For this validation, we use our publicly available data-set. }, year = {2016} } @inproceedings{Chadalavada1070994, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Krug, Robert and Lilienthal, Achim}, booktitle = {Proceedings of RSS Workshop "Social Trust in Autonomous Robots 2016" : }, institution = {Örebro University, School of Science and Technology}, title = {Empirical evaluation of human trust in an expressive mobile robot}, keywords = {Human robot interaction, hri, mobile robot, trust, evaluation}, abstract = {A mobile robot communicating its intentions using Spatial Augmented Reality (SAR) on the shared floor space makes humans feel safer and more comfortable around the robot. Our previous work [1] and several other works established this fact. We built upon that work by adding an adaptable information and control to the SAR module. An empirical study about how a mobile robot builds trust in humans by communicating its intentions was conducted. A novel way of evaluating that trust is presented and experimentally shown that adaption in SAR module lead to natural interaction and the new evaluation system helped us discover that the comfort levels between human-robot interactions approached those of human-human interactions. }, year = {2016} } @misc{Andreasson914493, author = {Andreasson, Henrik and Berglund, Sara and Waller, Anna}, institution = {Örebro University, Örebro University School of Business}, pages = {37}, school = {Örebro University, Örebro University School of Business}, school = {Örebro University, Örebro University School of Business}, school = {Örebro University, Örebro University School of Business}, title = {En r{\aa}dgivares f{\"o}rtroendeskapande En studie om hur finansiella r{\aa}dgivare skapar f{\"o}rtroende hos kunden i kundm{\"o}tet}, year = {2016} } @article{Mansouri941273, author = {Mansouri, Masoumeh and Andreasson, Henrik and Pecora, Federico}, institution = {Örebro University, School of Science and Technology}, journal = {Acta Polytechnica}, number = {1}, pages = {47--56}, publisher = {Czech Technical University in Prague}, title = {Hybrid Reasoning for Multi-robot Drill Planning in Open-pit Mines}, volume = {56}, DOI = {10.14311/APP.2016.56.0047}, keywords = {robot planning, multi-robot coordination, on-line reasoning}, abstract = {Fleet automation often involves solving several strongly correlated sub-problems, including task allocation, motion planning, and coordination. Solutions need to account for very specific, domaindependent constraints. In addition, several aspects of the overall fleet management problem become known only online. We propose a method for solving the fleet-management problem grounded on a heuristically-guided search in the space of mutually feasible solutions to sub-problems. We focus on a mining application which requires online contingency handling and accommodating many domainspecific constraints. As contingencies occur, efficient reasoning is performed to adjust the plan online for the entire fleet. }, year = {2016} } @inproceedings{Mosberger1057245, author = {Mosberger, Rafael and Schaffernicht, Erik and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {4131--4136}, title = {Inferring human body posture information from reflective patterns of protective work garments}, DOI = {10.1109/IROS.2016.7759608}, keywords = {Computer Vision, Human Detection, Reflective Clothing, Image Segmentation, Active Illumination, Infrared Vision}, abstract = {We address the problem of extracting human body posture labels, upper body orientation and the spatial location of individual body parts from near-infrared (NIR) images depicting patterns of retro-reflective markers. The analyzed patterns originate from the observation of humans equipped with protective high-visibility garments that represent common safety equipment in the industrial sector. Exploiting the shape of the observed reflectors we adopt shape matching based on the chamfer distance and infer one of seven discrete body posture labels as well as the approximate upper body orientation with respect to the camera. We then proceed to analyze the NIR images on a pixel scale and estimate a figure-ground segmentation together with human body part labels using classification of densely extracted local image patches. Our results indicate a body posture classification accuracy of 80% and figure-ground segmentations with 87% accuracy. }, ISBN = {978-1-5090-3762-9}, year = {2016} } @inproceedings{Bunz1071024, author = {Bunz, Elsa and Chadalavada, Ravi Teja and Andreasson, Henrik and Krug, Robert and Schindler, Maike and Lilienthal, Achim}, booktitle = {Proceedings of RO-MAN 2016 Workshop : Workshop on Communicating Intentions in Human-Robot Interaction}, institution = {Örebro University, School of Science and Technology}, institution = {Örebro University, Örebro, Sweden}, title = {Spatial Augmented Reality and Eye Tracking for Evaluating Human Robot Interaction}, abstract = {Freely moving autonomous mobile robots may leadto anxiety when operating in workspaces shared with humans.Previous works have given evidence that communicating in-tentions using Spatial Augmented Reality (SAR) in the sharedworkspace will make humans more comfortable in the vicinity ofrobots. In this work, we conducted experiments with the robotprojecting various patterns in order to convey its movementintentions during encounters with humans. In these experiments,the trajectories of both humans and robot were recorded witha laser scanner. Human test subjects were also equipped withan eye tracker. We analyzed the eye gaze patterns and thelaser scan tracking data in order to understand how the robot’sintention communication affects the human movement behavior.Furthermore, we used retrospective recall interviews to aid inidentifying the reasons that lead to behavior changes. }, year = {2016} } @article{Krug1044259, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {University of Pisa, Pisa, Italy}, institution = { University of Pisa, Pisa, Italy}, journal = {IEEE Robotics and Automation Letters}, number = {1}, pages = {546--553}, title = {The Next Step in Robot Commissioning : Autonomous Picking and Palletizing}, volume = {1}, DOI = {10.1109/LRA.2016.2519944}, keywords = {Logistics, grasping, autonomous vehicle navigation, robot safety, mobile manipulation}, abstract = {So far, autonomous order picking (commissioning) systems have not been able to meet the stringent demands regarding speed, safety, and accuracy of real-world warehouse automation, resulting in reliance on human workers. In this letter, we target the next step in autonomous robot commissioning: automatizing the currently manual order picking procedure. To this end, we investigate the use case of autonomous picking and palletizing with a dedicated research platform and discuss lessons learned during testing in simplified warehouse settings. The main theoretical contribution is a novel grasp representation scheme which allows for redundancy in the gripper pose placement. This redundancy is exploited by a local, prioritized kinematic controller which generates reactive manipulator motions on-the-fly. We validated our grasping approach by means of a large set of experiments, which yielded an average grasp acquisition time of 23.5 s at a success rate of 94.7%. Our system is able to autonomously carry out simple order picking tasks in a humansafe manner, and as such serves as an initial step toward future commercial-scale in-house logistics automation solutions. }, year = {2016} } @inproceedings{Siddiqui945980, author = {Siddiqui, J. Rafid and Andreasson, Henrik and Driankov, Dimiter and Lilienthal, Achim J.}, booktitle = {2016 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {5766--5773}, eid = {7487800}, title = {Towards visual mapping in industrial environments : a heterogeneous task-specific and saliency driven approach}, series = {IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2016.7487800}, keywords = {Image color analysis, Object detection, Robot sensing systems, Service robots, Training, Visualization}, abstract = {The highly percipient nature of human mind in avoiding sensory overload is a crucial factor which gives human vision an advantage over machine vision, the latter has otherwise powerful computational resources at its disposal given today’s technology. This stresses the need to focus on methods which extract a concise representation of the environment inorder to approach a complex problem such as visual mapping. This article is an attempt of creating a mapping system, which proposes an architecture that combines task-specific and saliency driven approaches. The proposed method is implemented on a warehouse robot. The proposed solution provide a priority framework which enables an industrial robot to build a concise visual representation of the environment. The method is evaluated on data collected by a RGBD sensor mounted on a fork-lift robot and shows promise for addressing visual mapping problems in industrial environments. }, ISBN = {978-146738026-3}, year = {2016} } @inproceedings{Lowry1079851, author = {Lowry, Stephanie and Andreasson, Henrik}, booktitle = {Visual Place Recognition: What is it Good For? workshop, Robotics : Science and Systems (RSS) 2016}, institution = {Örebro University, School of Science and Technology}, title = {Visual place recognition techniques for pose estimation in changing environments}, abstract = {This paper investigates whether visual place recognition techniques can be used to provide pose estimation information for a visual SLAM system operating long-term in an environment where the appearance may change a great deal. It demonstrates that a combination of a conventional SURF feature detector and a condition-invariant feature descriptor such as HOG or conv3 can provide a method of determining the relative transformation between two images, even when there is both appearance change and rotation or viewpoint change. }, year = {2016} } @article{Andreasson807693, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and Cirillo, Marcello and Dimitrov, Dimitar Nikolaev and Driankov, Dimiter and Karlsson, Lars and Lilienthal, Achim J. and Pecora, Federico and Saarinen, Jari Pekka and Sherikov, Aleksander and Stoyanov, Todor}, institution = {Örebro University, School of Science and Technology}, institution = {INRIA - Grenoble, Meylan, France}, institution = {Aalto University, Espo, Finland }, institution = {Centre de recherche Grenoble Rhône-Alpes, Grenoble, France }, journal = {IEEE robotics & automation magazine}, number = {1}, pages = {64--75}, title = {Autonomous transport vehicles : where we are and what is missing}, volume = {22}, DOI = {10.1109/MRA.2014.2381357}, keywords = {Intelligent vehicles; Mobile robots; Resource management; Robot kinematics; Trajectory; Vehicle dynamics}, abstract = {In this article, we address the problem of realizing a complete efficient system for automated management of fleets of autonomous ground vehicles in industrial sites. We elicit from current industrial practice and the scientific state of the art the key challenges related to autonomous transport vehicles in industrial environments and relate them to enabling techniques in perception, task allocation, motion planning, coordination, collision prediction, and control. We propose a modular approach based on least commitment, which integrates all modules through a uniform constraint-based paradigm. We describe an instantiation of this system and present a summary of the results, showing evidence of increased flexibility at the control level to adapt to contingencies. }, year = {2015} } @inproceedings{Andreasson894653, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA), 2015 : }, institution = {Örebro University, School of Science and Technology}, institution = {SCANIA AB, Södertälje, Sweden}, pages = {662--669}, title = {Fast, continuous state path smoothing to improve navigation accuracy}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2015.7139250}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not be widely adopted in commercial AGV systems. The main contribution of this paper addresses this shortcoming by introducing a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. In real world tests presented in this paper we demonstrate that the proposed approach is fast enough for online use (it computes trajectories faster than they can be driven) and highly accurate. In 100 repetitions we achieve mean end-point pose errors below 0.01 meters in translation and 0.002 radians in orientation. Even the maximum errors are very small: only 0.02 meters in translation and 0.008 radians in orientation. }, ISBN = {9781479969234}, year = {2015} } @inproceedings{Mosberger891476, author = {Mosberger, Rafael and Leibe, Bastian and Andreasson, Henrik and Lilienthal, Achim}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {Aachen University, Aachen, Germany}, pages = {697--703}, title = {Multi-band Hough Forests for detecting humans with Reflective Safety Clothing from mobile machinery}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2015.7139255}, keywords = {Human Detection, Robot Vision, Industrial Safety}, abstract = {We address the problem of human detection from heavy mobile machinery and robotic equipment operating at industrial working sites. Exploiting the fact that workers are typically obliged to wear high-visibility clothing with reflective markers, we propose a new recognition algorithm that specifically incorporates the highly discriminative features of the safety garments in the detection process. Termed Multi-band Hough Forest, our detector fuses the input from active near-infrared (NIR) and RGB color vision to learn a human appearance model that not only allows us to detect and localize industrial workers, but also to estimate their body orientation. We further propose an efficient pipeline for automated generation of training data with high-quality body part annotations that are used in training to increase detector performance. We report a thorough experimental evaluation on challenging image sequences from a real-world production environment, where persons appear in a variety of upright and non-upright body positions. }, ISBN = {978-1-4799-6923-4}, year = {2015} } @inproceedings{Krug808145, author = {Krug, Robert and Stoyanov, Todor and Tincani, Vinicio and Andreasson, Henrik and Mosberger, Rafael and Fantoni, Gualtiero and Bicchi, Antonio and Lilienthal, Achim}, booktitle = {IEEE International Conference on Robotics and Automation (ICRA) - Workshop on Robotic Hands, Grasping, and Manipulation : }, institution = {Örebro University, School of Science and Technology}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, institution = {Interdepart. Research Center “E. Piaggio”; University of Pisa, Pisa, Italy}, title = {On Using Optimization-based Control instead of Path-Planning for Robot Grasp Motion Generation}, keywords = {Grasping, Motion Planning, Control}, year = {2015} } @inproceedings{Chadalavada900532, author = {Chadalavada, Ravi Teja and Andreasson, Henrik and Krug, Robert and Lilienthal, Achim}, booktitle = {2015 European Conference on Mobile Robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, publisher = {IEEE conference proceedings}, title = {That’s on my Mind! : Robot to Human Intention Communication through on-board Projection on Shared Floor Space}, DOI = {10.1109/ECMR.2015.7403771}, keywords = {Human Robot Interaction, Intention Communication, Shared spaces}, abstract = {The upcoming new generation of autonomous vehicles for transporting materials in industrial environments will be more versatile, flexible and efficient than traditional AGVs, which simply follow pre-defined paths. However, freely navigating vehicles can appear unpredictable to human workers and thus cause stress and render joint use of the available space inefficient. Here we address this issue and propose on-board intention projection on the shared floor space for communication from robot to human. We present a research prototype of a robotic fork-lift equipped with a LED projector to visualize internal state information and intents. We describe the projector system and discuss calibration issues. The robot’s ability to communicate its intentions is evaluated in realistic situations where test subjects meet the robotic forklift. The results show that already adding simple information, such as the trajectory and the space to be occupied by the robot in the near future, is able to effectively improve human response to the robot. }, ISBN = {978-1-4673-9163-4}, year = {2015} } @inproceedings{Mansouri900492, author = {Mansouri, Masoumeh and Andreasson, Henrik and Pecora, Frederico}, booktitle = {24th International Joint Conference on Artificial Intelligence, Workshop on Hybrid Reasoning : }, institution = {Örebro University, School of Science and Technology}, title = {Towards Hybrid Reasoning for Automated Industrial Fleet Management}, abstract = {More and more industrial applications require fleets of autonomous ground vehicles. Today's solutions to the management of these fleets still largely rely on fixed set-ups of the system, manually specified ad-hoc rules. Our aim is to replace current practice with autonomous fleets and fleet management systems that are easily adaptable to new set-ups and environments, can accommodate human-intelligible rules, and guarantee feasible and meaningful behavior of the fleet. We propose to cast the problem of autonomous fleet management to a meta-CSP that integrates task allocation, coordination and motion planning. We discuss design choices of the approach, and how it caters to the need for hybrid reasoning in terms of symbolic, metric, temporal and spatial constraints. We also comment on a preliminary realization of the system. }, ISBN = {978-1-57735-738-4}, year = {2015} } @article{Mosberger772165, author = {Mosberger, Rafael and Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Sensors}, number = {10}, pages = {17952--17980}, title = {A customized vision system for tracking humans wearing reflective safety clothing from industrial vehicles and machinery}, volume = {14}, DOI = {10.3390/s141017952}, keywords = {infrared vision, human detection, industrial safety, high-visibility clothing}, abstract = {This article presents a novel approach for vision-based detection and tracking of humans wearing high-visibility clothing with retro-reflective markers. Addressing industrial applications where heavy vehicles operate in the vicinity of humans, we deploy a customized stereo camera setup with active illumination that allows for efficient detection of the reflective patterns created by the worker's safety garments. After segmenting reflective objects from the image background, the interest regions are described with local image feature descriptors and classified in order to discriminate safety garments from other reflective objects in the scene. In a final step, the trajectories of the detected humans are estimated in 3D space relative to the camera. We evaluate our tracking system in two industrial real-world work environments on several challenging video sequences. The experimental results indicate accurate tracking performance and good robustness towards partial occlusions, body pose variation, and a wide range of different illumination conditions. }, year = {2014} } @article{Andreasson780236, author = {Andreasson, Henrik and Saarinen, Jari and Cirillo, Marcello and Stoyanov, Todor and Lilienthal, Achim}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics}, number = {4}, pages = {400--416}, publisher = {M D P I AG}, title = {Drive the Drive : From Discrete Motion Plans to Smooth Drivable Trajectories}, volume = {3}, DOI = {10.3390/robotics3040400}, keywords = {Motion planning, motion and path planning, autonomous navigation}, abstract = {Autonomous navigation in real-world industrial environments is a challenging task in many respects. One of the key open challenges is fast planning and execution of trajectories to reach arbitrary target positions and orientations with high accuracy and precision, while taking into account non-holonomic vehicle constraints. In recent years, lattice-based motion planners have been successfully used to generate kinematically and kinodynamically feasible motions for non-holonomic vehicles. However, the discretized nature of these algorithms induces discontinuities in both state and control space of the obtained trajectories, resulting in a mismatch between the achieved and the target end pose of the vehicle. As endpose accuracy is critical for the successful loading and unloading of cargo in typical industrial applications, automatically planned paths have not been widely adopted in commercial AGV systems. The main contribution of this paper is a path smoothing approach, which builds on the output of a lattice-based motion planner to generate smooth drivable trajectories for non-holonomic industrial vehicles. The proposed approach is evaluated in several industrially relevant scenarios and found to be both fast (less than 2 s per vehicle trajectory) and accurate (end-point pose errors below 0.01 m in translation and 0.005 radians in orientation). }, year = {2014} } @incollection{Mosberger780355, author = {Mosberger, Rafael and Andreasson, Henrik}, booktitle = {Field and Service Robotics : Results of the 8th International Conference}, institution = {Örebro University, School of Science and Technology}, pages = {143--157}, title = {Estimating the 3D Position of Humans Wearing a Reflective Vest Using a Single Camera System}, series = {Springer Tracts in Advanced Robotics}, number = {92}, DOI = {10.1007/978-3-642-40686-7_10}, keywords = {People Detection, Industrial Safety, Reflective Vest Detection}, abstract = {This chapter presents a novel possible solution for people detection and estimation of their 3D position in challenging shared environments. Addressing safety critical applications in industrial environments, we make the basic assumption that people wear reflective vests. In order to detect these vests and to discriminate them from other reflective material, we propose an approach based on a single camera equipped with an IR flash. The camera acquires pairs of images, one with and one without IR flash, in short succession. The images forming a pair are then related to each other through feature tracking, which allows to discard features for which the relative intensity difference is small and which are thus not believed to belong to a reflective vest. Next, the local neighbourhood of the remaining features is further analysed. First, a Random Forest classifier is used to discriminate between features caused by a reflective vest and features caused by some other reflective materials. Second, the distance between the camera and the vest features is estimated using a Random Forest regressor. The proposed system was evaluated in one indoor and two challenging outdoor scenarios. Our results indicate very good classification performance and remarkably accurate distance estimation especially in combination with the SURF descriptor, even under direct exposure to sunlight. }, ISBN = {978-3-642-40685-0}, ISBN = {978-3-642-40686-7}, year = {2014} } @incollection{Andreasson780294, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {Field and Service Robotics : Results of the 8th International Conference}, institution = {Örebro University, School of Science and Technology}, institution = {CAISR Centrum för tillämpade intelligenta system (IS-lab), Högskolan i Halmstad, Halmstad, Sweden}, institution = {CAISR Centrum för tillämpade intelligenta system (IS-lab), Högskolan i Halmstad, Halmstad, Sweden}, pages = {585--598}, title = {Gold-Fish SLAM : An Application of SLAM to Localize AGVs}, series = {Springer Tracts in Advanced Robotics}, number = {92}, DOI = {10.1007/978-3-642-40686-7_39}, abstract = {The main focus of this paper is to present a case study of a SLAM solution for Automated Guided Vehicles (AGVs) operating in real-world industrial environments. The studied solution, called Gold-fish SLAM, was implemented to provide localization estimates in dynamic industrial environments, where there are static landmarks that are only rarely perceived by the AGVs. The main idea of Gold-fish SLAM is to consider the goods that enter and leave the environment as temporary landmarks that can be used in combination with the rarely seen static landmarks to compute online estimates of AGV poses. The solution is tested and verified in a factory of paper using an eight ton diesel-truck retrofitted with an AGV control system running at speeds up to 3 m/s. The paper includes also a general discussion on how SLAM can be used in industrial applications with AGVs }, URL = {http://dx.doi.org/10.1007/978-3-642-40686-7_39}, ISBN = {978-3-642-40685-0}, ISBN = {978-3-642-40686-7}, year = {2014} } @inproceedings{Cirillo755510, author = {Cirillo, Marcello and Pecora, Federico and Andreasson, Henrik and Uras, Tansel and Koenig, Sven}, booktitle = {Proceedings of the 24th International Conference on Automated Planning and Scheduling : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Computer Science, University of Southern California, Los Angeles, USA}, institution = {Department of Computer Science, University of Southern California, Los Angeles, USA}, title = {Integrated Motion Planning and Coordination for Industrial Vehicles}, keywords = {multi-robot coordination, non-holonomic motion planning, scheduling}, abstract = {A growing interest in the industrial sector for autonomous ground vehicles has prompted significant investment in fleet management systems. Such systems need to accommodate on-line externally imposed temporal and spatial requirements, and to adhere to them even in the presence of contingencies. Moreover, a fleet management system should ensure correctness, i.e., refuse to commit to requirements that cannot be satisfied. We present an approach to obtain sets of alternative execution patterns (called trajectory envelopes) which provide these guarantees. The approach relies on a constraint-based representation shared among multiple solvers, each of which progressively refines trajectory envelopes following a least commitment principle. }, URL = {http://idm-lab.org/bib/abstracts/papers/icaps14a.pdf}, ISBN = {978-1-57735-660-8}, year = {2014} } @inproceedings{Valencia780074, author = {Valencia, Rafael and Saarinen, Jari and Andreasson, Henrik and Vallv{\’e;}, Joan and Andrade-Cetto, Juan and Lilienthal, Achim J.}, booktitle = {2014 IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, institution = {CSIC-UPC, Barcelona,Spain}, institution = {CSIC-UPC, Barcelona, Spain}, note = {Institut de Robòtica i Informàtica industrial - UPC, Joint Research Center of the Technical University of Catalonia (UPC) and the Spanish Council for Scientific Research (CSIC) focused on robotics research}, pages = {3956--3962}, title = {Localization in highly dynamic environments using dual-timescale NDT-MCL}, series = {Proceedings - IEEE International Conference on Robotics and Automation}, DOI = {10.1109/ICRA.2014.6907433}, keywords = {Localization, Monte Carlo Localization, Intra Logistics, Mapping}, abstract = {Industrial environments are rarely static and oftentheir configuration is continuously changing due to the materialtransfer flow. This is a major challenge for infrastructure freelocalization systems. In this paper we address this challengeby introducing a localization approach that uses a dualtimescaleapproach. The proposed approach - Dual-TimescaleNormal Distributions Transform Monte Carlo Localization (DTNDT-MCL) - is a particle filter based localization method,which simultaneously keeps track of the pose using an aprioriknown static map and a short-term map. The short-termmap is continuously updated and uses Normal DistributionsTransform Occupancy maps to maintain the current state ofthe environment. A key novelty of this approach is that it doesnot have to select an entire timescale map but rather use thebest timescale locally. The approach has real-time performanceand is evaluated using three datasets with increasing levels ofdynamics. We compare our approach against previously proposedNDT-MCL and commonly used SLAM algorithms andshow that DT-NDT-MCL outperforms competing algorithmswith regards to accuracy in all three test cases. }, year = {2014} } @article{Saarinen644380, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding agency:Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {14}, pages = {1627--1644}, title = {3D normal distributions transform occupancy maps : an efficient representation for mapping in dynamic environments}, volume = {32}, DOI = {10.1177/0278364913499415}, abstract = {In order to enable long-term operation of autonomous vehicles in industrial environments numerous challenges need to be addressed. A basic requirement for many applications is the creation and maintenance of consistent 3D world models. This article proposes a novel 3D spatial representation for online real-world mapping, building upon two known representations: normal distributions transform (NDT) maps and occupancy grid maps. The proposed normal distributions transform occupancy map (NDT-OM) combines the advantages of both representations; compactness of NDT maps and robustness of occupancy maps. One key contribution in this article is that we formulate an exact recursive updates for NDT-OMs. We show that the recursive update equations provide natural support for multi-resolution maps. Next, we describe a modification of the recursive update equations that allows adaptation in dynamic environments. As a second key contribution we introduce NDT-OMs and formulate the occupancy update equations that allow to build consistent maps in dynamic environments. The update of the occupancy values are based on an efficient probabilistic sensor model that is specially formulated for NDT-OMs. In several experiments with a total of 17 hours of data from a milk factory we demonstrate that NDT-OMs enable real-time performance in large-scale, long-term industrial setups. }, year = {2013} } @inproceedings{Mosberger647365, author = {Mosberger, Rafael and Andreasson, Henrik}, booktitle = {Proceedings of the IEEE International Conference on Robotics and Automation (ICRA) : }, institution = {Örebro University, School of Science and Technology}, pages = {5850--5857}, title = {An Inexpensive Monocular Vision System for Tracking Humans in Industrial Environments}, series = {Robotics and Automation (ICRA), 2013 IEEE International Conference on}, DOI = {10.1109/ICRA.2013.6631419}, keywords = {Human Detection, Robot Vision, Industrial Safety}, abstract = {We report on a novel vision-based method for reliable human detection from vehicles operating in industrial environments in the vicinity of workers. By exploiting the fact that reflective vests represent a standard safety equipment on most industrial worksites, we use a single camera system and active IR illumination to detect humans by identifying the reflective vest markers. Adopting a sparse feature based approach, we classify vest markers against other reflective material and perform supervised learning of the object distance based on local image descriptors. The integration of the resulting per-feature 3D position estimates in a particle filter finally allows to perform human tracking in conditions ranging from broad daylight to complete darkness. }, ISBN = {978-1-4673-5641-1}, year = {2013} } @article{Stoyanov618586, author = {Stoyanov, Todor and Mojtahedzadeh, Rasoul and Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, number = {10}, pages = {1094--1105}, title = {Comparative evaluation of range sensor accuracy for indoor mobile robotics and automated logistics applications}, volume = {61}, DOI = {10.1016/j.robot.2012.08.011}, abstract = {3D range sensing is an important topic in robotics, as it is a component in vital autonomous subsystems such as for collision avoidance, mapping and perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements, without using a precise ground truth environment model, is proposed. This article presents an extensive evaluation of three novel depth sensors — the Swiss Ranger SR-4000, Fotonic B70 and Microsoft Kinect. Tests are concentrated on the automated logistics scenario of container unloading. Six different setups of box-, cylinder-, and sack-shaped goods inside a mock-up container are used to collect range measurements. Comparisons are performed against hand-crafted ground truth data, as well as against a reference actuated Laser Range Finder (aLRF) system. Additional test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. }, year = {2013} } @inproceedings{Saarinen644375, author = {Saarinen, Jari and Stoyanov, Todor and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4694--4701}, title = {Fast 3D mapping in highly dynamic environments using normal distributions transform occupancy maps}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697032}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Mosberger684470, author = {Mosberger, Rafael and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, pages = {638--644}, title = {Multi-human Tracking using High-visibility Clothing for Industrial Safety}, series = {Intelligent Robots and Systems (IROS), 2013 IEEE/RSJ International Conference on}, DOI = {10.1109/IROS.2013.6696418}, keywords = {Human Detection, Robot Vision, Industrial Safety}, abstract = {We propose and evaluate a system for detecting and tracking multiple humans wearing high-visibility clothing from vehicles operating in industrial work environments. We use a customized stereo camera setup equipped with IR flash and IR filter to detect the reflective material on the worker's garments and estimate their trajectories in 3D space. An evaluation in two distinct industrial environments with different degrees of complexity demonstrates the approach to be robust and accurate for tracking workers in arbitrary body poses, under occlusion, and under a wide range of different illumination settings. }, year = {2013} } @inproceedings{Saarinen644376, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {382--389}, title = {Normal distributions transform monte-carlo localization (NDT-MCL)}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6696380}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Stoyanov644379, author = {Stoyanov, Todor and Saarinen, Jari and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS) : }, institution = {Örebro University, School of Science and Technology}, note = {to appear}, pages = {4702--4708}, title = {Normal distributions transform occupancy map fusion : simultaneous mapping and tracking in large scale dynamic environments}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2013.6697033}, ISBN = {978-1-4673-6358-7}, year = {2013} } @inproceedings{Saarinen622633, author = {Saarinen, Jari and Andreasson, Henrik and Stoyanov, Todor and Ala-Luhtala, Juha and Lilienthal, Achim J.}, booktitle = {IEEE International Conference on Robotics and Automation : }, institution = {Örebro University, School of Science and Technology}, institution = {Aalto University of Technology, Aalto, Finland}, pages = {2233--2238}, title = {Normal distributions transform occupancy maps : application to large-scale online 3D mapping}, DOI = {10.1109/ICRA.2013.6630878}, abstract = {Autonomous vehicles operating in real-world industrial environments have to overcome numerous challenges, chief among which is the creation and maintenance of consistent 3D world models. This paper proposes to address the challenges of online real-world mapping by building upon previous work on compact spatial representation and formulating a novel 3D mapping approach — the Normal Distributions Transform Occupancy Map (NDT-OM). The presented algorithm enables accurate real-time 3D mapping in large-scale dynamic nvironments employing a recursive update strategy. In addition, the proposed approach can seamlessly provide maps at multiple resolutions allowing for fast utilization in high-level functions such as localization or path planning. Compared to previous approaches that use the NDT representation, the proposed NDT-OM formulates an exact and efficient recursive update formulation and models the full occupancy of the map. }, year = {2013} } @inproceedings{Mosberger619101, author = {Mosberger, Rafael and Andreasson, Henrik}, booktitle = {Proceedings of the International Conference on Field and Service Robotics (FSR) : }, institution = {Örebro University, School of Science and Technology}, title = {Estimating the 3d position of humans wearing a reflective vest using a single camera system}, series = {Springer Tracts in Advanced Robotics}, abstract = {This paper presents a novel possible solution for people detection and estimation of their 3D position in challenging shared environments. Addressing safety critical applications in industrial environments, we make the basic assumption that people wear reflective vests. In order to detect these vests and to discriminate them from other reflective material, we propose an approach based on a single camera equipped with an IR flash. The camera acquires pairs of images, one with and one without IR flash, in short succession. The images forming a pair are then related to each other through feature tracking, which allows to discard features for which the relative intensity difference is small and which are thus not believed to belong to a reflective vest. Next, the local neighbourhood of the remaining features is further analysed. First, a Random Forest classifier is used to discriminate between features caused by a reflective vest and features caused by some other reflective materials. Second, the distance between the camera and the vest features is estimated using a Random Forest regressor. The proposed system was evaluated in one indoor and two challenging outdoor scenarios. Our results indicate very good classification performance and remarkably accurate distance estimation especially in combination with the SURF descriptor, even under direct exposure to sunlight. }, year = {2012} } @article{Stoyanov618701, author = {Stoyanov, Todor and Magnusson, Martin and Lilienthal, Achim J. and Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, journal = {The international journal of robotics research}, note = {Funding Agencies:European Union FP7 - 270350Kunskaps och Kompetensutveckling Stiftelsen project SAUNA 20100315}, number = {12}, pages = {1377--1393}, title = {Fast and accurate scan registration through minimization of the distance between compact 3D NDT Representations}, volume = {31}, DOI = {10.1177/0278364912460895}, keywords = {point set registration; mapping; normal distributions transform}, abstract = {Registration of range sensor measurements is an important task in mobile robotics and has received a lot of attention. Several iterative optimization schemes have been proposed in order to align three-dimensional (3D) point scans. With the more widespread use of high-frame-rate 3D sensors and increasingly more challenging application scenarios for mobile robots, there is a need for fast and accurate registration methods that current state-of-the-art algorithms cannot always meet. This work proposes a novel algorithm that achieves accurate point cloud registration an order of a magnitude faster than the current state of the art. The speedup is achieved through the use of a compact spatial representation: the Three-Dimensional Normal Distributions Transform (3D-NDT). In addition, a fast, global-descriptor based on the 3D-NDT is defined and used to achieve reliable initial poses for the iterative algorithm. Finally, a closed-form expression for the covariance of the proposed method is also derived. The proposed algorithms are evaluated on two standard point cloud data sets, resulting in stable performance on a par with or better than the state of the art. The implementation is available as an open-source package for the Robot Operating system (ROS). }, year = {2012} } @inproceedings{Andreasson311038, author = {Andreasson, Henrik and Bouguerra, Abdelbaki and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {Proceedings of the International Conference on Field and Service Robotics (FSR), July 2012. : }, institution = {Örebro University, School of Science and Technology}, title = {Gold-fish SLAM : an application of SLAM to localize AGVs}, keywords = {Mobile robotics, AGV localization}, abstract = {The main focus of this paper is to present a case study of a SLAM solution for Automated Guided Vehicles (AGVs) operating in real-world industrial environ- ments. The studied solution, called Gold-fish SLAM, was implemented to provide localization estimates in dynamic industrial environments, where there are static landmarks that are only rarely perceived by the AGVs. The main idea of Gold-fish SLAM is to consider the goods that enter and leave the environment as temporary landmarks that can be used in combination with the rarely seen static landmarks to compute online estimates of AGV poses. The solution is tested and verified in a factory of paper using an eight ton diesel-truck retrofitted with an AGV control sys- tem running at speeds up to 3 meters per second. The paper includes also a general discussion on how SLAM can be used in industrial applications with AGVs. }, year = {2012} } @inproceedings{Saarinen1190203, author = {Saarinen, Jari and Andreasson, Henrik and Lilienthal, Achim}, booktitle = {2012 IEEE/RSJ International Conference on Intelligent Robots and Systems : }, institution = {Örebro University, School of Science and Technology}, institution = {Department of Automation and Systems Technology, Aalto University, Alto, Finland}, pages = {3489--3495}, title = {Independent Markov Chain Occupancy Grid Maps for Representation of Dynamic Environments}, series = {IEEE International Conference on Intelligent Robots and Systems}, DOI = {10.1109/IROS.2012.6385629}, keywords = {Markov chain, Poisson process, model of dynamics}, abstract = {In this paper we propose a new grid based approach to model a dynamic environment. Each grid cell is assumed to be an independent Markov chain (iMac) with two states. The state transition parameters are learned online and modeled as two Poisson processes. As a result, our representation not only encodes the expected occupancy of the cell, but also models the expected dynamics within the cell. The paper also presents a strategy based on recency weighting to learn the model parameters from observations that is able to deal with non-stationary cell dynamics. Moreover, an interpretation of the model parameters with discussion about the convergence rates of the cells is presented. The proposed model is experimentally validated using offline data recorded with a Laser Guided Vehicle (LGV) system running in production use. }, ISBN = {978-1-4673-1736-8}, ISBN = {978-1-4673-1737-5}, ISBN = {978-1-4673-1735-1}, year = {2012} } @inproceedings{Andreasson618702, author = {Andreasson, Henrik and Stoyanov, Todor}, booktitle = {Proc. of International Conference on Robotics and Automation (ICRA) Workshop on Semantic Perception, Mapping and Exploration (SPME) : }, institution = {Örebro University, School of Science and Technology}, note = {The conference table of contents may be found on http://toc.proceedings.com/15154webtoc.pdf}, title = {Real time registration of RGB-D data using local visual features and 3D-NDT registration}, abstract = {Recent increased popularity of RGB-D capable sensors in robotics has resulted in a surge of related RGBD registration methods. This paper presents several RGB-D registration algorithms based on combinations between local visual feature and geometric registration. Fast and accurate transformation refinement is obtained by using a recently proposed geometric registration algorithm, based on the Three-Dimensional Normal Distributions Transform (3D-NDT). Results obtained on standard data sets have demonstrated mean translational errors on the order of 1 cm and rotational errors bellow 1 degree, at frame processing rates of about 15 Hz. }, ISBN = {9781467314039}, year = {2012} } @inproceedings{Stoyanov540987, author = {Stoyanov, Todor and Louloudi, Athanasia and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the 5th European Conference on Mobile Robots, ECMR 2011 : }, institution = {Örebro University, School of Science and Technology}, pages = {19--24}, title = {Comparative evaluation of range sensor accuracy in indoor environments}, abstract = {3D range sensing is one of the important topics in robotics, as it is often a component in vital autonomous subsystems like collision avoidance, mapping and semantic perception. The development of affordable, high frame rate and precise 3D range sensors is thus of considerable interest. Recent advances in sensing technology have produced several novel sensors that attempt to meet these requirements. This work is concerned with the development of a holistic method for accuracy evaluation of the measurements produced by such devices. A method for comparison of range sensor output to a set of reference distance measurements is proposed. The approach is then used to compare the behavior of three integrated range sensing devices, to that of a standard actuated laser range sensor. Test cases in an uncontrolled indoor environment are performed in order to evaluate the sensors’ performance in a challenging, realistic application scenario. }, year = {2011} } @article{Andreasson274835, author = {Andreasson, Henrik and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, journal = {Robotics and Autonomous Systems}, note = {Selected papers from the 2007 European Conference on Mobile Robots (ECMR ’07)}, number = {2}, pages = {157--165}, title = {6D scan registration using depth-interpolated local image features}, volume = {58}, DOI = {10.1016/j.robot.2009.09.011}, keywords = {Registration, Vision, Laser Range Finder, SLAM}, abstract = {This paper describes a novel registration approach that is based on a combination of visual and 3D range information.To identify correspondences, local visual features obtained from images of a standard color camera are compared and the depth of matching features (and their position covariance) is determined from the range measurements of a 3D laserscanner. The matched depth-interpolated image features allows to apply registration with known correspondences.We compare several ICP variants in this paper and suggest an extension that considers the spatial distance betweenmatching features to eliminate false correspondences. Experimental results are presented in both outdoor and indoor environments. In addition to pair-wise registration, we also propose a global registration method that registers allscan poses simultaneously. }, year = {2010} } @inproceedings{Stoyanov445259, author = {Stoyanov, Todor and Magnusson, Martin and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {IEEE/RSJ 2010 International Conference on Intelligent Robots and Systems (IROS 2010) : }, institution = {Örebro University, School of Science and Technology}, pages = {3263--3268}, title = {Path planning in 3D environments using the normal distributions transform}, DOI = {10.1109/IROS.2010.5650789}, abstract = {Planning feasible paths in fully three-dimensional environments is a challenging problem. Application of existing algorithms typically requires the use of limited 3D representations that discard potentially useful information. This article proposes a novel approach to path planning that utilizes a full 3D representation directly: the Three-Dimensional Normal Distributions Transform (3D-NDT). The well known wavefront planner is modified to use 3D-NDT as a basis for map representation and evaluated using both indoor and outdoor data sets. The use of 3D-NDT for path planning is thus demonstrated to be a viable choice with good expressive capabilities. }, ISBN = {978-1-4244-6675-7}, year = {2010} } @inproceedings{Astrand274865, author = {{\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn and Bouguerra, Abdelbaki and Andreasson, Henrik and Lilienthal, Achim J.}, booktitle = {Proceedings of the 4th Swedish Workshop on Autonomous Robotics (SWAR)}, institution = {Örebro University, School of Science and Technology}, institution = {Halmstad University}, institution = {Halmstad University}, pages = {56--57}, title = {An Autonomous Robotic System for Load Transportation}, year = {2009} } @inproceedings{Bouguerra274885, author = {Bouguerra, Abdelbaki and Andreasson, Henrik and Lilienthal, Achim J. and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {2009 IEEE Conference on Emerging Technologies & Factory Automation (EFTA 2009) : }, institution = {Örebro University, School of Science and Technology}, institution = {Halmstad University, Halmstad, Sweden}, institution = {Halmstad University, Halmstad, Sweden}, pages = {1563--1566}, title = {An autonomous robotic system for load transportation}, series = {IEEE International Conference on Emerging Technologies and Factory Automation-ETFA}, DOI = {10.1109/ETFA.2009.5347247}, keywords = {AGV system; Autonomous robotic systems; Dynamic environments; Material handling; Object Detection; Runtimes}, abstract = {This paper presents an overview of an autonomous robotic material handling system. The goal of the system is to extend the functionalities of traditional AGVs to operate in highly dynamic environments. Traditionally, the reliable functioning of AGVs relies on the availability of adequate infrastructure to support navigation. In the target environments of our system, such infrastructure is difficult to setup in an efficient way. Additionally, the location of objects to handle are unknown, which requires that the system be able to detect and track object positions at runtime. Another requirement of the system is to be able to generate trajectories dynamically, which is uncommon in industrial AGV systems. }, ISBN = {978-1-4244-2727-7}, ISBN = {978-1-4244-2728-4}, year = {2009} } @inproceedings{Magnusson391763, author = {Magnusson, Martin and Andreasson, Henrik and N{\"u}chter, A. and Lilienthal, Achim J.}, booktitle = {IEEE International Conference on Robotics and Automation 2009 (ICRA '09) : }, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen, Bremen, Germany}, note = {Funding Agency:Atlas Copco Rock Drills }, pages = {23--28}, title = {Appearance-based loop detection from 3D laser data using the normal distributions transform}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ROBOT.2009.5152712}, abstract = {We propose a new approach to appearance based loop detection from metric 3D maps, exploiting the NDT surface representation. Locations are described with feature histograms based on surface orientation and smoothness, and loop closure can be detected by matching feature histograms. We also present a quantitative performance evaluation using two realworld data sets, showing that the proposed method works well in different environments.© 2009 IEEE. }, ISBN = {9781424427888}, ISBN = {9781424427895}, year = {2009} } @article{Magnusson274842, author = {Magnusson, Martin and Andreasson, Henrik and N{\"u}chter, Andreas and Lilienthal, Achim J.}, institution = {Örebro University, School of Science and Technology}, institution = {Jacobs University Bremen}, journal = {Journal of Field Robotics}, number = {11-12}, pages = {892--914}, title = {Automatic appearance-based loop detection from three-dimensional laser data using the normal distributions transform}, volume = {26}, DOI = {10.1002/rob.20314}, abstract = {We propose a new approach to appearance-based loop detection for mobile robots, usingthree-dimensional (3D) laser scans. Loop detection is an important problem in the simultaneouslocalization and mapping (SLAM) domain, and, because it can be seen as theproblem of recognizing previously visited places, it is an example of the data associationproblem. Without a flat-floor assumption, two-dimensional laser-based approaches arebound to fail in many cases. Two of the problems with 3D approaches that we address inthis paper are how to handle the greatly increased amount of data and how to efficientlyobtain invariance to 3D rotations.We present a compact representation of 3D point cloudsthat is still discriminative enough to detect loop closures without false positives (i.e.,detecting loop closure where there is none). A low false-positive rate is very important becausewrong data association could have disastrous consequences in a SLAM algorithm.Our approach uses only the appearance of 3D point clouds to detect loops and requires nopose information. We exploit the normal distributions transform surface representationto create feature histograms based on surface orientation and smoothness. The surfaceshape histograms compress the input data by two to three orders of magnitude. Becauseof the high compression rate, the histograms can be matched efficiently to compare theappearance of two scans. Rotation invariance is achieved by aligning scans with respectto dominant surface orientations. We also propose to use expectation maximization to fit a gamma mixture model to the output similarity measures in order to automatically determinethe threshold that separates scans at loop closures from nonoverlapping ones.Wediscuss the problem of determining ground truth in the context of loop detection and thedifficulties in comparing the results of the few available methods based on range information.Furthermore, we present quantitative performance evaluations using three realworlddata sets, one of which is highly self-similar, showing that the proposed methodachieves high recall rates (percentage of correctly identified loop closures) at low falsepositiverates in environments with different characteristics. }, year = {2009} } @book{Andreasson306494, author = {Andreasson, Henrik}, institution = {Örebro University, School of Science and Technology}, pages = {206}, title = {Camera based navigation by mobile robots : local visual feature based localisation and mapping}, abstract = {The most important property of a mobile robot is the fact that it is mobile. How to give a robot the skills required to navigate around its environment is therefore an important topic in mobile robotics. Navigation, both for robots and humans, typically involves a map. The map can be used, for example, to estimate a pose based on observations (localisation) or determine a suitable path between to locations. Maps are available nowadays for us humans with few exceptions, however, maps suitable for mobile robots rarely exists. In addition, to relate sensor readings to a map requires that the map content and the observation is compatible, i.e. different robots may require different maps for the same area. This book addresses some of the fundamental problems related to mobile robot navigation (registration, localisation and mapping) using cameras as the primary sensor input. Small salient regions (local visual features) are extracted from each camera image, where each region can be seen as a fingerprint. Many fingerprint matches implicates a high likelihood that they corresponding images originate from a similar location, which is a central property utilised in this work. }, ISBN = {978-3-639-12452-1}, year = {2009} } @inproceedings{Bouguerra274878, author = {Bouguerra, Abdelbaki and Andreasson, Henrik and Lilienthal, Achim J. and {\AA}strand, Bj{\"o}rn and R{\"o}gnvaldsson, Thorsteinn}, booktitle = {Proceedings of the 4th European conference on mobile robots (ECMR) : }, institution = {Örebro University, School of Science and Technology}, institution = {Halmstad University}, institution = {Halmstad University, Sweden}, pages = {93--98}, title = {MALTA : a system of multiple autonomous trucks for load transportation}, keywords = {Autonomous Vehicles, Load Handling, AGVs}, abstract = {This paper presents an overview of an autonomousrobotic material handling system. The goal of the system is toextend the functionalities of traditional AGVs to operate in highlydynamic environments. Traditionally, the reliable functioning ofAGVs relies on the availability of adequate infrastructure tosupport navigation. In the target environments of our system,such infrastructure is difficult to setup in an efficient way.Additionally, the location of objects to handle are unknown,which requires that the system be able to detect and track objectpositions at runtime. Another requirement of the system is to beable to generate trajectories dynamically, which is uncommon inindustrial AGV systems. }, ISBN = {978-953-6037-54-4}, year = {2009} } @article{Andreasson158115, author = {Andreasson, Henrik and Duckett, Tom and Lilienthal, Achim J.}, institution = {Örebro University, Department of Technology}, institution = {University of Lincoln, University of Lincoln, UK}, journal = {IEEE Transactions on Robotics}, number = {5}, pages = {991--1001}, title = {A Minimalistic Approach to Appearance-Based Visual SLAM}, volume = {24}, DOI = {10.1109/TRO.2008.2004642}, keywords = {Omnidirectional vision, simultaneous localization and mapping (SLAM)}, abstract = {This paper presents a vision-based approach to SLAM in indoor / outdoor environments with minimalistic sensing and computational requirements. The approach is based on a graph representation of robot poses, using a relaxation algorithm to obtain a globally consistent map. Each link corresponds to a relative measurement of the spatial relation between the two nodes it connects. The links describe the likelihood distribution of the relative pose as a Gaussian distribution. To estimate the covariance matrix for links obtained from an omni-directional vision sensor, a novel method is introduced based on the relative similarity of neighbouring images. This new method does not require determining distances to image features using multiple view geometry, for example. Combined indoor and outdoor experiments demonstrate that the approach can handle qualitatively different environments (without modification of the parameters), that it can cope with violations of the “flat floor assumption” to some degree, and that it scales well with increasing size of the environment, producing topologically correct and geometrically accurate maps at low computational cost. Further experiments demonstrate that the approach is also suitable for combining multiple overlapping maps, e.g. for solving the multi-robot SLAM problem with unknown initial poses. }, year = {2008} } @phdthesis{Andreasson136254, author = {Andreasson, Henrik}, institution = {Örebro University, Department of Technology}, pages = {204}, publisher = {Örebro universitet}, school = {Örebro University, Department of Technology}, title = {Local visual feature based localisation and mapping by mobile robots}, series = {Örebro Studies in Technology}, ISSN = {1650-8580}, number = {31}, keywords = {mobile robotics, registration, localisation, SLAM, mapping, omnidirectional vision, 3D vision, appearance based}, abstract = {This thesis addresses the problems of registration, localisation and simultaneous localisation and mapping (SLAM), relying particularly on local visual features extracted from camera images. These fundamental problems in mobile robot navigation are tightly coupled. Localisation requires a representation of the environment (a map) and registration methods to estimate the pose of the robot relative to the map given the robot’s sensory readings. To create a map, sensor data must be accumulated into a consistent representation and therefore the pose of the robot needs to be estimated, which is again the problem of localisation. The major contributions of this thesis are new methods proposed to address the registration, localisation and SLAM problems, considering two different sensor configurations. The first part of the thesis concerns a sensor configuration consisting of an omni-directional camera and odometry, while the second part assumes a standard camera together with a 3D laser range scanner. The main difference is that the former configuration allows for a very inexpensive set-up and (considering the possibility to include visual odometry) the realisation of purely visual navigation approaches. By contrast, the second configuration was chosen to study the usefulness of colour or intensity information in connection with 3D point clouds (“coloured point clouds”), both for improved 3D resolution (“super resolution”) and approaches to the fundamental problems of navigation that exploit the complementary strengths of visual and range information. Considering the omni-directional camera/odometry setup, the first part introduces a new registration method based on a measure of image similarity. This registration method is then used to develop a localisation method, which is robust to the changes in dynamic environments, and a visual approach to metric SLAM, which does not require position estimation of local image features and thus provides a very efficient approach. The second part, which considers a standard camera together with a 3D laser range scanner, starts with the proposal and evaluation of non-iterative interpolation methods. These methods use colour information from the camera to obtain range information at the resolution of the camera image, or even with sub-pixel accuracy, from the low resolution range information provided by the range scanner. Based on the ability to determine depth values for local visual features, a new registration method is then introduced, which combines the depth of local image features and variance estimates obtained from the 3D laser range scanner to realise a vision-aided 6D registration method, which does not require an initial pose estimate. This is possible because of the discriminative power of the local image features used to determine point correspondences (data association). The vision-aided registration method is further developed into a 6D SLAM approach where the optimisation constraint is based on distances of paired local visual features. Finally, the methods introduced in the second part are combined with a novel adaptive normal distribution transform (NDT) representation of coloured 3D point clouds into a robotic difference detection system. }, ISBN = {978-91-7668-614-0}, year = {2008} } @inproceedings{Andreasson138559, author = {Andreasson, Henrik and Magnusson, Martin and Lilienthal, Achim}, booktitle = {2007 IEEE/RSJ international conference on intelligent robots and systems : }, institution = {Örebro University, Department of Technology}, institution = {Örebro University, Department of Natural Sciences}, pages = {3429--3435}, eid = {4399381}, title = {Has something changed here? : Autonomous difference detection for security patrol robots}, DOI = {10.1109/IROS.2007.4399381}, abstract = {This paper presents a system for autonomous change detection with a security patrol robot. In an initial step a reference model of the environment is created and changes are then detected with respect to the reference model as differences in coloured 3D point clouds, which are obtained from a 3D laser range scanner and a CCD camera. The suggested approach introduces several novel aspects, including a registration method that utilizes local visual features to determine point correspondences (thus essentially working without an initial pose estimate) and the 3D-NDT representation with adaptive cell size to efficiently represent both the spatial and colour aspects of the reference model. Apart from a detailed description of the individual parts of the difference detection system, a qualitative experimental evaluation in an indoor lab environment is presented, which demonstrates that the suggested system is able register and detect changes in spatial 3D data and also to detect changes that occur in colour space and are not observable using range values only. }, ISBN = {978-1-4244-0912-9}, year = {2007} } @inproceedings{Andreasson138560, author = {Andreasson, Henrik and Duckett, Tom and Lilienthal, Achim J.}, booktitle = {2007 IEEE international conference on robotics and automation (ICRA) : }, institution = {Örebro University, Department of Technology}, institution = {Dept. of Computing & Informatics, University of Lincoln, Lincoln, United Kingdom}, pages = {4096--4101}, eid = {4209726}, title = {Mini-SLAM : minimalistic visual SLAM in large-scale environments based on a new interpretation of image similarity}, series = {IEEE International Conference on Robotics and Automation ICRA}, DOI = {10.1109/ROBOT.2007.364108}, abstract = {This paper presents a vision-based approach to SLAM in large-scale environments with minimal sensing and computational requirements. The approach is based on a graphical representation of robot poses and links between the poses. Links between the robot poses are established based on odometry and image similarity, then a relaxation algorithm is used to generate a globally consistent map. To estimate the covariance matrix for links obtained from the vision sensor, a novel method is introduced based on the relative similarity of neighbouring images, without requiring distances to image features or multiple view geometry. Indoor and outdoor experiments demonstrate that the approach scales well to large-scale environments, producing topologically correct and geometrically accurate maps at minimal computational cost. Mini-SLAM was found to produce consistent maps in an unstructured, large-scale environment (the total path length was 1.4 km) containing indoor and outdoor passages. }, ISBN = {978-1-4244-0601-2}, year = {2007} } @inproceedings{Andreasson138558, author = {Andreasson, Henrik and Triebel, Rudolph and Lilienthal, Achim J.}, booktitle = {Autonomos Agents and Robots : }, institution = {Örebro University, Department of Technology}, institution = {Department of Computer Science, University of Freiburg, Freiburg, Germany}, pages = {83--90}, eid = {4399381}, publisher = {Springer}, title = {Non-iterative Vision-based Interpolation of 3D Laser Scans}, series = {Studies in Computational Intelligence}, number = {76}, volume = {76}, DOI = {10.1007/978-3-540-73424-6_10}, keywords = {3D range sensor, laser range scanner, vision-based depth interpolation, 3D vision}, abstract = {3D range sensors, particularly 3D laser range scanners, enjoy a rising popularity and are used nowadays for many different applications. The resolution 3D range sensors provide in the image plane is typically much lower than the resolution of a modern colour camera. In this chapter we focus on methods to derive a highresolution depth image from a low-resolution 3D range sensor and a colour image. The main idea is to use colour similarity as an indication of depth similarity, based on the observation that depth discontinuities in the scene often correspond to colour or brightness changes in the camera image. We present five interpolation methods and compare them with an independently proposed method based on Markov random fields. The proposed algorithms are non-iterative and include a parameter-free vision-based interpolation method. In contrast to previous work, we present ground truth evaluation with real world data and analyse both indoor and outdoor data. }, ISBN = {978-3-540-73423-9}, year = {2007} } @article{Andreasson158113, author = {Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom}, institution = {Örebro University, Department of Technology}, institution = {University of Tübingen}, journal = {Robotics and Autonomous Systems}, number = {7}, pages = {541--551}, publisher = {Elsevier}, title = {Self-localization in non-stationary environments using omni-directional vision}, volume = {55}, DOI = {10.1016/j.robot.2007.02.002}, abstract = {This paper presents an image-based approach for localization in non-static environments using local feature descriptors, and its experimental evaluation in a large, dynamic, populated environment where the time interval between the collected data sets is up to two months. By using local features together with panoramic images, robustness and invariance to large changes in the environment can be handled. Results from global place recognition with no evidence accumulation and a Monte Carlo localization method are shown. To test the approach even further, experiments were conducted with up to 90% virtual occlusion in addition to the dynamic changes in the environment }, year = {2007} } @inproceedings{Andreasson138563, author = {Andreasson, Henrik and Lilienthal, Achim}, booktitle = {ECMR 2007 : Proceedings of the European Conference on Mobile Robots}, institution = {Örebro University, Department of Technology}, institution = {Örebro University, Department of Natural Sciences}, institution = {aass}, pages = {192--197}, title = {Vision aided 3D laser scanner based registration}, keywords = {Registration, Vision}, abstract = {This paper describes a vision and 3D laser based registration approach which utilizes visual features to identify correspondences. Visual features are obtained from the images of a standard color camera and the depth of these features is determined by interpolating between the scanning points of a 3D laser range scanner, taking into consideration the visual information in the neighbourhood of the respective visual feature. The 3D laser scanner is also used to determine a position covariance estimate of the visual feature. To exploit these covariance estimates, an ICP algorithm based on the Mahalanobis distance is applied. Initial experimental results are presented in a real world indoor laboratory environment }, URL = {http://ecmr07.informatik.uni-freiburg.de/proceedings/ECMR07_0059.pdf}, year = {2007} } @article{Tamimi137728, author = {Tamimi, Hashem and Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom and Zell, Andreas}, institution = {Örebro University, Department of Technology}, institution = {University of Tubingen}, institution = {University of Tubingen}, institution = {University of Lincoln}, institution = {University of Tubingen}, journal = {Robotics and Autonomous Systems}, note = {Selected papers from the 2nd European Conference on Mobile Robots (ECMR ’05)}, number = {9}, pages = {758--765}, title = {Localization of mobile robots with omnidirectional vision using Particle Filter and iterative SIFT}, volume = {54}, DOI = {10.1016/j.robot.2006.04.018}, keywords = {Robot localization, Scale Invariant Feature Transform, Omnidirectional vision, Particle Filter}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot localization. Still, the number of features extracted with this approach is immense, especially when dealing with omnidirectional vision. In this work, we propose a new approach that reduces the number of features generated by SIFT as well as their extraction and matching time. With the help of a Particle Filter, we demonstrate that we can still localize the mobile robot accurately with a lower number of features }, year = {2006} } @inproceedings{Andreasson138254, author = {Andreasson, Henrik and Lilienthal, Achim J. and Triebel, Rudolph}, booktitle = {Proceedings of the Third International Conference on Autonomous Robots and Agents : }, institution = {Örebro University, Department of Technology}, institution = {Department of Computer Science, University of Freiburg, Germany}, pages = {455--460}, title = {Vision based interpolation of 3D laser scans}, keywords = {3D range sensor, laser range scanner, vision-based depth interpolation, 3D vision}, abstract = {3D range sensors, particularly 3D laser range scanners, enjoy a rising popularity and are used nowadays for many different applications. The resolution 3D range sensors provide in the image plane is typically much lower than the resolution of a modern color camera. In this paper we focus on methods to derive a high-resolution depth image from a low-resolution 3D range sensor and a color image. The main idea is to use color similarity as an indication of depth similarity, based on the observation that depth discontinuities in the scene often correspond to color or brightness changes in the camera image. We present five interpolation methods and compare them with an independently proposed method based on Markov Random Fields. The algorithms proposed in this paper are non-iterative and include a parameter-free vision-based interpolation method. In contrast to previous work, we present ground truth evaluation with real world data and analyse both indoor and outdoor data. Further, we suggest and evaluate four methods to determine a confidence measure for the accuracy of interpolated range values. }, year = {2006} } @inproceedings{Andreasson138260, author = {Andreasson, Henrik and Triebel, Rudolph and Burgard, Wolfram}, booktitle = {2005 IEEE/RSJ International Conference on Intelligent Robots and Systems, 2005. (IROS 2005) : IROS 2005 IEEE/RSJ}, institution = {Örebro University, Department of Technology}, institution = {University of Friburg}, institution = {University of Friburg}, pages = {2656--2661}, title = {Improving plane extraction from 3D data by fusing laser data and vision}, abstract = {The problem of extracting three-dimensional structures from data acquired with mobile robots has received considerable attention over the past years. Robots that are able to perceive their three-dimensional environment are envisioned to more robustly perform tasks like navigation, rescue, and manipulation. In this paper we present an approach that simultaneously uses color and range information to cluster 3d points into planar structures. Our current system also is able to calibrate the camera and the laser based on the remission values provided by the range scanner and the brightness of the pixels in the image. It has been implemented on a mobile robot equipped with a manipulator that carries a range scanner and a camera for acquiring colored range scans. Several experiments carried out on real data and in simulations demonstrate that our approach yields highly accurate results also in comparison with previous approaches }, year = {2005} } @inproceedings{Andreasson158109, author = {Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom}, booktitle = {Proceedings of the 2005 IEEE International Converence on Robotics and Automation : ICRA - 2005}, institution = {Örebro University, Department of Technology}, institution = {University of Tübingen}, pages = {3348--3353}, title = {Localization for mobile robots using panoramic vision, local features and particle filter}, DOI = {10.1109/ROBOT.2005.1570627}, abstract = {In this paper we present a vision-based approach to self-localization that uses a novel scheme to integrate featurebased matching of panoramic images with Monte Carlo localization. A specially modified version of Lowe’s SIFT algorithm is used to match features extracted from local interest points in the image, rather than using global features calculated from the whole image. Experiments conducted in a large, populated indoor environment (up to 5 persons visible) over a period of several months demonstrate the robustness of the approach, including kidnapping and occlusion of up to 90% of the robot’s field of view. }, year = {2005} } @inproceedings{Tamimi138267, author = {Tamimi, Hashem and Andreasson, Henrik and Treptow, Andr{\’e;} and Duckett, Tom and Zell, Andreas}, booktitle = { : }, institution = {Örebro University, Department of Technology}, institution = {University of Tubingen}, institution = {University of Tubingen}, institution = {University of Tubingen}, title = {Localization of mobile robots with omnidirectional vision using particle filter and iterative SIFT}, abstract = {The Scale Invariant Feature Transform, SIFT, has been successfully applied to robot localization. Still, the number of features extracted with this approach is immense, especially when dealing with omnidirectional vision. In this work, we propose a new approach that reduces the number of features generated by SIFT as well as their extraction and matching time. With the help of a particle filter, we demonstrate that we can still localize the mobile robot accurately with a lower number of features }, year = {2005} } @inproceedings{Fleck158110, author = {Fleck, Sven and Busch, Florian and Biber, Peter and Strasser, Wolfgang and Andreasson, Henrik}, booktitle = {Proceedings of the 2005 IEEE International Converence on Robotics and Automation : ICRA - 2005}, institution = {Örebro University, Department of Technology}, institution = {University of Tübingen}, institution = {University of Tübingen}, institution = {University of Tübingen}, institution = {University of Tübingen}, pages = {1748--1754}, title = {Omnidirectional 3D modeling on a mobile robot using graph cuts}, DOI = {10.1109/ROBOT.2005.1570366}, abstract = {For a mobile robot it is a natural task to build a 3D model of its environment. Such a model is not only useful for planning robot actions but also to provide a remote human surveillant a realistic visualization of the robot’s state with respect to the environment. Acquiring 3D models of environments is also an important task on its own with many possible applications like creating virtual interactive walkthroughs or as basis for 3D-TV. In this paper we present our method to acquire a 3D model using a mobile robot that is equipped with a laser scanner and a panoramic camera. The method is based on calculating dense depth maps for panoramic images using pairs of panoramic images taken from different positions using stereo matching. Traditional 2D-SLAM using laser-scan-matching is used to determine the needed camera poses. To receive high-quality results we use a high-quality stereo matching algorithm – the graph cut method. We describe the necessary modifications to handle panoramic images and specialized post-processing methods. }, year = {2005} }