2025
|
Dave, Vedant; Rueckert, Elmar Skill Disentanglement in Reproducing Kernel Hilbert Space Proceedings Article In: Proceedings of the AAAI Conference on Artificial Intelligence (AAAI), 2025.
@inproceedings{Dave2025bb,
  title     = {Skill Disentanglement in Reproducing Kernel {Hilbert} Space},
  author    = {Vedant Dave and Elmar Rueckert},
  url       = {https://cloud.cps.unileoben.ac.at/index.php/s/m9XKo4t2FXAH6Cs},
  year      = {2025},
  date      = {2025-02-27},
  urldate   = {2025-02-27},
  booktitle = {Proceedings of the {AAAI} Conference on Artificial Intelligence ({AAAI})},
  keywords  = {Deep Learning, neural network, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
2024
|
Lygerakis, Fotios; Dave, Vedant; Rueckert, Elmar M2CURL: Sample-Efficient Multimodal Reinforcement Learning via Self-Supervised Representation Learning for Robotic Manipulation Proceedings Article In: IEEE International Conference on Ubiquitous Robots (UR 2024), IEEE 2024.
@inproceedings{Lygerakis2024,
  title        = {{M2CURL}: Sample-Efficient Multimodal Reinforcement Learning via Self-Supervised Representation Learning for Robotic Manipulation},
  author       = {Fotios Lygerakis and Vedant Dave and Elmar Rueckert},
  url          = {https://cloud.cps.unileoben.ac.at/index.php/s/NPejb2Fp4Y8LeyZ},
  year         = {2024},
  date         = {2024-04-04},
  urldate      = {2024-04-04},
  booktitle    = {{IEEE} International Conference on Ubiquitous Robots ({UR} 2024)},
  organization = {IEEE},
  keywords     = {Contrastive Learning, Manipulation, Multimodal Reinforcement Learning, Multimodal Representation Learning, Reinforcement Learning, Robot Learning},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
|  |
Feith, Nikolaus; Rueckert, Elmar Integrating Human Expertise in Continuous Spaces: A Novel Interactive Bayesian Optimization Framework with Preference Expected Improvement Proceedings Article In: IEEE International Conference on Ubiquitous Robots (UR 2024), IEEE 2024.
@inproceedings{Feith2024A,
  title        = {Integrating Human Expertise in Continuous Spaces: A Novel Interactive {Bayesian} Optimization Framework with Preference Expected Improvement},
  author       = {Nikolaus Feith and Elmar Rueckert},
  url          = {https://cloud.cps.unileoben.ac.at/index.php/s/6rTWAkoXa3zsJxf},
  year         = {2024},
  date         = {2024-04-04},
  urldate      = {2024-04-04},
  booktitle    = {{IEEE} International Conference on Ubiquitous Robots ({UR} 2024)},
  organization = {IEEE},
  keywords     = {Interactive Learning, Reinforcement Learning},
  pubstate     = {published},
  tppubtype    = {inproceedings}
}
|  |
2023
|
Yadav, Harsh; Xue, Honghu; Rudall, Yan; Bakr, Mohamed; Hein, Benedikt; Rueckert, Elmar; Nguyen, Ngoc Thinh Deep Reinforcement Learning for Mapless Navigation of Autonomous Mobile Robot Proceedings Article In: International Conference on System Theory, Control and Computing (ICSTCC), 2023, (October 11-13, 2023, Timisoara, Romania.).
@inproceedings{Yadav2023b,
  title     = {Deep Reinforcement Learning for Mapless Navigation of Autonomous Mobile Robot},
  author    = {Harsh Yadav and Honghu Xue and Yan Rudall and Mohamed Bakr and Benedikt Hein and Elmar Rueckert and Ngoc Thinh Nguyen},
  url       = {https://cloud.cps.unileoben.ac.at/index.php/s/zEnY3yoFHZRdzkR},
  year      = {2023},
  date      = {2023-06-26},
  urldate   = {2023-06-26},
  booktitle = {International Conference on System Theory, Control and Computing ({ICSTCC})},
  note      = {October 11-13, 2023, Timisoara, Romania.},
  keywords  = {Autonomous Navigation, Deep Learning, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
Keshavarz, Sahar; Vita, Petr; Rueckert, Elmar; Ortner, Ronald; Thonhauser, Gerhard A Reinforcement Learning Approach for Real-Time Autonomous Decision-Making in Well Construction Proceedings Article In: Society of Petroleum Engineers - SPE Symposium: Leveraging Artificial Intelligence to Shape the Future of the Energy Industry, AIS 2023, Society of Petroleum Engineers, 2023, ISBN: 9781613999882.
@inproceedings{Keshavarz2023,
  title     = {A Reinforcement Learning Approach for Real-Time Autonomous Decision-Making in Well Construction},
  author    = {Sahar Keshavarz and Petr Vita and Elmar Rueckert and Ronald Ortner and Gerhard Thonhauser},
  url       = {https://cloud.cps.unileoben.ac.at/index.php/s/yT9Erwsnk36JKtr},
  doi       = {10.2118/214465-MS},
  isbn      = {9781613999882},
  year      = {2023},
  date      = {2023-01-19},
  urldate   = {2023-01-19},
  booktitle = {Society of Petroleum Engineers - {SPE} Symposium: Leveraging Artificial Intelligence to Shape the Future of the Energy Industry, {AIS} 2023},
  publisher = {Society of Petroleum Engineers},
  keywords  = {Reinforcement Learning, Well Construction},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
2022
|
Xue, Honghu; Hein, Benedikt; Bakr, Mohamed; Schildbach, Georg; Abel, Bengt; Rueckert, Elmar Using Deep Reinforcement Learning with Automatic Curriculum Learning for Mapless Navigation in Intralogistics Journal Article In: Applied Sciences (MDPI), Special Issue on Intelligent Robotics, 2022, (Supplement: https://cloud.cps.unileoben.ac.at/index.php/s/Sj68rQewnkf4ppZ).
@article{Xue2022,
  title    = {Using Deep Reinforcement Learning with Automatic Curriculum Learning for Mapless Navigation in Intralogistics},
  author   = {Honghu Xue and Benedikt Hein and Mohamed Bakr and Georg Schildbach and Bengt Abel and Elmar Rueckert},
  url      = {https://cloud.cps.unileoben.ac.at/index.php/s/yddDZ7z9oqxenCi},
  year     = {2022},
  date     = {2022-01-31},
  urldate  = {2022-01-31},
  journal  = {Applied Sciences (MDPI), Special Issue on Intelligent Robotics},
  abstract = {We propose a deep reinforcement learning approach for solving a mapless navigation problem in warehouse scenarios. The automatic guided vehicle is equipped with LiDAR and frontal RGB sensors and learns to reach underneath the target dolly. The challenges reside in the sparseness of positive samples for learning, multi-modal sensor perception with partial observability, the demand for accurate steering maneuvers together with long training cycles. To address these points, we proposed NavACL-Q as an automatic curriculum learning together with distributed soft actor-critic. The performance of the learning algorithm is evaluated exhaustively in a different warehouse environment to check both robustness and generalizability of the learned policy. Results in NVIDIA Isaac Sim demonstrates that our trained agent significantly outperforms the map-based navigation pipeline provided by NVIDIA Isaac Sim in terms of higher agent-goal distances and relative orientations. The ablation studies also confirmed that NavACL-Q greatly facilitates the whole learning process and a pre-trained feature extractor manifestly boosts the training speed.},
  note     = {Supplement: https://cloud.cps.unileoben.ac.at/index.php/s/Sj68rQewnkf4ppZ},
  keywords = {Autonomous Navigation, Deep Learning, mobile navigation, Reinforcement Learning},
  pubstate = {published},
  tppubtype = {article}
}
We propose a deep reinforcement learning approach for solving a mapless navigation problem in warehouse scenarios. The automatic guided vehicle is equipped with LiDAR and frontal RGB sensors and learns to reach underneath the target dolly. The challenges reside in the sparseness of positive samples for learning, multi-modal sensor perception with partial observability, the demand for accurate steering maneuvers together with long training cycles. To address these points, we proposed NavACL-Q as an automatic curriculum learning together with distributed soft actor-critic. The performance of the learning algorithm is evaluated exhaustively in a different warehouse environment to check both robustness and generalizability of the learned policy. Results in NVIDIA Isaac Sim demonstrates that our trained agent significantly outperforms the map-based navigation pipeline provided by NVIDIA Isaac Sim in terms of higher agent-goal distances and relative orientations. The ablation studies also confirmed that NavACL-Q greatly facilitates the whole learning process and a pre-trained feature extractor manifestly boosts the training speed. |  |
2021
|
Cansev, Mehmet Ege; Xue, Honghu; Rottmann, Nils; Bliek, Adna; Miller, Luke E.; Rueckert, Elmar; Beckerle, Philipp Interactive Human-Robot Skill Transfer: A Review of Learning Methods and User Experience Journal Article In: Advanced Intelligent Systems, pp. 1–28, 2021.
@article{Cansev2021,
  title    = {Interactive Human-Robot Skill Transfer: A Review of Learning Methods and User Experience},
  author   = {Mehmet Ege Cansev and Honghu Xue and Nils Rottmann and Adna Bliek and Luke E. Miller and Elmar Rueckert and Philipp Beckerle},
  url      = {https://cps.unileoben.ac.at/wp/AIS2021Cansev.pdf, Article File},
  doi      = {10.1002/aisy.202000247},
  year     = {2021},
  date     = {2021-03-10},
  journal  = {Advanced Intelligent Systems},
  pages    = {1--28},
  keywords = {human motor control, intrinsic motivation, movement primitives, Probabilistic Inference, Reinforcement Learning, spiking},
  pubstate = {published},
  tppubtype = {article}
}
|  |
2020
|
Rottmann, N.; Kunavar, T.; Babič, J.; Peters, J.; Rueckert, E. Learning Hierarchical Acquisition Functions for Bayesian Optimization Proceedings Article In: International Conference on Intelligent Robots and Systems (IROS 2020), 2020.
@inproceedings{Rottmann2020HiBO,
  title     = {Learning Hierarchical Acquisition Functions for {Bayesian} Optimization},
  author    = {N. Rottmann and T. Kunavar and J. Babi{\v{c}} and J. Peters and E. Rueckert},
  url       = {https://cps.unileoben.ac.at/wp/IROS2020Rottmann.pdf, Article File},
  year      = {2020},
  date      = {2020-10-25},
  booktitle = {International Conference on Intelligent Robots and Systems ({IROS} 2020)},
  keywords  = {Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
Rottmann, N.; Bruder, R.; Xue, H.; Schweikard, A.; Rueckert, E. Parameter Optimization for Loop Closure Detection in Closed Environments Proceedings Article In: Workshop Paper at the International Conference on Intelligent Robots and Systems (IROS), pp. 1–8, 2020.
@inproceedings{Rottmann2020c,
  title     = {Parameter Optimization for Loop Closure Detection in Closed Environments},
  author    = {N. Rottmann and R. Bruder and H. Xue and A. Schweikard and E. Rueckert},
  url       = {https://cps.unileoben.ac.at/wp/IROSWS2020Rottmann.pdf, Article File},
  year      = {2020},
  date      = {2020-10-25},
  booktitle = {Workshop Paper at the International Conference on Intelligent Robots and Systems (IROS)},
  pages     = {1--8},
  keywords  = {mobile navigation, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
Tanneberg, Daniel; Rueckert, Elmar; Peters, Jan Evolutionary training and abstraction yields algorithmic generalization of neural computers Journal Article In: Nature Machine Intelligence, pp. 1–11, 2020.
@article{Tanneberg2020,
  title    = {Evolutionary training and abstraction yields algorithmic generalization of neural computers},
  author   = {Daniel Tanneberg and Elmar Rueckert and Jan Peters},
  url      = {https://rdcu.be/caRlg, Article File},
  doi      = {10.1038/s42256-020-00255-1},
  year     = {2020},
  date     = {2020-10-10},
  journal  = {Nature Machine Intelligence},
  pages    = {1--11},
  keywords = {neural network, Reinforcement Learning, Transfer Learning},
  pubstate = {published},
  tppubtype = {article}
}
|  |
Xue, H.; Boettger, S.; Rottmann, N.; Pandya, H.; Bruder, R.; Neumann, G.; Schweikard, A.; Rueckert, E. Sample-Efficient Covariance Matrix Adaptation Evolutional Strategy via Simulated Rollouts in Neural Networks Proceedings Article In: International Conference on Advances in Signal Processing and Artificial Intelligence (ASPAI 2020), 2020.
@inproceedings{Xue2020,
  title     = {Sample-Efficient Covariance Matrix Adaptation Evolutional Strategy via Simulated Rollouts in Neural Networks},
  author    = {H. Xue and S. Boettger and N. Rottmann and H. Pandya and R. Bruder and G. Neumann and A. Schweikard and E. Rueckert},
  url       = {https://cps.unileoben.ac.at/wp/ASPAI2020Xue.pdf, Article File},
  year      = {2020},
  date      = {2020-06-30},
  booktitle = {International Conference on Advances in Signal Processing and Artificial Intelligence ({ASPAI} 2020)},
  keywords  = {Manipulation, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
2019
|
Stark, Svenja; Peters, Jan; Rueckert, Elmar Experience Reuse with Probabilistic Movement Primitives Proceedings Article In: Proceedings of the IEEE/RSJ Conference on Intelligent Robots and Systems (IROS), 2019.
@inproceedings{Stark2019,
  title     = {Experience Reuse with Probabilistic Movement Primitives},
  author    = {Svenja Stark and Jan Peters and Elmar Rueckert},
  url       = {https://cps.unileoben.ac.at/wp/IROS2019Stark.pdf, Article File},
  year      = {2019},
  date      = {2019-11-03},
  booktitle = {Proceedings of the {IEEE/RSJ} Conference on Intelligent Robots and Systems ({IROS})},
  keywords  = {movement primitives, Reinforcement Learning, Transfer Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
Rueckert, Elmar; Jauer, Philipp; Derksen, Alexander; Schweikard, Achim Dynamic Control Strategies for Cable-Driven Master Slave Robots Proceedings Article In: Keck, Tobias (Ed.): Proceedings on Minimally Invasive Surgery, Luebeck, Germany, 2019, (January 24-25, 2019).
@inproceedings{Rueckert2019c,
  title     = {Dynamic Control Strategies for Cable-Driven Master Slave Robots},
  author    = {Elmar Rueckert and Philipp Jauer and Alexander Derksen and Achim Schweikard},
  editor    = {Tobias Keck},
  doi       = {10.18416/MIC.2019.1901007},
  year      = {2019},
  date      = {2019-01-24},
  booktitle = {Proceedings on Minimally Invasive Surgery, Luebeck, Germany},
  note      = {January 24-25, 2019},
  keywords  = {Medical Robotics, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
2016
|
Sharma, David; Tanneberg, Daniel; Grosse-Wentrup, Moritz; Peters, Jan; Rueckert, Elmar Adaptive Training Strategies for BCIs Proceedings Article In: Cybathlon Symposium, 2016.
@inproceedings{Sharma2016,
  title     = {Adaptive Training Strategies for BCIs},
  author    = {David Sharma and Daniel Tanneberg and Moritz Grosse-Wentrup and Jan Peters and Elmar Rueckert},
  url       = {https://cps.unileoben.ac.at/wp/Cybathlon2016Sharma.pdf, Article File},
  year      = {2016},
  date      = {2016-01-01},
  booktitle = {Cybathlon Symposium},
  crossref  = {p10952},
  keywords  = {human motor control, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |
2014
|
Rueckert, Elmar Biologically inspired motor skill learning in robotics through probabilistic inference PhD Thesis Technical University Graz, 2014.
@phdthesis{Rueckert2014a,
  title    = {Biologically inspired motor skill learning in robotics through probabilistic inference},
  author   = {Elmar Rueckert},
  url      = {https://cps.unileoben.ac.at/wp/PhDThesis2014Rueckert.pdf, Article File},
  year     = {2014},
  date     = {2014-02-04},
  school   = {Technical University Graz},
  keywords = {graphical models, locomotion, model learning, morphological computation, movement primitives, policy search, postural control, Probabilistic Inference, Reinforcement Learning, RNN, SOC, spiking},
  pubstate = {published},
  tppubtype = {phdthesis}
}
|  |
2013
|
Rueckert, Elmar; d'Avella, Andrea Learned Muscle Synergies as Prior in Dynamical Systems for Controlling Bio-mechanical and Robotic Systems Proceedings Article In: Abstracts of Neural Control of Movement Conference (NCM), Conference Talk, pp. 27–28, 2013.
@inproceedings{Rueckert2013,
  title     = {Learned Muscle Synergies as Prior in Dynamical Systems for Controlling Bio-mechanical and Robotic Systems},
  author    = {Elmar Rueckert and Andrea d'Avella},
  url       = {https://cps.unileoben.ac.at/wp/Frontiers2013bRueckert.pdf, Article File},
  year      = {2013},
  date      = {2013-01-01},
  booktitle = {Abstracts of Neural Control of Movement Conference (NCM), Conference Talk},
  pages     = {27--28},
  crossref  = {p10682},
  keywords  = {muscle synergies, policy search, Reinforcement Learning},
  pubstate  = {published},
  tppubtype = {inproceedings}
}
|  |