-
Jacky Baltes and John Anderson.
Intelligent Global Vision for Teams of Mobile Robots.
In Sascha Kolski, editor, Mobile Robots: Perception & Navigation,
chapter 9,
pages 165-186.
Advanced Robotic Systems International/pro literatur Verlag,
Vienna, Austria,
2007.
@incollection{BaltesAnderson06:VisionChapter,
author = {Jacky Baltes and John Anderson},
editor = {Sascha Kolski},
title = {Intelligent Global Vision for Teams of Mobile Robots},
booktitle = {Mobile Robots: Perception \& Navigation},
publisher = {Advanced Robotic Systems International/pro literatur Verlag},
year = {2007},
chapter = {9},
isbn = {3-86611-283-1},
pages = {165--186},
address = {Vienna, Austria},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/BaltesAndersonVisionChapter.pdf}
}
-
Mark Karpenko,
Nariman Sepehri,
and John Anderson.
Decentralized Coordinated Motion Control of Two Hydraulic Actuators Handling a Common Object.
ASME Journal of Dynamic Systems, Measurement, and Control,
129:729-741,
September 2007.
@article{karpenko07,
author = {Mark Karpenko and Nariman Sepehri and John Anderson},
title = {Decentralized Coordinated Motion Control of Two Hydraulic Actuators Handling a Common Object},
journal = {ASME Journal of Dynamic Systems, Measurement, and Control},
year = {2007},
volume = {129},
pages = {729--741},
month = {September},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/KarpenkoSepehriAnderson07.pdf}
}
-
Jeff Allen and John Anderson.
A Vision-Based Approach to Imitation Using Heterogeneous Demonstrators.
In Christopher Geib and David Pynadath, editors,
Proceedings of the AAAI Workshop on Plan and Intent Recognition,
Vancouver, Canada,
pages 9-16,
July 2007.
AAAI Press.
[Slides]
Abstract:
Imitation learning is a powerful mechanism used by humans and other creatures. In imitation learning, the actions of others form the basis for desirable behaviour, and an imitation learner must be able to recognize the outcomes of the actions of others, understand how these relate to its own abilities, and ultimately duplicate the final outcome of a series of actions. We are interested in supporting this type of learning in general populations of robots, where two important complications arise. First, physical variation between demonstrator and learner may require the learner to carry out different action(s) from the demonstrator to achieve the same results. Second, since demonstrators' skills may differ as much as their physiology, agents must be able to compare the demonstrations of a number of different individuals, in order to give greater weight to better demonstrators. Being able to integrate multiple demonstrations from different demonstrators allows a learner to deal with these problems as well as encouraging the creation of more general behaviours, rather than simply mimicking the actions of a single agent with no ability to generalize. In this paper we describe an approach to imitation learning based on global vision, which deals with these problems.
@inproceedings{AllenAnderson07:Imitation,
author = {Jeff Allen and John Anderson},
title = {A Vision-Based Approach to Imitation Using Heterogeneous Demonstrators},
booktitle = {Proceedings of the AAAI Workshop on Plan and Intent Recognition},
year = {2007},
editor = {Christopher Geib and David Pynadath},
address = {Vancouver, Canada},
month = {July},
pages = {9--16},
publisher = {AAAI Press},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/VisionBasedImitation.pdf},
abstract = {Imitation learning is a powerful mechanism used by humans and other creatures. In imitation learning, the actions of others form the basis for desirable behaviour, and an imitation learner must be able to recognize the outcomes of the actions of others, understand how these relate to its own abilities, and ultimately duplicate the final outcome of a series of actions. We are interested in supporting this type of learning in general populations of robots, where two important complications arise. First, physical variation between demonstrator and learner may require the learner to carry out different action(s) from the demonstrator to achieve the same results. Second, since demonstrators' skills may differ as much as their physiology, agents must be able to compare the demonstrations of a number of different individuals, in order to give greater weight to better demonstrators. Being able to integrate multiple demonstrations from different demonstrators allows a learner to deal with these problems as well as encouraging the creation of more general behaviours, rather than simply mimicking the actions of a single agent with no ability to generalize. In this paper we describe an approach to imitation learning based on global vision, which deals with these problems.},
slides = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/VisionBasedImitationSlides.pdf}
}
-
John Anderson and Jacky Baltes.
A Mixed Reality Approach to Undergraduate Robotics Education.
In Robert Holte and Adele Howe, editors,
Proceedings of AAAI-07 (Robot Exhibition Papers),
Vancouver, Canada,
July 2007.
AAAI Press.
Abstract:
Teaching robotics to undergraduate students requires a course framework that allows students to learn about robotics in stages, without being overwhelmed with details. Such a framework must also provide the students with a motivating application environment that challenges them to apply what they have learned. Robotics competitions have proven to be an excellent method for motivating students, so the framework should be portable and robust enough to be used for competitions, and flexible enough to provide a range of environments that can become more challenging as students become more adept. Finally, the framework should provide repeatability and control for evaluating the student’s work, as well as for performing research. In this paper, we overview a mixed reality approach that meets these criteria, and describe its use in an advanced undergraduate course.
@inproceedings{AndersonBaltes07:MixedRealityAAAI,
author = {John Anderson and Jacky Baltes},
title = {A Mixed Reality Approach to Undergraduate Robotics Education},
booktitle = {Proceedings of AAAI-07 (Robot Exhibition Papers)},
year = {2007},
editor = {Robert Holte and Adele Howe},
address = {Vancouver, Canada},
month = {July},
publisher = {AAAI Press},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/MixedRealityAAAI07.pdf},
abstract = {Teaching robotics to undergraduate students requires a course framework that allows students to learn about robotics in stages, without being overwhelmed with details. Such a framework must also provide the students with a motivating application environment that challenges them to apply what they have learned. Robotics competitions have proven to be an excellent method for motivating students, so the framework should be portable and robust enough to be used for competitions, and flexible enough to provide a range of environments that can become more challenging as students become more adept. Finally, the framework should provide repeatability and control for evaluating the student’s work, as well as for performing research. In this paper, we overview a mixed reality approach that meets these criteria, and describe its use in an advanced undergraduate course.}
}
-
John Anderson and Jacky Baltes.
A Pragmatic Global Vision System for Educational Robotics.
In Robots and Robot Venues: Resources for AI Education,
AAAI Spring Symposium Series,
Stanford, CA,
pages 1-6,
March 2007.
[Slides]
Abstract:
This paper advocates the use of global vision as a tool for increasing the effectiveness of robotics education, and describes the design and functionality of advanced global vision systems used in our own programs. Our experiences with using global vision as a basis for teaching robotics and AI have led us to use this as a standard method for teaching undergraduates. Our recent vision systems (DORAEMON and ERGO) have consistently been improved to perform accurately and robustly over a wide range of applications. DORAEMON uses a sophisticated camera calibration method and colour model to remove the need for an overhead view of the world. ERGO minimized the use of colour information to provide more robust object recognition under varying lighting scenarios. Most recently, these video servers have been used by undergraduates to develop autonomous robots for a mixed virtual/physical world.
@inproceedings{AndersonBaltes07:VisionForEducationalRobotics,
author = {John Anderson and Jacky Baltes},
title = {A Pragmatic Global Vision System for Educational Robotics},
booktitle = {Robots and Robot Venues: Resources for AI Education},
year = {2007},
series = {AAAI Spring Symposium Series},
pages = {1--6},
address = {Stanford, CA},
month = {March},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/SS07RoboticEducation.pdf},
slides = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/SS07RoboticEducationSlides.pdf},
abstract = {This paper advocates the use of global vision as a tool for increasing the effectiveness of robotics education, and describes the design and functionality of advanced global vision systems used in our own programs. Our experiences with using global vision as a basis for teaching robotics and AI have led us to use this as a standard method for teaching undergraduates. Our recent vision systems (DORAEMON and ERGO) have consistently been improved to perform accurately and robustly over a wide range of applications. DORAEMON uses a sophisticated camera calibration method and colour model to remove the need for an overhead view of the world. ERGO minimized the use of colour information to provide more robust object recognition under varying lighting scenarios. Most recently, these video servers have been used by undergraduates to develop autonomous robots for a mixed virtual/physical world.}
}
-
Nathan Wiebe and John Anderson.
A Local Approach to Developing Grounded Spatial References in Multi-Robot Systems.
In Proceedings of the 2007 IEEE/RSJ International Conference on Intelligent Robots and Systems (IROS-2007),
San Diego, CA,
pages 1357-1364,
October 2007.
@inproceedings{IROS07Grounding,
author = {Nathan Wiebe and John Anderson},
title = {A Local Approach to Developing Grounded Spatial References in Multi-Robot Systems},
booktitle = {Proceedings of the 2007 {IEEE/RSJ} International Conference on Intelligent Robots and Systems ({IROS}-2007)},
address = {San Diego, CA},
month = {October},
year = {2007},
pages = {1357--1364},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/IROS07Grounding.pdf}
}