-
Jacky Baltes,
Michail G. Lagoudakis,
Tadashi Naruse,
and Saeed Shiry, editors.
Proceedings of RoboCup-2009: Robot Soccer World Cup XIII,
2009.
Springer-Verlag.
@proceedings{RoboCup2009,
editor = {Jacky Baltes and Michail G. Lagoudakis and Tadashi Naruse and Saeed Shiry},
booktitle = {Proceedings of RoboCup-2009: Robot Soccer World Cup XIII},
publisher = {Springer-Verlag},
address = {Heidelberg},
year = {2009}
}
-
J.-H. Kim,
S. Ge,
P. Vadakkepat,
N. Jesse,
A. Al Manum,
K. Puthusserypady,
U. Rueckert,
J. Sitte,
U. Witkowski,
R. Nakatsu,
T. Braunl,
J. Baltes,
J. Anderson,
C.-C. Wong,
I. Verner,
and D. Ahlgren, editors.
Progress in Robotics (FIRA RoboWorld Congress 2009),
volume 44 of Communications in Computer and Information Science,
2009.
Springer-Verlag.
Abstract:
This volume is a selection of papers of six international conferences that are held under the umbrella of the 12th FIRA RoboWorld congress, in Incheon, Korea, August 16-18, 2009.
From the 115 contributed papers 44 papers are included in the volume, which is organized into 6 sections: humanoid robotics, human robot interaction, education and entertainment, cooperative robotics, robotic system design, and learning, optimization, communication. The volume is intended to provide readers with the recent technical progresses in robotics, human robot interactions, cooperative robotics and the related fields.
@proceedings{ProgressFIRA2009,
editor = {J.-H. Kim and S. Ge and P. Vadakkepat and N. Jesse and A. Al Manum and K. Puthusserypady and U. Rueckert and J. Sitte and U. Witkowski and R. Nakatsu and T. Braunl and J. Baltes and J. Anderson and C.-C. Wong and I. Verner and D. Ahlgren},
booktitle = {Progress in Robotics (FIRA RoboWorld Congress 2009)},
publisher = {Springer-Verlag},
address = {Heidelberg},
year = {2009},
pages = {392},
isbn = {978-3-642-03985-0},
series = {Communications in Computer and Information Science},
volume = {44},
abstract = {This volume is a selection of papers of six international conferences that are held under the umbrella of the 12th FIRA RoboWorld congress, in Incheon, Korea, August 16-18, 2009.
From the 115 contributed papers 44 papers are included in the volume, which is organized into 6 sections: humanoid robotics, human robot interaction, education and entertainment, cooperative robotics, robotic system design, and learning, optimization, communication. The volume is intended to provide readers with the recent technical progresses in robotics, human robot interactions, cooperative robotics and the related fields.}
}
-
J.-H. Kim,
S. Ge,
P. Vadakkepat,
N. Jesse,
A. Al Manum,
K. Puthusserypady,
U. Rueckert,
J. Sitte,
U. Witkowski,
R. Nakatsu,
T. Braunl,
J. Baltes,
J. Anderson,
C.-C. Wong,
I. Verner,
and D. Ahlgren, editors.
Advances in Robotics (FIRA RoboWorld Congress 2009),
volume 5744 of Image Processing, Computer Vision, Pattern Recognition, and Graphics,
2009.
Springer-Verlag.
Abstract:
The volume consists of selected quality papers from six international conferences that are held under the umbrella of the 12th FIRA RoboWorld congress, in Incheon, Korea, August 16-18, 2009.
31 papers from 115 contributed papers at the FIRA RoboWorld Congress, held in Incheon, Korea, August 16-18, were included in the volume. It is organized in 7 sections: emotions and behaviour, human robot interaction, biped humanoid robotics, localization, path planning, obstacle avoidance, control, communication, terrain mapping and classification. The volume is intended to provide readers with the recent technical progresses in robotics, human robot interactions, cooperative robotics and the related fields.
@proceedings{AdvancesFIRA2009,
editor = {J.-H. Kim and S. Ge and P. Vadakkepat and N. Jesse and A. Al Manum and K. Puthusserypady and U. Rueckert and J. Sitte and U. Witkowski and R. Nakatsu and T. Braunl and J. Baltes and J. Anderson and C.-C. Wong and I. Verner and D. Ahlgren},
booktitle = {Advances in Robotics (FIRA RoboWorld Congress 2009)},
publisher = {Springer-Verlag},
address = {Heidelberg},
year = {2009},
series = {Image Processing, Computer Vision, Pattern Recognition, and Graphics},
volume = {5744},
pages = {322},
isbn = {978-3-642-03982-9},
abstract = {The volume consists of selected quality papers from six international conferences that are held under the umbrella of the 12th FIRA RoboWorld congress, in Incheon, Korea, August 16-18, 2009.
31 papers from 115 contributed papers at the FIRA RoboWorld Congress, held in Incheon, Korea, August 16-18, were included in the volume. It is organized in 7 sections: emotions and behaviour, human robot interaction, biped humanoid robotics, localization, path planning, obstacle avoidance, control, communication, terrain mapping and classification. The volume is intended to provide readers with the recent technical progresses in robotics, human robot interactions, cooperative robotics and the related fields.}
}
-
Jeff Allen.
Imitation Learning from Multiple Demonstrators Using Global Vision.
Master's thesis,
Department of Computer Science, University of Manitoba,
Winnipeg, Canada,
August 2009.
Abstract:
Imitation learning enables a learner to expand its own skill set with behaviours that it observes from others. Most imitation learning systems learn from a single class of demonstrators, and often only a single demonstrator. Such approaches are limited, however: in the real world, people have varying levels of skills and different approaches to solving problems, and learning from only one demonstrator would be a very limited perspective. In the context of robots, very different physiologies make learning from many types of demonstrators equally important. A wheeled robot may watch a humanoid perform a task, for example, and yet not be able to perfectly approximate its movements (e.g. stepping over small obstacles). This thesis describes an approach to learning a task by observing demonstrations performed by multiple heterogeneous robots using global (overhead) vision, incorporating demonstrators that are different in size, physiology (wheeled vs. legged), and skill level. The imitator evaluates demonstrators relative to each other, which gives it the ability to weigh its learning towards the more skilled demonstrators. I assume the imitator has no initial knowledge of the observable effects of its own actions, and begin by training a set of Hidden Markov Models (HMMs) to map observations to actions. These HMMs provide a low-level basis for interpreting the observations of others. I then use forward models to construct more abstract behaviours that bridge the differences between highly heterogeneous agents. This approach is evaluated in the domain of robotic soccer, where it is found that the imitator can weigh its learning towards skilled demonstrators regardless of physiology.
@mastersthesis{AllenThesis,
author = {Jeff Allen},
title = {Imitation Learning from Multiple Demonstrators Using Global Vision},
school = {Department of Computer Science, University of Manitoba},
year = {2009},
address = {Winnipeg, Canada},
month = {August},
abstract = {Imitation learning enables a learner to expand its own skill set with behaviours that it observes from others. Most imitation learning systems learn from a single class of demonstrators, and often only a single demonstrator. Such approaches are limited, however: in the real world, people have varying levels of skills and different approaches to solving problems, and learning from only one demonstrator would be a very limited perspective. In the context of robots, very different physiologies make learning from many types of demonstrators equally important. A wheeled robot may watch a humanoid perform a task, for example, and yet not be able to perfectly approximate its movements (e.g. stepping over small obstacles). This thesis describes an approach to learning a task by observing demonstrations performed by multiple heterogeneous robots using global (overhead) vision, incorporating demonstrators that are different in size, physiology (wheeled vs. legged), and skill level. The imitator evaluates demonstrators relative to each other, which gives it the ability to weigh its learning towards the more skilled demonstrators. I assume the imitator has no initial knowledge of the observable effects of its own actions, and begin by training a set of Hidden Markov Models (HMMs) to map observations to actions. These HMMs provide a low-level basis for interpreting the observations of others. I then use forward models to construct more abstract behaviours that bridge the differences between highly heterogeneous agents. This approach is evaluated in the domain of robotic soccer, where it is found that the imitator can weigh its learning towards skilled demonstrators regardless of physiology.},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/AllenThesis.pdf}
}
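The thesis above maps observations to actions by training one Hidden Markov Model per action and scoring observed sequences against them. The Python sketch below is purely illustrative (it is not code from the thesis; the action names, observation symbols, and model parameters are invented): it uses the scaled forward algorithm to compute the log-likelihood of a quantized observation sequence under each hypothetical per-action HMM and picks the most likely action.

import numpy as np

def forward_log_likelihood(obs, pi, A, B):
    # Scaled forward algorithm: log P(obs | model) for a discrete-output HMM.
    # pi: initial state probabilities (N,), A: state transitions (N, N),
    # B: emission probabilities (N, M), obs: list of observation-symbol indices.
    alpha = pi * B[:, obs[0]]
    log_lik = np.log(alpha.sum())
    alpha = alpha / alpha.sum()
    for symbol in obs[1:]:
        alpha = (alpha @ A) * B[:, symbol]   # predict next state, weight by emission
        c = alpha.sum()
        log_lik += np.log(c)
        alpha = alpha / c                    # rescale to avoid numerical underflow
    return log_lik

# Hypothetical per-action models; every number here is made up for the example.
models = {
    "kick":    (np.array([0.9, 0.1]),
                np.array([[0.8, 0.2], [0.3, 0.7]]),
                np.array([[0.7, 0.2, 0.1], [0.1, 0.2, 0.7]])),
    "dribble": (np.array([0.5, 0.5]),
                np.array([[0.5, 0.5], [0.5, 0.5]]),
                np.array([[0.3, 0.4, 0.3], [0.3, 0.4, 0.3]])),
}
observed = [0, 0, 2, 2, 2]   # a quantized feature sequence, e.g. from an overhead camera
best = max(models, key=lambda name: forward_log_likelihood(observed, *models[name]))
print("most likely demonstrated action:", best)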
-
Brian McKinnon.
Point, Line Segment, and Region-Based Stereo Matching for Mobile Robotics.
Master's thesis,
Department of Computer Science, University of Manitoba,
Winnipeg, Canada,
August 2009.
Abstract:
At the heart of every stereo vision algorithm is a solution to the matching problem - the problem of finding points in the right and left image that correspond to a single point in the real world. Applying assumptions regarding the epipolar rectification and color similarity between two frames is often not possible for real-world image capture systems, like those used in urban search and rescue robots. More flexible and robust feature descriptors are necessary to operate under harsh real world conditions. This thesis compares the accuracy of disparity images generated using local features including points, line segments, and regions, as well as a global framework implemented using loopy belief propagation. This thesis will introduce two new algorithms for stereo matching using line segments and regions, as well as several support structures that optimize the algorithms' performance and accuracy. Since few complete frameworks exist for line segment and region features, new algorithms that were developed during the research for this thesis will be outlined and evaluated. The comparison includes quantitative evaluation using the Middlebury stereo image pairs and qualitative evaluation using images from a less structured environment. Since this evaluation is grounded in urban search and rescue robotics, processing time is a significant constraint which will be evaluated for each algorithm. This thesis will show that line segment-based stereo vision with a gradient descriptor achieves at least a 10% better accuracy than all other methods used in this evaluation while maintaining the low runtime associated with local feature based stereo vision.
@mastersthesis{McKinnonThesis,
author = {Brian McKinnon},
title = {Point, Line Segment, and Region-Based Stereo Matching for Mobile Robotics},
school = {Department of Computer Science, University of Manitoba},
year = {2009},
address = {Winnipeg, Canada},
month = {August},
abstract = {At the heart of every stereo vision algorithm is a solution to the matching problem - the problem of finding points in the right and left image that correspond to a single point in the real world. Applying assumptions regarding the epipolar rectification and color similarity between two frames is often not possible for real-world image capture systems, like those used in urban search and rescue robots. More flexible and robust feature descriptors are necessary to operate under harsh real world conditions. This thesis compares the accuracy of disparity images generated using local features including points, line segments, and regions, as well as a global framework implemented using loopy belief propagation. This thesis will introduce two new algorithms for stereo matching using line segments and regions, as well as several support structures that optimize the algorithms' performance and accuracy. Since few complete frameworks exist for line segment and region features, new algorithms that were developed during the research for this thesis will be outlined and evaluated. The comparison includes quantitative evaluation using the Middlebury stereo image pairs and qualitative evaluation using images from a less structured environment. Since this evaluation is grounded in urban search and rescue robotics, processing time is a significant constraint which will be evaluated for each algorithm. This thesis will show that line segment-based stereo vision with a gradient descriptor achieves at least a 10% better accuracy than all other methods used in this evaluation while maintaining the low runtime associated with local feature based stereo vision.},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/McKinnonThesis.pdf}
}
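The quantitative evaluation mentioned in the abstract above uses the Middlebury stereo image pairs, where disparity maps are conventionally scored by the fraction of pixels whose estimated disparity deviates from ground truth by more than a threshold. The Python sketch below is illustrative only (it is not code from the thesis; the array values are invented) and shows that standard bad-pixel measure.

import numpy as np

def bad_pixel_rate(estimated, ground_truth, threshold=1.0, invalid=0):
    # Fraction of valid pixels whose disparity error exceeds the threshold.
    # Pixels whose ground-truth disparity equals `invalid` (e.g. occlusions) are ignored.
    estimated = np.asarray(estimated, dtype=float)
    ground_truth = np.asarray(ground_truth, dtype=float)
    valid = ground_truth != invalid
    bad = np.abs(estimated[valid] - ground_truth[valid]) > threshold
    return bad.mean()

ground_truth = np.array([[10, 10, 0],
                         [12, 12, 12]])   # 0 marks a pixel with no ground truth
estimated = np.array([[10, 8, 5],
                      [12, 12, 13]])
print(f"bad-pixel rate: {bad_pixel_rate(estimated, ground_truth):.2f}")  # 1 bad of 5 valid -> 0.20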
-
Jacky Baltes and John Anderson.
Advancing Artificial Intelligence through Minimalist Humanoid Robotics.
In Dikai Liu,
Lingfeng Wang,
and Kay Chen Tan, editors, Design and Control of Intelligent Robotic Systems,
chapter 17,
pages 355-376.
Springer-Verlag,
Heidelberg,
2009.
Abstract:
While the robots that most quickly come to mind to the general public are those with the most elaborate features and movements, those that are most useful in advancing the state of the art in artificial intelligence (AI) are very different. Minimalist robots are inexpensive and therefore more broadly available for research and educational purposes, but also force the researcher to rely on good, adaptable solutions to hard AI problems rather than relying on expensive specialized hardware that will only work under strict conditions. This chapter describes our work in minimalist humanoid robots, focussing mainly on Tao-Pie-Pie, a robot that competed successfully in numerous RoboCup and FIRA competitions. The chapter describes our motivations in designing minimalist robots and our rationale for working with humanoid robots, and describes the development of Tao-Pie-Pie, including contrasting this robot with other work and developing its walking gait and balancing reflexes. We then describe some issues in evaluating humanoid robots, and describe ongoing work.
@incollection{BaltesAnderson09:MinHumanoidRoboticsChapter09,
author = {Jacky Baltes and John Anderson},
editor = {Dikai Liu and Lingfeng Wang and Kay Chen Tan},
title = {Advancing Artificial Intelligence through Minimalist Humanoid Robotics},
booktitle = {Design and Control of Intelligent Robotic Systems},
publisher = {Springer-Verlag},
year = {2009},
chapter = {17},
doi = {10.1007/978-3-540-89933-4_17},
isbn = {978-3-540-89932-7},
pages = {355--376},
address = {Heidelberg},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/MinHumanoidRoboticsChapter09.pdf},
abstract = {While the robots that most quickly come to mind to the general public are those with the most elaborate features and movements, those that are most useful in advancing the state of the art in artificial intelligence (AI) are very different. Minimalist robots are inexpensive and therefore more broadly available for research and educational purposes, but also force the researcher to rely on good, adaptable solutions to hard AI problems rather than relying on expensive specialized hardware that will only work under strict conditions. This chapter describes our work in minimalist humanoid robots, focussing mainly on Tao-Pie-Pie, a robot that competed successfully in numerous RoboCup and FIRA competitions. The chapter describes our motivations in designing minimalist robots and our rationale for working with humanoid robots, and describes the development of Tao-Pie-Pie, including contrasting this robot with other work and developing its walking gait and balancing reflexes. We then describe some issues in evaluating humanoid robots, and describe ongoing work.}
}
-
Nathan Wiebe and John Anderson.
Local Methods for Supporting Grounded Communication in Robot Teams.
In Dikai Liu,
Lingfeng Wang,
and Kay Chen Tan, editors, Design and Control of Intelligent Robotic Systems,
chapter 14,
pages 279-301.
Springer-Verlag,
Heidelberg,
2009.
Abstract:
For a mobile robot to be able to communicate usefully with others in a group, the references it makes to points in space must be grounded in concepts that are shared among the group. In the past it has been common to hand-construct a complete set of such groundings, either by individual enumeration or by enforcement of a common coordinate system and origin among all team members. Such assumptions remove the ability to add new robots with no knowledge of the environment in an ad hoc manner, and also require knowledge which may not be available. In an urban search and rescue (USAR) setting, for example, robots may be released into rubble from a collapsed building with no shared starting point for an origin, under conditions where GPS reception is disrupted. Preconstructed groundings are also anthropocentric in that they are a best guess by humans as to what is useful from their perspective, and may be nothing like what robotic agents would come up with on their own. This chapter describes an approach that allows a group of robotic agents to develop consistent shared groundings for useful locations in an environment over time, using only local communication and interaction. This approach is thus suitable for domains in which broadcast communication may be sporadic, such as USAR, or jammed, such as military applications. The evaluation of this approach, which compares several different grounding techniques, shows that a consistent set of shared groundings can be developed effectively by a team of robots over time using only local interactions, and that these improve the effectiveness of communication in a multi-robot setting.
@incollection{WiebeAnderson09:LocalGroundedCommChapter09,
author = {Nathan Wiebe and John Anderson},
editor = {Dikai Liu and Lingfeng Wang and Kay Chen Tan},
title = {Local Methods for Supporting Grounded Communication in Robot Teams},
booktitle = {Design and Control of Intelligent Robotic Systems},
publisher = {Springer-Verlag},
year = {2009},
chapter = {14},
doi = {10.1007/978-3-540-89933-4_14},
isbn = {978-3-540-89932-7},
pages = {279--301},
address = {Heidelberg},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/LocalGroundedCommChapter09.pdf},
abstract = {For a mobile robot to be able to communicate usefully with others in a group, the references it makes to points in space must be grounded in concepts that are shared among the group. In the past it has been common to hand-construct a complete set of such groundings, either by individual enumeration or by enforcement of a common coordinate system and origin among all team members. Such assumptions remove the ability to add new robots with no knowledge of the environment in an ad hoc manner, and also require knowledge which may not be available. In an urban search and rescue (USAR) setting, for example, robots may be released into rubble from a collapsed building with no shared starting point for an origin, under conditions where GPS reception is disrupted. Preconstructed groundings are also anthropocentric in that they are a best guess by humans as to what is useful from their perspective, and may be nothing like what robotic agents would come up with on their own. This chapter describes an approach that allows a group of robotic agents to develop consistent shared groundings for useful locations in an environment over time, using only local communication and interaction. This approach is thus suitable for domains in which broadcast communication may be sporadic, such as USAR, or jammed, such as military applications. The evaluation of this approach, which compares several different grounding techniques, shows that a consistent set of shared groundings can be developed effectively by a team of robots over time using only local interactions, and that these improve the effectiveness of communication in a multi-robot setting.}
}
-
John Anderson and Jacky Baltes.
Using Mixed Reality to Facilitate Education in Robotics and AI.
In Proceedings of the 22nd International FLAIRS Conference,
Sanibel, FL,
May 2009.
Abstract:
Using robots as part of any curriculum requires careful management of the significant complexity that physical embodiment introduces. Students need to be made aware of this complexity without being overwhelmed by it, and navigating students through this complexity is the biggest challenge faced by an instructor. Achieving this requires a framework that allows complexity to be introduced in stages, as students' abilities improve. Such a framework should also be flexible enough to provide a range of application environments that can grow with student sophistication, and be able to quickly change between applications. It should be portable and maintainable, and require a minimum of overhead to manage in a classroom. Finally, the framework should provide repeatability and control for evaluating the students' work, as well as for performing research. In this paper, we discuss the advantages of a mixed reality approach to applying robotics to education in order to accomplish these challenges. We introduce a framework for managing mixed reality in the classroom, and discuss our experiences with using this framework for teaching robotics and AI.
@inproceedings{AndersonBaltes09:FLAIRSMixedRealityEducation,
author = {John Anderson and Jacky Baltes},
booktitle = {Proceedings of the 22nd International FLAIRS Conference},
title = {Using Mixed Reality to Facilitate Education in Robotics and AI},
year = {2009},
address = {Sanibel, FL},
month = {May},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/FLAIRSMixedRealityEducation.pdf},
abstract = {Using robots as part of any curriculum requires careful management of the significant complexity that physical embodiment introduces. Students need to be made aware of this complexity without being overwhelmed by it, and navigating students through this complexity is the biggest challenge faced by an instructor. Achieving this requires a framework that allows complexity to be introduced in stages, as students' abilities improve. Such a framework should also be flexible enough to provide a range of application environments that can grow with student sophistication, and be able to quickly change between applications. It should be portable and maintainable, and require a minimum of overhead to manage in a classroom. Finally, the framework should provide repeatability and control for evaluating the students' work, as well as for performing research. In this paper, we discuss the advantages of a mixed reality approach to applying robotics to education in order to accomplish these challenges. We introduce a framework for managing mixed reality in the classroom, and discuss our experiences with using this framework for teaching robotics and AI.}
}
-
John Anderson,
Jacky Baltes,
and Kuo-Yang Tu.
Improving Robotics Competitions for Real-World Evaluation of AI.
In Proceedings of the AAAI Spring Symposium on Experimental Design for Real-World Systems,
AAAI Spring Symposium Series,
Stanford, CA,
March 2009.
[Slides]
Abstract:
While embodied robotic applications have been a strong influence on moving artificial intelligence toward focussing on broad, robust solutions that operate in the real world, evaluating such systems remains difficult. Competition-based evaluation, using common challenge problems, is one of the major methods for comparing AI systems employing robotic embodiment. Competitions unfortunately tend to influence the creation of specific solutions that exploit particular rules rather than the broad and robust techniques that are hoped for, however, and physical embodiment in the real world also creates difficulties in control and repeatability. In this paper we discuss the positive and negative influences of competitions as a means of evaluating AI systems, and present recent work designed to improve such evaluations. We describe how improved control and repeatability can be achieved with mixed reality applications for challenge problems, and how competitions themselves can encourage breadth and robustness, using our rules for the FIRA HuroCup as an example.
@inproceedings{AndersonBaltesTu09:aaaiss09Evaluation,
author = {John Anderson and Jacky Baltes and Kuo-Yang Tu},
booktitle = {Proceedings of the AAAI Spring Symposium on Experimental Design for Real-World Systems},
title = {Improving Robotics Competitions for Real-World Evaluation of AI},
year = {2009},
series = {AAAI Spring Symposium Series},
address = {Stanford, CA},
month = {March},
slides = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/aaaiss09EvaluationSlides.pdf},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/aaaiss09Evaluation.pdf},
abstract = {While embodied robotic applications have been a strong influence on moving artificial intelligence toward focussing on broad, robust solutions that operate in the real world, evaluating such systems remains difficult. Competition-based evaluation, using common challenge problems, is one of the major methods for comparing AI systems employing robotic embodiment. Competitions unfortunately tend to influence the creation of specific solutions that exploit particular rules rather than the broad and robust techniques that are hoped for, however, and physical embodiment in the real world also creates difficulties in control and repeatability. In this paper we discuss the positive and negative influences of competitions as a means of evaluating AI systems, and present recent work designed to improve such evaluations. We describe how improved control and repeatability can be achieved with mixed reality applications for challenge problems, and how competitions themselves can encourage breadth and robustness, using our rules for the FIRA HuroCup as an example.}
}
-
Jacky Baltes,
Ahmad Byagowi,
John Anderson,
and Peter Kopacek.
Teen Sized Humanoid Robot: Archie.
In Proceedings of FIRA 2009, CCIS 44,
Incheon, Korea,
pages 34-41,
August 2009.
Abstract:
This paper describes our first teen sized humanoid robot Archie. This robot has been developed in conjunction with Prof. Kopacek's lab from the Technical University of Vienna. Archie uses brushless motors and harmonic gears with a novel approach to position encoding. Based on our previous experience with small humanoid robots, we developed software to create, store, and play back motions as well as control methods which automatically balance the robot using feedback from an inertial measurement unit (IMU).
@inproceedings{Archie09,
author = {Jacky Baltes and Ahmad Byagowi and John Anderson and Peter Kopacek},
title = {Teen Sized Humanoid Robot: Archie},
booktitle = {Proceedings of FIRA 2009, CCIS 44},
address = {Incheon, Korea},
year = {2009},
month = {August},
pages = {34--41},
abstract = {This paper describes our first teen sized humanoid robot Archie. This robot has been developed in conjunction with Prof. Kopacek's lab from the Technical University of Vienna. Archie uses brushless motors and harmonic gears with a novel approach to position encoding. Based on our previous experience with small humanoid robots, we developed software to create, store, and play back motions as well as control methods which automatically balance the robot using feedback from an inertial measurement unit (IMU).},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/Archie09.pdf}
}
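The balancing described in the abstract above uses IMU feedback. As a generic, illustrative Python sketch only (this is not the controller from the paper; the gains and sign conventions are invented), a PD-style reflex of this kind maps measured torso pitch and pitch rate to a corrective ankle offset.

class PitchBalanceReflex:
    # Generic PD-style balance reflex driven by IMU pitch feedback (hypothetical gains).
    def __init__(self, kp=0.8, kd=0.05):
        self.kp = kp   # proportional gain on pitch error
        self.kd = kd   # derivative gain damping the pitch rate

    def correction(self, pitch, pitch_rate, target=0.0):
        # Returns an ankle-pitch offset (radians) opposing the measured lean.
        error = target - pitch
        return self.kp * error - self.kd * pitch_rate

# Example: tipping forward by 0.1 rad at 0.5 rad/s yields a corrective offset of -0.105 rad.
reflex = PitchBalanceReflex()
print(reflex.correction(pitch=0.1, pitch_rate=0.5))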
-
Jacky Baltes,
N. Michael Mayer,
John Anderson,
Kuo-Yang Tu,
and Alan Liu.
The Humanoid Leagues in Robot Soccer Competitions.
In Proceedings of the IJCAI Workshop on Competitions in Artificial Intelligence and Robotics,
Pasadena, California,
pages 9-16,
July 2009.
AAAI Press.
Abstract:
This paper describes two major humanoid robotic competitions: the RoboCup Humanoid League and the FIRA HuroCup, which were both introduced in 2002. Even though both competitions have the final goal of creating a team of robots that can compete with humans in a soccer match, the two associations focused on different intermediate goals. RoboCup focused on interesting soccer matches between teams of robots as soon as possible, whereas HuroCup emphasizes versatility and robustness through a series of 8 events for a single robot.
@inproceedings{IJCAI09humanoidleagues,
author = {Jacky Baltes and N. Michael Mayer and John Anderson and Kuo-Yang Tu and Alan Liu},
title = {The Humanoid Leagues in Robot Soccer Competitions},
booktitle = {Proceedings of the IJCAI Workshop on Competitions in Artificial Intelligence and Robotics},
year = {2009},
address = {Pasadena, California},
month = {July},
pages = {9--16},
publisher = {AAAI Press},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/IJCAI09HumanoidLeagues.pdf},
abstract = {This paper describes two major humanoid robotic competitions: the RoboCup Humanoid League and the FIRA HuroCup, which were both introduced in 2002. Even though both competitions have the final goal of creating a team of robots that can compete with humans in a soccer match, the two associations focused on different intermediate goals. RoboCup focused on interesting soccer matches between teams of robots as soon as possible, whereas HuroCup emphasizes versatility and robustness through a series of 8 events for a single robot.}
}
-
Michael de Denus,
John Anderson,
and Jacky Baltes.
Heuristic Formation Control in Multi-Robot Systems Using Local Communication and Limited Identification.
In Jacky Baltes,
Michail G. Lagoudakis,
Tadashi Naruse,
and Saeed Shiry, editors,
Proceedings of RoboCup-2009: Robot Soccer World Cup XIII,
Graz, Austria,
July 2009.
[Poster]
@inproceedings{RC09Formations,
author = {Michael de Denus and John Anderson and Jacky Baltes},
title = {Heuristic Formation Control in Multi-Robot Systems Using Local Communication and Limited Identification},
booktitle = {Proceedings of RoboCup-2009: Robot Soccer World Cup XIII},
editor = {Jacky Baltes and Michail G. Lagoudakis and Tadashi Naruse and Saeed Shiry},
address = {Graz, Austria},
year = {2009},
month = {July},
pdf = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/heuristicFC.pdf},
poster = {http://aalab.cs.umanitoba.ca/%7eandersj/Publications/pdf/heuristicFCPoster.pdf}
}