Universität Karlsruhe
Improving a priori control knowledge by reinforcement learning
@conference{spott1998improving,
  author={Martin Spott and Martin Riedmiller},
  title={Improving a priori control knowledge by reinforcement learning},
  booktitle={Proc. in AI: Fuzzy-Neuro-Systems '98},
  year={1998},
  month={Mar},
  publisher={Infix-Verlag},
  pages={146--153},
  abstract={The major goal of the Fynesse control architecture is to combine the advantages of different modern controller design methods. A priori knowledge about the control strategy (classical controllers like PID, fuzzy controllers, statistical information) can be brought in if available. This information is transformed into a fuzzy relation. Advanced methods of dynamic programming are used to either improve existing control knowledge or to learn optimal control strategies from scratch. A neural network serves as a learning critic that evaluates the strategy represented by the fuzzy relation. Finally, the strategy can be interpreted in terms of fuzzy rules that are extracted from the fuzzy relation. This paper explains all design stages using the example of the cart-pole balancing problem. In particular, it is shown that even simple controllers used as a priori knowledge accelerate the learning procedure and considerably improve the quality of the resulting controller.},
}
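
To illustrate the idea described in the abstract, the sketch below shows tabular Q-learning on a simplified cart-pole task in which a hand-coded bang-bang controller supplies a priori knowledge by biasing the initial value estimates. This is only a minimal illustrative sketch under assumed constants and an assumed discretisation; it is not the Fynesse architecture itself, which represents the strategy as a fuzzy relation and uses a neural network critic.

# Minimal sketch (not the paper's Fynesse implementation): tabular Q-learning
# on a simplified cart-pole task, where a hand-coded bang-bang controller
# supplies a priori knowledge by biasing the initial Q-values.  All constants
# and the state discretisation below are illustrative assumptions.

import math
import random

# --- simplified cart-pole dynamics (Euler integration) ---
GRAVITY, MASS_CART, MASS_POLE, POLE_LEN, FORCE, DT = 9.8, 1.0, 0.1, 0.5, 10.0, 0.02
TOTAL_MASS = MASS_CART + MASS_POLE

def step(state, action):
    x, x_dot, theta, theta_dot = state
    force = FORCE if action == 1 else -FORCE
    cos_t, sin_t = math.cos(theta), math.sin(theta)
    temp = (force + MASS_POLE * POLE_LEN * theta_dot ** 2 * sin_t) / TOTAL_MASS
    theta_acc = (GRAVITY * sin_t - cos_t * temp) / (
        POLE_LEN * (4.0 / 3.0 - MASS_POLE * cos_t ** 2 / TOTAL_MASS))
    x_acc = temp - MASS_POLE * POLE_LEN * theta_acc * cos_t / TOTAL_MASS
    return (x + DT * x_dot, x_dot + DT * x_acc,
            theta + DT * theta_dot, theta_dot + DT * theta_acc)

def failed(state):
    x, _, theta, _ = state
    return abs(x) > 2.4 or abs(theta) > 12 * math.pi / 180

def discretize(state):
    # coarse state discretisation (chosen for brevity, not taken from the paper)
    x, x_dot, theta, theta_dot = state
    return (int(x > 0), int(x_dot > 0),
            min(5, max(-6, int(theta / 0.02))), int(theta_dot > 0))

def a_priori_action(state):
    # very simple hand-coded controller: push in the direction the pole leans
    _, _, theta, theta_dot = state
    return 1 if theta + 0.1 * theta_dot > 0 else 0

# --- tabular Q-learning, with Q initialised from the a priori controller ---
ALPHA, GAMMA, EPSILON, PRIOR_BONUS = 0.1, 0.99, 0.05, 0.5
Q = {}

def q(s, a):
    key = (discretize(s), a)
    if key not in Q:
        # bias the a priori controller's preferred action so learning starts from it
        Q[key] = PRIOR_BONUS if a == a_priori_action(s) else 0.0
    return Q[key]

def policy(s):
    if random.random() < EPSILON:
        return random.randint(0, 1)
    return max((0, 1), key=lambda a: q(s, a))

for episode in range(200):
    s, steps = (0.0, 0.0, random.uniform(-0.05, 0.05), 0.0), 0
    while not failed(s) and steps < 500:
        a = policy(s)
        s_next = step(s, a)
        reward = -1.0 if failed(s_next) else 0.0
        target = reward + (0.0 if failed(s_next)
                           else GAMMA * max(q(s_next, 0), q(s_next, 1)))
        Q[(discretize(s), a)] = q(s, a) + ALPHA * (target - q(s, a))
        s, steps = s_next, steps + 1
    if (episode + 1) % 50 == 0:
        print(f"episode {episode + 1}: balanced for {steps} steps")

Setting PRIOR_BONUS to 0.0 removes the a priori knowledge and lets the same learner start from scratch, which makes the acceleration effect discussed in the abstract easy to compare.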
