#include <core.hh>
Public Member Functions

virtual int first_action(const std::vector<float> &s) = 0
virtual void last_action(float r) = 0
virtual int next_action(float r, const std::vector<float> &s) = 0
virtual void savePolicy(const char *filename)
virtual void seedExp(std::vector<experience> seeds)
virtual void setDebug(bool d) = 0
virtual ~Agent()
Interface for an agent. Implementations of the Agent interface determine the choice of actions given previous sensations and rewards.
virtual Agent::~Agent() [inline, virtual]
virtual int Agent::first_action(const std::vector<float> &s) [pure virtual]
Determines the first action that an agent takes in an environment. This method implies that the environment is currently in an initial state.
Parameters: s — the initial sensation from the environment.
virtual void Agent::last_action(float r) [pure virtual]
Gives feedback for the last action taken. This method may only be called if the last method called was first_action or next_action. It implies that the task is episodic and has just terminated. Note that terminal sensations (states) are not represented.
Parameters: r — the one-step reward resulting from the previous action.
virtual int Agent::next_action(float r, const std::vector<float> &s) [pure virtual]
Determines the next action that an agent takes in an environment and gives feedback for the previous action. This method may only be called if the last method called was first_action or next_action.
Parameters: r — the one-step reward resulting from the previous action.
Parameters: s — the current sensation from the environment.
virtual void Agent::savePolicy(const char *filename) [inline, virtual]
virtual void Agent::seedExp(std::vector<experience> seeds) [inline, virtual]
virtual void Agent::setDebug(bool d) [pure virtual]
Turns debug output on or off.