"""
Regressive A* planner with Node, Planner and PlanExecutor
"""

from collections import deque

from rgoap import WorldState

import logging
_logger = logging.getLogger('rgoap')

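# This planner searches regressively, i.e. backwards from the goal: each Node
# stores the worldstate that must hold at that point, and expanding a node
# applies the preconditions of candidate actions to obtain the worldstates
# needed one step earlier. The search stops at a node whose worldstate is
# matched by the start worldstate; that node is the first one to execute when
# the plan is run forwards by the PlanExecutor.
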
class Node(object):
    """
    worldstate: the worldstate at this node
    action: the action that led (regressively) to this node (and that should
        be run when executing this path forwards)
    possible_prev_nodes: nodes with actions that the planner found possible
        to help reach this node (empty until the planner ran, used for
        visualization/debugging)
    parent_nodes_path_list: nodes that led (from the goal) to this node
    parent_actions_path_list: actions that led (from the goal) to this node

    Note that the parent path lists begin with the goal node and end with
    this node's parent.

    If this node is the goal node:
    - the action is None
    - the path lists are empty
    - cost() is zero
    """
    def __init__(self, worldstate, action, parent_nodes_path_list, parent_actions_path_list):
        self.worldstate = worldstate
        self.action = action
        self.possible_prev_nodes = []
        self.parent_nodes_path_list = parent_nodes_path_list
        self.parent_actions_path_list = parent_actions_path_list

        self.heuristic_distance = None

    def __str__(self):
        return 'Node %X tCost=%d' % (id(self), self.total_cost())

    def __repr__(self):
        return '<Node %X cost=%s pathc=%s heur_dist=%s totalc=%s action=%s>' % \
            (id(self), self.cost(), self.path_cost(), self.heuristic_distance,
             self.total_cost(), self.action)

    def is_goal(self):
        """See class description"""
        return self.action is None

    def parent_node(self):
        """Return the node this node was expanded from (one step closer to the goal)"""
        return self.parent_nodes_path_list[-1]

    def cost(self):
        """The cost of this node's action"""
        return 0 if self.is_goal() else self.action.cost()

    def path_cost(self):
        """The cost of this node's action plus the costs of all actions on the path back to the goal"""
        return self.cost() + sum(node.cost() for node in self.parent_nodes_path_list)

    def total_cost(self):
        """Path cost plus heuristic distance"""
        return self.path_cost() + self.heuristic_distance

    def _calc_heuristic_distance_for_node(self, start_worldstate):
        """Set self.heuristic_distance, a value representing the difference
        between this node's worldstate and the given start worldstate.
        """
        assert self.heuristic_distance is None, "Node heuristic should be calculated only once"

        unsatisfied_conditions_set = self.worldstate.get_unsatisfied_conditions(start_worldstate)

        if self.is_goal():
            # at the goal node every unsatisfied condition counts as a full step
            self.heuristic_distance = len(unsatisfied_conditions_set)
        else:
            # for other nodes estimate, per unsatisfied condition, how much of
            # the distance between start and goal value is still left
            goal_worldstate = self.parent_nodes_path_list[0].worldstate
            self.heuristic_distance = 0

            for condition in unsatisfied_conditions_set:
                try:
                    goal_value = goal_worldstate.get_condition_value(condition)
                except KeyError:
                    # the goal worldstate does not constrain this condition, so
                    # no relative progress can be computed: count a full step
                    self.heuristic_distance += 1
                else:
                    node_value = self.worldstate.get_condition_value(condition)
                    start_value = start_worldstate.get_condition_value(condition)
                    try:
                        distance_total = abs(goal_value - start_value)
                        distance_remaining = abs(node_value - start_value)
                        relative_distance = float(distance_remaining) / distance_total
                        assert relative_distance > 0, "If the relative progress" \
                            " is zero, why is this condition still unsatisfied?"
                    except (TypeError, ZeroDivisionError):
                        # non-numeric values (or a zero total distance) cannot
                        # be compared relatively: count a full step
                        self.heuristic_distance += 1
                    else:
                        _logger.debug("comparing condition %s: relative_distance = "
                                      "distance_remaining / distance_total = %s / %s = %s",
                                      condition._state_name, distance_remaining,
                                      distance_total, relative_distance)
                        self.heuristic_distance += relative_distance

        # a single unsatisfied condition never counts for more than one step
        self.heuristic_distance = min(len(unsatisfied_conditions_set), self.heuristic_distance)

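    # Illustrative example with hypothetical numbers (not taken from the
    # library): if a numeric condition has start value 0, goal value 10 and
    # this node's regressed worldstate requires the value 4, then
    # distance_total = |10 - 0| = 10, distance_remaining = |4 - 0| = 4, and the
    # condition contributes 4 / 10 = 0.4 to the heuristic distance. Conditions
    # with non-numeric values, or conditions the goal does not constrain,
    # contribute a full 1 instead.
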
    def get_child_nodes(self, actions, start_worldstate):
        """Return a list of child nodes of this node, one per given action,
        with worldstates regressed from this node's worldstate towards the
        given start worldstate.
        """
        assert len(self.possible_prev_nodes) == 0, "Node.get_child_nodes is probably not safe to be called twice"
        for action in actions:
            # extend the parent paths with this node and the new action
            nodes_path_list = self.parent_nodes_path_list[:]
            nodes_path_list.append(self)
            actions_path_list = self.parent_actions_path_list[:]
            actions_path_list.append(action)
            # regress: derive the worldstate needed before running the action
            worldstatecopy = WorldState(self.worldstate)
            action.apply_preconditions(worldstatecopy, start_worldstate)
            node = Node(worldstatecopy, action, nodes_path_list, actions_path_list)
            node._calc_heuristic_distance_for_node(start_worldstate)
            self.possible_prev_nodes.append(node)
        return self.possible_prev_nodes


class Planner(object):
    """
    The given start_worldstate must contain every condition ever needed
    by any action or the goal.
    """

    def __init__(self, actions, worldstate, goal):
        self._actions = actions
        self._start_worldstate = worldstate
        self._goal = goal

        self.last_goal_node = None

    def plan(self, start_worldstate=None, goal=None):
        """Plan a path of actions leading from the start worldstate to the goal.

        Return the node that matches the given start WorldState and is the
        start node of a plan reaching the given Goal, or None if no plan
        could be found.

        If any parameter is not given, the data given at initialisation is used.
        """
        if start_worldstate is not None:
            self._start_worldstate = start_worldstate
        if goal is not None:
            self._goal = goal

        # drop actions whose freeform context is currently invalid
        checked_actions = set()
        for action in self._actions:
            if not action.check_freeform_context():
                _logger.warn("Ignoring action with bad freeform context: %s", action)
            else:
                checked_actions.add(action)

        _logger.info("Planner started\n"
                     "actions: %s\n"
                     "start_worldstate: %s\n"
                     "goal: %s",
                     self._actions, self._start_worldstate, self._goal)

        # build the goal node from the goal's preconditions
        goal_worldstate = WorldState()
        self._goal.apply_preconditions(goal_worldstate)
        _logger.debug("goal_worldstate: %s", goal_worldstate)

        goal_node = Node(goal_worldstate, None, [], [])
        goal_node._calc_heuristic_distance_for_node(self._start_worldstate)
        _logger.debug("goal_node: %s", goal_node)
        self.last_goal_node = goal_node

        # open list of nodes still to be expanded, cheapest first
        child_nodes = deque([goal_node])

        loopcount = 0
        while len(child_nodes) != 0:
            loopcount += 1
            if loopcount > 500:
                _logger.error("Planner stops because the loop limit (%d) is hit!", loopcount - 1)
                break

            _logger.info("Planning loop #%s", loopcount)
            _logger.debug("nodes (%d): %s", len(child_nodes), child_nodes)

            current_node = child_nodes.popleft()
            _logger.debug("current node (least cost): %s", current_node)
            _logger.debug("current node's worldstate: %s", current_node.worldstate)

            if self._start_worldstate.matches(current_node.worldstate):
                _logger.info("Found plan! Considered nodes: %s; nodes left: %s", loopcount, len(child_nodes))
                _logger.info("plan nodes: %s", current_node.parent_nodes_path_list)
                _logger.info("plan actions: %s", current_node.parent_actions_path_list)
                return current_node

            helpful_actions = self._filter_matching_actions(current_node.worldstate,
                                                            checked_actions)
            new_child_nodes = current_node.get_child_nodes(helpful_actions,
                                                           self._start_worldstate)
            _logger.debug("new child nodes: %s", new_child_nodes)

            child_nodes.extend(new_child_nodes)
            child_nodes = deque(sorted(child_nodes, key=lambda node: node.total_cost()))

        _logger.warn("No plan found.")
        return None
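
    # Note on the search order: the open list is re-sorted by total_cost()
    # (path cost plus heuristic distance) after every expansion, so the
    # cheapest node is always expanded next, giving the A*-like behaviour of
    # this regressive planner. The loop limit of 500 iterations bounds the
    # search when no plan can be found within that budget.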

    def _filter_matching_actions(self, node_worldstate, actions):
        """Return a list of actions whose effects might help to get from the
        start worldstate to the given node worldstate.
        """
        unsatisfied_conditions_set = node_worldstate.get_unsatisfied_conditions(self._start_worldstate)

        helpful_actions = []

        for action in actions:
            if action.has_satisfying_effects(node_worldstate, self._start_worldstate, unsatisfied_conditions_set):
                _logger.debug("helping action: %s", action)
                helpful_actions.append(action)
            else:
                _logger.debug("helpless action: %s", action)

        return helpful_actions


class PlanExecutor(object):

    def execute(self, start_node, introspector=None):
        """Recursively execute an RGOAP plan starting at the given node.

        Return True on success, False otherwise.
        """
        assert len(start_node.parent_nodes_path_list) == len(start_node.parent_actions_path_list)

        if start_node.is_goal():
            _logger.info("Executor reached goal node, stopping execution")
            return True

        # the plan is executed forwards: from the start node towards the goal
        # node via parent_node()
        current_worldstate = start_node.worldstate
        action = start_node.action
        next_node = start_node.parent_node()
        next_worldstate = next_node.worldstate

        if introspector is not None:
            introspector.publish_update(start_node)

        if action.is_valid(current_worldstate):
            if action.check_freeform_context():
                _logger.info("PlanExecutor now executes: %s", action)
                action.run(next_worldstate)
                return self.execute(next_node, introspector)
            else:
                _logger.error("Action's freeform context isn't valid! Aborting"
                              " executor. action: %s", action)
        else:
            _logger.error("Action isn't valid in the current worldstate! Aborting"
                          " executor.\n action: %s\n worldstate: %s",
                          action, current_worldstate)

        return False
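
# Usage sketch (illustrative only; `my_actions`, `my_start_worldstate` and
# `my_goal` are hypothetical placeholders, not defined in this module):
#
#     planner = Planner(my_actions, my_start_worldstate, my_goal)
#     start_node = planner.plan()  # returns None if no plan was found
#     if start_node is not None:
#         PlanExecutor().execute(start_node)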