analysis.py
Go to the documentation of this file.
1 '''
2 Copyright (c) 2016, Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Hutmacher Robin, Karrenbauer Oliver, Marek Felix, Meissner Pascal, Trautmann Jeremias, Wittenbeck Valerij
3 
4 All rights reserved.
5 
6 Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
7 
8 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
9 
10 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
11 
12 3. Neither the name of the copyright holder nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
13 
14 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
15 '''
16 
17 import pandas as pd
18 import os
19 import os.path
20 import fnmatch
21 import math
22 
23 
def generate_analysis(path, filename):
    """Generate a per-state analysis csv for one experiment folder.

    The folder has to contain a ``log.csv`` file whose rows are
    ``timestamp state outcome`` separated by spaces (the first line is
    replaced with these column titles in place).

    Writes ``filename`` (count/sum/mean of the time spent per state, plus a
    "MovedDistance (in m)" row) into ``path``.

    :param path: folder containing log.csv
    :param filename: name of the csv file to write into path
    :return: dict with keys 'runtime' (last minus first timestamp) and
             'dataframe' (the per-state DataFrame), or None if the folder
             could not be analyzed.
    """
    log_path = os.path.join(path, 'log.csv')

    if not os.path.isfile(log_path):
        print(path + " does not contain log.csv")
        print("Skipping")
        return

    with open(log_path, 'r') as fin:
        lines = fin.readlines()

    if len(lines) == 0:
        # empty log file
        return

    # replace the first line with column titles and rewrite the log in place
    lines[0] = "timestamp state outcome\n"
    with open(log_path, 'w') as fout:
        fout.writelines(lines)

    df = pd.read_csv(log_path, sep=' ')

    # add delta column: time spent in each state (timestamp difference to
    # the previous row; first row gets 0)
    df['delta'] = (df['timestamp'] - df['timestamp'].shift()).fillna(0)

    # aggregate per state: number of visits, total and mean time
    grouped = df.groupby('state')['delta']
    new_df = pd.DataFrame({'count': grouped.count()})
    new_df['sum'] = grouped.sum()
    new_df['mean'] = grouped.mean()

    new_df.to_csv(os.path.join(path, filename))

    # this is not how you should do it but it gets the job done:
    # reading the csv back turns the 'state' index into a regular column
    new_df = pd.read_csv(os.path.join(path, filename))

    generate_moved_distance(path, new_df)
    # Update with MovedDistance; has to be written a second time, because of
    # the state column fix above
    new_df.to_csv(os.path.join(path, filename), index=False)
    print("experiment_mean for: " + str(path) + '\n' + str(new_df))

    # overall runtime of the experiment: last minus first timestamp
    runtime = df['timestamp'][df.last_valid_index()] - \
        df['timestamp'][df.first_valid_index()]

    print("Runtime: " + str(runtime) + '\n')
    return {'runtime': runtime, 'dataframe': new_df}
85 
def generate_moved_distance(path, df):
    """Append a "MovedDistance (in m)" row to *df* from the state machine log.

    Looks for exactly one ``state_machine*.log`` file in *path*, extracts the
    initial robot pose and every pose logged after a move_base navigation
    step, accumulates the euclidean distances between consecutive poses and
    appends the row ``["MovedDistance (in m)", count, sum, mean]`` to *df*
    in place. Returns None without touching *df* when no usable log exists.
    """
    log_path = None

    for candidate in os.listdir(path):
        if not fnmatch.fnmatch(candidate, 'state_machine*.log'):
            continue
        if log_path is not None:
            print("Multiple logs for state_machine found. This will be ignored: " + str(candidate))
        else:
            log_path = os.path.join(path, candidate)

    if log_path is None:
        print(str(path) + " does not contain state_machine*.log")
        print("Skipping")
        return

    with open(log_path, 'r') as fin:
        lines = fin.readlines()

    if len(lines) == 0:
        # empty log file
        return

    awaiting_initial_coord = False
    awaiting_step_coord = False

    total_distance = 0.0
    step_count = 0.0

    prev_x = 0.0
    prev_y = 0.0
    cur_x = 0.0
    cur_y = 0.0

    for line in lines:
        # Initial robot pose header (log format as of 14.11.2016)
        if "Initial robot state" in line:
            awaiting_initial_coord = True
        elif awaiting_initial_coord and "x: " in line:
            prev_x = float(line.replace("x: ", ""))
        elif awaiting_initial_coord and "y: " in line:
            prev_y = float(line.replace("y: ", ""))
            awaiting_initial_coord = False

        # Pose header written after each move_base step
        # (log format as of 14.11.2016)
        elif "This is the actual robot pose after navigation: position:" in line:
            awaiting_step_coord = True
        elif awaiting_step_coord and " x: " in line:
            cur_x = float(line.replace(" x: ", ""))
        elif awaiting_step_coord and " y: " in line:
            cur_y = float(line.replace(" y: ", ""))
            # segment complete: add distance from previous to current pose
            total_distance += math.sqrt(pow(prev_x - cur_x, 2) + pow(prev_y - cur_y, 2))
            step_count += 1
            prev_x = cur_x
            prev_y = cur_y
            awaiting_step_coord = False

    if step_count == 0:
        # incomplete log file
        return

    # Append the summary row to the caller's DataFrame
    df.loc[len(df)] = ["MovedDistance (in m)", step_count, total_distance,
                       total_distance / step_count]
154 
155 
def getDistance(x1, y1, x2, y2):
    """Return the euclidean distance between the points (x1, y1) and (x2, y2)."""
    delta_x_squared = pow(x1 - x2, 2)
    delta_y_squared = pow(y1 - y2, 2)
    return math.sqrt(delta_x_squared + delta_y_squared)
158 
159 
def analyze_subfolders(path):
    """Generate analysis files in all subfolders that contain a log.csv file.

    Walks the directory tree bottom-up: leaf folders with a log.csv are
    analyzed individually via generate_analysis(); folders without one are
    treated as aggregation levels and receive concatenated csv files built
    from the results of their immediate subdirectories.

    :param path: root folder of the (possibly nested) experiment hierarchy
    """
    runtime_index = {}
    runtime_data = {}
    df_concat = {}

    # It's important to go bottom-up, so that we can create the csv files in
    # higher levels out of the basis
    for root, dirs, files in os.walk(path, topdown=False):
        abspathOfCurrentDir = os.path.abspath(root)
        if os.path.isfile(os.path.join(root, "log.csv")):
            ret = generate_analysis(root, os.path.basename(root) + ".csv")
            if ret is None:
                print("ret is none")
                continue
            runtime_index[abspathOfCurrentDir] = [root]
            runtime_data[abspathOfCurrentDir] = [ret['runtime']]
            df_concat[abspathOfCurrentDir] = [ret['dataframe']]
        else:
            # If no log.csv present it is most likely a dir containing
            # subdirs, which already have csv files
            subDirPaths = get_immediate_subdirectories(abspathOfCurrentDir)
            appendDataAndCreateCsvForCurrentDepth(root, subDirPaths, runtime_index, runtime_data, df_concat)
181 
182 
def get_immediate_subdirectories(a_dir):
    """Return the absolute paths of the directories directly inside *a_dir*."""
    subdirectories = []
    for entry in os.listdir(a_dir):
        entry_path = os.path.join(a_dir, entry)
        if os.path.isdir(entry_path):
            subdirectories.append(os.path.abspath(entry_path))
    return subdirectories
186 
def appendDataAndCreateCsvForCurrentDepth(currentDir, subDirPaths, runtime_index, runtime_data, df_concat):
    """Aggregate the analysis results of *subDirPaths* into *currentDir*.

    Collects the runtime indexes/values and DataFrames of every already
    analyzed subdirectory (the bottom-up walk guarantees they come first),
    registers the merged lists under currentDir's absolute path in the three
    accumulator dicts (mutated in place), and writes the concatenated csv
    files for this depth via createConcatCsv().

    Prints a notice and returns early when no subdirectory contributed data.
    """
    all_runtime_index = []
    all_runtime_data = []
    all_df_concat = []

    # Make a new list containing all basis analysis for the currentDir
    for subDirPath in subDirPaths:
        if subDirPath in runtime_index:
            all_runtime_index.extend(runtime_index[subDirPath])
            all_runtime_data.extend(runtime_data[subDirPath])
            all_df_concat.extend(df_concat[subDirPath])
        else:
            print("No csv in subDir: " + str(subDirPath))

    if len(all_runtime_index) == 0:
        print("There are no csv in the subdirs for the following dir: " + str(currentDir))
        return

    abspathOfCurrentDir = os.path.abspath(currentDir)
    runtime_index[abspathOfCurrentDir] = all_runtime_index
    runtime_data[abspathOfCurrentDir] = all_runtime_data
    df_concat[abspathOfCurrentDir] = all_df_concat

    createConcatCsv(all_runtime_index, all_runtime_data, all_df_concat, currentDir)
210 
def createConcatCsv(runtime_indexes, runtime_datas, df_concates, path):
    """Write the aggregated mean and runtime csv files for one hierarchy level.

    Concatenates all per-experiment DataFrames, averages count/sum/mean per
    state and stores the result as ``<basename>_mean.csv``; the runtimes are
    stored as ``<basename>_runtimes.csv``. Both files are written into *path*.

    :param runtime_indexes: labels (folder paths) for the runtime series
    :param runtime_datas: runtime value per label (must be non-empty)
    :param df_concates: list of per-state DataFrames to aggregate
    :param path: target folder; its basename becomes the file prefix
    """
    # single concat instead of the quadratic pairwise-append loop
    df_concat = pd.concat(df_concates) if len(df_concates) > 0 else pd.DataFrame()

    grouped = df_concat.groupby('state')
    experiment_mean = pd.DataFrame()
    experiment_mean['count'] = grouped['count'].mean()
    experiment_mean['sum'] = grouped['sum'].mean()
    experiment_mean['mean'] = grouped['mean'].mean()

    filePrefix = os.path.basename(path)
    print("experiment_mean for: " + str(path) + '\n' + str(experiment_mean))
    experiment_mean.to_csv(os.path.join(path, filePrefix + '_mean.csv'))

    pd.Series(runtime_datas, index=runtime_indexes).to_csv(os.path.join(path, filePrefix + '_runtimes.csv'))
    # sum() replaces reduce(), which is no longer a builtin in Python 3
    print("Runtime (mean): " + str(sum(runtime_datas) / len(runtime_datas)) + '\n')
227 
228 
if __name__ == '__main__':
    # Interactive entry point: ask for the root folder and analyze it.
    print("This tool will generate analysis information from the log file(s). You can enter the path to the log file in the next step.")
    print("You can create a hierarchy of log_folders. For each level there will be a runtimes.csv and experiment_mean.csv generated.")
    try:
        # Python 2: raw_input returns the raw line without evaluating it
        read_line = raw_input
    except NameError:
        # Python 3: input() has raw_input's semantics
        read_line = input
    path = str(read_line("Enter path: "))
    analyze_subfolders(path)
234 
def appendDataAndCreateCsvForCurrentDepth(currentDir, subDirPaths, runtime_index, runtime_data, df_concat)
Definition: analysis.py:187
def generate_analysis(path, filename)
Definition: analysis.py:24
def getDistance(x1, y1, x2, y2)
Definition: analysis.py:156
def analyze_subfolders(path)
Definition: analysis.py:160
def generate_moved_distance(path, df)
Definition: analysis.py:86
def createConcatCsv(runtime_indexes, runtime_datas, df_concates, path)
Definition: analysis.py:211
def get_immediate_subdirectories(a_dir)
Definition: analysis.py:183


asr_state_machine
Author(s): Allgeyer Tobias, Aumann Florian, Borella Jocelyn, Hutmacher Robin, Karrenbauer Oliver, Marek Felix, Meißner Pascal, Trautmann Jeremias, Wittenbeck Valerij
autogenerated on Mon Feb 28 2022 21:53:50