'''
Example program that dumps a MAVLink log file. The log file is
assumed to be in the format that QGroundControl uses, which consists
of a series of MAVLink packets, each preceded by a 64-bit timestamp
header. The timestamp is in microseconds since 1970 (the unix epoch).
'''
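
# A minimal sketch of the record layout described above (illustration only and
# not executed by this script; 'flight.tlog' is a hypothetical file name):
# each record is an 8-byte big-endian microsecond timestamp followed by one
# MAVLink packet, so a single header can be decoded with the struct module
# imported below:
#
#   with open('flight.tlog', 'rb') as f:
#       usec, = struct.unpack('>Q', f.read(8))  # microseconds since the unix epoch
#       timestamp = usec / 1.0e6                # seconds, as used later in this script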

import sys, time, os, struct, json, fnmatch

try:
    from pymavlink.mavextra import *
except ImportError:
    print("WARNING: Numpy missing, mathematical notation will not be supported..")

from argparse import ArgumentParser
parser = ArgumentParser(description=__doc__)

parser.add_argument("--no-timestamps", dest="notimestamps", action='store_true', help="Log doesn't have timestamps")
parser.add_argument("--planner", action='store_true', help="use planner file format")
parser.add_argument("--robust", action='store_true', help="Enable robust parsing (skip over bad data)")
parser.add_argument("-f", "--follow", action='store_true', help="keep waiting for more data at end of file")
parser.add_argument("--condition", default=None, help="select packets by condition")
parser.add_argument("-q", "--quiet", action='store_true', help="don't display packets")
parser.add_argument("-o", "--output", default=None, help="output matching packets to given file")
parser.add_argument("-p", "--parms", action='store_true', help="preserve parameters in output with -o")
parser.add_argument("--format", default=None, help="Change the output format between 'standard', 'json', and 'csv'. For CSV output you must supply the message types you want via --types.")
parser.add_argument("--csv_sep", dest="csv_sep", default=",", help="Select the delimiter between columns for the output CSV file. Use 'tab' to specify tabs. Only applies when --format=csv")
parser.add_argument("--types", default=None, help="types of messages (comma separated with wildcard)")
parser.add_argument("--nottypes", default=None, help="types of messages not to include (comma separated with wildcard)")
parser.add_argument("--dialect", default="ardupilotmega", help="MAVLink dialect")
parser.add_argument("--zero-time-base", action='store_true', help="use Z time base for DF logs")
parser.add_argument("--no-bad-data", action='store_true', help="Don't output corrupted messages")
parser.add_argument("log", metavar="LOG")
args = parser.parse_args()
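
# Example invocations (the script and log file names here are illustrative):
#
#   python mavlogdump.py flight.tlog
#   python mavlogdump.py --types 'ATTITUDE,GPS_RAW_INT' flight.tlog
#   python mavlogdump.py --format csv --types ATTITUDE flight.tlog
#   python mavlogdump.py --format json --no-bad-data flight.tlog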

import inspect

from pymavlink import mavutil


filename = args.log
mlog = mavutil.mavlink_connection(filename, planner_format=args.planner,
                                  notimestamps=args.notimestamps,
                                  robust_parsing=args.robust,
                                  dialect=args.dialect,
                                  zero_time_base=args.zero_time_base)

output = None
if args.output:
    output = open(args.output, mode='wb')

types = args.types
if types is not None:
    types = types.split(',')

nottypes = args.nottypes
if nottypes is not None:
    nottypes = nottypes.split(',')
ext = os.path.splitext(filename)[1]
isbin = ext in ['.bin', '.BIN']
islog = ext in ['.log', '.LOG']  # NOTE: "islog" does not include tlogs
istlog = ext in ['.tlog', '.TLOG']

if args.csv_sep == "tab":
    args.csv_sep = "\t"

def match_type(mtype, patterns):
    '''return True if mtype matches any of the patterns'''
    for p in patterns:
        if fnmatch.fnmatch(mtype, p):
            return True
    return False

# For CSV output from a telemetry log we know the columns up front: build the
# header row from the requested message types.
fields = ['timestamp']
offsets = {}
if istlog and args.format == 'csv':
    try:
        currentOffset = 1  # column offset of each message type within the row
        for mtype in types:
            try:
                typeClass = "MAVLink_{0}_message".format(mtype.lower())
                fields += [mtype + '.' + x for x in inspect.getfullargspec(getattr(mavutil.mavlink, typeClass).__init__).args[1:]]
                offsets[mtype] = currentOffset
                currentOffset += len(fields)
            except IndexError:
                quit()
    except TypeError:
        print("You must specify a list of message types if outputting CSV format via the --types argument.")
        exit()

    # The first line of output names all of the columns.
    csv_out = ["" for x in fields]
    print(','.join(fields))

if isbin and args.format == 'csv':
    # For a .bin log the column names come from its FMT message, so we need
    # exactly one requested type.
    if types is None or len(types) != 1:
        print("Need exactly one type when dumping CSV from bin file")
        quit()

# Timestamp of the previously handled message; the CSV output merges messages
# that share a timestamp into a single row.
last_timestamp = None

while True:
    m = mlog.recv_match(blocking=args.follow)
    if m is None:
        break

    # For a .bin log, pick up the CSV column names from the FMT message of the
    # requested type.
    if isbin and m.get_type() == "FMT" and args.format == 'csv':
        if m.Name == types[0]:
            fields += m.Columns.split(',')
            csv_out = ["" for x in fields]
            print(','.join(fields))

    # When copying to an output file, pass FMT and (optionally) parameter
    # messages straight through so the result remains a usable log.
    if output is not None:
        if (isbin or islog) and m.get_type() == "FMT":
            output.write(m.get_msgbuf())
            continue
        if (isbin or islog) and (m.get_type() == "PARM" and args.parms):
            output.write(m.get_msgbuf())
            continue
        if m.get_type() == 'PARAM_VALUE' and args.parms:
            timestamp = getattr(m, '_timestamp', None)
            output.write(struct.pack('>Q', int(timestamp*1.0e6)) + m.get_msgbuf())
            continue

    if not mavutil.evaluate_condition(args.condition, mlog.messages):
        continue

    if types is not None and m.get_type() != 'BAD_DATA' and not match_type(m.get_type(), types):
        continue

    if nottypes is not None and match_type(m.get_type(), nottypes):
        continue

    # Ignore BAD_DATA messages if the user asked for that, or if they are due
    # to a bad prefix (normally a mismatched MAVLink version).
    if m.get_type() == 'BAD_DATA' and (args.no_bad_data is True or m.reason == "Bad prefix"):
        continue

    # Grab the timestamp.
    timestamp = getattr(m, '_timestamp', 0.0)

    # When writing to an output file, prepend the 64-bit timestamp header where
    # the format requires it, then copy the raw message bytes.
    if output:
        if not (isbin or islog):
            output.write(struct.pack('>Q', int(timestamp*1.0e6)))
        try:
            output.write(m.get_msgbuf())
        except Exception:
            print("Failed to write msg %s" % m.get_type())

    # If quiet was requested, don't print the message.
    if args.quiet:
        continue

    # JSON output: split the message into 'meta' and 'data'.
    if args.format == 'json':
        # Convert the message to a Python dict, which gets us most of the way
        # to valid JSON.
        data = m.to_dict()

        # Remove the mavpackettype value, since the type is carried in the
        # metadata instead.
        del data['mavpackettype']

        # If it's a BAD_DATA message, make it JSON-compatible by turning the
        # raw byte buffer into a list.
        if 'data' in data and not isinstance(data['data'], dict):
            data['data'] = list(data['data'])

        # Wrap everything in a single object with 'meta' and 'data' keys
        # holding the message's metadata and payload respectively.
        outMsg = {"meta": {"type": m.get_type(), "timestamp": timestamp}, "data": data}

        print(json.dumps(outMsg))

    # CSV output: columnar data with a user-specified delimiter.
    elif args.format == 'csv':
        data = m.to_dict()
        mtype = m.get_type()

        # If this message shares a timestamp with the previous one (or is the
        # first message seen), merge its fields into the row being built.
        if timestamp == last_timestamp or last_timestamp is None:
            if isbin:
                newData = [str(data[y]) if y != "timestamp" else "" for y in fields]
            else:
                newData = [str(data[y.split('.')[-1]]) if y.split('.')[0] == mtype and y.split('.')[-1] in data else "" for y in fields]

            for i, val in enumerate(newData):
                if val:
                    csv_out[i] = val

        # Otherwise this is a new timestamp: print the accumulated row and
        # start a new one from the current message.
        else:
            csv_out[0] = "{:.8f}".format(last_timestamp)
            print(args.csv_sep.join(csv_out))
            if isbin:
                csv_out = [str(data[y]) if y != "timestamp" else "" for y in fields]
            else:
                csv_out = [str(data[y.split('.')[-1]]) if y.split('.')[0] == mtype and y.split('.')[-1] in data else "" for y in fields]

    # Otherwise print the message in the standard human-readable form.
    else:
        print("%s.%02u: %s" % (
            time.strftime("%Y-%m-%d %H:%M:%S",
                          time.localtime(timestamp)),
            int(timestamp*100.0)%100, m))

    # Remember this timestamp for the CSV merging logic above.
    last_timestamp = timestamp