255 lines
8.8 KiB
Python
Executable File
255 lines
8.8 KiB
Python
Executable File
#!/usr/bin/env python
|
|
from __future__ import absolute_import
|
|
from __future__ import division
|
|
from __future__ import print_function
|
|
from __future__ import unicode_literals
|
|
|
|
import argparse
|
|
import json
|
|
import smap
|
|
import trace_data
|
|
import urllib
|
|
|
|
# Multiplier applied to second-based timestamps before they are written into
# the cpuprofiler 'timestamps' array.
# NOTE(review): the name says nanoseconds but the value (1e6) is the
# seconds-to-MICROseconds factor -- confirm which unit the consumer expects.
SECONDS_TO_NANOSECONDS = (1000*1000)

# Interval between synthetic profiler samples, in seconds (100 microseconds).
SAMPLE_DELTA_IN_SECONDS = 0.0001
|
|
|
|
class Marker(object):
    """One begin or end event of a profiled function call.

    A begin/end pair produced for the same call shares the same ``ident``
    so that, after all markers are flattened into one list and sorted,
    the pair stays in begin-before-end order.
    """

    def __init__(self, _name, _timestamp, _depth, _is_end, _ident, url, line, col):
        self.name = _name            # function name from the trace entry
        self.timestamp = _timestamp  # event time in seconds
        self.depth = _depth          # call-stack depth of the trace node
        self.is_end = _is_end        # 0 = begin marker, 1 = end marker
        self.ident = _ident          # id shared by a begin/end pair
        self.url = url               # source url (may be rewritten by _calcurl)
        self.line = line
        self.col = col

    # Sort markers by timestamp, then depth of function call, and finally
    # make sure markers of the same ident are ordered begin then end.
    def __cmp__(self, other):  # Python 2 comparison hook
        if self.timestamp != other.timestamp:
            return -1 if self.timestamp < other.timestamp else 1
        if self.depth != other.depth:
            return -1 if self.depth < other.depth else 1
        if self.ident == other.ident and self.is_end != other.is_end:
            # Bug fix: the original returned 0 when self was the begin
            # marker (relying on sort stability) and 1 when self was the
            # end marker even against itself; make the ordering explicit
            # and symmetric.
            return 1 if self.is_end else -1
        return 0

    # Rich comparison so sorting also works on Python 3, where __cmp__
    # is ignored.  Delegates to __cmp__ to keep a single source of truth.
    def __lt__(self, other):
        return self.__cmp__(other) < 0
|
|
|
|
# calculate marker name based on combination of function name and location
|
|
def _calcname(entry):
|
|
funcname = ""
|
|
if "functionName" in entry:
|
|
funcname = funcname + entry["functionName"]
|
|
return funcname
|
|
|
|
def _calcurl(mapcache, entry, map_file):
    """Symbolicate entry's url/line/col through a source map, if available.

    Source maps are fetched once per bundle url and cached in ``mapcache``
    (url -> list of smap entries; an empty list marks a failed fetch so it
    is not retried).  Mutates ``entry`` in place on a successful lookup.
    """
    if entry.url not in mapcache:
        # Convention: foo.bundle is mapped by a sibling foo.map file.
        map_url = entry.url.replace('.bundle', '.map')

        if map_url != entry.url:
            if map_file:
                # An explicit -map argument overrides the derived url.
                print('Loading sourcemap from:' + map_file)
                map_url = map_file

            try:
                # NOTE(review): urllib.urlopen is the Python 2 API; on
                # Python 3 this would need urllib.request.urlopen.
                url_file = urllib.urlopen(map_url)
                if url_file is not None:
                    mapcache[entry.url] = smap.parse(url_file)
            except Exception:
                # Bug fix: 'except Exception, e' is Python-2-only syntax
                # and 'e' was unused.  Best effort: cache an empty map so
                # this url is not retried.
                mapcache[entry.url] = []

    if entry.url in mapcache:
        source_entry = smap.find(mapcache[entry.url], entry.line, entry.col)
        if source_entry:
            entry.url = 'file://' + source_entry.src
            entry.line = source_entry.src_line
            entry.col = source_entry.src_col
|
|
|
|
def _compute_markers(markers, call_point, depth):
    """Recursively flatten a trace call tree into begin/end Marker pairs.

    Appends two markers (begin, then end) to ``markers`` for every call
    recorded on ``call_point``, then recurses into its children one depth
    level down.  ``ident`` links each begin/end pair for stable sorting.
    """
    name = _calcname(call_point)
    ident = len(markers)
    # Location fields are optional in the trace; default to no location.
    url = call_point.get("url", "")
    line_number = call_point.get("lineNumber", -1)
    column_number = call_point.get("columnNumber", -1)

    for call in call_point["calls"]:
        start = call["startTime"]
        markers.append(Marker(name, start, depth, 0, ident, url, line_number, column_number))
        markers.append(Marker(name, start + call["totalTime"], depth, 1, ident, url, line_number, column_number))
        # Two markers were appended, so the next pair's ident skips by 2
        # to stay equal to its index in ``markers``.
        ident += 2

    for child in call_point.get("children", []):
        _compute_markers(markers, child, depth + 1)
|
|
|
|
def _find_child(children, name):
|
|
for child in children:
|
|
if child['functionName'] == name:
|
|
return child
|
|
return None
|
|
|
|
def _add_entry_cpuprofiler_program(newtime, cpuprofiler):
    """Attribute idle time (empty marker stack) to the '(program)' node.

    Emits one sample every SAMPLE_DELTA_IN_SECONDS from the last sampled
    time up to ``newtime``, bumping the synthetic '(program)' node's hit
    count, then advances cpuprofiler['lastTime'].
    """
    curnode = _find_child(cpuprofiler['head']['children'], '(program)')
    if cpuprofiler['lastTime'] is None:
        # First event seen: just record the time, nothing to backfill.
        cpuprofiler['lastTime'] = newtime
        return
    last_time = cpuprofiler['lastTime']
    while last_time < newtime:
        curnode['hitCount'] += 1
        cpuprofiler['samples'].append(curnode['callUID'])
        cpuprofiler['timestamps'].append(int(last_time * SECONDS_TO_NANOSECONDS))
        last_time += SAMPLE_DELTA_IN_SECONDS
    cpuprofiler['lastTime'] = last_time
|
|
|
|
|
|
def _add_entry_cpuprofiler(stack, newtime, cpuprofiler):
    """Record samples for the current marker stack up to ``newtime``.

    Walks (and lazily builds) the cpuprofiler node tree along the path of
    function names on ``stack``, then emits one sample for the innermost
    node every SAMPLE_DELTA_IN_SECONDS between the last sampled time and
    ``newtime``.  Mutates ``cpuprofiler`` in place.
    """
    marker = stack[-1]  # the innermost active marker receives the samples

    if marker.name not in cpuprofiler['markers']:
        # NOTE(review): new names are keyed with cpuprofiler['id'] while
        # 'callUID' is the counter that gets incremented -- looks
        # inconsistent, but it is preserved as-is to keep output identical;
        # confirm the intent upstream before changing.
        cpuprofiler['markers'][marker.name] = cpuprofiler['id']
        cpuprofiler['callUID'] += 1
    callUID = cpuprofiler['markers'][marker.name]

    # Descend from the root, creating any missing node along the stack path.
    curnode = cpuprofiler['head']
    for frame in stack:
        newnode = _find_child(curnode['children'], frame.name)
        if newnode is None:
            newnode = {
                'callUID': callUID,
                'url': marker.url,
                'functionName': frame.name,
                'hitCount': 0,
                'lineNumber': marker.line,
                'columnNumber': marker.col,
                'scriptId': callUID,
                'positionTicks': [],
                'id': cpuprofiler['id'],
                'children': [],
            }
            cpuprofiler['id'] += 1
            curnode['children'].append(newnode)
        curnode['deoptReason'] = ''
        curnode = newnode

    # First event seen: seed lastTime so the sampling loop below is a no-op.
    # (The original checked None twice; the second branch always ran.)
    if cpuprofiler['lastTime'] is None:
        cpuprofiler['lastTime'] = newtime

    last_time = cpuprofiler['lastTime']
    while last_time < newtime:
        curnode['hitCount'] += 1
        if not curnode['positionTicks']:
            curnode['positionTicks'].append({'line': curnode['callUID'], 'ticks': 0})
        curnode['positionTicks'][0]['ticks'] += 1
        cpuprofiler['samples'].append(curnode['callUID'])
        # Use the shared module constants instead of repeating the literals
        # 1000*1000 and 0.0001 (consistent with _add_entry_cpuprofiler_program).
        cpuprofiler['timestamps'].append(int(last_time * SECONDS_TO_NANOSECONDS))
        last_time += SAMPLE_DELTA_IN_SECONDS
    cpuprofiler['lastTime'] = last_time
|
|
|
|
def _create_default_cpuprofiler_node(name, _id, _uid):
|
|
return {'functionName': name,
|
|
'scriptId':'0',
|
|
'url':'',
|
|
'lineNumber':0,
|
|
'columnNumber':0,
|
|
'positionTicks':[],
|
|
'id':_id,
|
|
'callUID':_uid,
|
|
'children': [],
|
|
'hitCount': 0,
|
|
'deoptReason':''}
|
|
|
|
def main():
    """Convert a JSON trace into fbsystrace text and/or cpuprofiler JSON.

    Reads the trace named on the command line, flattens it into sorted
    begin/end markers (symbolicated through source maps when available),
    then writes whichever of the -o / -cpuprofiler outputs were requested.
    """
    parser = argparse.ArgumentParser(description="Converts JSON profile format to fbsystrace text output")

    parser.add_argument(
        "-o",
        dest = "output_file",
        default = None,
        help = "Output file for trace data")
    parser.add_argument(
        "-cpuprofiler",
        dest = "output_cpuprofiler",
        default = None,
        help = "Output file for cpuprofiler data")
    parser.add_argument(
        "-map",
        dest = "map_file",
        default = None,
        help = "Map file for symbolicating")
    parser.add_argument( "file", help = "JSON trace input_file")

    args = parser.parse_args()

    # Flatten every root call tree into begin/end markers.
    markers = []
    with open(args.file, "r") as trace_file:
        trace = json.load(trace_file)
        for root_entry in trace["rootNodes"]:
            _compute_markers(markers, root_entry, 0)

    # Symbolicate marker locations through source maps (cached per url).
    mapcache = {}
    for m in markers:
        _calcurl(mapcache, m, args.map_file)

    # sorted() already returns a list; no need to wrap in list().
    sorted_markers = sorted(markers)

    if args.output_cpuprofiler is not None:
        cpuprofiler = {
            'startTime': None,
            'endTime': None,
            'lastTime': None,
            'id': 4,        # ids/uids 1-3 are used by the default nodes below
            'callUID': 4,
            'samples': [],
            'timestamps': [],
            'markers': {},
            'head': _create_default_cpuprofiler_node('(root)', 1, 1),
        }
        cpuprofiler['head']['children'].append(_create_default_cpuprofiler_node('(root)', 2, 2))
        cpuprofiler['head']['children'].append(_create_default_cpuprofiler_node('(program)', 3, 3))
        marker_stack = []
        with open(args.output_cpuprofiler, 'w') as file_out:
            for marker in sorted_markers:
                # Idle gaps (empty stack) are billed to '(program)'; otherwise
                # sample the innermost active marker.
                if marker_stack:
                    _add_entry_cpuprofiler(marker_stack, marker.timestamp, cpuprofiler)
                else:
                    _add_entry_cpuprofiler_program(marker.timestamp, cpuprofiler)
                if marker.is_end:
                    marker_stack.pop()
                else:
                    marker_stack.append(marker)
            # Bug fix: guard against a trace that produced no samples before
            # indexing timestamps[0] (the original raised IndexError).
            if cpuprofiler['timestamps']:
                cpuprofiler['startTime'] = cpuprofiler['timestamps'][0] / 1000000.0
                cpuprofiler['endTime'] = cpuprofiler['timestamps'][-1] / 1000000.0
            json.dump(cpuprofiler, file_out, sort_keys=False, indent=4, separators=(',', ': '))

    if args.output_file is not None:
        with open(args.output_file,"w") as trace_file:
            for marker in sorted_markers:
                # "B" marks a function entry, "E" its matching exit.
                start_or_end = "E" if marker.is_end else "B"
                # Output with timestamp at high level of precision.
                trace_file.write("json-0 [000] .... {0:.12f}: tracing_mark_write: {1}|0|{2}\n".format(
                    marker.timestamp,
                    start_or_end,
                    marker.name))
|
|
|
|
# Bug fix: guard the entry point so importing this module (e.g. for reuse
# of its helpers or for testing) does not immediately run the converter.
if __name__ == "__main__":
    main()
|