This repository has been archived by the owner on Apr 23, 2024. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 45
/
appstats_profiler.py
158 lines (128 loc) · 6.5 KB
/
appstats_profiler.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
# TODO(colin): fix these lint errors (http://pep8.readthedocs.io/en/release-1.7.x/intro.html#error-codes)
# pep8-disable:E128,E271,E302,E501,E713
"""RPC profiler that uses appstats to track, time, and log all RPC events.
This is just a simple wrapper for appstats with result formatting. See
https://developers.google.com/appengine/docs/python/tools/appstats for more.
"""
import logging
from pprint import pformat
from google.appengine.ext.appstats import recording
import cleanup
import unformatter
import util
class Profile(object):
    """Profiler that wraps appstats for programmatic access and reporting."""

    def __init__(self):
        # Configure AppStats output, keeping a high level of request
        # content so we can detect dupe RPCs more accurately
        recording.config.MAX_REPR = 750

        # Each request has its own internal appstats recorder
        self.recorder = None

    def results(self):
        """Return appstats results in a dictionary for template context.

        Walks every RPC trace collected by the appstats recorder and
        aggregates:
          - per-call details (service, timing, request/response summaries,
            originating stack frames, duplicate-call detection)
          - per-service totals (call count, total time, cache misses)
          - overall call count and total wall time, corrected so that
            asynchronously overlapping traces are not double-counted.

        Returns an empty result set (no calls, zero time) if the recorder
        was never attached for this request.
        """
        if not self.recorder:
            # If appstats fails to initialize for any reason, return an
            # empty set of results.
            # Fix: logging.warn is a deprecated alias of logging.warning.
            logging.warning("Missing recorder for appstats profiler.")
            return {
                "calls": [],
                "total_time": 0,
            }

        total_call_count = 0
        total_time = 0
        calls = []
        service_totals_dict = {}
        likely_dupes = False
        end_offset_last = 0

        requests_set = set()

        # Key linking these summarized results back to the full appstats
        # record for this request. NOTE: `long` is Python 2 only -- this
        # module targets the GAE Python 2.7 runtime.
        appstats_key = long(self.recorder.start_timestamp * 1000)

        for trace in self.recorder.traces:
            total_call_count += 1

            total_time += trace.duration_milliseconds()

            # Don't accumulate total RPC time for traces that overlap
            # asynchronously
            if trace.start_offset_milliseconds() < end_offset_last:
                total_time -= (end_offset_last -
                               trace.start_offset_milliseconds())
            end_offset_last = (trace.start_offset_milliseconds() +
                               trace.duration_milliseconds())

            # Aggregate by service prefix, e.g. "memcache" from
            # "memcache.Get".
            service_prefix = trace.service_call_name()

            if "." in service_prefix:
                service_prefix = service_prefix[:service_prefix.find(".")]

            if service_prefix not in service_totals_dict:
                service_totals_dict[service_prefix] = {
                    "total_call_count": 0,
                    "total_time": 0,
                    "total_misses": 0,
                }

            service_totals_dict[service_prefix]["total_call_count"] += 1
            service_totals_dict[service_prefix]["total_time"] += (
                    trace.duration_milliseconds())

            # Human-readable description of each frame in the call stack
            # that issued this RPC.
            stack_frames_desc = []
            for frame in trace.call_stack_list():
                stack_frames_desc.append("%s:%s %s" %
                        (util.short_rpc_file_fmt(frame.class_or_file_name()),
                         frame.line_number(),
                         frame.function_name()))

            request = trace.request_data_summary()
            response = trace.response_data_summary()

            # GetSystemStatsRequest is time-dependent, so repeated calls are
            # likely intentional for profiling purposes. In particular, the
            # memory sampling profiler generates a lot of these RPCs in prod.
            # (Fix: E713 -- use "not in" rather than "not ... in".)
            likely_dupe = (request in requests_set
                           and 'GetSystemStatsRequest' not in request)
            likely_dupes = likely_dupes or likely_dupe

            requests_set.add(request)

            request_short = request_pretty = None
            response_short = response_pretty = None
            miss = 0
            try:
                request_object = unformatter.unformat(request)
                response_object = unformatter.unformat(response)

                request_short, response_short, miss = cleanup.cleanup(
                        request_object, response_object)

                request_pretty = pformat(request_object)
                response_pretty = pformat(response_object)
            except Exception:
                # Prettifying is best-effort: on any failure we fall back
                # to the raw summaries below. Re-bind the exception
                # ("except Exception as e") and enable this if you want to
                # improve prettification:
                # logging.warning(
                #     "Prettifying RPC calls failed.\n%s\n"
                #     "Request: %s\nResponse: %s",
                #     e, request, response, exc_info=True)
                pass

            service_totals_dict[service_prefix]["total_misses"] += miss

            calls.append({
                "service": trace.service_call_name(),
                "start_offset": util.milliseconds_fmt(
                        trace.start_offset_milliseconds()),
                "total_time": util.milliseconds_fmt(
                        trace.duration_milliseconds()),
                # Fall back to raw summaries when prettifying failed above.
                "request": request_pretty or request,
                "response": response_pretty or response,
                "request_short": request_short or cleanup.truncate(request),
                "response_short": response_short or cleanup.truncate(response),
                "stack_frames_desc": stack_frames_desc,
                "likely_dupe": likely_dupe,
            })

        service_totals = []
        for service_prefix in service_totals_dict:
            service_totals.append({
                "service_prefix": service_prefix,
                "total_call_count":
                    service_totals_dict[service_prefix]["total_call_count"],
                "total_misses":
                    service_totals_dict[service_prefix]["total_misses"],
                "total_time": util.milliseconds_fmt(
                    service_totals_dict[service_prefix]["total_time"]),
            })
        # Slowest services first. total_time was formatted to a string by
        # milliseconds_fmt, so convert back to float for a numeric sort.
        service_totals = sorted(
                service_totals,
                reverse=True,
                key=lambda service_total: float(service_total["total_time"]))

        return {
            "total_call_count": total_call_count,
            "total_time": util.milliseconds_fmt(total_time),
            "calls": calls,
            "service_totals": service_totals,
            "likely_dupes": likely_dupes,
            "appstats_key": appstats_key,
        }

    def wrap(self, app):
        """Wrap and return a WSGI application with appstats recording enabled.

        Args:
            app: existing WSGI application to be wrapped
        Returns:
            new WSGI application that will run the original app with appstats
            enabled.
        """
        def wrapped_appstats_app(environ, start_response):
            # Use this wrapper to grab the app stats recorder for
            # RequestStats.save()
            if recording.recorder_proxy.has_recorder_for_current_request():
                self.recorder = (
                        recording.recorder_proxy.get_for_current_request())
            return app(environ, start_response)

        return recording.appstats_wsgi_middleware(wrapped_appstats_app)