// Copyright 2010 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#ifndef V8_CPU_PROFILER_H_
#define V8_CPU_PROFILER_H_

#include "allocation.h"
#include "atomicops.h"
#include "circular-queue.h"
#include "unbound-queue.h"

namespace v8 {
namespace internal {

// Forward declarations.
class CodeEntry;
class CodeMap;
class CpuProfile;
class CpuProfilesCollection;
class HashMap;
class ProfileGenerator;
class TokenEnumerator;

#define CODE_EVENTS_TYPE_LIST(V)                             \
  V(CODE_CREATION,    CodeCreateEventRecord)                 \
  V(CODE_MOVE,        CodeMoveEventRecord)                   \
  V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
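
// CODE_EVENTS_TYPE_LIST is an X-macro: a client defines a two-argument
// macro and passes it in as V to stamp out one entry per event type.
// For illustration, instantiating it with DECLARE_TYPE (see
// CodeEventRecord below) expands the enum to:
//
//   enum Type {
//     NONE = 0,
//     CODE_CREATION,
//     CODE_MOVE,
//     SHARED_FUNC_MOVE,
//     NUMBER_OF_TYPES
//   };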


class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;
  unsigned order;
};


class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;
  CodeEntry* entry;
  unsigned size;
  Address shared;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;
  Address to;

  INLINE(void UpdateCodeMap(CodeMap* code_map));
};


class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order)
      : filler(1),
        order(order) {
    ASSERT(filler != SamplingCircularQueue::kClear);
  }

  // The first machine word of a TickSampleEventRecord must never become
  // equal to SamplingCircularQueue::kClear. As neither order nor
  // TickSample's first field is reliable in this sense (order can
  // overflow, and TickSample can have all fields reset), we are forced
  // to use an artificial filler field.
  int filler;
  unsigned order;
  TickSample sample;

  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }
};


// This class implements both the profile events processor thread and
// methods called by event producers: VM and stack sampler threads.
class ProfilerEventsProcessor : public Thread {
 public:
  explicit ProfilerEventsProcessor(ProfileGenerator* generator);
  virtual ~ProfilerEventsProcessor() {}

  // Thread control.
  virtual void Run();
  inline void Stop() { running_ = false; }
  INLINE(bool running()) { return running_; }

  // Event adding methods, called by VM threads.
  void CallbackCreateEvent(Logger::LogEventsAndTags tag,
                           const char* prefix, String* name,
                           Address start);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       String* name,
                       String* resource_name, int line_number,
                       Address start, unsigned size,
                       Address shared);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       const char* name,
                       Address start, unsigned size);
  void CodeCreateEvent(Logger::LogEventsAndTags tag,
                       int args_count,
                       Address start, unsigned size);
  void CodeMoveEvent(Address from, Address to);
  void CodeDeleteEvent(Address from);
  void SharedFunctionInfoMoveEvent(Address from, Address to);
  void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag,
                             const char* prefix, String* name,
                             Address start, unsigned size);
  // Puts the current stack into the tick sample events buffer.
  void AddCurrentStack();

  // Tick sample events are filled directly in the buffer of the circular
  // queue (the structure is of fixed width, but usually not all stack
  // frame entries are filled). This method returns a pointer to the next
  // record of the buffer.
  INLINE(TickSample* TickSampleEvent());
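
  // Producer-side sketch (illustrative only, not the exact sampler code):
  // a sampler obtains the next slot and fills the sample in place,
  // avoiding a copy of the fixed-width record:
  //
  //   TickSample* sample = processor->TickSampleEvent();
  //   // ... fill in the program counter, stack pointer and frames ...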

 private:
  union CodeEventsContainer {
    CodeEventRecord generic;
#define DECLARE_CLASS(ignore, type) type type##_;
    CODE_EVENTS_TYPE_LIST(DECLARE_CLASS)
#undef DECLARE_CLASS
  };
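
  // Dispatch sketch (illustrative, not the exact implementation): records
  // travel through events_buffer_ as CodeEventsContainer; the consumer
  // reads the common 'generic' header and switches on its type:
  //
  //   switch (record.generic.type) {
  //     case CodeEventRecord::CODE_MOVE:
  //       record.CodeMoveEventRecord_.UpdateCodeMap(code_map);
  //       break;
  //     // ... other event types ...
  //   }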

  // Called from the events processing thread (Run() method).
  bool ProcessCodeEvent(unsigned* dequeue_order);
  bool ProcessTicks(unsigned dequeue_order);

  INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag));

  ProfileGenerator* generator_;
  bool running_;
  UnboundQueue<CodeEventsContainer> events_buffer_;
  SamplingCircularQueue ticks_buffer_;
  UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_;
  unsigned enqueue_order_;
};

} }  // namespace v8::internal


#define PROFILE(isolate, Call)                                \
  LOG(isolate, Call);                                         \
  do {                                                        \
    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
      v8::internal::CpuProfiler::Call;                        \
    }                                                         \
  } while (false)
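
// Example call site (illustrative; the tag and variables are hypothetical):
//
//   PROFILE(isolate, CodeCreateEvent(Logger::FUNCTION_TAG, code, name));
//
// The event is unconditionally forwarded to the logger via LOG; the
// CpuProfiler::CodeCreateEvent call happens only while profiling is active.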


namespace v8 {
namespace internal {


// TODO(isolates): isolatify this class.
class CpuProfiler {
 public:
  static void Setup();
  static void TearDown();

  static void StartProfiling(const char* title);
  static void StartProfiling(String* title);
  static CpuProfile* StopProfiling(const char* title);
  static CpuProfile* StopProfiling(Object* security_token, String* title);
  static int GetProfilesCount();
  static CpuProfile* GetProfile(Object* security_token, int index);
  static CpuProfile* FindProfile(Object* security_token, unsigned uid);
  static void DeleteAllProfiles();
  static void DeleteProfile(CpuProfile* profile);
  static bool HasDetachedProfiles();
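
  // Usage sketch (illustrative; the title string is hypothetical). Profiles
  // are identified by title, so a StartProfiling/StopProfiling pair must
  // use the same one:
  //
  //   CpuProfiler::StartProfiling("my-profile");
  //   // ... run the code to be measured ...
  //   CpuProfile* profile = CpuProfiler::StopProfiling("my-profile");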

  // Invoked from the stack sampler (thread or signal handler).
  static TickSample* TickSampleEvent(Isolate* isolate);

  // Must be called via the PROFILE macro, otherwise these will crash when
  // profiling is not enabled.
  static void CallbackEvent(String* name, Address entry_point);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, const char* comment);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* name);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code,
                              SharedFunctionInfo* shared,
                              String* source, int line);
  static void CodeCreateEvent(Logger::LogEventsAndTags tag,
                              Code* code, int args_count);
  static void CodeMovingGCEvent() {}
  static void CodeMoveEvent(Address from, Address to);
  static void CodeDeleteEvent(Address from);
  static void GetterCallbackEvent(String* name, Address entry_point);
  static void RegExpCodeCreateEvent(Code* code, String* source);
  static void SetterCallbackEvent(String* name, Address entry_point);
  static void SharedFunctionInfoMoveEvent(Address from, Address to);

  // TODO(isolates): this doesn't have to use atomics anymore.
  static INLINE(bool is_profiling(Isolate* isolate)) {
    CpuProfiler* profiler = isolate->cpu_profiler();
    return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_);
  }

 private:
  CpuProfiler();
  ~CpuProfiler();
  void StartCollectingProfile(const char* title);
  void StartCollectingProfile(String* title);
  void StartProcessorIfNotStarted();
  CpuProfile* StopCollectingProfile(const char* title);
  CpuProfile* StopCollectingProfile(Object* security_token, String* title);
  void StopProcessorIfLastProfile(const char* title);
  void StopProcessor();
  void ResetProfiles();

  CpuProfilesCollection* profiles_;
  unsigned next_profile_uid_;
  TokenEnumerator* token_enumerator_;
  ProfileGenerator* generator_;
  ProfilerEventsProcessor* processor_;
  int saved_logging_nesting_;
  bool need_to_stop_sampler_;
  Atomic32 is_profiling_;

  DISALLOW_COPY_AND_ASSIGN(CpuProfiler);
};

} }  // namespace v8::internal


#endif  // V8_CPU_PROFILER_H_