Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 1 | // Copyright 2010 the V8 project authors. All rights reserved. |
| 2 | // Redistribution and use in source and binary forms, with or without |
| 3 | // modification, are permitted provided that the following conditions are |
| 4 | // met: |
| 5 | // |
| 6 | // * Redistributions of source code must retain the above copyright |
| 7 | // notice, this list of conditions and the following disclaimer. |
| 8 | // * Redistributions in binary form must reproduce the above |
| 9 | // copyright notice, this list of conditions and the following |
| 10 | // disclaimer in the documentation and/or other materials provided |
| 11 | // with the distribution. |
| 12 | // * Neither the name of Google Inc. nor the names of its |
| 13 | // contributors may be used to endorse or promote products derived |
| 14 | // from this software without specific prior written permission. |
| 15 | // |
| 16 | // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 | // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 | // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 | // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 | // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 | // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 | // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 | // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 | // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 | // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 | // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | |
| 28 | #ifndef V8_CPU_PROFILER_H_ |
| 29 | #define V8_CPU_PROFILER_H_ |
| 30 | |
Ben Murdoch | 257744e | 2011-11-30 15:57:28 +0000 | [diff] [blame] | 31 | #include "allocation.h" |
Ben Murdoch | b0fe162 | 2011-05-05 13:52:32 +0100 | [diff] [blame] | 32 | #include "atomicops.h" |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 33 | #include "circular-queue.h" |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 34 | #include "unbound-queue.h" |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 35 | |
| 36 | namespace v8 { |
| 37 | namespace internal { |
| 38 | |
| 39 | // Forward declarations. |
| 40 | class CodeEntry; |
| 41 | class CodeMap; |
| 42 | class CpuProfile; |
| 43 | class CpuProfilesCollection; |
Kristian Monsen | 0d5e116 | 2010-09-30 15:31:59 +0100 | [diff] [blame] | 44 | class HashMap; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 45 | class ProfileGenerator; |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 46 | class TokenEnumerator; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 47 | |
// X-macro list of code event kinds: V(enum constant, record class).
// Expanded with different definitions of V to generate
// CodeEventRecord::Type and the CodeEventsContainer union members.
#define CODE_EVENTS_TYPE_LIST(V)                                   \
    V(CODE_CREATION,    CodeCreateEventRecord)                     \
    V(CODE_MOVE,        CodeMoveEventRecord)                       \
    V(SHARED_FUNC_MOVE, SharedFunctionInfoMoveEventRecord)
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 52 | |
| 53 | |
// Base class for code event records passed from VM threads to the
// profiler events processor thread. Derived records are stored in a
// CodeEventsContainer union; 'type' discriminates which one is held.
class CodeEventRecord {
 public:
#define DECLARE_TYPE(type, ignore) type,
  enum Type {
    NONE = 0,
    CODE_EVENTS_TYPE_LIST(DECLARE_TYPE)
    NUMBER_OF_TYPES
  };
#undef DECLARE_TYPE

  Type type;       // Discriminator: which derived record this really is.
  unsigned order;  // Enqueue sequence number, used to merge with tick samples.
};
| 67 | |
| 68 | |
// Records the creation of a code object: its address range and the
// profiler entry that describes it.
class CodeCreateEventRecord : public CodeEventRecord {
 public:
  Address start;     // Start address of the generated code.
  CodeEntry* entry;  // Profiler entry describing this code object.
  unsigned size;     // Code size in bytes.
  Address shared;    // Address of the associated SharedFunctionInfo
                     // (presumably unset for code without one — see callers).

  // Applies this event to the code map (registers the new code region).
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
| 78 | |
| 79 | |
// Records relocation of a code object by the garbage collector.
class CodeMoveEventRecord : public CodeEventRecord {
 public:
  Address from;  // Old start address of the code object.
  Address to;    // New start address after the move.

  // Applies this event to the code map (moves the code region).
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
| 87 | |
| 88 | |
// Records relocation of a SharedFunctionInfo object by the garbage
// collector, so code entries keyed by its address stay valid.
class SharedFunctionInfoMoveEventRecord : public CodeEventRecord {
 public:
  Address from;  // Old address of the SharedFunctionInfo.
  Address to;    // New address after the move.

  // Applies this event to the code map.
  INLINE(void UpdateCodeMap(CodeMap* code_map));
};
| 96 | |
| 97 | |
// A stack-sampling tick event. Unlike code events, these are written
// in place into the sampling circular queue's buffer (see
// ProfilerEventsProcessor::TickSampleEvent).
class TickSampleEventRecord {
 public:
  // The parameterless constructor is used when we dequeue data from
  // the ticks buffer.
  TickSampleEventRecord() { }
  explicit TickSampleEventRecord(unsigned order)
      : filler(1),
        order(order) {
    // filler == 1 must never collide with the queue's "empty slot" marker.
    ASSERT(filler != SamplingCircularQueue::kClear);
  }

  // The first machine word of a TickSampleEventRecord must not ever
  // become equal to SamplingCircularQueue::kClear. As both order and
  // TickSample's first field are not reliable in this sense (order
  // can overflow, TickSample can have all fields reset), we are
  // forced to use an artificial filler field.
  int filler;
  unsigned order;      // Sequence number for merging with code events.
  TickSample sample;   // The captured stack sample.

  // Reinterprets a raw circular-queue slot pointer as a record.
  static TickSampleEventRecord* cast(void* value) {
    return reinterpret_cast<TickSampleEventRecord*>(value);
  }
};
| 122 | |
| 123 | |
| 124 | // This class implements both the profile events processor thread and |
| 125 | // methods called by event producers: VM and stack sampler threads. |
| 126 | class ProfilerEventsProcessor : public Thread { |
| 127 | public: |
Ben Murdoch | 3fb3ca8 | 2011-12-02 17:19:32 +0000 | [diff] [blame] | 128 | explicit ProfilerEventsProcessor(ProfileGenerator* generator); |
Ben Murdoch | e0cee9b | 2011-05-25 10:26:03 +0100 | [diff] [blame] | 129 | virtual ~ProfilerEventsProcessor() {} |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 130 | |
| 131 | // Thread control. |
| 132 | virtual void Run(); |
| 133 | inline void Stop() { running_ = false; } |
| 134 | INLINE(bool running()) { return running_; } |
| 135 | |
| 136 | // Events adding methods. Called by VM threads. |
| 137 | void CallbackCreateEvent(Logger::LogEventsAndTags tag, |
| 138 | const char* prefix, String* name, |
| 139 | Address start); |
| 140 | void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 141 | String* name, |
| 142 | String* resource_name, int line_number, |
Ben Murdoch | e0cee9b | 2011-05-25 10:26:03 +0100 | [diff] [blame] | 143 | Address start, unsigned size, |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 144 | Address shared); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 145 | void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 146 | const char* name, |
| 147 | Address start, unsigned size); |
| 148 | void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 149 | int args_count, |
| 150 | Address start, unsigned size); |
| 151 | void CodeMoveEvent(Address from, Address to); |
| 152 | void CodeDeleteEvent(Address from); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 153 | void SharedFunctionInfoMoveEvent(Address from, Address to); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 154 | void RegExpCodeCreateEvent(Logger::LogEventsAndTags tag, |
| 155 | const char* prefix, String* name, |
| 156 | Address start, unsigned size); |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 157 | // Puts current stack into tick sample events buffer. |
| 158 | void AddCurrentStack(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 159 | |
| 160 | // Tick sample events are filled directly in the buffer of the circular |
| 161 | // queue (because the structure is of fixed width, but usually not all |
| 162 | // stack frame entries are filled.) This method returns a pointer to the |
| 163 | // next record of the buffer. |
| 164 | INLINE(TickSample* TickSampleEvent()); |
| 165 | |
| 166 | private: |
| 167 | union CodeEventsContainer { |
| 168 | CodeEventRecord generic; |
| 169 | #define DECLARE_CLASS(ignore, type) type type##_; |
| 170 | CODE_EVENTS_TYPE_LIST(DECLARE_CLASS) |
| 171 | #undef DECLARE_TYPE |
| 172 | }; |
| 173 | |
| 174 | // Called from events processing thread (Run() method.) |
| 175 | bool ProcessCodeEvent(unsigned* dequeue_order); |
| 176 | bool ProcessTicks(unsigned dequeue_order); |
| 177 | |
| 178 | INLINE(static bool FilterOutCodeCreateEvent(Logger::LogEventsAndTags tag)); |
| 179 | |
| 180 | ProfileGenerator* generator_; |
| 181 | bool running_; |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 182 | UnboundQueue<CodeEventsContainer> events_buffer_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 183 | SamplingCircularQueue ticks_buffer_; |
Ben Murdoch | 7f4d5bd | 2010-06-15 11:15:29 +0100 | [diff] [blame] | 184 | UnboundQueue<TickSampleEventRecord> ticks_from_vm_buffer_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 185 | unsigned enqueue_order_; |
| 186 | }; |
| 187 | |
| 188 | } } // namespace v8::internal |
| 189 | |
| 190 | |
// Logs the call and, when CPU profiling is active, forwards it to the
// profiler. The entire expansion is a single do/while(false) statement
// so the macro is safe as the body of an unbraced if/else. (Previously
// 'LOG(isolate, Call);' was emitted before the do/while, so in
// 'if (cond) PROFILE(...);' the profiler half ran unconditionally.)
#define PROFILE(isolate, Call)                                \
  do {                                                        \
    LOG(isolate, Call);                                       \
    if (v8::internal::CpuProfiler::is_profiling(isolate)) {   \
      v8::internal::CpuProfiler::Call;                        \
    }                                                         \
  } while (false)
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 198 | |
| 199 | |
| 200 | namespace v8 { |
| 201 | namespace internal { |
| 202 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 203 | |
| 204 | // TODO(isolates): isolatify this class. |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 205 | class CpuProfiler { |
| 206 | public: |
| 207 | static void Setup(); |
| 208 | static void TearDown(); |
| 209 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 210 | static void StartProfiling(const char* title); |
| 211 | static void StartProfiling(String* title); |
| 212 | static CpuProfile* StopProfiling(const char* title); |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 213 | static CpuProfile* StopProfiling(Object* security_token, String* title); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 214 | static int GetProfilesCount(); |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 215 | static CpuProfile* GetProfile(Object* security_token, int index); |
| 216 | static CpuProfile* FindProfile(Object* security_token, unsigned uid); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 217 | static void DeleteAllProfiles(); |
| 218 | static void DeleteProfile(CpuProfile* profile); |
| 219 | static bool HasDetachedProfiles(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 220 | |
| 221 | // Invoked from stack sampler (thread or signal handler.) |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 222 | static TickSample* TickSampleEvent(Isolate* isolate); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 223 | |
| 224 | // Must be called via PROFILE macro, otherwise will crash when |
| 225 | // profiling is not enabled. |
| 226 | static void CallbackEvent(String* name, Address entry_point); |
| 227 | static void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 228 | Code* code, const char* comment); |
| 229 | static void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 230 | Code* code, String* name); |
| 231 | static void CodeCreateEvent(Logger::LogEventsAndTags tag, |
Ben Murdoch | e0cee9b | 2011-05-25 10:26:03 +0100 | [diff] [blame] | 232 | Code* code, |
| 233 | SharedFunctionInfo *shared, |
| 234 | String* name); |
| 235 | static void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 236 | Code* code, |
| 237 | SharedFunctionInfo *shared, |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 238 | String* source, int line); |
| 239 | static void CodeCreateEvent(Logger::LogEventsAndTags tag, |
| 240 | Code* code, int args_count); |
Ben Murdoch | f87a203 | 2010-10-22 12:50:53 +0100 | [diff] [blame] | 241 | static void CodeMovingGCEvent() {} |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 242 | static void CodeMoveEvent(Address from, Address to); |
| 243 | static void CodeDeleteEvent(Address from); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 244 | static void GetterCallbackEvent(String* name, Address entry_point); |
| 245 | static void RegExpCodeCreateEvent(Code* code, String* source); |
| 246 | static void SetterCallbackEvent(String* name, Address entry_point); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 247 | static void SharedFunctionInfoMoveEvent(Address from, Address to); |
| 248 | |
| 249 | // TODO(isolates): this doesn't have to use atomics anymore. |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 250 | |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 251 | static INLINE(bool is_profiling(Isolate* isolate)) { |
| 252 | CpuProfiler* profiler = isolate->cpu_profiler(); |
| 253 | return profiler != NULL && NoBarrier_Load(&profiler->is_profiling_); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 254 | } |
| 255 | |
| 256 | private: |
| 257 | CpuProfiler(); |
| 258 | ~CpuProfiler(); |
| 259 | void StartCollectingProfile(const char* title); |
| 260 | void StartCollectingProfile(String* title); |
| 261 | void StartProcessorIfNotStarted(); |
| 262 | CpuProfile* StopCollectingProfile(const char* title); |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 263 | CpuProfile* StopCollectingProfile(Object* security_token, String* title); |
Iain Merrick | 7568138 | 2010-08-19 15:07:18 +0100 | [diff] [blame] | 264 | void StopProcessorIfLastProfile(const char* title); |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 265 | void StopProcessor(); |
| 266 | void ResetProfiles(); |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 267 | |
| 268 | CpuProfilesCollection* profiles_; |
| 269 | unsigned next_profile_uid_; |
Leon Clarke | f7060e2 | 2010-06-03 12:02:55 +0100 | [diff] [blame] | 270 | TokenEnumerator* token_enumerator_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 271 | ProfileGenerator* generator_; |
| 272 | ProfilerEventsProcessor* processor_; |
| 273 | int saved_logging_nesting_; |
Steve Block | 44f0eee | 2011-05-26 01:26:41 +0100 | [diff] [blame] | 274 | bool need_to_stop_sampler_; |
| 275 | Atomic32 is_profiling_; |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 276 | |
Steve Block | 6ded16b | 2010-05-10 14:33:55 +0100 | [diff] [blame] | 277 | private: |
| 278 | DISALLOW_COPY_AND_ASSIGN(CpuProfiler); |
| 279 | }; |
| 280 | |
| 281 | } } // namespace v8::internal |
| 282 | |
| 283 | |
| 284 | #endif // V8_CPU_PROFILER_H_ |