blob: 3a4017251abf94a6d5421e1c0a90dc715c14bf14 [file] [log] [blame]
ssid07386852015-04-14 15:32:371// Copyright 2015 The Chromium Authors. All rights reserved.
2// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
4
5#include "base/trace_event/malloc_dump_provider.h"
6
avibd1ed052015-12-24 04:03:447#include <stddef.h>
8
brettw1ce49f62017-04-27 19:42:329#include <unordered_map>
10
avibd1ed052015-12-24 04:03:4411#include "base/allocator/allocator_extension.h"
primianofd9072162016-03-25 02:13:2812#include "base/allocator/allocator_shim.h"
13#include "base/allocator/features.h"
siggiba33ec02016-08-26 16:13:0714#include "base/debug/profiler.h"
primianofd9072162016-03-25 02:13:2815#include "base/trace_event/heap_profiler_allocation_context.h"
16#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
17#include "base/trace_event/heap_profiler_allocation_register.h"
18#include "base/trace_event/heap_profiler_heap_dump_writer.h"
avibd1ed052015-12-24 04:03:4419#include "base/trace_event/process_memory_dump.h"
primianofd9072162016-03-25 02:13:2820#include "base/trace_event/trace_event_argument.h"
avibd1ed052015-12-24 04:03:4421#include "build/build_config.h"
22
ssid3aa02fe2015-11-07 16:15:0723#if defined(OS_MACOSX)
24#include <malloc/malloc.h>
25#else
ssid07386852015-04-14 15:32:3726#include <malloc.h>
ssid3aa02fe2015-11-07 16:15:0727#endif
siggi7bec59a2016-08-25 20:22:2628#if defined(OS_WIN)
29#include <windows.h>
30#endif
ssid07386852015-04-14 15:32:3731
ssid07386852015-04-14 15:32:3732namespace base {
33namespace trace_event {
34
primianofd9072162016-03-25 02:13:2835namespace {
siggi7bec59a2016-08-25 20:22:2636#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
primianofd9072162016-03-25 02:13:2837
38using allocator::AllocatorDispatch;
39
erikcheneff0ecb2017-02-20 13:04:5040void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
primianofd9072162016-03-25 02:13:2841 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:5042 void* ptr = next->alloc_function(next, size, context);
primianofd9072162016-03-25 02:13:2843 if (ptr)
44 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
45 return ptr;
46}
47
erikcheneff0ecb2017-02-20 13:04:5048void* HookZeroInitAlloc(const AllocatorDispatch* self,
49 size_t n,
50 size_t size,
51 void* context) {
primianofd9072162016-03-25 02:13:2852 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:5053 void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
primianofd9072162016-03-25 02:13:2854 if (ptr)
55 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
56 return ptr;
57}
58
etiennebdc2b22eb2017-03-21 17:11:4359void* HookAllocAligned(const AllocatorDispatch* self,
60 size_t alignment,
61 size_t size,
62 void* context) {
primianofd9072162016-03-25 02:13:2863 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:5064 void* ptr = next->alloc_aligned_function(next, alignment, size, context);
primianofd9072162016-03-25 02:13:2865 if (ptr)
66 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
67 return ptr;
68}
69
erikcheneff0ecb2017-02-20 13:04:5070void* HookRealloc(const AllocatorDispatch* self,
71 void* address,
72 size_t size,
73 void* context) {
primianofd9072162016-03-25 02:13:2874 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:5075 void* ptr = next->realloc_function(next, address, size, context);
primianofd9072162016-03-25 02:13:2876 MallocDumpProvider::GetInstance()->RemoveAllocation(address);
77 if (size > 0) // realloc(size == 0) means free().
78 MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
79 return ptr;
80}
81
erikcheneff0ecb2017-02-20 13:04:5082void HookFree(const AllocatorDispatch* self, void* address, void* context) {
primianofd9072162016-03-25 02:13:2883 if (address)
84 MallocDumpProvider::GetInstance()->RemoveAllocation(address);
85 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:5086 next->free_function(next, address, context);
primianofd9072162016-03-25 02:13:2887}
88
erikcheneff0ecb2017-02-20 13:04:5089size_t HookGetSizeEstimate(const AllocatorDispatch* self,
90 void* address,
91 void* context) {
siggi46e1b072016-09-09 16:43:3192 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:5093 return next->get_size_estimate_function(next, address, context);
siggi46e1b072016-09-09 16:43:3194}
95
erikchen0d0395a2017-02-02 06:16:2996unsigned HookBatchMalloc(const AllocatorDispatch* self,
97 size_t size,
98 void** results,
erikcheneff0ecb2017-02-20 13:04:5099 unsigned num_requested,
100 void* context) {
erikchen0d0395a2017-02-02 06:16:29101 const AllocatorDispatch* const next = self->next;
102 unsigned count =
erikcheneff0ecb2017-02-20 13:04:50103 next->batch_malloc_function(next, size, results, num_requested, context);
erikchen0d0395a2017-02-02 06:16:29104 for (unsigned i = 0; i < count; ++i) {
105 MallocDumpProvider::GetInstance()->InsertAllocation(results[i], size);
106 }
107 return count;
108}
109
110void HookBatchFree(const AllocatorDispatch* self,
111 void** to_be_freed,
erikcheneff0ecb2017-02-20 13:04:50112 unsigned num_to_be_freed,
113 void* context) {
erikchen0d0395a2017-02-02 06:16:29114 const AllocatorDispatch* const next = self->next;
115 for (unsigned i = 0; i < num_to_be_freed; ++i) {
116 MallocDumpProvider::GetInstance()->RemoveAllocation(to_be_freed[i]);
117 }
erikcheneff0ecb2017-02-20 13:04:50118 next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
erikchen0d0395a2017-02-02 06:16:29119}
120
121void HookFreeDefiniteSize(const AllocatorDispatch* self,
122 void* ptr,
erikcheneff0ecb2017-02-20 13:04:50123 size_t size,
124 void* context) {
erikchen0d0395a2017-02-02 06:16:29125 if (ptr)
126 MallocDumpProvider::GetInstance()->RemoveAllocation(ptr);
127 const AllocatorDispatch* const next = self->next;
erikcheneff0ecb2017-02-20 13:04:50128 next->free_definite_size_function(next, ptr, size, context);
erikchen0d0395a2017-02-02 06:16:29129}
130
// Dispatch table installed at the head of the allocator shim chain while heap
// profiling is enabled (see OnHeapProfilingEnabled()). Each hook forwards to
// the next dispatch and mirrors the (de)allocation into the dump provider's
// allocation register. NOTE: this is a positional aggregate initializer — the
// member order must match the AllocatorDispatch declaration exactly; |next|
// is filled in when the dispatch is inserted into the chain.
AllocatorDispatch g_allocator_hooks = {
    &HookAlloc,            /* alloc_function */
    &HookZeroInitAlloc,    /* alloc_zero_initialized_function */
    &HookAllocAligned,     /* alloc_aligned_function */
    &HookRealloc,          /* realloc_function */
    &HookFree,             /* free_function */
    &HookGetSizeEstimate,  /* get_size_estimate_function */
    &HookBatchMalloc,      /* batch_malloc_function */
    &HookBatchFree,        /* batch_free_function */
    &HookFreeDefiniteSize, /* free_definite_size_function */
    nullptr,               /* next */
};
primianofd9072162016-03-25 02:13:28143#endif // BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
144
siggi7bec59a2016-08-25 20:22:26145#if defined(OS_WIN)
// A structure containing some information about a given heap, accumulated by
// WinHeapMemoryDumpImpl(). Fields carry default member initializers so the
// "+=" accumulation always starts from zero, even if a caller omits the
// "= {}" initializer (previously the fields were left uninitialized and
// correctness silently depended on the call site).
struct WinHeapInfo {
  size_t committed_size = 0;    // Bytes committed for the heap's regions.
  size_t uncommitted_size = 0;  // Reserved-but-not-committed bytes.
  size_t allocated_size = 0;    // Total bytes in busy (allocated) blocks.
  size_t block_count = 0;       // Number of busy blocks encountered.
};
153
// NOTE: crbug.com/665516
// Unfortunately, there is no safe way to collect information from secondary
// heaps due to limitations and racy nature of this piece of WinAPI.
// Walks the CRT heap and accumulates its statistics into |crt_heap_info|.
// The caller must zero-initialize |crt_heap_info|: this function only adds.
void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
#if defined(SYZYASAN)
  // Under SyzyASan instrumentation the heap layout is not the plain Win32
  // heap, so walking it is skipped and nothing is reported.
  if (base::debug::IsBinaryInstrumented())
    return;
#endif

  // Iterate through whichever heap our CRT is using.
  HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
  // Per the HeapWalk() documentation the heap must be locked while walking,
  // which also blocks other threads from mutating it mid-walk.
  ::HeapLock(crt_heap);
  PROCESS_HEAP_ENTRY heap_entry;
  heap_entry.lpData = nullptr;  // nullptr starts the walk at the first entry.
  // Walk over all the entries in the main heap.
  while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
    if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
      // Busy entry == an allocated block; cbData is its size in bytes.
      crt_heap_info->allocated_size += heap_entry.cbData;
      crt_heap_info->block_count++;
    } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
      // Region entry describes the virtual memory backing the heap.
      crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
      crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
    }
  }
  // Failing to unlock would deadlock every allocation on this heap.
  CHECK(::HeapUnlock(crt_heap) == TRUE);
}
180#endif // defined(OS_WIN)
181} // namespace
182
// static
// Name of the allocator dump holding the allocated-objects counters emitted
// by OnMemoryDump().
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";
185
186// static
ssid07386852015-04-14 15:32:37187MallocDumpProvider* MallocDumpProvider::GetInstance() {
188 return Singleton<MallocDumpProvider,
189 LeakySingletonTraits<MallocDumpProvider>>::get();
190}
191
// Heap profiling starts disabled (flipped by OnHeapProfilingEnabled());
// tid_dumping_heap_ is only ever set while OnMemoryDump() is in progress.
MallocDumpProvider::MallocDumpProvider()
    : heap_profiler_enabled_(false), tid_dumping_heap_(kInvalidThreadId) {}
ssid07386852015-04-14 15:32:37194
ssid3aa02fe2015-11-07 16:15:07195MallocDumpProvider::~MallocDumpProvider() {}
ssid07386852015-04-14 15:32:37196
// Called at trace dump point time. Creates a snapshot of the memory counters
// for the current process and, when heap profiling is enabled, also dumps the
// per-context allocation metrics collected by the allocation register.
// Returns true on success (required by the MemoryDumpProvider contract).
bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                      ProcessMemoryDump* pmd) {
  size_t total_virtual_size = 0;
  size_t resident_size = 0;
  size_t allocated_objects_size = 0;
  // Only the Windows heap walk produces an object count; other platforms
  // leave this at 0 and the count dump below is skipped.
  size_t allocated_objects_count = 0;
#if defined(USE_TCMALLOC)
  // tcmalloc exposes its counters through named numeric properties.
  bool res =
      allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
  DCHECK(res);
  res = allocator::GetNumericProperty("generic.total_physical_bytes",
                                      &resident_size);
  DCHECK(res);
  res = allocator::GetNumericProperty("generic.current_allocated_bytes",
                                      &allocated_objects_size);
  DCHECK(res);
#elif defined(OS_MACOSX) || defined(OS_IOS)
  // Passing nullptr queries statistics aggregated over all malloc zones.
  malloc_statistics_t stats = {0};
  malloc_zone_statistics(nullptr, &stats);
  total_virtual_size = stats.size_allocated;
  allocated_objects_size = stats.size_in_use;

  // Resident size is approximated pretty well by stats.max_size_in_use.
  // However, on macOS, freed blocks are both resident and reusable, which is
  // semantically equivalent to deallocated. The implementation of libmalloc
  // will also only hold a fixed number of freed regions before actually
  // starting to deallocate them, so stats.max_size_in_use is also not
  // representative of the peak size. As a result, stats.max_size_in_use is
  // typically somewhere between actually resident [non-reusable] pages, and
  // peak size. This is not very useful, so we just use stats.size_in_use for
  // resident_size, even though it's an underestimate and fails to account for
  // fragmentation. See
  // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
  resident_size = stats.size_in_use;
#elif defined(OS_WIN)
  WinHeapInfo main_heap_info = {};
  WinHeapMemoryDumpImpl(&main_heap_info);
  total_virtual_size =
      main_heap_info.committed_size + main_heap_info.uncommitted_size;
  // Resident size is approximated with committed heap size. Note that it is
  // possible to do this with better accuracy on windows by intersecting the
  // working set with the virtual memory ranges occuipied by the heap. It's not
  // clear that this is worth it, as it's fairly expensive to do.
  resident_size = main_heap_info.committed_size;
  allocated_objects_size = main_heap_info.allocated_size;
  allocated_objects_count = main_heap_info.block_count;
#else
  struct mallinfo info = mallinfo();
  DCHECK_GE(info.arena + info.hblkhd, info.uordblks);

  // In case of Android's jemalloc |arena| is 0 and the outer pages size is
  // reported by |hblkhd|. In case of dlmalloc the total is given by
  // |arena| + |hblkhd|. For more details see link: http://goo.gl/fMR8lF.
  total_virtual_size = info.arena + info.hblkhd;
  resident_size = info.uordblks;

  // Total allocated space is given by |uordblks|.
  allocated_objects_size = info.uordblks;
#endif

  // "malloc" dump: whole-allocator virtual and resident footprint.
  MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
  outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
                        total_virtual_size);
  outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes, resident_size);

  // "malloc/allocated_objects" dump: bytes (and, when known, count) of live
  // allocations.
  MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
  inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes,
                        allocated_objects_size);
  if (allocated_objects_count != 0) {
    inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                          MemoryAllocatorDump::kUnitsObjects,
                          allocated_objects_count);
  }

  if (resident_size > allocated_objects_size) {
    // Explicitly specify why is extra memory resident. In tcmalloc it accounts
    // for free lists and caches. In mac and ios it accounts for the
    // fragmentation and metadata.
    MemoryAllocatorDump* other_dump =
        pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
    other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                          MemoryAllocatorDump::kUnitsBytes,
                          resident_size - allocated_objects_size);
  }

  // Heap profiler dumps.
  if (!heap_profiler_enabled_)
    return true;

  // The dumps of the heap profiler should be created only when heap profiling
  // was enabled (--enable-heap-profiling) AND a DETAILED dump is requested.
  // However, when enabled, the overhead of the heap profiler should be always
  // reported to avoid oscillations of the malloc total in LIGHT dumps.

  tid_dumping_heap_ = PlatformThread::CurrentId();
  // At this point the Insert/RemoveAllocation hooks will ignore this thread.
  // Enclosing all the temporary data structures in a scope, so that the heap
  // profiler does not see unbalanced malloc/free calls from these containers.
  {
    TraceEventMemoryOverhead overhead;
    std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
    {
      AutoLock lock(allocation_register_lock_);
      if (allocation_register_) {
        if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
          // Aggregate per-allocation records into per-context totals.
          for (const auto& alloc_size : *allocation_register_) {
            AllocationMetrics& metrics = metrics_by_context[alloc_size.context];
            metrics.size += alloc_size.size;
            metrics.count++;
          }
        }
        allocation_register_->EstimateTraceMemoryOverhead(&overhead);
      }
    }  // lock(allocation_register_lock_)
    pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
  }
  tid_dumping_heap_ = kInvalidThreadId;

  return true;
}
321
primianofd9072162016-03-25 02:13:28322void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
323#if BUILDFLAG(USE_EXPERIMENTAL_ALLOCATOR_SHIM)
324 if (enabled) {
325 {
326 AutoLock lock(allocation_register_lock_);
327 allocation_register_.reset(new AllocationRegister());
328 }
329 allocator::InsertAllocatorDispatch(&g_allocator_hooks);
330 } else {
331 AutoLock lock(allocation_register_lock_);
332 allocation_register_.reset();
333 // Insert/RemoveAllocation below will no-op if the register is torn down.
334 // Once disabled, heap profiling will not re-enabled anymore for the
335 // lifetime of the process.
336 }
337#endif
338 heap_profiler_enabled_ = enabled;
339}
340
341void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
342 // CurrentId() can be a slow operation (crbug.com/497226). This apparently
343 // redundant condition short circuits the CurrentID() calls when unnecessary.
344 if (tid_dumping_heap_ != kInvalidThreadId &&
345 tid_dumping_heap_ == PlatformThread::CurrentId())
346 return;
347
348 // AllocationContextTracker will return nullptr when called re-reentrantly.
349 // This is the case of GetInstanceForCurrentThread() being called for the
350 // first time, which causes a new() inside the tracker which re-enters the
351 // heap profiler, in which case we just want to early out.
vmpstr5170bf92016-06-29 02:15:58352 auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
primianofd9072162016-03-25 02:13:28353 if (!tracker)
354 return;
dskiba9ab14b22017-01-18 21:53:42355
356 AllocationContext context;
357 if (!tracker->GetContextSnapshot(&context))
358 return;
primianofd9072162016-03-25 02:13:28359
360 AutoLock lock(allocation_register_lock_);
361 if (!allocation_register_)
362 return;
363
364 allocation_register_->Insert(address, size, context);
365}
366
367void MallocDumpProvider::RemoveAllocation(void* address) {
368 // No re-entrancy is expected here as none of the calls below should
369 // cause a free()-s (|allocation_register_| does its own heap management).
370 if (tid_dumping_heap_ != kInvalidThreadId &&
371 tid_dumping_heap_ == PlatformThread::CurrentId())
372 return;
373 AutoLock lock(allocation_register_lock_);
374 if (!allocation_register_)
375 return;
376 allocation_register_->Remove(address);
377}
378
ssid07386852015-04-14 15:32:37379} // namespace trace_event
380} // namespace base