// Copyright 2015 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "base/trace_event/malloc_dump_provider.h"

#include <stddef.h>

#include <unordered_map>

#include "base/allocator/allocator_extension.h"
#include "base/allocator/allocator_shim.h"
#include "base/allocator/features.h"
#include "base/debug/profiler.h"
#include "base/trace_event/heap_profiler_allocation_context.h"
#include "base/trace_event/heap_profiler_allocation_context_tracker.h"
#include "base/trace_event/heap_profiler_heap_dump_writer.h"
#include "base/trace_event/process_memory_dump.h"
#include "base/trace_event/trace_event_argument.h"
#include "build/build_config.h"

#if defined(OS_MACOSX)
#include <malloc/malloc.h>
#else
#include <malloc.h>
#endif
#if defined(OS_WIN)
#include <windows.h>
#endif

namespace base {
namespace trace_event {

namespace {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)

using allocator::AllocatorDispatch;

void* HookAlloc(const AllocatorDispatch* self, size_t size, void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_function(next, size, context);
  if (ptr)
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
  return ptr;
}

void* HookZeroInitAlloc(const AllocatorDispatch* self,
                        size_t n,
                        size_t size,
                        void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_zero_initialized_function(next, n, size, context);
  if (ptr)
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, n * size);
  return ptr;
}

void* HookAllocAligned(const AllocatorDispatch* self,
                       size_t alignment,
                       size_t size,
                       void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->alloc_aligned_function(next, alignment, size, context);
  if (ptr)
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
  return ptr;
}

void* HookRealloc(const AllocatorDispatch* self,
                  void* address,
                  size_t size,
                  void* context) {
  const AllocatorDispatch* const next = self->next;
  void* ptr = next->realloc_function(next, address, size, context);
  MallocDumpProvider::GetInstance()->RemoveAllocation(address);
  if (size > 0)  // realloc(size == 0) means free().
    MallocDumpProvider::GetInstance()->InsertAllocation(ptr, size);
  return ptr;
}

void HookFree(const AllocatorDispatch* self, void* address, void* context) {
  if (address)
    MallocDumpProvider::GetInstance()->RemoveAllocation(address);
  const AllocatorDispatch* const next = self->next;
  next->free_function(next, address, context);
}

size_t HookGetSizeEstimate(const AllocatorDispatch* self,
                           void* address,
                           void* context) {
  const AllocatorDispatch* const next = self->next;
  return next->get_size_estimate_function(next, address, context);
}

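// Batch variants: each pointer in the batch is inserted into, or removed from,
// the allocation register individually.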
unsigned HookBatchMalloc(const AllocatorDispatch* self,
                         size_t size,
                         void** results,
                         unsigned num_requested,
                         void* context) {
  const AllocatorDispatch* const next = self->next;
  unsigned count =
      next->batch_malloc_function(next, size, results, num_requested, context);
  for (unsigned i = 0; i < count; ++i) {
    MallocDumpProvider::GetInstance()->InsertAllocation(results[i], size);
  }
  return count;
}

void HookBatchFree(const AllocatorDispatch* self,
                   void** to_be_freed,
                   unsigned num_to_be_freed,
                   void* context) {
  const AllocatorDispatch* const next = self->next;
  for (unsigned i = 0; i < num_to_be_freed; ++i) {
    MallocDumpProvider::GetInstance()->RemoveAllocation(to_be_freed[i]);
  }
  next->batch_free_function(next, to_be_freed, num_to_be_freed, context);
}

void HookFreeDefiniteSize(const AllocatorDispatch* self,
                          void* ptr,
                          size_t size,
                          void* context) {
  if (ptr)
    MallocDumpProvider::GetInstance()->RemoveAllocation(ptr);
  const AllocatorDispatch* const next = self->next;
  next->free_definite_size_function(next, ptr, size, context);
}

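// Dispatch table installed on the allocator shim chain when heap profiling is
// enabled (see OnHeapProfilingEnabled() below).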
AllocatorDispatch g_allocator_hooks = {
    &HookAlloc,            /* alloc_function */
    &HookZeroInitAlloc,    /* alloc_zero_initialized_function */
    &HookAllocAligned,     /* alloc_aligned_function */
    &HookRealloc,          /* realloc_function */
    &HookFree,             /* free_function */
    &HookGetSizeEstimate,  /* get_size_estimate_function */
    &HookBatchMalloc,      /* batch_malloc_function */
    &HookBatchFree,        /* batch_free_function */
    &HookFreeDefiniteSize, /* free_definite_size_function */
    nullptr,               /* next */
};
#endif  // BUILDFLAG(USE_ALLOCATOR_SHIM)

#if defined(OS_WIN)
// A structure containing some information about a given heap.
struct WinHeapInfo {
  size_t committed_size;
  size_t uncommitted_size;
  size_t allocated_size;
  size_t block_count;
};

// NOTE: crbug.com/665516
// Unfortunately, there is no safe way to collect information from secondary
// heaps due to the limitations and racy nature of this part of the WinAPI.
void WinHeapMemoryDumpImpl(WinHeapInfo* crt_heap_info) {
#if defined(SYZYASAN)
  if (base::debug::IsBinaryInstrumented())
    return;
#endif

  // Iterate through whichever heap our CRT is using.
  HANDLE crt_heap = reinterpret_cast<HANDLE>(_get_heap_handle());
  ::HeapLock(crt_heap);
  PROCESS_HEAP_ENTRY heap_entry;
  heap_entry.lpData = nullptr;
  // Walk over all the entries in the main heap.
  while (::HeapWalk(crt_heap, &heap_entry) != FALSE) {
    if ((heap_entry.wFlags & PROCESS_HEAP_ENTRY_BUSY) != 0) {
      crt_heap_info->allocated_size += heap_entry.cbData;
      crt_heap_info->block_count++;
    } else if ((heap_entry.wFlags & PROCESS_HEAP_REGION) != 0) {
      crt_heap_info->committed_size += heap_entry.Region.dwCommittedSize;
      crt_heap_info->uncommitted_size += heap_entry.Region.dwUnCommittedSize;
    }
  }
  CHECK(::HeapUnlock(crt_heap) == TRUE);
}
#endif  // defined(OS_WIN)
}  // namespace

// static
const char MallocDumpProvider::kAllocatedObjects[] = "malloc/allocated_objects";

// static
MallocDumpProvider* MallocDumpProvider::GetInstance() {
  return Singleton<MallocDumpProvider,
                   LeakySingletonTraits<MallocDumpProvider>>::get();
}

MallocDumpProvider::MallocDumpProvider()
    : tid_dumping_heap_(kInvalidThreadId) {}

MallocDumpProvider::~MallocDumpProvider() {}

// Called at trace dump point time. Creates a snapshot of the memory counters
// for the current process.
bool MallocDumpProvider::OnMemoryDump(const MemoryDumpArgs& args,
                                      ProcessMemoryDump* pmd) {
  {
    base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
    if (!emit_metrics_on_memory_dump_)
      return true;
  }

  size_t total_virtual_size = 0;
  size_t resident_size = 0;
  size_t allocated_objects_size = 0;
  size_t allocated_objects_count = 0;
#if defined(USE_TCMALLOC)
  bool res =
      allocator::GetNumericProperty("generic.heap_size", &total_virtual_size);
  DCHECK(res);
  res = allocator::GetNumericProperty("generic.total_physical_bytes",
                                      &resident_size);
  DCHECK(res);
  res = allocator::GetNumericProperty("generic.current_allocated_bytes",
                                      &allocated_objects_size);
  DCHECK(res);
#elif defined(OS_MACOSX) || defined(OS_IOS)
  malloc_statistics_t stats = {0};
  malloc_zone_statistics(nullptr, &stats);
  total_virtual_size = stats.size_allocated;
  allocated_objects_size = stats.size_in_use;

  // Resident size is approximated pretty well by stats.max_size_in_use.
  // However, on macOS, freed blocks are both resident and reusable, which is
  // semantically equivalent to deallocated. The implementation of libmalloc
  // will also only hold a fixed number of freed regions before actually
  // starting to deallocate them, so stats.max_size_in_use is also not
  // representative of the peak size. As a result, stats.max_size_in_use is
  // typically somewhere between actually resident [non-reusable] pages, and
  // peak size. This is not very useful, so we just use stats.size_in_use for
  // resident_size, even though it's an underestimate and fails to account for
  // fragmentation. See
  // https://bugs.chromium.org/p/chromium/issues/detail?id=695263#c1.
  resident_size = stats.size_in_use;
#elif defined(OS_WIN)
  // This is too expensive on Windows, crbug.com/780735.
  if (args.level_of_detail == MemoryDumpLevelOfDetail::DETAILED) {
    WinHeapInfo main_heap_info = {};
    WinHeapMemoryDumpImpl(&main_heap_info);
    total_virtual_size =
        main_heap_info.committed_size + main_heap_info.uncommitted_size;
    // Resident size is approximated with the committed heap size. Note that it
    // is possible to do this with better accuracy on Windows by intersecting
    // the working set with the virtual memory ranges occupied by the heap.
    // It's not clear that this is worth it, as it's fairly expensive to do.
    resident_size = main_heap_info.committed_size;
    allocated_objects_size = main_heap_info.allocated_size;
    allocated_objects_count = main_heap_info.block_count;
  }
#elif defined(OS_FUCHSIA)
// TODO(fuchsia): Port, see https://crbug.com/706592.
#else
  struct mallinfo info = mallinfo();
  DCHECK_GE(info.arena + info.hblkhd, info.uordblks);

  // In the case of Android's jemalloc, |arena| is 0 and the outer pages size
  // is reported by |hblkhd|. In the case of dlmalloc, the total is given by
  // |arena| + |hblkhd|. For more details see link: http://goo.gl/fMR8lF.
  total_virtual_size = info.arena + info.hblkhd;
  resident_size = info.uordblks;

  // Total allocated space is given by |uordblks|.
  allocated_objects_size = info.uordblks;
#endif

  MemoryAllocatorDump* outer_dump = pmd->CreateAllocatorDump("malloc");
  outer_dump->AddScalar("virtual_size", MemoryAllocatorDump::kUnitsBytes,
                        total_virtual_size);
  outer_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes, resident_size);

  MemoryAllocatorDump* inner_dump = pmd->CreateAllocatorDump(kAllocatedObjects);
  inner_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                        MemoryAllocatorDump::kUnitsBytes,
                        allocated_objects_size);
  if (allocated_objects_count != 0) {
    inner_dump->AddScalar(MemoryAllocatorDump::kNameObjectCount,
                          MemoryAllocatorDump::kUnitsObjects,
                          allocated_objects_count);
  }

  if (resident_size > allocated_objects_size) {
    // Explicitly specify why extra memory is resident. In tcmalloc it accounts
    // for free lists and caches. On Mac and iOS it accounts for fragmentation
    // and metadata.
    MemoryAllocatorDump* other_dump =
        pmd->CreateAllocatorDump("malloc/metadata_fragmentation_caches");
    other_dump->AddScalar(MemoryAllocatorDump::kNameSize,
                          MemoryAllocatorDump::kUnitsBytes,
                          resident_size - allocated_objects_size);
  }

  // Heap profiler dumps.
  if (!allocation_register_.is_enabled())
    return true;

  tid_dumping_heap_ = PlatformThread::CurrentId();
  // At this point the Insert/RemoveAllocation hooks will ignore this thread.
  // Enclosing all the temporary data structures in a scope, so that the heap
  // profiler does not see unbalanced malloc/free calls from these containers.
  {
    TraceEventMemoryOverhead overhead;
    std::unordered_map<AllocationContext, AllocationMetrics> metrics_by_context;
    if (AllocationContextTracker::capture_mode() !=
        AllocationContextTracker::CaptureMode::DISABLED) {
      ShardedAllocationRegister::OutputMetrics shim_metrics =
          allocation_register_.UpdateAndReturnsMetrics(metrics_by_context);

      // Aggregate data for objects allocated through the shim.
      inner_dump->AddScalar("shim_allocated_objects_size",
                            MemoryAllocatorDump::kUnitsBytes,
                            shim_metrics.size);
      inner_dump->AddScalar("shim_allocator_object_count",
                            MemoryAllocatorDump::kUnitsObjects,
                            shim_metrics.count);
    }
    allocation_register_.EstimateTraceMemoryOverhead(&overhead);

    pmd->DumpHeapUsage(metrics_by_context, overhead, "malloc");
  }
  tid_dumping_heap_ = kInvalidThreadId;

  return true;
}

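// Enables or disables allocation recording. The shim hooks above are chained
// into the allocator on enable; they are not unhooked on disable, but the
// register stops recording once it is disabled.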
void MallocDumpProvider::OnHeapProfilingEnabled(bool enabled) {
#if BUILDFLAG(USE_ALLOCATOR_SHIM)
  if (enabled) {
    allocation_register_.SetEnabled();
    allocator::InsertAllocatorDispatch(&g_allocator_hooks);
  } else {
    allocation_register_.SetDisabled();
  }
#endif
}

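// Records a live allocation in |allocation_register_|. Called from the shim
// hooks above, so it guards against re-entrancy and skips the thread that is
// currently dumping the heap.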
void MallocDumpProvider::InsertAllocation(void* address, size_t size) {
  // CurrentId() can be a slow operation (crbug.com/497226). This apparently
  // redundant condition short circuits the CurrentId() calls when unnecessary.
  if (tid_dumping_heap_ != kInvalidThreadId &&
      tid_dumping_heap_ == PlatformThread::CurrentId())
    return;

  // AllocationContextTracker will return nullptr when called re-entrantly.
  // This is the case of GetInstanceForCurrentThread() being called for the
  // first time, which causes a new() inside the tracker which re-enters the
  // heap profiler, in which case we just want to early out.
  auto* tracker = AllocationContextTracker::GetInstanceForCurrentThread();
  if (!tracker)
    return;

  AllocationContext context;
  if (!tracker->GetContextSnapshot(&context))
    return;

  if (!allocation_register_.is_enabled())
    return;

  allocation_register_.Insert(address, size, context);
}

void MallocDumpProvider::RemoveAllocation(void* address) {
  // No re-entrancy is expected here, as none of the calls below should
  // cause a free() (|allocation_register_| does its own heap management).
  if (tid_dumping_heap_ != kInvalidThreadId &&
      tid_dumping_heap_ == PlatformThread::CurrentId())
    return;
  if (!allocation_register_.is_enabled())
    return;
  allocation_register_.Remove(address);
}

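// EnableMetrics()/DisableMetrics() control whether OnMemoryDump() emits the
// malloc counters; when disabled, OnMemoryDump() returns early without
// creating any dumps.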
void MallocDumpProvider::EnableMetrics() {
  base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
  emit_metrics_on_memory_dump_ = true;
}

void MallocDumpProvider::DisableMetrics() {
  base::AutoLock auto_lock(emit_metrics_on_memory_dump_lock_);
  emit_metrics_on_memory_dump_ = false;
}

}  // namespace trace_event
}  // namespace base