blob: 620fa6535a8904fc206743f190dd1f6d819ce699 [file] [log] [blame]
[email protected]c4488402012-01-11 01:05:491// Copyright (c) 2012 The Chromium Authors. All rights reserved.
[email protected]1bee3982009-12-17 23:15:282// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
[email protected]96449d2c2009-11-25 00:01:324
5// This file contains the implementation of the command buffer helper class.
6
[email protected]1df19862013-05-24 11:26:297#include "gpu/command_buffer/client/cmd_buffer_helper.h"
[email protected]cf1aa982013-11-05 21:49:378
avif15d60a2015-12-21 17:06:339#include <stdint.h>
10
martina.kollarovac640df12015-07-10 08:30:3811#include <algorithm>
[email protected]cf1aa982013-11-05 21:49:3712#include "base/logging.h"
reveman0cf65ee82015-08-25 22:15:2413#include "base/strings/stringprintf.h"
gabb23705312016-05-11 18:44:5614#include "base/threading/thread_task_runner_handle.h"
[email protected]da618e22014-07-11 09:27:1615#include "base/time/time.h"
reveman0cf65ee82015-08-25 22:15:2416#include "base/trace_event/memory_allocator_dump.h"
17#include "base/trace_event/memory_dump_manager.h"
18#include "base/trace_event/process_memory_dump.h"
martina.kollarovac0d13d12016-01-20 08:53:4419#include "base/trace_event/trace_event.h"
martina.kollarovac640df12015-07-10 08:30:3820#include "gpu/command_buffer/common/buffer.h"
[email protected]1df19862013-05-24 11:26:2921#include "gpu/command_buffer/common/command_buffer.h"
martina.kollarovac640df12015-07-10 08:30:3822#include "gpu/command_buffer/common/constants.h"
[email protected]96449d2c2009-11-25 00:01:3223
[email protected]a7a27ace2009-12-12 00:11:2524namespace gpu {
[email protected]96449d2c2009-11-25 00:01:3225
// Takes a non-owning pointer to the CommandBuffer this helper appends commands
// to. The ring buffer itself is not allocated here; see Initialize().
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer) {}
[email protected]96449d2c2009-11-25 00:01:3228
// Enables or disables automatic early flushing, then recomputes how many
// entries may be written before the next forced flush.
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}
33
[email protected]d35e6a72012-08-25 01:51:1334bool CommandBufferHelper::IsContextLost() {
sunnyps128566052016-12-09 21:06:4335 if (!context_lost_)
36 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
[email protected]d35e6a72012-08-25 01:51:1337 return context_lost_;
38}
39
// Recomputes immediate_entry_count_: the number of contiguous entries that can
// be written starting at put_ without flushing or waiting. |waiting_count| is
// a lower bound kept available so a caller already committed to writing that
// many entries cannot be starved by the auto-flush limit.
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // If not allocated, no entries are available. If not usable, it will not be
  // allocated.
  if (!HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
  const int32_t curr_get = cached_get_offset_;
  if (curr_get > put_) {
    // get is ahead of put: free space is the gap, minus the one reserved
    // entry that keeps put from catching up to get.
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    // put is ahead (or equal): free space runs to the end of the buffer.
    // When get == 0, reserve one entry since put may not wrap onto get.
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    // Use the small divisor (more frequent flushes) while the service is idle
    // at our last flush point, the large one while it is actively consuming.
    int32_t limit =
        total_entry_count_ /
        ((curr_get == last_flush_put_) ? kAutoFlushSmall : kAutoFlushBig);

    // Entries written since the last ordering barrier, modulo wrap.
    int32_t pending = (put_ + total_entry_count_ - last_ordering_barrier_put_) %
                      total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}
81
[email protected]503b3a22011-12-12 23:29:4082bool CommandBufferHelper::AllocateRingBuffer() {
[email protected]c4488402012-01-11 01:05:4983 if (!usable()) {
84 return false;
85 }
86
[email protected]617296e2011-12-15 05:37:5787 if (HaveRingBuffer()) {
88 return true;
89 }
90
avif15d60a2015-12-21 17:06:3391 int32_t id = -1;
[email protected]44096602014-03-26 04:53:5892 scoped_refptr<Buffer> buffer =
93 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
[email protected]503b3a22011-12-12 23:29:4094 if (id < 0) {
Antoine Labour1a9ef392017-08-24 19:40:3395 usable_ = false;
96 context_lost_ = true;
97 CalcImmediateEntries(0);
[email protected]503b3a22011-12-12 23:29:4098 return false;
99 }
100
Antoine Labour1a9ef392017-08-24 19:40:33101 SetGetBuffer(id, std::move(buffer));
102 return true;
103}
104
105void CommandBufferHelper::SetGetBuffer(int32_t id,
106 scoped_refptr<Buffer> buffer) {
[email protected]503b3a22011-12-12 23:29:40107 command_buffer_->SetGetBuffer(id);
Antoine Labour1a9ef392017-08-24 19:40:33108 ring_buffer_ = std::move(buffer);
109 ring_buffer_id_ = id;
Antoine Labourd3469942017-05-16 21:23:42110 ++set_get_buffer_count_;
Antoine Labour1a9ef392017-08-24 19:40:33111 entries_ = ring_buffer_
112 ? static_cast<CommandBufferEntry*>(ring_buffer_->memory())
113 : 0;
114 total_entry_count_ =
115 ring_buffer_ ? ring_buffer_size_ / sizeof(CommandBufferEntry) : 0;
[email protected]bae23772014-04-16 09:50:55116 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
117 // No need to query it through IPC.
118 put_ = 0;
Sunny Sachanandani5acdc812018-01-25 01:56:07119 last_flush_put_ = 0;
120 last_ordering_barrier_put_ = 0;
sunnyps128566052016-12-09 21:06:43121 cached_get_offset_ = 0;
Antoine Labourd3469942017-05-16 21:23:42122 service_on_old_buffer_ = true;
[email protected]15691b42014-02-12 00:56:00123 CalcImmediateEntries(0);
[email protected]617296e2011-12-15 05:37:57124}
125
// Releases the ring buffer, if any. The call order below is significant:
// the ordering barrier serializes prior commands before the buffer is
// destroyed, and the flush pushes that work out before the SetGetBuffer IPC.
void CommandBufferHelper::FreeRingBuffer() {
  if (HaveRingBuffer()) {
    OrderingBarrier();
    command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
    // SetGetBuffer is an IPC, so previous work needs to be flushed first.
    Flush();
    SetGetBuffer(-1, nullptr);
  }
}
135
Antoine Labour5cb6cecb2019-01-05 00:47:25136gpu::ContextResult CommandBufferHelper::Initialize(uint32_t ring_buffer_size) {
[email protected]503b3a22011-12-12 23:29:40137 ring_buffer_size_ = ring_buffer_size;
danakj45cfd232017-10-18 19:31:31138 if (!AllocateRingBuffer()) {
139 // This would fail if CreateTransferBuffer fails, which will not fail for
140 // transient reasons such as context loss. See http://crrev.com/c/720269
danakj514516a2017-10-19 20:20:31141 LOG(ERROR) << "ContextResult::kFatalFailure: "
142 << "CommandBufferHelper::AllocateRingBuffer() failed";
danakj45cfd232017-10-18 19:31:31143 return gpu::ContextResult::kFatalFailure;
144 }
145 return gpu::ContextResult::kSuccess;
[email protected]503b3a22011-12-12 23:29:40146}
147
// Releases the ring buffer (flushing pending work) before destruction.
CommandBufferHelper::~CommandBufferHelper() {
  FreeRingBuffer();
}
151
// Refreshes the client-side caches (get offset, last token, lost flag) from a
// service-provided |state| snapshot.
void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
  // If the service hasn't seen the current get buffer yet (i.e. hasn't
  // processed the latest SetGetBuffer), it's as if it hadn't processed anything
  // in it, i.e. get == 0.
  service_on_old_buffer_ =
      (state.set_get_buffer_count != set_get_buffer_count_);
  cached_get_offset_ = service_on_old_buffer_ ? 0 : state.get_offset;
  cached_last_token_read_ = state.token;
  context_lost_ = error::IsError(state.error);
}
162
avif15d60a2015-12-21 17:06:33163bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
vmiura926192c2015-12-11 20:10:03164 DCHECK(start >= 0 && start <= total_entry_count_);
165 DCHECK(end >= 0 && end <= total_entry_count_);
Antoine Labourd3469942017-05-16 21:23:42166 CommandBuffer::State last_state = command_buffer_->WaitForGetOffsetInRange(
167 set_get_buffer_count_, start, end);
sunnyps128566052016-12-09 21:06:43168 UpdateCachedState(last_state);
169 return !context_lost_;
[email protected]96449d2c2009-11-25 00:01:32170}
171
[email protected]7d5b8d12011-01-14 23:43:15172void CommandBufferHelper::Flush() {
Khushal058b1a92018-02-14 21:25:50173 TRACE_EVENT0("gpu", "CommandBufferHelper::Flush");
[email protected]15691b42014-02-12 00:56:00174 // Wrap put_ before flush.
175 if (put_ == total_entry_count_)
176 put_ = 0;
177
Antoine Labour1a9ef392017-08-24 19:40:33178 if (HaveRingBuffer()) {
[email protected]da618e22014-07-11 09:27:16179 last_flush_time_ = base::TimeTicks::Now();
Sunny Sachanandani5acdc812018-01-25 01:56:07180 last_flush_put_ = put_;
181 last_ordering_barrier_put_ = put_;
[email protected]c4488402012-01-11 01:05:49182 command_buffer_->Flush(put_);
[email protected]cbe0ded2014-02-21 20:42:52183 ++flush_generation_;
[email protected]15691b42014-02-12 00:56:00184 CalcImmediateEntries(0);
[email protected]c4488402012-01-11 01:05:49185 }
[email protected]7d5b8d12011-01-14 23:43:15186}
187
Antoine Labour1a9ef392017-08-24 19:40:33188void CommandBufferHelper::FlushLazy() {
Sunny Sachanandani5acdc812018-01-25 01:56:07189 if (put_ == last_flush_put_ && put_ == last_ordering_barrier_put_)
Antoine Labour1a9ef392017-08-24 19:40:33190 return;
191 Flush();
192}
193
vmiurab700b432015-02-06 16:42:51194void CommandBufferHelper::OrderingBarrier() {
195 // Wrap put_ before setting the barrier.
196 if (put_ == total_entry_count_)
197 put_ = 0;
198
Antoine Labour1a9ef392017-08-24 19:40:33199 if (HaveRingBuffer()) {
Sunny Sachanandani5acdc812018-01-25 01:56:07200 last_ordering_barrier_put_ = put_;
vmiurab700b432015-02-06 16:42:51201 command_buffer_->OrderingBarrier(put_);
202 ++flush_generation_;
203 CalcImmediateEntries(0);
204 }
205}
206
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Flushes if more than kPeriodicFlushDelayInMicroseconds have elapsed since
// the last flush, so buffered commands do not sit unflushed indefinitely.
void CommandBufferHelper::PeriodicFlushCheck() {
  base::TimeTicks current_time = base::TimeTicks::Now();
  if (current_time - last_flush_time_ >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
216
[email protected]96449d2c2009-11-25 00:01:32217// Calls Flush() and then waits until the buffer is empty. Break early if the
218// error is set.
219bool CommandBufferHelper::Finish() {
[email protected]366ae242011-05-10 02:23:58220 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
[email protected]a24e7582012-02-15 23:21:32221 // If there is no work just exit.
Antoine Labourd3469942017-05-16 21:23:42222 if (put_ == cached_get_offset_ && !service_on_old_buffer_) {
Antoine Labour1a9ef392017-08-24 19:40:33223 return !context_lost_;
[email protected]a24e7582012-02-15 23:21:32224 }
Antoine Labour1a9ef392017-08-24 19:40:33225 FlushLazy();
[email protected]7fe4198b2014-03-18 21:52:36226 if (!WaitForGetOffsetInRange(put_, put_))
227 return false;
sunnyps128566052016-12-09 21:06:43228 DCHECK_EQ(cached_get_offset_, put_);
[email protected]7fe4198b2014-03-18 21:52:36229
230 CalcImmediateEntries(0);
[email protected]96449d2c2009-11-25 00:01:32231
232 return true;
233}
234
// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare. If we can't allocate a command buffer, token doesn't
// increase, ensuring WaitForToken eventually returns.
int32_t CommandBufferHelper::InsertToken() {
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    // Mask keeps the token in the non-negative 31-bit range.
    token_ = (token_ + 1) & 0x7FFFFFFF;
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // On wrap, drain the buffer so every outstanding token has been read.
      bool finished = Finish();  // we wrapped
      DCHECK(!finished || (cached_last_token_read_ == 0));
    }
  }
  return token_;
}
255
sunnyps128566052016-12-09 21:06:43256bool CommandBufferHelper::HasTokenPassed(int32_t token) {
257 // If token_ wrapped around we Finish'd.
258 if (token > token_)
259 return true;
260 // Don't update state if we don't have to.
261 if (token <= cached_last_token_read_)
262 return true;
Adrienne Walker7a78f8c2019-08-09 18:35:32263 RefreshCachedToken();
264 return token <= cached_last_token_read_;
265}
266
267void CommandBufferHelper::RefreshCachedToken() {
sunnyps128566052016-12-09 21:06:43268 CommandBuffer::State last_state = command_buffer_->GetLastState();
269 UpdateCachedState(last_state);
Adrienne Walker7a78f8c2019-08-09 18:35:32270}
271
272bool CommandBufferHelper::HasCachedTokenPassed(int32_t token) {
273 if (token > token_)
274 return true;
sunnyps128566052016-12-09 21:06:43275 return token <= cached_last_token_read_;
276}
277
[email protected]96449d2c2009-11-25 00:01:32278// Waits until the current token value is greater or equal to the value passed
279// in argument.
avif15d60a2015-12-21 17:06:33280void CommandBufferHelper::WaitForToken(int32_t token) {
Antoine Labour1a9ef392017-08-24 19:40:33281 DCHECK_GE(token, 0);
282 if (HasTokenPassed(token))
[email protected]c4488402012-01-11 01:05:49283 return;
Antoine Labour1a9ef392017-08-24 19:40:33284 FlushLazy();
sunnyps128566052016-12-09 21:06:43285 CommandBuffer::State last_state =
286 command_buffer_->WaitForTokenInRange(token, token_);
287 UpdateCachedState(last_state);
[email protected]96449d2c2009-11-25 00:01:32288}
289
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the available
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
  if (!AllocateRingBuffer())
    return;
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32_t curr_get = cached_get_offset_;
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      FlushLazy();
      // Wait until get lands in [1, put_] so the noop fill cannot overrun it.
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = cached_get_offset_;
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
    // Insert Noops to fill out the buffer.
    int32_t num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      // A single noop can skip at most CommandHeader::kMaxSize entries.
      int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Update cached_get_offset_ and try again.
    UpdateCachedState(command_buffer_->GetLastState());
    CalcImmediateEntries(count);
  }

  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush(). Flush can change immediate_entry_count_
    // because of the auto flush logic.
    FlushLazy();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
                                   put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}
351
avif15d60a2015-12-21 17:06:33352int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
sunnyps128566052016-12-09 21:06:43353 int32_t current_get_offset = cached_get_offset_;
reveman0cf65ee82015-08-25 22:15:24354 if (current_get_offset > put_) {
355 return current_get_offset - put_ - 1;
356 } else {
357 return current_get_offset + total_entry_count_ - put_ -
358 (current_get_offset == 0 ? 1 : 0);
359 }
360}
361
// Reports the ring buffer's shared memory to the memory-infra tracing system.
// Always returns true (dump succeeded or there was nothing to dump).
bool CommandBufferHelper::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  using base::trace_event::MemoryDumpLevelOfDetail;

  // Nothing to report without an allocated ring buffer.
  if (!HaveRingBuffer())
    return true;

  const uint64_t tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
      "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, ring_buffer_size_);

  // Detailed dumps additionally report free space and ownership edges.
  if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
    dump->AddScalar(
        "free_size", MemoryAllocatorDump::kUnitsBytes,
        GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
    base::UnguessableToken shared_memory_guid =
        ring_buffer_->backing()->GetGUID();
    const int kImportance = 2;
    if (!shared_memory_guid.is_empty()) {
      // Attribute via the shared-memory GUID when the backing provides one.
      pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
                                           kImportance);
    } else {
      // Fall back to a cross-process global dump keyed by buffer id.
      auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
      pmd->CreateSharedGlobalAllocatorDump(guid);
      pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
    }
  }
  return true;
}
[email protected]96449d2c2009-11-25 00:01:32398
[email protected]a7a27ace2009-12-12 00:11:25399} // namespace gpu