blob: a5247a092c4dbe933a289a17ab3affeb10ee1f7e [file] [log] [blame]
[email protected]c4488402012-01-11 01:05:491// Copyright (c) 2012 The Chromium Authors. All rights reserved.
[email protected]1bee3982009-12-17 23:15:282// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
[email protected]96449d2c2009-11-25 00:01:324
5// This file contains the implementation of the command buffer helper class.
6
[email protected]1df19862013-05-24 11:26:297#include "gpu/command_buffer/client/cmd_buffer_helper.h"
[email protected]cf1aa982013-11-05 21:49:378
avif15d60a2015-12-21 17:06:339#include <stdint.h>
10
martina.kollarovac640df12015-07-10 08:30:3811#include <algorithm>
[email protected]cf1aa982013-11-05 21:49:3712#include "base/logging.h"
reveman0cf65ee82015-08-25 22:15:2413#include "base/strings/stringprintf.h"
gabb23705312016-05-11 18:44:5614#include "base/threading/thread_task_runner_handle.h"
[email protected]da618e22014-07-11 09:27:1615#include "base/time/time.h"
reveman0cf65ee82015-08-25 22:15:2416#include "base/trace_event/memory_allocator_dump.h"
17#include "base/trace_event/memory_dump_manager.h"
18#include "base/trace_event/process_memory_dump.h"
martina.kollarovac0d13d12016-01-20 08:53:4419#include "base/trace_event/trace_event.h"
martina.kollarovac640df12015-07-10 08:30:3820#include "gpu/command_buffer/common/buffer.h"
[email protected]1df19862013-05-24 11:26:2921#include "gpu/command_buffer/common/command_buffer.h"
martina.kollarovac640df12015-07-10 08:30:3822#include "gpu/command_buffer/common/constants.h"
[email protected]96449d2c2009-11-25 00:01:3223
[email protected]a7a27ace2009-12-12 00:11:2524namespace gpu {
[email protected]96449d2c2009-11-25 00:01:3225
// Wraps |command_buffer|, which must outlive this helper. All other state is
// initialized in-class; the ring buffer is allocated lazily via Initialize()
// or the first WaitForAvailableEntries() call.
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer) {}
[email protected]96449d2c2009-11-25 00:01:3228
// Enables/disables the early-flush heuristic in CalcImmediateEntries().
// Recomputes the immediate entry budget right away so the new policy takes
// effect before the next command is written.
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}
33
[email protected]d35e6a72012-08-25 01:51:1334bool CommandBufferHelper::IsContextLost() {
sunnyps128566052016-12-09 21:06:4335 if (!context_lost_)
36 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
[email protected]d35e6a72012-08-25 01:51:1337 return context_lost_;
38}
39
// Computes immediate_entry_count_: how many contiguous entries can be written
// at put_ without flushing or waiting. |waiting_count| is the minimum budget
// the caller needs; it is never reduced below that (except when forcing a
// flush) to avoid deadlocking on oversized commands.
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // If not allocated, no entries are available. If not usable, it will not be
  // allocated.
  if (!HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries.
  const int32_t curr_get = cached_get_offset_;
  if (curr_get > put_) {
    // Free region is between put_ and get; keep one entry gap so put_ never
    // catches up to get (full vs. empty disambiguation).
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    // Free region runs to the end of the buffer. If get is at 0, reserve one
    // entry at the end, since put_ wrapping onto get would look empty.
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    // Small threshold while the service is caught up (get == last flush),
    // larger threshold while it is still consuming earlier work.
    int32_t limit =
        total_entry_count_ /
        ((curr_get == last_flush_put_) ? kAutoFlushSmall : kAutoFlushBig);

    // Entries written since the last flush (modulo ring wrap).
    int32_t pending =
        (put_ + total_entry_count_ - last_flush_put_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}
81
// Allocates the shared-memory ring buffer through the command buffer, if not
// already present. Returns true when a ring buffer is available afterwards.
// On allocation failure the helper is permanently marked unusable and the
// context is treated as lost.
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  // Already allocated; nothing to do.
  if (HaveRingBuffer()) {
    return true;
  }

  int32_t id = -1;
  scoped_refptr<Buffer> buffer =
      command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    // Allocation failed: poison the helper so future calls bail out early.
    usable_ = false;
    context_lost_ = true;
    CalcImmediateEntries(0);
    return false;
  }

  SetGetBuffer(id, std::move(buffer));
  return true;
}
104
105void CommandBufferHelper::SetGetBuffer(int32_t id,
106 scoped_refptr<Buffer> buffer) {
[email protected]503b3a22011-12-12 23:29:40107 command_buffer_->SetGetBuffer(id);
Antoine Labour1a9ef392017-08-24 19:40:33108 ring_buffer_ = std::move(buffer);
109 ring_buffer_id_ = id;
Antoine Labourd3469942017-05-16 21:23:42110 ++set_get_buffer_count_;
Antoine Labour1a9ef392017-08-24 19:40:33111 entries_ = ring_buffer_
112 ? static_cast<CommandBufferEntry*>(ring_buffer_->memory())
113 : 0;
114 total_entry_count_ =
115 ring_buffer_ ? ring_buffer_size_ / sizeof(CommandBufferEntry) : 0;
[email protected]bae23772014-04-16 09:50:55116 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
117 // No need to query it through IPC.
118 put_ = 0;
Sunny Sachanandani5acdc812018-01-25 01:56:07119 last_flush_put_ = 0;
120 last_ordering_barrier_put_ = 0;
sunnyps128566052016-12-09 21:06:43121 cached_get_offset_ = 0;
Antoine Labourd3469942017-05-16 21:23:42122 service_on_old_buffer_ = true;
[email protected]15691b42014-02-12 00:56:00123 CalcImmediateEntries(0);
[email protected]617296e2011-12-15 05:37:57124}
125
[email protected]a5cf3cb2012-08-23 01:08:42126void CommandBufferHelper::FreeRingBuffer() {
Antoine Labour1a9ef392017-08-24 19:40:33127 if (HaveRingBuffer()) {
128 FlushLazy();
129 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
130 SetGetBuffer(-1, nullptr);
131 }
[email protected]a5cf3cb2012-08-23 01:08:42132}
133
danakj45cfd232017-10-18 19:31:31134gpu::ContextResult CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
[email protected]503b3a22011-12-12 23:29:40135 ring_buffer_size_ = ring_buffer_size;
danakj45cfd232017-10-18 19:31:31136 if (!AllocateRingBuffer()) {
137 // This would fail if CreateTransferBuffer fails, which will not fail for
138 // transient reasons such as context loss. See http://crrev.com/c/720269
danakj514516a2017-10-19 20:20:31139 LOG(ERROR) << "ContextResult::kFatalFailure: "
140 << "CommandBufferHelper::AllocateRingBuffer() failed";
danakj45cfd232017-10-18 19:31:31141 return gpu::ContextResult::kFatalFailure;
142 }
143 return gpu::ContextResult::kSuccess;
[email protected]503b3a22011-12-12 23:29:40144}
145
// Flushes pending work and releases the ring buffer before destruction.
CommandBufferHelper::~CommandBufferHelper() {
  FreeRingBuffer();
}
149
// Refreshes the locally cached view (get offset, last token, lost flag) from a
// service-reported |state|.
void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
  // If the service hasn't seen the current get buffer yet (i.e. hasn't
  // processed the latest SetGetBuffer), it's as if it hadn't processed anything
  // in it, i.e. get == 0.
  service_on_old_buffer_ =
      (state.set_get_buffer_count != set_get_buffer_count_);
  cached_get_offset_ = service_on_old_buffer_ ? 0 : state.get_offset;
  cached_last_token_read_ = state.token;
  context_lost_ = error::IsError(state.error);
}
160
avif15d60a2015-12-21 17:06:33161bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
vmiura926192c2015-12-11 20:10:03162 DCHECK(start >= 0 && start <= total_entry_count_);
163 DCHECK(end >= 0 && end <= total_entry_count_);
Antoine Labourd3469942017-05-16 21:23:42164 CommandBuffer::State last_state = command_buffer_->WaitForGetOffsetInRange(
165 set_get_buffer_count_, start, end);
sunnyps128566052016-12-09 21:06:43166 UpdateCachedState(last_state);
167 return !context_lost_;
[email protected]96449d2c2009-11-25 00:01:32168}
169
// Pushes the current put_ to the service so it can consume newly written
// commands. Also records flush time/position for the auto-flush heuristics and
// bumps flush_generation_ for clients tracking flush epochs.
void CommandBufferHelper::Flush() {
  // Wrap put_ before flush.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (HaveRingBuffer()) {
    last_flush_time_ = base::TimeTicks::Now();
    last_flush_put_ = put_;
    // A flush subsumes an ordering barrier at the same position.
    last_ordering_barrier_put_ = put_;
    command_buffer_->Flush(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
184
Antoine Labour1a9ef392017-08-24 19:40:33185void CommandBufferHelper::FlushLazy() {
Sunny Sachanandani5acdc812018-01-25 01:56:07186 if (put_ == last_flush_put_ && put_ == last_ordering_barrier_put_)
Antoine Labour1a9ef392017-08-24 19:40:33187 return;
188 Flush();
189}
190
// Publishes put_ as an ordering barrier: a lighter-weight signal than Flush()
// that still orders this stream's commands relative to others.
void CommandBufferHelper::OrderingBarrier() {
  // Wrap put_ before setting the barrier.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (HaveRingBuffer()) {
    last_ordering_barrier_put_ = put_;
    command_buffer_->OrderingBarrier(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
203
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Flushes if more than kPeriodicFlushDelayInMicroseconds has elapsed since the
// last flush. Only compiled in when the periodic-flush feature is enabled.
void CommandBufferHelper::PeriodicFlushCheck() {
  base::TimeTicks current_time = base::TimeTicks::Now();
  if (current_time - last_flush_time_ >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
213
// Calls Flush() and then waits until the buffer is empty. Break early if the
// error is set. Returns false if the context was lost.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  // If there is no work just exit.
  if (put_ == cached_get_offset_ && !service_on_old_buffer_) {
    return !context_lost_;
  }
  FlushLazy();
  // Empty means get has caught up to put on the current buffer.
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(cached_get_offset_, put_);

  CalcImmediateEntries(0);

  return true;
}
231
232// Inserts a new token into the command stream. It uses an increasing value
233// scheme so that we don't lose tokens (a token has passed if the current token
234// value is higher than that token). Calls Finish() if the token value wraps,
Antoine Labour1a9ef392017-08-24 19:40:33235// which will be rare. If we can't allocate a command buffer, token doesn't
236// increase, ensuring WaitForToken eventually returns.
avif15d60a2015-12-21 17:06:33237int32_t CommandBufferHelper::InsertToken() {
[email protected]96449d2c2009-11-25 00:01:32238 // Increment token as 31-bit integer. Negative values are used to signal an
239 // error.
[email protected]c4488402012-01-11 01:05:49240 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
241 if (cmd) {
Antoine Labour1a9ef392017-08-24 19:40:33242 token_ = (token_ + 1) & 0x7FFFFFFF;
[email protected]c4488402012-01-11 01:05:49243 cmd->Init(token_);
244 if (token_ == 0) {
245 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
sunnyps128566052016-12-09 21:06:43246 bool finished = Finish(); // we wrapped
247 DCHECK(!finished || (cached_last_token_read_ == 0));
[email protected]c4488402012-01-11 01:05:49248 }
[email protected]96449d2c2009-11-25 00:01:32249 }
250 return token_;
251}
252
// Returns true once the service has read past |token|. Checks cheap cached
// state first and only queries the command buffer when necessary.
bool CommandBufferHelper::HasTokenPassed(int32_t token) {
  // If token_ wrapped around we Finish'd.
  if (token > token_)
    return true;
  // Don't update state if we don't have to.
  if (token <= cached_last_token_read_)
    return true;
  CommandBuffer::State last_state = command_buffer_->GetLastState();
  UpdateCachedState(last_state);
  return token <= cached_last_token_read_;
}
264
// Waits until the current token value is greater or equal to the value passed
// in argument.
void CommandBufferHelper::WaitForToken(int32_t token) {
  DCHECK_GE(token, 0);
  if (HasTokenPassed(token))
    return;
  // Make sure the SetToken command for |token| actually reaches the service
  // before blocking on it.
  FlushLazy();
  CommandBuffer::State last_state =
      command_buffer_->WaitForTokenInRange(token, token_);
  UpdateCachedState(last_state);
}
276
277// Waits for available entries, basically waiting until get >= put + count + 1.
278// It actually waits for contiguous entries, so it may need to wrap the buffer
[email protected]47257372013-01-04 18:37:48279// around, adding a noops. Thus this function may change the value of put_. The
[email protected]9310b262010-06-03 16:15:47280// function will return early if an error occurs, in which case the available
281// space may not be available.
avif15d60a2015-12-21 17:06:33282void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
Antoine Labour1a9ef392017-08-24 19:40:33283 if (!AllocateRingBuffer())
[email protected]3110b122013-11-19 23:25:54284 return;
[email protected]3110b122013-11-19 23:25:54285 DCHECK(HaveRingBuffer());
[email protected]cf1aa982013-11-05 21:49:37286 DCHECK(count < total_entry_count_);
[email protected]47257372013-01-04 18:37:48287 if (put_ + count > total_entry_count_) {
[email protected]96449d2c2009-11-25 00:01:32288 // There's not enough room between the current put and the end of the
[email protected]47257372013-01-04 18:37:48289 // buffer, so we need to wrap. We will add noops all the way to the end,
290 // but we need to make sure get wraps first, actually that get is 1 or
291 // more (since put will wrap to 0 after we add the noops).
[email protected]cf1aa982013-11-05 21:49:37292 DCHECK_LE(1, put_);
sunnyps128566052016-12-09 21:06:43293 int32_t curr_get = cached_get_offset_;
[email protected]15691b42014-02-12 00:56:00294 if (curr_get > put_ || curr_get == 0) {
[email protected]366ae242011-05-10 02:23:58295 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
Antoine Labour1a9ef392017-08-24 19:40:33296 FlushLazy();
[email protected]7fe4198b2014-03-18 21:52:36297 if (!WaitForGetOffsetInRange(1, put_))
298 return;
sunnyps128566052016-12-09 21:06:43299 curr_get = cached_get_offset_;
[email protected]7fe4198b2014-03-18 21:52:36300 DCHECK_LE(curr_get, put_);
301 DCHECK_NE(0, curr_get);
[email protected]96449d2c2009-11-25 00:01:32302 }
[email protected]47257372013-01-04 18:37:48303 // Insert Noops to fill out the buffer.
avif15d60a2015-12-21 17:06:33304 int32_t num_entries = total_entry_count_ - put_;
[email protected]47257372013-01-04 18:37:48305 while (num_entries > 0) {
avif15d60a2015-12-21 17:06:33306 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
[email protected]47257372013-01-04 18:37:48307 cmd::Noop::Set(&entries_[put_], num_to_skip);
308 put_ += num_to_skip;
309 num_entries -= num_to_skip;
310 }
[email protected]96449d2c2009-11-25 00:01:32311 put_ = 0;
312 }
[email protected]15691b42014-02-12 00:56:00313
314 // Try to get 'count' entries without flushing.
315 CalcImmediateEntries(count);
316 if (immediate_entry_count_ < count) {
317 // Try again with a shallow Flush().
Antoine Labour1a9ef392017-08-24 19:40:33318 FlushLazy();
[email protected]15691b42014-02-12 00:56:00319 CalcImmediateEntries(count);
320 if (immediate_entry_count_ < count) {
321 // Buffer is full. Need to wait for entries.
322 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
vmiura926192c2015-12-11 20:10:03323 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
324 put_))
[email protected]7fe4198b2014-03-18 21:52:36325 return;
326 CalcImmediateEntries(count);
327 DCHECK_GE(immediate_entry_count_, count);
[email protected]15691b42014-02-12 00:56:00328 }
[email protected]3110b122013-11-19 23:25:54329 }
[email protected]96449d2c2009-11-25 00:01:32330}
331
avif15d60a2015-12-21 17:06:33332int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
sunnyps128566052016-12-09 21:06:43333 int32_t current_get_offset = cached_get_offset_;
reveman0cf65ee82015-08-25 22:15:24334 if (current_get_offset > put_) {
335 return current_get_offset - put_ - 1;
336 } else {
337 return current_get_offset + total_entry_count_ - put_ -
338 (current_get_offset == 0 ? 1 : 0);
339 }
340}
341
// MemoryDumpProvider hook: reports the ring buffer's size (and, for detailed
// dumps, its free space) to the tracing infrastructure, attributing the shared
// memory to this process via ownership edges.
bool CommandBufferHelper::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  using base::trace_event::MemoryDumpLevelOfDetail;

  // Nothing to report without a ring buffer; returning true keeps the dump
  // itself successful.
  if (!HaveRingBuffer())
    return true;

  const uint64_t tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
      "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, ring_buffer_size_);

  if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
    dump->AddScalar(
        "free_size", MemoryAllocatorDump::kUnitsBytes,
        GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
    base::UnguessableToken shared_memory_guid =
        ring_buffer_->backing()->shared_memory_handle().GetGUID();
    const int kImportance = 2;
    if (!shared_memory_guid.is_empty()) {
      // Preferred path: tie this dump to the shared-memory segment's GUID.
      pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
                                           kImportance);
    } else {
      // Fallback: synthesize a cross-process GUID from the tracing process id
      // and buffer id.
      auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
      pmd->CreateSharedGlobalAllocatorDump(guid);
      pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
    }
  }
  return true;
}
[email protected]96449d2c2009-11-25 00:01:32378
[email protected]a7a27ace2009-12-12 00:11:25379} // namespace gpu