blob: 6aa43745b66876b4cf93b2dd018e7fc2a4915ed9 [file] [log] [blame]
[email protected]c4488402012-01-11 01:05:491// Copyright (c) 2012 The Chromium Authors. All rights reserved.
[email protected]1bee3982009-12-17 23:15:282// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
[email protected]96449d2c2009-11-25 00:01:324
5// This file contains the implementation of the command buffer helper class.
6
[email protected]1df19862013-05-24 11:26:297#include "gpu/command_buffer/client/cmd_buffer_helper.h"
[email protected]cf1aa982013-11-05 21:49:378
avif15d60a2015-12-21 17:06:339#include <stdint.h>
10
martina.kollarovac640df12015-07-10 08:30:3811#include <algorithm>
[email protected]cf1aa982013-11-05 21:49:3712#include "base/logging.h"
reveman0cf65ee82015-08-25 22:15:2413#include "base/strings/stringprintf.h"
gabb23705312016-05-11 18:44:5614#include "base/threading/thread_task_runner_handle.h"
[email protected]da618e22014-07-11 09:27:1615#include "base/time/time.h"
reveman0cf65ee82015-08-25 22:15:2416#include "base/trace_event/memory_allocator_dump.h"
17#include "base/trace_event/memory_dump_manager.h"
18#include "base/trace_event/process_memory_dump.h"
martina.kollarovac0d13d12016-01-20 08:53:4419#include "base/trace_event/trace_event.h"
martina.kollarovac640df12015-07-10 08:30:3820#include "gpu/command_buffer/common/buffer.h"
[email protected]1df19862013-05-24 11:26:2921#include "gpu/command_buffer/common/command_buffer.h"
martina.kollarovac640df12015-07-10 08:30:3822#include "gpu/command_buffer/common/constants.h"
[email protected]96449d2c2009-11-25 00:01:3223
[email protected]a7a27ace2009-12-12 00:11:2524namespace gpu {
[email protected]96449d2c2009-11-25 00:01:3225
[email protected]96449d2c2009-11-25 00:01:3226CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
Sunny Sachanandani5acdc812018-01-25 01:56:0727 : command_buffer_(command_buffer) {}
[email protected]96449d2c2009-11-25 00:01:3228
[email protected]362e6d602012-10-17 16:55:0629void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
30 flush_automatically_ = enabled;
[email protected]15691b42014-02-12 00:56:0031 CalcImmediateEntries(0);
[email protected]362e6d602012-10-17 16:55:0632}
33
[email protected]d35e6a72012-08-25 01:51:1334bool CommandBufferHelper::IsContextLost() {
sunnyps128566052016-12-09 21:06:4335 if (!context_lost_)
36 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
[email protected]d35e6a72012-08-25 01:51:1337 return context_lost_;
38}
39
[email protected]15691b42014-02-12 00:56:0040void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
41 DCHECK_GE(waiting_count, 0);
42
Antoine Labour1a9ef392017-08-24 19:40:3343 // If not allocated, no entries are available. If not usable, it will not be
44 // allocated.
45 if (!HaveRingBuffer()) {
[email protected]15691b42014-02-12 00:56:0046 immediate_entry_count_ = 0;
47 return;
48 }
49
50 // Get maximum safe contiguous entries.
sunnyps128566052016-12-09 21:06:4351 const int32_t curr_get = cached_get_offset_;
[email protected]15691b42014-02-12 00:56:0052 if (curr_get > put_) {
53 immediate_entry_count_ = curr_get - put_ - 1;
54 } else {
55 immediate_entry_count_ =
56 total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
57 }
58
59 // Limit entry count to force early flushing.
60 if (flush_automatically_) {
avif15d60a2015-12-21 17:06:3361 int32_t limit =
[email protected]15691b42014-02-12 00:56:0062 total_entry_count_ /
Sunny Sachanandani5acdc812018-01-25 01:56:0763 ((curr_get == last_flush_put_) ? kAutoFlushSmall : kAutoFlushBig);
[email protected]15691b42014-02-12 00:56:0064
avif15d60a2015-12-21 17:06:3365 int32_t pending =
Sunny Sachanandani5acdc812018-01-25 01:56:0766 (put_ + total_entry_count_ - last_flush_put_) % total_entry_count_;
[email protected]15691b42014-02-12 00:56:0067
68 if (pending > 0 && pending >= limit) {
69 // Time to force flush.
70 immediate_entry_count_ = 0;
71 } else {
72 // Limit remaining entries, but not lower than waiting_count entries to
73 // prevent deadlock when command size is greater than the flush limit.
74 limit -= pending;
75 limit = limit < waiting_count ? waiting_count : limit;
76 immediate_entry_count_ =
77 immediate_entry_count_ > limit ? limit : immediate_entry_count_;
78 }
79 }
80}
81
[email protected]503b3a22011-12-12 23:29:4082bool CommandBufferHelper::AllocateRingBuffer() {
[email protected]c4488402012-01-11 01:05:4983 if (!usable()) {
84 return false;
85 }
86
[email protected]617296e2011-12-15 05:37:5787 if (HaveRingBuffer()) {
88 return true;
89 }
90
avif15d60a2015-12-21 17:06:3391 int32_t id = -1;
[email protected]44096602014-03-26 04:53:5892 scoped_refptr<Buffer> buffer =
93 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
[email protected]503b3a22011-12-12 23:29:4094 if (id < 0) {
Antoine Labour1a9ef392017-08-24 19:40:3395 usable_ = false;
96 context_lost_ = true;
97 CalcImmediateEntries(0);
[email protected]503b3a22011-12-12 23:29:4098 return false;
99 }
100
Antoine Labour1a9ef392017-08-24 19:40:33101 SetGetBuffer(id, std::move(buffer));
102 return true;
103}
104
105void CommandBufferHelper::SetGetBuffer(int32_t id,
106 scoped_refptr<Buffer> buffer) {
[email protected]503b3a22011-12-12 23:29:40107 command_buffer_->SetGetBuffer(id);
Antoine Labour1a9ef392017-08-24 19:40:33108 ring_buffer_ = std::move(buffer);
109 ring_buffer_id_ = id;
Antoine Labourd3469942017-05-16 21:23:42110 ++set_get_buffer_count_;
Antoine Labour1a9ef392017-08-24 19:40:33111 entries_ = ring_buffer_
112 ? static_cast<CommandBufferEntry*>(ring_buffer_->memory())
113 : 0;
114 total_entry_count_ =
115 ring_buffer_ ? ring_buffer_size_ / sizeof(CommandBufferEntry) : 0;
[email protected]bae23772014-04-16 09:50:55116 // Call to SetGetBuffer(id) above resets get and put offsets to 0.
117 // No need to query it through IPC.
118 put_ = 0;
Sunny Sachanandani5acdc812018-01-25 01:56:07119 last_flush_put_ = 0;
120 last_ordering_barrier_put_ = 0;
sunnyps128566052016-12-09 21:06:43121 cached_get_offset_ = 0;
Antoine Labourd3469942017-05-16 21:23:42122 service_on_old_buffer_ = true;
[email protected]15691b42014-02-12 00:56:00123 CalcImmediateEntries(0);
[email protected]617296e2011-12-15 05:37:57124}
125
[email protected]a5cf3cb2012-08-23 01:08:42126void CommandBufferHelper::FreeRingBuffer() {
Antoine Labour1a9ef392017-08-24 19:40:33127 if (HaveRingBuffer()) {
128 FlushLazy();
129 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
130 SetGetBuffer(-1, nullptr);
131 }
[email protected]a5cf3cb2012-08-23 01:08:42132}
133
danakj45cfd232017-10-18 19:31:31134gpu::ContextResult CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
[email protected]503b3a22011-12-12 23:29:40135 ring_buffer_size_ = ring_buffer_size;
danakj45cfd232017-10-18 19:31:31136 if (!AllocateRingBuffer()) {
137 // This would fail if CreateTransferBuffer fails, which will not fail for
138 // transient reasons such as context loss. See http://crrev.com/c/720269
danakj514516a2017-10-19 20:20:31139 LOG(ERROR) << "ContextResult::kFatalFailure: "
140 << "CommandBufferHelper::AllocateRingBuffer() failed";
danakj45cfd232017-10-18 19:31:31141 return gpu::ContextResult::kFatalFailure;
142 }
143 return gpu::ContextResult::kSuccess;
[email protected]503b3a22011-12-12 23:29:40144}
145
[email protected]96449d2c2009-11-25 00:01:32146CommandBufferHelper::~CommandBufferHelper() {
Antoine Labour1a9ef392017-08-24 19:40:33147 FreeRingBuffer();
[email protected]96449d2c2009-11-25 00:01:32148}
149
sunnyps128566052016-12-09 21:06:43150void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
Antoine Labourd3469942017-05-16 21:23:42151 // If the service hasn't seen the current get buffer yet (i.e. hasn't
152 // processed the latest SetGetBuffer), it's as if it hadn't processed anything
153 // in it, i.e. get == 0.
154 service_on_old_buffer_ =
155 (state.set_get_buffer_count != set_get_buffer_count_);
156 cached_get_offset_ = service_on_old_buffer_ ? 0 : state.get_offset;
sunnyps128566052016-12-09 21:06:43157 cached_last_token_read_ = state.token;
158 context_lost_ = error::IsError(state.error);
159}
160
avif15d60a2015-12-21 17:06:33161bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
vmiura926192c2015-12-11 20:10:03162 DCHECK(start >= 0 && start <= total_entry_count_);
163 DCHECK(end >= 0 && end <= total_entry_count_);
Antoine Labourd3469942017-05-16 21:23:42164 CommandBuffer::State last_state = command_buffer_->WaitForGetOffsetInRange(
165 set_get_buffer_count_, start, end);
sunnyps128566052016-12-09 21:06:43166 UpdateCachedState(last_state);
167 return !context_lost_;
[email protected]96449d2c2009-11-25 00:01:32168}
169
[email protected]7d5b8d12011-01-14 23:43:15170void CommandBufferHelper::Flush() {
Khushal058b1a92018-02-14 21:25:50171 TRACE_EVENT0("gpu", "CommandBufferHelper::Flush");
[email protected]15691b42014-02-12 00:56:00172 // Wrap put_ before flush.
173 if (put_ == total_entry_count_)
174 put_ = 0;
175
Antoine Labour1a9ef392017-08-24 19:40:33176 if (HaveRingBuffer()) {
[email protected]da618e22014-07-11 09:27:16177 last_flush_time_ = base::TimeTicks::Now();
Sunny Sachanandani5acdc812018-01-25 01:56:07178 last_flush_put_ = put_;
179 last_ordering_barrier_put_ = put_;
[email protected]c4488402012-01-11 01:05:49180 command_buffer_->Flush(put_);
[email protected]cbe0ded2014-02-21 20:42:52181 ++flush_generation_;
[email protected]15691b42014-02-12 00:56:00182 CalcImmediateEntries(0);
[email protected]c4488402012-01-11 01:05:49183 }
[email protected]7d5b8d12011-01-14 23:43:15184}
185
Antoine Labour1a9ef392017-08-24 19:40:33186void CommandBufferHelper::FlushLazy() {
Sunny Sachanandani5acdc812018-01-25 01:56:07187 if (put_ == last_flush_put_ && put_ == last_ordering_barrier_put_)
Antoine Labour1a9ef392017-08-24 19:40:33188 return;
189 Flush();
190}
191
vmiurab700b432015-02-06 16:42:51192void CommandBufferHelper::OrderingBarrier() {
193 // Wrap put_ before setting the barrier.
194 if (put_ == total_entry_count_)
195 put_ = 0;
196
Antoine Labour1a9ef392017-08-24 19:40:33197 if (HaveRingBuffer()) {
Sunny Sachanandani5acdc812018-01-25 01:56:07198 last_ordering_barrier_put_ = put_;
vmiurab700b432015-02-06 16:42:51199 command_buffer_->OrderingBarrier(put_);
200 ++flush_generation_;
201 CalcImmediateEntries(0);
202 }
203}
204
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Issues a Flush() when more than kPeriodicFlushDelayInMicroseconds has
// elapsed since the previous flush.
void CommandBufferHelper::PeriodicFlushCheck() {
  const base::TimeTicks now = base::TimeTicks::Now();
  const base::TimeDelta delay =
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds);
  if (now - last_flush_time_ > delay)
    Flush();
}
#endif
214
[email protected]96449d2c2009-11-25 00:01:32215// Calls Flush() and then waits until the buffer is empty. Break early if the
216// error is set.
217bool CommandBufferHelper::Finish() {
[email protected]366ae242011-05-10 02:23:58218 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
[email protected]a24e7582012-02-15 23:21:32219 // If there is no work just exit.
Antoine Labourd3469942017-05-16 21:23:42220 if (put_ == cached_get_offset_ && !service_on_old_buffer_) {
Antoine Labour1a9ef392017-08-24 19:40:33221 return !context_lost_;
[email protected]a24e7582012-02-15 23:21:32222 }
Antoine Labour1a9ef392017-08-24 19:40:33223 FlushLazy();
[email protected]7fe4198b2014-03-18 21:52:36224 if (!WaitForGetOffsetInRange(put_, put_))
225 return false;
sunnyps128566052016-12-09 21:06:43226 DCHECK_EQ(cached_get_offset_, put_);
[email protected]7fe4198b2014-03-18 21:52:36227
228 CalcImmediateEntries(0);
[email protected]96449d2c2009-11-25 00:01:32229
230 return true;
231}
232
233// Inserts a new token into the command stream. It uses an increasing value
234// scheme so that we don't lose tokens (a token has passed if the current token
235// value is higher than that token). Calls Finish() if the token value wraps,
Antoine Labour1a9ef392017-08-24 19:40:33236// which will be rare. If we can't allocate a command buffer, token doesn't
237// increase, ensuring WaitForToken eventually returns.
avif15d60a2015-12-21 17:06:33238int32_t CommandBufferHelper::InsertToken() {
[email protected]96449d2c2009-11-25 00:01:32239 // Increment token as 31-bit integer. Negative values are used to signal an
240 // error.
[email protected]c4488402012-01-11 01:05:49241 cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
242 if (cmd) {
Antoine Labour1a9ef392017-08-24 19:40:33243 token_ = (token_ + 1) & 0x7FFFFFFF;
[email protected]c4488402012-01-11 01:05:49244 cmd->Init(token_);
245 if (token_ == 0) {
246 TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
sunnyps128566052016-12-09 21:06:43247 bool finished = Finish(); // we wrapped
248 DCHECK(!finished || (cached_last_token_read_ == 0));
[email protected]c4488402012-01-11 01:05:49249 }
[email protected]96449d2c2009-11-25 00:01:32250 }
251 return token_;
252}
253
sunnyps128566052016-12-09 21:06:43254bool CommandBufferHelper::HasTokenPassed(int32_t token) {
255 // If token_ wrapped around we Finish'd.
256 if (token > token_)
257 return true;
258 // Don't update state if we don't have to.
259 if (token <= cached_last_token_read_)
260 return true;
261 CommandBuffer::State last_state = command_buffer_->GetLastState();
262 UpdateCachedState(last_state);
263 return token <= cached_last_token_read_;
264}
265
[email protected]96449d2c2009-11-25 00:01:32266// Waits until the current token value is greater or equal to the value passed
267// in argument.
avif15d60a2015-12-21 17:06:33268void CommandBufferHelper::WaitForToken(int32_t token) {
Antoine Labour1a9ef392017-08-24 19:40:33269 DCHECK_GE(token, 0);
270 if (HasTokenPassed(token))
[email protected]c4488402012-01-11 01:05:49271 return;
Antoine Labour1a9ef392017-08-24 19:40:33272 FlushLazy();
sunnyps128566052016-12-09 21:06:43273 CommandBuffer::State last_state =
274 command_buffer_->WaitForTokenInRange(token, token_);
275 UpdateCachedState(last_state);
[email protected]96449d2c2009-11-25 00:01:32276}
277
278// Waits for available entries, basically waiting until get >= put + count + 1.
279// It actually waits for contiguous entries, so it may need to wrap the buffer
[email protected]47257372013-01-04 18:37:48280// around, adding a noops. Thus this function may change the value of put_. The
[email protected]9310b262010-06-03 16:15:47281// function will return early if an error occurs, in which case the available
282// space may not be available.
avif15d60a2015-12-21 17:06:33283void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
Antoine Labour1a9ef392017-08-24 19:40:33284 if (!AllocateRingBuffer())
[email protected]3110b122013-11-19 23:25:54285 return;
[email protected]3110b122013-11-19 23:25:54286 DCHECK(HaveRingBuffer());
[email protected]cf1aa982013-11-05 21:49:37287 DCHECK(count < total_entry_count_);
[email protected]47257372013-01-04 18:37:48288 if (put_ + count > total_entry_count_) {
[email protected]96449d2c2009-11-25 00:01:32289 // There's not enough room between the current put and the end of the
[email protected]47257372013-01-04 18:37:48290 // buffer, so we need to wrap. We will add noops all the way to the end,
291 // but we need to make sure get wraps first, actually that get is 1 or
292 // more (since put will wrap to 0 after we add the noops).
[email protected]cf1aa982013-11-05 21:49:37293 DCHECK_LE(1, put_);
sunnyps128566052016-12-09 21:06:43294 int32_t curr_get = cached_get_offset_;
[email protected]15691b42014-02-12 00:56:00295 if (curr_get > put_ || curr_get == 0) {
[email protected]366ae242011-05-10 02:23:58296 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
Antoine Labour1a9ef392017-08-24 19:40:33297 FlushLazy();
[email protected]7fe4198b2014-03-18 21:52:36298 if (!WaitForGetOffsetInRange(1, put_))
299 return;
sunnyps128566052016-12-09 21:06:43300 curr_get = cached_get_offset_;
[email protected]7fe4198b2014-03-18 21:52:36301 DCHECK_LE(curr_get, put_);
302 DCHECK_NE(0, curr_get);
[email protected]96449d2c2009-11-25 00:01:32303 }
[email protected]47257372013-01-04 18:37:48304 // Insert Noops to fill out the buffer.
avif15d60a2015-12-21 17:06:33305 int32_t num_entries = total_entry_count_ - put_;
[email protected]47257372013-01-04 18:37:48306 while (num_entries > 0) {
avif15d60a2015-12-21 17:06:33307 int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
[email protected]47257372013-01-04 18:37:48308 cmd::Noop::Set(&entries_[put_], num_to_skip);
309 put_ += num_to_skip;
310 num_entries -= num_to_skip;
311 }
[email protected]96449d2c2009-11-25 00:01:32312 put_ = 0;
313 }
[email protected]15691b42014-02-12 00:56:00314
315 // Try to get 'count' entries without flushing.
316 CalcImmediateEntries(count);
317 if (immediate_entry_count_ < count) {
318 // Try again with a shallow Flush().
Antoine Labour1a9ef392017-08-24 19:40:33319 FlushLazy();
[email protected]15691b42014-02-12 00:56:00320 CalcImmediateEntries(count);
321 if (immediate_entry_count_ < count) {
322 // Buffer is full. Need to wait for entries.
323 TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
vmiura926192c2015-12-11 20:10:03324 if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
325 put_))
[email protected]7fe4198b2014-03-18 21:52:36326 return;
327 CalcImmediateEntries(count);
328 DCHECK_GE(immediate_entry_count_, count);
[email protected]15691b42014-02-12 00:56:00329 }
[email protected]3110b122013-11-19 23:25:54330 }
[email protected]96449d2c2009-11-25 00:01:32331}
332
avif15d60a2015-12-21 17:06:33333int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
sunnyps128566052016-12-09 21:06:43334 int32_t current_get_offset = cached_get_offset_;
reveman0cf65ee82015-08-25 22:15:24335 if (current_get_offset > put_) {
336 return current_get_offset - put_ - 1;
337 } else {
338 return current_get_offset + total_entry_count_ - put_ -
339 (current_get_offset == 0 ? 1 : 0);
340 }
341}
342
343bool CommandBufferHelper::OnMemoryDump(
344 const base::trace_event::MemoryDumpArgs& args,
345 base::trace_event::ProcessMemoryDump* pmd) {
ericrkeff776982016-11-03 21:37:31346 using base::trace_event::MemoryAllocatorDump;
347 using base::trace_event::MemoryDumpLevelOfDetail;
348
reveman0cf65ee82015-08-25 22:15:24349 if (!HaveRingBuffer())
350 return true;
351
avif15d60a2015-12-21 17:06:33352 const uint64_t tracing_process_id =
reveman0cf65ee82015-08-25 22:15:24353 base::trace_event::MemoryDumpManager::GetInstance()
354 ->GetTracingProcessId();
355
ericrkeff776982016-11-03 21:37:31356 MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
357 "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
358 dump->AddScalar(MemoryAllocatorDump::kNameSize,
359 MemoryAllocatorDump::kUnitsBytes, ring_buffer_size_);
360
361 if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
362 dump->AddScalar(
363 "free_size", MemoryAllocatorDump::kUnitsBytes,
364 GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
Hajime Hoshi35e4fd72017-06-12 04:21:23365 base::UnguessableToken shared_memory_guid =
366 ring_buffer_->backing()->shared_memory_handle().GetGUID();
ericrkeff776982016-11-03 21:37:31367 const int kImportance = 2;
Hajime Hoshi35e4fd72017-06-12 04:21:23368 if (!shared_memory_guid.is_empty()) {
Hajime Hoshi841a20892017-08-16 10:18:47369 pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
370 kImportance);
Hajime Hoshi35e4fd72017-06-12 04:21:23371 } else {
Hajime Hoshi841a20892017-08-16 10:18:47372 auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
Hajime Hoshi35e4fd72017-06-12 04:21:23373 pmd->CreateSharedGlobalAllocatorDump(guid);
374 pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
375 }
ericrkeff776982016-11-03 21:37:31376 }
reveman0cf65ee82015-08-25 22:15:24377 return true;
378}
[email protected]96449d2c2009-11-25 00:01:32379
[email protected]a7a27ace2009-12-12 00:11:25380} // namespace gpu