blob: afa6c3902cf353b77487ee54d675ee4d552b5e73 [file] [log] [blame]
[email protected]c4488402012-01-11 01:05:491// Copyright (c) 2012 The Chromium Authors. All rights reserved.
[email protected]1bee3982009-12-17 23:15:282// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
[email protected]96449d2c2009-11-25 00:01:324
5// This file contains the implementation of the command buffer helper class.
6
[email protected]1df19862013-05-24 11:26:297#include "gpu/command_buffer/client/cmd_buffer_helper.h"
[email protected]cf1aa982013-11-05 21:49:378
avif15d60a2015-12-21 17:06:339#include <stdint.h>
10
martina.kollarovac640df12015-07-10 08:30:3811#include <algorithm>
[email protected]cf1aa982013-11-05 21:49:3712#include "base/logging.h"
reveman0cf65ee82015-08-25 22:15:2413#include "base/strings/stringprintf.h"
gabb23705312016-05-11 18:44:5614#include "base/threading/thread_task_runner_handle.h"
[email protected]da618e22014-07-11 09:27:1615#include "base/time/time.h"
reveman0cf65ee82015-08-25 22:15:2416#include "base/trace_event/memory_allocator_dump.h"
17#include "base/trace_event/memory_dump_manager.h"
18#include "base/trace_event/process_memory_dump.h"
martina.kollarovac0d13d12016-01-20 08:53:4419#include "base/trace_event/trace_event.h"
martina.kollarovac640df12015-07-10 08:30:3820#include "gpu/command_buffer/common/buffer.h"
[email protected]1df19862013-05-24 11:26:2921#include "gpu/command_buffer/common/command_buffer.h"
martina.kollarovac640df12015-07-10 08:30:3822#include "gpu/command_buffer/common/constants.h"
[email protected]96449d2c2009-11-25 00:01:3223
[email protected]a7a27ace2009-12-12 00:11:2524namespace gpu {
[email protected]96449d2c2009-11-25 00:01:3225
// Constructs a helper bound to |command_buffer|. All ring-buffer bookkeeping
// starts in the "not allocated" state (id -1, size 0, null entries); the
// actual ring buffer is created later via Initialize()/AllocateRingBuffer().
// |command_buffer| is not owned and must outlive this helper.
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer),
      ring_buffer_id_(-1),
      ring_buffer_size_(0),
      entries_(nullptr),
      total_entry_count_(0),
      immediate_entry_count_(0),
      token_(0),
      put_(0),
      last_put_sent_(0),
      cached_last_token_read_(0),
      cached_get_offset_(0),
      set_get_buffer_count_(0),
      service_on_old_buffer_(false),
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
      commands_issued_(0),
#endif
      usable_(true),
      context_lost_(false),
      flush_automatically_(true),
      flush_generation_(0) {
}
48
[email protected]362e6d602012-10-17 16:55:0649void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
50 flush_automatically_ = enabled;
[email protected]15691b42014-02-12 00:56:0051 CalcImmediateEntries(0);
[email protected]362e6d602012-10-17 16:55:0652}
53
[email protected]d35e6a72012-08-25 01:51:1354bool CommandBufferHelper::IsContextLost() {
sunnyps128566052016-12-09 21:06:4355 if (!context_lost_)
56 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
[email protected]d35e6a72012-08-25 01:51:1357 return context_lost_;
58}
59
// Recomputes immediate_entry_count_: the number of command entries that can be
// written contiguously starting at put_ without flushing or waiting.
// |waiting_count| is a lower bound kept available even when auto-flush would
// otherwise shrink the budget, so a single large command cannot deadlock.
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // Check if usable & allocated.
  if (!usable() || !HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries. One slot is always left empty so
  // that put_ == get unambiguously means "buffer empty"; hence the -1 when
  // get is ahead of put, and when get == 0 (put may not catch up to 0).
  const int32_t curr_get = cached_get_offset_;
  if (curr_get > put_) {
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    // Use the smaller divisor (larger limit) when the service has consumed
    // everything we last sent; flush sooner (kAutoFlushBig) while it is busy.
    int32_t limit =
        total_entry_count_ /
        ((curr_get == last_put_sent_) ? kAutoFlushSmall : kAutoFlushBig);

    // Entries written since the last Flush(), accounting for wrap-around.
    int32_t pending =
        (put_ + total_entry_count_ - last_put_sent_) % total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}
100
// Creates the shared-memory ring buffer (if not already present) and installs
// it as the command buffer's get buffer. Returns true on success or if a ring
// buffer already exists; returns false and marks the helper unusable if the
// transfer-buffer allocation fails (which implies a lost context).
bool CommandBufferHelper::AllocateRingBuffer() {
  if (!usable()) {
    return false;
  }

  if (HaveRingBuffer()) {
    return true;
  }

  int32_t id = -1;
  scoped_refptr<Buffer> buffer =
      command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
  if (id < 0) {
    // Allocation failure is only expected when the context was lost.
    ClearUsable();
    DCHECK(context_lost_);
    return false;
  }

  ring_buffer_ = buffer;
  ring_buffer_id_ = id;
  command_buffer_->SetGetBuffer(id);
  // Track how many times the get buffer changed so the service's state
  // (which echoes this count) can be matched against the current buffer.
  ++set_get_buffer_count_;
  entries_ = static_cast<CommandBufferEntry*>(ring_buffer_->memory());
  total_entry_count_ = ring_buffer_size_ / sizeof(CommandBufferEntry);
  // Call to SetGetBuffer(id) above resets get and put offsets to 0.
  // No need to query it through IPC.
  put_ = 0;
  last_put_sent_ = 0;
  cached_get_offset_ = 0;
  // The service has not acknowledged the new buffer yet.
  service_on_old_buffer_ = true;
  CalcImmediateEntries(0);
  return true;
}
134
[email protected]a5cf3cb2012-08-23 01:08:42135void CommandBufferHelper::FreeResources() {
[email protected]617296e2011-12-15 05:37:57136 if (HaveRingBuffer()) {
137 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
138 ring_buffer_id_ = -1;
[email protected]15691b42014-02-12 00:56:00139 CalcImmediateEntries(0);
boliuada52452015-07-29 21:42:25140 entries_ = nullptr;
141 ring_buffer_ = nullptr;
[email protected]617296e2011-12-15 05:37:57142 }
143}
144
[email protected]a5cf3cb2012-08-23 01:08:42145void CommandBufferHelper::FreeRingBuffer() {
sunnyps128566052016-12-09 21:06:43146 CHECK((put_ == cached_get_offset_) ||
147 error::IsError(command_buffer_->GetLastState().error));
[email protected]a5cf3cb2012-08-23 01:08:42148 FreeResources();
149}
150
avif15d60a2015-12-21 17:06:33151bool CommandBufferHelper::Initialize(int32_t ring_buffer_size) {
[email protected]503b3a22011-12-12 23:29:40152 ring_buffer_size_ = ring_buffer_size;
153 return AllocateRingBuffer();
154}
155
// Releases the ring buffer (if any) on destruction.
CommandBufferHelper::~CommandBufferHelper() {
  FreeResources();
}
159
// Refreshes the locally cached mirror of the service-side state (get offset,
// last token read, error) from a freshly fetched |state| snapshot.
void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
  // If the service hasn't seen the current get buffer yet (i.e. hasn't
  // processed the latest SetGetBuffer), it's as if it hadn't processed anything
  // in it, i.e. get == 0.
  service_on_old_buffer_ =
      (state.set_get_buffer_count != set_get_buffer_count_);
  cached_get_offset_ = service_on_old_buffer_ ? 0 : state.get_offset;
  cached_last_token_read_ = state.token;
  context_lost_ = error::IsError(state.error);
}
170
// Blocks until the service's get offset falls within [start, end] on the
// current get buffer (identified by set_get_buffer_count_). Returns false if
// the helper is unusable or the context was lost during the wait.
bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
  DCHECK(start >= 0 && start <= total_entry_count_);
  DCHECK(end >= 0 && end <= total_entry_count_);
  if (!usable()) {
    return false;
  }
  CommandBuffer::State last_state = command_buffer_->WaitForGetOffsetInRange(
      set_get_buffer_count_, start, end);
  UpdateCachedState(last_state);
  return !context_lost_;
}
182
// Sends the current put offset to the service so it can execute the pending
// commands. Also records the flush time (used by the periodic flush check)
// and recomputes the immediate entry budget.
void CommandBufferHelper::Flush() {
  // Wrap put_ before flush: put_ == total_entry_count_ and put_ == 0 denote
  // the same position, and the service expects the canonical 0 form.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    last_flush_time_ = base::TimeTicks::Now();
    last_put_sent_ = put_;
    command_buffer_->Flush(put_);
    // Bump the generation so observers can detect that a flush occurred.
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
196
// Like Flush(), but issues an ordering barrier at the current put offset
// instead of a full flush. Note it deliberately does not update
// last_put_sent_ or last_flush_time_, which belong to real flushes.
void CommandBufferHelper::OrderingBarrier() {
  // Wrap put_ before setting the barrier (same canonicalization as Flush()).
  if (put_ == total_entry_count_)
    put_ = 0;

  if (usable()) {
    command_buffer_->OrderingBarrier(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
208
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Issues a Flush() if more than kPeriodicFlushDelayInMicroseconds have
// elapsed since the last one, bounding command latency between flushes.
void CommandBufferHelper::PeriodicFlushCheck() {
  const base::TimeTicks now = base::TimeTicks::Now();
  const base::TimeDelta since_last_flush = now - last_flush_time_;
  if (since_last_flush >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
218
// Calls Flush() and then waits until the buffer is empty. Break early if the
// error is set. Returns true if the service caught up (get == put), false if
// the helper is unusable or the context was lost while waiting.
bool CommandBufferHelper::Finish() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
  if (!usable()) {
    return false;
  }
  // If there is no work just exit. (If the service is still on an old get
  // buffer the cached get offset is not meaningful, so we cannot skip.)
  if (put_ == cached_get_offset_ && !service_on_old_buffer_) {
    return true;
  }
  DCHECK(HaveRingBuffer() ||
         error::IsError(command_buffer_->GetLastState().error));
  // Only flush if there are commands the service has not been told about.
  if (last_put_sent_ != put_)
    Flush();
  if (!WaitForGetOffsetInRange(put_, put_))
    return false;
  DCHECK_EQ(cached_get_offset_, put_);

  CalcImmediateEntries(0);

  return true;
}
242
// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare. Returns the token value just inserted (or the stale
// token_ unchanged if the helper is unusable).
int32_t CommandBufferHelper::InsertToken() {
  AllocateRingBuffer();
  if (!usable()) {
    return token_;
  }
  DCHECK(HaveRingBuffer());
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  token_ = (token_ + 1) & 0x7FFFFFFF;
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // On wrap, drain the whole buffer so every pre-wrap token has passed
      // before comparisons against the restarted counter begin.
      bool finished = Finish();  // we wrapped
      DCHECK(!finished || (cached_last_token_read_ == 0));
    }
  }
  return token_;
}
267
sunnyps128566052016-12-09 21:06:43268bool CommandBufferHelper::HasTokenPassed(int32_t token) {
269 // If token_ wrapped around we Finish'd.
270 if (token > token_)
271 return true;
272 // Don't update state if we don't have to.
273 if (token <= cached_last_token_read_)
274 return true;
275 CommandBuffer::State last_state = command_buffer_->GetLastState();
276 UpdateCachedState(last_state);
277 return token <= cached_last_token_read_;
278}
279
// Waits until the current token value is greater or equal to the value passed
// in argument. Checks the cheapest sources first (cached state, then a fresh
// GetLastState snapshot) before falling back to a blocking wait.
void CommandBufferHelper::WaitForToken(int32_t token) {
  if (!usable() || !HaveRingBuffer()) {
    return;
  }
  // Return immediately if corresponding InsertToken failed.
  if (token < 0)
    return;
  if (token > token_)
    return;  // we wrapped
  if (cached_last_token_read_ >= token)
    return;
  // Cached state may be stale; refresh once before committing to a wait.
  UpdateCachedState(command_buffer_->GetLastState());
  if (cached_last_token_read_ >= token)
    return;
  // Make sure the SetToken command is actually submitted, then block.
  Flush();
  CommandBuffer::State last_state =
      command_buffer_->WaitForTokenInRange(token, token_);
  UpdateCachedState(last_state);
}
301
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the available
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
  AllocateRingBuffer();
  if (!usable()) {
    return;
  }
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32_t curr_get = cached_get_offset_;
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      Flush();
      // Wait until get lands in [1, put_], i.e. has wrapped past 0.
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = cached_get_offset_;
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
    // Insert Noops to fill out the buffer. Each noop header can only encode
    // up to CommandHeader::kMaxSize entries, hence the loop.
    int32_t num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush().
    Flush();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
                                   put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}
358
avif15d60a2015-12-21 17:06:33359int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
sunnyps128566052016-12-09 21:06:43360 int32_t current_get_offset = cached_get_offset_;
reveman0cf65ee82015-08-25 22:15:24361 if (current_get_offset > put_) {
362 return current_get_offset - put_ - 1;
363 } else {
364 return current_get_offset + total_entry_count_ - put_ -
365 (current_get_offset == 0 ? 1 : 0);
366 }
367}
368
// MemoryDumpProvider hook: reports the ring buffer's size (and, for detailed
// dumps, its free space and shared-memory ownership edges) into |pmd|.
// Returns true on success; trivially succeeds when no ring buffer exists.
bool CommandBufferHelper::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  using base::trace_event::MemoryDumpLevelOfDetail;

  if (!HaveRingBuffer())
    return true;

  const uint64_t tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
      "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, ring_buffer_size_);

  // Detailed dumps additionally report free space and attribute the backing
  // shared memory to this dump via ownership edges.
  if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
    dump->AddScalar(
        "free_size", MemoryAllocatorDump::kUnitsBytes,
        GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
    base::UnguessableToken shared_memory_guid =
        ring_buffer_->backing()->shared_memory_handle().GetGUID();
    auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
    const int kImportance = 2;
    if (!shared_memory_guid.is_empty()) {
      // Backing has a global shared-memory GUID: link through it.
      pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), guid,
                                           shared_memory_guid, kImportance);
    } else {
      // Fall back to a plain cross-process global dump + ownership edge.
      pmd->CreateSharedGlobalAllocatorDump(guid);
      pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
    }
  }
  return true;
}
[email protected]96449d2c2009-11-25 00:01:32405
[email protected]a7a27ace2009-12-12 00:11:25406} // namespace gpu