blob: 5a764e48d45904bf780cacad5c5d03de37f26c84 [file] [log] [blame]
[email protected]c4488402012-01-11 01:05:491// Copyright (c) 2012 The Chromium Authors. All rights reserved.
[email protected]1bee3982009-12-17 23:15:282// Use of this source code is governed by a BSD-style license that can be
3// found in the LICENSE file.
[email protected]96449d2c2009-11-25 00:01:324
5// This file contains the implementation of the command buffer helper class.
6
[email protected]1df19862013-05-24 11:26:297#include "gpu/command_buffer/client/cmd_buffer_helper.h"
[email protected]cf1aa982013-11-05 21:49:378
avif15d60a2015-12-21 17:06:339#include <stdint.h>
10
martina.kollarovac640df12015-07-10 08:30:3811#include <algorithm>
[email protected]cf1aa982013-11-05 21:49:3712#include "base/logging.h"
reveman0cf65ee82015-08-25 22:15:2413#include "base/strings/stringprintf.h"
gabb23705312016-05-11 18:44:5614#include "base/threading/thread_task_runner_handle.h"
[email protected]da618e22014-07-11 09:27:1615#include "base/time/time.h"
reveman0cf65ee82015-08-25 22:15:2416#include "base/trace_event/memory_allocator_dump.h"
17#include "base/trace_event/memory_dump_manager.h"
18#include "base/trace_event/process_memory_dump.h"
martina.kollarovac0d13d12016-01-20 08:53:4419#include "base/trace_event/trace_event.h"
martina.kollarovac640df12015-07-10 08:30:3820#include "gpu/command_buffer/common/buffer.h"
[email protected]1df19862013-05-24 11:26:2921#include "gpu/command_buffer/common/command_buffer.h"
martina.kollarovac640df12015-07-10 08:30:3822#include "gpu/command_buffer/common/constants.h"
[email protected]96449d2c2009-11-25 00:01:3223
[email protected]a7a27ace2009-12-12 00:11:2524namespace gpu {
[email protected]96449d2c2009-11-25 00:01:3225
// Binds this helper to |command_buffer|. The ring buffer itself is not
// allocated here; that happens later via Initialize()/AllocateRingBuffer().
CommandBufferHelper::CommandBufferHelper(CommandBuffer* command_buffer)
    : command_buffer_(command_buffer) {}
[email protected]96449d2c2009-11-25 00:01:3228
// Enables/disables the automatic early-flush heuristic (see
// CalcImmediateEntries), then recomputes the immediately writable entry count
// since the limit logic depends on this flag.
void CommandBufferHelper::SetAutomaticFlushes(bool enabled) {
  flush_automatically_ = enabled;
  CalcImmediateEntries(0);
}
33
[email protected]d35e6a72012-08-25 01:51:1334bool CommandBufferHelper::IsContextLost() {
sunnyps128566052016-12-09 21:06:4335 if (!context_lost_)
36 context_lost_ = error::IsError(command_buffer()->GetLastState().error);
[email protected]d35e6a72012-08-25 01:51:1337 return context_lost_;
38}
39
// Recomputes immediate_entry_count_: the number of entries that can be written
// without flushing or waiting. |waiting_count| is a floor kept available even
// when the auto-flush limit would otherwise force a flush.
void CommandBufferHelper::CalcImmediateEntries(int waiting_count) {
  DCHECK_GE(waiting_count, 0);

  // If not allocated, no entries are available. If not usable, it will not be
  // allocated.
  if (!HaveRingBuffer()) {
    immediate_entry_count_ = 0;
    return;
  }

  // Get maximum safe contiguous entries. One slot is always kept empty so
  // that put never catches up with get (full vs. empty disambiguation).
  const int32_t curr_get = cached_get_offset_;
  if (curr_get > put_) {
    immediate_entry_count_ = curr_get - put_ - 1;
  } else {
    // Free space runs to the end of the buffer; the last slot is reserved
    // only when get sits at 0, since put may not wrap onto get.
    immediate_entry_count_ =
        total_entry_count_ - put_ - (curr_get == 0 ? 1 : 0);
  }

  // Limit entry count to force early flushing.
  if (flush_automatically_) {
    // Use the small divisor (more frequent flushes) while the service has not
    // advanced past the last flush point, the big one otherwise.
    int32_t limit =
        total_entry_count_ /
        ((curr_get == last_flush_put_) ? kAutoFlushSmall : kAutoFlushBig);

    // Entries written since the last ordering barrier, modulo buffer size.
    int32_t pending = (put_ + total_entry_count_ - last_ordering_barrier_put_) %
                      total_entry_count_;

    if (pending > 0 && pending >= limit) {
      // Time to force flush.
      immediate_entry_count_ = 0;
    } else {
      // Limit remaining entries, but not lower than waiting_count entries to
      // prevent deadlock when command size is greater than the flush limit.
      limit -= pending;
      limit = limit < waiting_count ? waiting_count : limit;
      immediate_entry_count_ =
          immediate_entry_count_ > limit ? limit : immediate_entry_count_;
    }
  }
}
81
[email protected]503b3a22011-12-12 23:29:4082bool CommandBufferHelper::AllocateRingBuffer() {
[email protected]c4488402012-01-11 01:05:4983 if (!usable()) {
84 return false;
85 }
86
[email protected]617296e2011-12-15 05:37:5787 if (HaveRingBuffer()) {
88 return true;
89 }
90
avif15d60a2015-12-21 17:06:3391 int32_t id = -1;
[email protected]44096602014-03-26 04:53:5892 scoped_refptr<Buffer> buffer =
93 command_buffer_->CreateTransferBuffer(ring_buffer_size_, &id);
[email protected]503b3a22011-12-12 23:29:4094 if (id < 0) {
Antoine Labour1a9ef392017-08-24 19:40:3395 usable_ = false;
96 context_lost_ = true;
97 CalcImmediateEntries(0);
[email protected]503b3a22011-12-12 23:29:4098 return false;
99 }
100
Antoine Labour1a9ef392017-08-24 19:40:33101 SetGetBuffer(id, std::move(buffer));
102 return true;
103}
104
// Installs |buffer| (null when tearing down) as the ring buffer identified by
// transfer buffer |id|, and resets all client-side offsets to match the
// service-side reset performed by SetGetBuffer(id).
void CommandBufferHelper::SetGetBuffer(int32_t id,
                                       scoped_refptr<Buffer> buffer) {
  command_buffer_->SetGetBuffer(id);
  ring_buffer_ = std::move(buffer);
  ring_buffer_id_ = id;
  // Counts get-buffer changes; UpdateCachedState compares this against the
  // service-reported count to detect a stale get offset.
  ++set_get_buffer_count_;
  entries_ = ring_buffer_
                 ? static_cast<CommandBufferEntry*>(ring_buffer_->memory())
                 : 0;
  total_entry_count_ =
      ring_buffer_ ? ring_buffer_size_ / sizeof(CommandBufferEntry) : 0;
  // Call to SetGetBuffer(id) above resets get and put offsets to 0.
  // No need to query it through IPC.
  put_ = 0;
  last_flush_put_ = 0;
  last_ordering_barrier_put_ = 0;
  cached_get_offset_ = 0;
  // The service has not yet processed the SetGetBuffer above.
  service_on_old_buffer_ = true;
  CalcImmediateEntries(0);
}
125
[email protected]a5cf3cb2012-08-23 01:08:42126void CommandBufferHelper::FreeRingBuffer() {
Antoine Labour1a9ef392017-08-24 19:40:33127 if (HaveRingBuffer()) {
James Darpiniand1b4ae22018-06-19 19:49:08128 OrderingBarrier();
Antoine Labour1a9ef392017-08-24 19:40:33129 command_buffer_->DestroyTransferBuffer(ring_buffer_id_);
James Darpiniand1b4ae22018-06-19 19:49:08130 // SetGetBuffer is an IPC, so previous work needs to be flushed first.
131 Flush();
Antoine Labour1a9ef392017-08-24 19:40:33132 SetGetBuffer(-1, nullptr);
133 }
[email protected]a5cf3cb2012-08-23 01:08:42134}
135
Antoine Labour5cb6cecb2019-01-05 00:47:25136gpu::ContextResult CommandBufferHelper::Initialize(uint32_t ring_buffer_size) {
[email protected]503b3a22011-12-12 23:29:40137 ring_buffer_size_ = ring_buffer_size;
danakj45cfd232017-10-18 19:31:31138 if (!AllocateRingBuffer()) {
139 // This would fail if CreateTransferBuffer fails, which will not fail for
140 // transient reasons such as context loss. See http://crrev.com/c/720269
danakj514516a2017-10-19 20:20:31141 LOG(ERROR) << "ContextResult::kFatalFailure: "
142 << "CommandBufferHelper::AllocateRingBuffer() failed";
danakj45cfd232017-10-18 19:31:31143 return gpu::ContextResult::kFatalFailure;
144 }
145 return gpu::ContextResult::kSuccess;
[email protected]503b3a22011-12-12 23:29:40146}
147
CommandBufferHelper::~CommandBufferHelper() {
  // Flushes pending work and destroys the transfer buffer before teardown.
  FreeRingBuffer();
}
151
// Refreshes the locally cached service state (get offset, last token read,
// context-lost flag) from |state|.
void CommandBufferHelper::UpdateCachedState(const CommandBuffer::State& state) {
  // If the service hasn't seen the current get buffer yet (i.e. hasn't
  // processed the latest SetGetBuffer), it's as if it hadn't processed anything
  // in it, i.e. get == 0.
  service_on_old_buffer_ =
      (state.set_get_buffer_count != set_get_buffer_count_);
  cached_get_offset_ = service_on_old_buffer_ ? 0 : state.get_offset;
  cached_last_token_read_ = state.token;
  context_lost_ = error::IsError(state.error);
}
162
avif15d60a2015-12-21 17:06:33163bool CommandBufferHelper::WaitForGetOffsetInRange(int32_t start, int32_t end) {
vmiura926192c2015-12-11 20:10:03164 DCHECK(start >= 0 && start <= total_entry_count_);
165 DCHECK(end >= 0 && end <= total_entry_count_);
Antoine Labourd3469942017-05-16 21:23:42166 CommandBuffer::State last_state = command_buffer_->WaitForGetOffsetInRange(
167 set_get_buffer_count_, start, end);
sunnyps128566052016-12-09 21:06:43168 UpdateCachedState(last_state);
169 return !context_lost_;
[email protected]96449d2c2009-11-25 00:01:32170}
171
// Pushes all buffered commands to the service by publishing the current put
// offset.
void CommandBufferHelper::Flush() {
  TRACE_EVENT0("gpu", "CommandBufferHelper::Flush");
  // Wrap put_ before flush.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (HaveRingBuffer()) {
    last_flush_time_ = base::TimeTicks::Now();
    // A flush also acts as an ordering barrier, so both marks advance.
    last_flush_put_ = put_;
    last_ordering_barrier_put_ = put_;
    command_buffer_->Flush(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
187
Antoine Labour1a9ef392017-08-24 19:40:33188void CommandBufferHelper::FlushLazy() {
Sunny Sachanandani5acdc812018-01-25 01:56:07189 if (put_ == last_flush_put_ && put_ == last_ordering_barrier_put_)
Antoine Labour1a9ef392017-08-24 19:40:33190 return;
191 Flush();
192}
193
// Publishes the current put offset as an ordering barrier. Unlike Flush(),
// this does not update last_flush_put_ or last_flush_time_.
void CommandBufferHelper::OrderingBarrier() {
  // Wrap put_ before setting the barrier.
  if (put_ == total_entry_count_)
    put_ = 0;

  if (HaveRingBuffer()) {
    last_ordering_barrier_put_ = put_;
    command_buffer_->OrderingBarrier(put_);
    ++flush_generation_;
    CalcImmediateEntries(0);
  }
}
206
#if defined(CMD_HELPER_PERIODIC_FLUSH_CHECK)
// Flushes if more than kPeriodicFlushDelayInMicroseconds has elapsed since
// the last flush.
void CommandBufferHelper::PeriodicFlushCheck() {
  const base::TimeTicks now = base::TimeTicks::Now();
  const base::TimeDelta since_last_flush = now - last_flush_time_;
  if (since_last_flush >
      base::TimeDelta::FromMicroseconds(kPeriodicFlushDelayInMicroseconds)) {
    Flush();
  }
}
#endif
216
[email protected]96449d2c2009-11-25 00:01:32217// Calls Flush() and then waits until the buffer is empty. Break early if the
218// error is set.
219bool CommandBufferHelper::Finish() {
[email protected]366ae242011-05-10 02:23:58220 TRACE_EVENT0("gpu", "CommandBufferHelper::Finish");
[email protected]a24e7582012-02-15 23:21:32221 // If there is no work just exit.
Antoine Labourd3469942017-05-16 21:23:42222 if (put_ == cached_get_offset_ && !service_on_old_buffer_) {
Antoine Labour1a9ef392017-08-24 19:40:33223 return !context_lost_;
[email protected]a24e7582012-02-15 23:21:32224 }
Antoine Labour1a9ef392017-08-24 19:40:33225 FlushLazy();
[email protected]7fe4198b2014-03-18 21:52:36226 if (!WaitForGetOffsetInRange(put_, put_))
227 return false;
sunnyps128566052016-12-09 21:06:43228 DCHECK_EQ(cached_get_offset_, put_);
[email protected]7fe4198b2014-03-18 21:52:36229
230 CalcImmediateEntries(0);
[email protected]96449d2c2009-11-25 00:01:32231
232 return true;
233}
234
// Inserts a new token into the command stream. It uses an increasing value
// scheme so that we don't lose tokens (a token has passed if the current token
// value is higher than that token). Calls Finish() if the token value wraps,
// which will be rare. If we can't allocate a command buffer, token doesn't
// increase, ensuring WaitForToken eventually returns.
int32_t CommandBufferHelper::InsertToken() {
  // Increment token as 31-bit integer. Negative values are used to signal an
  // error.
  cmd::SetToken* cmd = GetCmdSpace<cmd::SetToken>();
  if (cmd) {
    token_ = (token_ + 1) & 0x7FFFFFFF;
    cmd->Init(token_);
    if (token_ == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::InsertToken(wrapped)");
      // Drain the pipeline so tokens issued before the wrap have all been
      // read; otherwise old (larger) token values would compare as unpassed.
      bool finished = Finish();  // we wrapped
      DCHECK(!finished || (cached_last_token_read_ == 0));
    }
  }
  return token_;
}
255
sunnyps128566052016-12-09 21:06:43256bool CommandBufferHelper::HasTokenPassed(int32_t token) {
257 // If token_ wrapped around we Finish'd.
258 if (token > token_)
259 return true;
260 // Don't update state if we don't have to.
261 if (token <= cached_last_token_read_)
262 return true;
263 CommandBuffer::State last_state = command_buffer_->GetLastState();
264 UpdateCachedState(last_state);
265 return token <= cached_last_token_read_;
266}
267
[email protected]96449d2c2009-11-25 00:01:32268// Waits until the current token value is greater or equal to the value passed
269// in argument.
avif15d60a2015-12-21 17:06:33270void CommandBufferHelper::WaitForToken(int32_t token) {
Antoine Labour1a9ef392017-08-24 19:40:33271 DCHECK_GE(token, 0);
272 if (HasTokenPassed(token))
[email protected]c4488402012-01-11 01:05:49273 return;
Antoine Labour1a9ef392017-08-24 19:40:33274 FlushLazy();
sunnyps128566052016-12-09 21:06:43275 CommandBuffer::State last_state =
276 command_buffer_->WaitForTokenInRange(token, token_);
277 UpdateCachedState(last_state);
[email protected]96449d2c2009-11-25 00:01:32278}
279
// Waits for available entries, basically waiting until get >= put + count + 1.
// It actually waits for contiguous entries, so it may need to wrap the buffer
// around, adding a noops. Thus this function may change the value of put_. The
// function will return early if an error occurs, in which case the available
// space may not be available.
void CommandBufferHelper::WaitForAvailableEntries(int32_t count) {
  if (!AllocateRingBuffer())
    return;
  DCHECK(HaveRingBuffer());
  DCHECK(count < total_entry_count_);
  if (put_ + count > total_entry_count_) {
    // There's not enough room between the current put and the end of the
    // buffer, so we need to wrap. We will add noops all the way to the end,
    // but we need to make sure get wraps first, actually that get is 1 or
    // more (since put will wrap to 0 after we add the noops).
    DCHECK_LE(1, put_);
    int32_t curr_get = cached_get_offset_;
    if (curr_get > put_ || curr_get == 0) {
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries");
      FlushLazy();
      if (!WaitForGetOffsetInRange(1, put_))
        return;
      curr_get = cached_get_offset_;
      DCHECK_LE(curr_get, put_);
      DCHECK_NE(0, curr_get);
    }
    // Insert Noops to fill out the buffer.
    int32_t num_entries = total_entry_count_ - put_;
    while (num_entries > 0) {
      // A single noop covers at most kMaxSize entries, so emit several.
      int32_t num_to_skip = std::min(CommandHeader::kMaxSize, num_entries);
      cmd::Noop::Set(&entries_[put_], num_to_skip);
      put_ += num_to_skip;
      num_entries -= num_to_skip;
    }
    put_ = 0;
  }

  // Try to get 'count' entries without flushing.
  CalcImmediateEntries(count);
  if (immediate_entry_count_ < count) {
    // Update cached_get_offset_ and try again.
    UpdateCachedState(command_buffer_->GetLastState());
    CalcImmediateEntries(count);
  }

  if (immediate_entry_count_ < count) {
    // Try again with a shallow Flush(). Flush can change immediate_entry_count_
    // because of the auto flush logic.
    FlushLazy();
    CalcImmediateEntries(count);
    if (immediate_entry_count_ < count) {
      // Buffer is full. Need to wait for entries.
      TRACE_EVENT0("gpu", "CommandBufferHelper::WaitForAvailableEntries1");
      if (!WaitForGetOffsetInRange((put_ + count + 1) % total_entry_count_,
                                   put_))
        return;
      CalcImmediateEntries(count);
      DCHECK_GE(immediate_entry_count_, count);
    }
  }
}
341
avif15d60a2015-12-21 17:06:33342int32_t CommandBufferHelper::GetTotalFreeEntriesNoWaiting() const {
sunnyps128566052016-12-09 21:06:43343 int32_t current_get_offset = cached_get_offset_;
reveman0cf65ee82015-08-25 22:15:24344 if (current_get_offset > put_) {
345 return current_get_offset - put_ - 1;
346 } else {
347 return current_get_offset + total_entry_count_ - put_ -
348 (current_get_offset == 0 ? 1 : 0);
349 }
350}
351
// Reports the ring buffer's shared memory to the memory-infra tracing system.
// Returns true on success (including when no ring buffer is allocated).
bool CommandBufferHelper::OnMemoryDump(
    const base::trace_event::MemoryDumpArgs& args,
    base::trace_event::ProcessMemoryDump* pmd) {
  using base::trace_event::MemoryAllocatorDump;
  using base::trace_event::MemoryDumpLevelOfDetail;

  // Nothing to report without a ring buffer.
  if (!HaveRingBuffer())
    return true;

  const uint64_t tracing_process_id =
      base::trace_event::MemoryDumpManager::GetInstance()
          ->GetTracingProcessId();

  MemoryAllocatorDump* dump = pmd->CreateAllocatorDump(base::StringPrintf(
      "gpu/command_buffer_memory/buffer_%d", ring_buffer_id_));
  dump->AddScalar(MemoryAllocatorDump::kNameSize,
                  MemoryAllocatorDump::kUnitsBytes, ring_buffer_size_);

  // Detailed (non-background) dumps also report free space and ownership.
  if (args.level_of_detail != MemoryDumpLevelOfDetail::BACKGROUND) {
    dump->AddScalar(
        "free_size", MemoryAllocatorDump::kUnitsBytes,
        GetTotalFreeEntriesNoWaiting() * sizeof(CommandBufferEntry));
    base::UnguessableToken shared_memory_guid =
        ring_buffer_->backing()->GetGUID();
    const int kImportance = 2;
    if (!shared_memory_guid.is_empty()) {
      // Prefer linking directly to the shared-memory segment when it has one.
      pmd->CreateSharedMemoryOwnershipEdge(dump->guid(), shared_memory_guid,
                                           kImportance);
    } else {
      // Fall back to a process-scoped synthetic GUID for the buffer.
      auto guid = GetBufferGUIDForTracing(tracing_process_id, ring_buffer_id_);
      pmd->CreateSharedGlobalAllocatorDump(guid);
      pmd->AddOwnershipEdge(dump->guid(), guid, kImportance);
    }
  }
  return true;
}
[email protected]96449d2c2009-11-25 00:01:32388
[email protected]a7a27ace2009-12-12 00:11:25389} // namespace gpu