// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "gpu/command_buffer/client/query_tracker.h"

#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <GLES2/gl2extchromium.h>

#include "gpu/command_buffer/client/gles2_cmd_helper.h"
#include "gpu/command_buffer/client/gles2_implementation.h"
#include "gpu/command_buffer/client/mapped_memory.h"
#include "gpu/command_buffer/common/time.h"

namespace gpu {
namespace gles2 {

QuerySyncManager::QuerySyncManager(MappedMemoryManager* manager)
    : mapped_memory_(manager) {
  DCHECK(manager);
}

QuerySyncManager::~QuerySyncManager() {
  while (!buckets_.empty()) {
    mapped_memory_->Free(buckets_.front()->syncs);
    delete buckets_.front();
    buckets_.pop_front();
  }
}

bool QuerySyncManager::Alloc(QuerySyncManager::QueryInfo* info) {
  DCHECK(info);
  if (free_queries_.empty()) {
    // Grow by one bucket of kSyncsPerBucket QuerySync slots in shared memory.
    int32 shm_id;
    unsigned int shm_offset;
    void* mem = mapped_memory_->Alloc(
        kSyncsPerBucket * sizeof(QuerySync), &shm_id, &shm_offset);
    if (!mem) {
      return false;
    }
    QuerySync* syncs = static_cast<QuerySync*>(mem);
    Bucket* bucket = new Bucket(syncs);
    buckets_.push_back(bucket);
    for (size_t ii = 0; ii < kSyncsPerBucket; ++ii) {
      free_queries_.push_back(QueryInfo(bucket, shm_id, shm_offset, syncs));
      ++syncs;
      shm_offset += sizeof(*syncs);
    }
  }
  *info = free_queries_.front();
  ++(info->bucket->used_query_count);
  info->sync->Reset();
  free_queries_.pop_front();
  return true;
}

void QuerySyncManager::Free(const QuerySyncManager::QueryInfo& info) {
  DCHECK_GT(info.bucket->used_query_count, 0u);
  --(info.bucket->used_query_count);
  free_queries_.push_back(info);
}

void QuerySyncManager::Shrink() {
  // Drop free entries whose bucket is completely unused, then release the
  // shared memory of those unused buckets.
  std::deque<QueryInfo> new_queue;
  while (!free_queries_.empty()) {
    if (free_queries_.front().bucket->used_query_count)
      new_queue.push_back(free_queries_.front());
    free_queries_.pop_front();
  }
  free_queries_.swap(new_queue);

  std::deque<Bucket*> new_buckets;
  while (!buckets_.empty()) {
    Bucket* bucket = buckets_.front();
    if (bucket->used_query_count) {
      new_buckets.push_back(bucket);
    } else {
      mapped_memory_->Free(bucket->syncs);
      delete bucket;
    }
    buckets_.pop_front();
  }
  buckets_.swap(new_buckets);
}

QueryTracker::Query::Query(GLuint id, GLenum target,
                           const QuerySyncManager::QueryInfo& info)
    : id_(id),
      target_(target),
      info_(info),
      state_(kUninitialized),
      submit_count_(0),
      token_(0),
      flushed_(false),
      client_begin_time_us_(0),
      result_(0) {
}

void QueryTracker::Query::Begin(GLES2Implementation* gl) {
  // Init memory, increment the submit count.
  MarkAsActive();

  switch (target()) {
    case GL_GET_ERROR_QUERY_CHROMIUM:
      // Do nothing on begin for error queries.
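      // The actual error is picked up in End() via GetClientSideGLError().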
      break;
    case GL_LATENCY_QUERY_CHROMIUM:
      client_begin_time_us_ = MicrosecondsSinceOriginOfTime();
      // Tell the service about the id, shared memory, and count.
      gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
      break;
    case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
    case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
    default:
      // Tell the service about the id, shared memory, and count.
      gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
      break;
  }
}

void QueryTracker::Query::End(GLES2Implementation* gl) {
  switch (target()) {
    case GL_GET_ERROR_QUERY_CHROMIUM: {
      GLenum error = gl->GetClientSideGLError();
      if (error == GL_NO_ERROR) {
        // There was no error, so start the query on the service;
        // it will end immediately.
        gl->helper()->BeginQueryEXT(target(), id(), shm_id(), shm_offset());
      } else {
        // There's an error on the client, so there is no need to bother the
        // service. Just mark the query as completed and return the error.
        state_ = kComplete;
        result_ = error;
        return;
      }
    }
  }
  gl->helper()->EndQueryEXT(target(), submit_count());
  MarkAsPending(gl->helper()->InsertToken());
}

bool QueryTracker::Query::CheckResultsAvailable(
    CommandBufferHelper* helper) {
  if (Pending()) {
    if (info_.sync->process_count == submit_count_ ||
        helper->IsContextLost()) {
      // Need a MemoryBarrier here so that sync->result is read after
      // sync->process_count.
      base::subtle::MemoryBarrier();
      switch (target()) {
        case GL_COMMANDS_ISSUED_CHROMIUM:
          result_ = std::min(info_.sync->result,
                             static_cast<uint64>(0xFFFFFFFFL));
          break;
        case GL_LATENCY_QUERY_CHROMIUM:
          DCHECK(info_.sync->result >= client_begin_time_us_);
          result_ = std::min(info_.sync->result - client_begin_time_us_,
                             static_cast<uint64>(0xFFFFFFFFL));
          break;
        case GL_ASYNC_PIXEL_UNPACK_COMPLETED_CHROMIUM:
        case GL_ASYNC_PIXEL_PACK_COMPLETED_CHROMIUM:
        default:
          result_ = info_.sync->result;
          break;
      }
      state_ = kComplete;
    } else {
      if (!flushed_) {
        // TODO(gman): We could reduce the number of flushes by having a
        // flush count, recording that count at the time we insert the
        // EndQuery command, and then only flushing here if we have not
        // passed that count yet.
        flushed_ = true;
        helper->Flush();
      } else {
        // Insert no-ops so that eventually the GPU process will see more work.
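        // Noop(1) adds a single no-op command to the command buffer.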
        helper->Noop(1);
      }
    }
  }
  return state_ == kComplete;
}

uint32 QueryTracker::Query::GetResult() const {
  DCHECK(state_ == kComplete || state_ == kUninitialized);
  return result_;
}

QueryTracker::QueryTracker(MappedMemoryManager* manager)
    : query_sync_manager_(manager) {
}

QueryTracker::~QueryTracker() {
  while (!queries_.empty()) {
    delete queries_.begin()->second;
    queries_.erase(queries_.begin());
  }
  while (!removed_queries_.empty()) {
    delete removed_queries_.front();
    removed_queries_.pop_front();
  }
}

QueryTracker::Query* QueryTracker::CreateQuery(GLuint id, GLenum target) {
  DCHECK_NE(0u, id);
  FreeCompletedQueries();
  QuerySyncManager::QueryInfo info;
  if (!query_sync_manager_.Alloc(&info)) {
    return NULL;
  }
  Query* query = new Query(id, target, info);
  std::pair<QueryMap::iterator, bool> result =
      queries_.insert(std::make_pair(id, query));
  DCHECK(result.second);
  return query;
}

QueryTracker::Query* QueryTracker::GetQuery(
    GLuint client_id) {
  QueryMap::iterator it = queries_.find(client_id);
  return it != queries_.end() ? it->second : NULL;
}

void QueryTracker::RemoveQuery(GLuint client_id) {
  QueryMap::iterator it = queries_.find(client_id);
  if (it != queries_.end()) {
    Query* query = it->second;
    // When a query is deleted, its shared memory cannot be marked as unused
    // until the query has completed.
    // Note: Skipping this would not confuse the service, but it would corrupt
    // the client's own bookkeeping.
    removed_queries_.push_back(query);
    queries_.erase(it);
    FreeCompletedQueries();
  }
}

void QueryTracker::Shrink() {
  FreeCompletedQueries();
  query_sync_manager_.Shrink();
}

void QueryTracker::FreeCompletedQueries() {
  QueryList::iterator it = removed_queries_.begin();
  while (it != removed_queries_.end()) {
    Query* query = *it;
    if (query->Pending() &&
        query->info_.sync->process_count != query->submit_count()) {
      ++it;
      continue;
    }

    query_sync_manager_.Free(query->info_);
    it = removed_queries_.erase(it);
    delete query;
  }
}

}  // namespace gles2
}  // namespace gpu