/*
 * Copyright 2014 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpuResource_DEFINED
#define GrGpuResource_DEFINED

#include "../private/GrResourceKey.h"
#include "../private/GrTypesPriv.h"
#include "../private/SkNoncopyable.h"

class GrContext;
class GrGpu;
class GrResourceCache;
class SkTraceMemoryDump;

/**
 * Base class for GrGpuResource. Handles the various types of refs we need. Separated out as a base
 * class to isolate the ref-counting behavior and provide friendship without exposing all of
 * GrGpuResource.
 *
 * GPU resources can have three types of refs:
 *   1) Normal ref (+ by ref(), - by unref()): These are used by code that is issuing draw calls
 *      that read and write the resource via GrOpList and by any object that must own a
 *      GrGpuResource and is itself owned (directly or indirectly) by Skia-client code.
 *   2) Pending read (+ by addPendingRead(), - by completedRead()): GrContext has scheduled a read
 *      of the resource by the GPU as a result of a Skia API call but hasn't executed it yet.
 *   3) Pending write (+ by addPendingWrite(), - by completedWrite()): GrContext has scheduled a
 *      write to the resource by the GPU as a result of a Skia API call but hasn't executed it yet.
 *
 * The latter two ref types are private and intended only for Gr core code.
 *
 * PRIOR to the last ref/IO count being removed, DERIVED::notifyAllCntsWillBeZero() will be called
 * (static polymorphism using CRTP). It is legal for additional refs or pending IOs to be added
 * during this time. AFTER all the ref/IO counts reach zero, DERIVED::notifyAllCntsAreZero() will be
 * called. Similarly, when the ref (but not necessarily pending read/write) count reaches 0,
 * DERIVED::notifyRefCountIsZero() will be called. When an unref() causes the ref count to reach
 * zero while the other counts are already zero, notifyRefCountIsZero() will be called before
 * notifyAllCntsAreZero(). Moreover, if notifyRefCountIsZero() returns false then
 * notifyAllCntsAreZero() won't be called at all. notifyRefCountIsZero() must return false if the
 * object may be deleted after notifyRefCountIsZero() returns.
 *
 * GrIORef and GrGpuResource are separate classes for organizational reasons and to be
 * able to give access via friendship to only the functions related to pending IO operations.
 */
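
// For illustration only: a hedged sketch (with an invented class name) of how a class derived
// from GrIORef might implement the CRTP hooks described above. This is not part of the API; the
// real derived class is GrGpuResource, declared later in this header.
//
//     class ExampleResource : public GrIORef<ExampleResource> {
//     private:
//         // Called while the last ref or pending IO is still held; adding refs or pending IO
//         // here is still legal.
//         void notifyAllCntsWillBeZero() const {}
//         // Called once the ref count and both pending IO counts have all reached zero.
//         void notifyAllCntsAreZero(CntType) const {}
//         // Must return false if the object may be deleted after this call returns; returning
//         // false also suppresses the notifyAllCntsAreZero() call.
//         bool notifyRefCountIsZero() const { return true; }
//
//         friend class GrIORef<ExampleResource>;
//     };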
template <typename DERIVED> class GrIORef : public SkNoncopyable {
public:
    // Some of the signatures are written to mirror SkRefCnt so that GrGpuResource can work with
    // templated helper classes (e.g. sk_sp). However, we have different categories of
    // refs (e.g. pending reads). We also don't require thread safety as GrCacheable objects are
    // not intended to cross thread boundaries.

    void ref() const {
        this->validate();
        ++fRefCnt;
    }

    void unref() const {
        this->validate();

        if (fRefCnt == 1) {
            if (!this->internalHasPendingIO()) {
                static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
            }
            SkASSERT(fRefCnt > 0);
        }
        if (--fRefCnt == 0) {
            if (!static_cast<const DERIVED*>(this)->notifyRefCountIsZero()) {
                return;
            }
        }

        this->didRemoveRefOrPendingIO(kRef_CntType);
    }

    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt >= 0);
        SkASSERT(fPendingReads >= 0);
        SkASSERT(fPendingWrites >= 0);
        SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 0);
#endif
    }

protected:
    GrIORef() : fRefCnt(1), fPendingReads(0), fPendingWrites(0) { }

    enum CntType {
        kRef_CntType,
        kPendingRead_CntType,
        kPendingWrite_CntType,
    };

    bool internalHasPendingRead() const { return SkToBool(fPendingReads); }
    bool internalHasPendingWrite() const { return SkToBool(fPendingWrites); }
    bool internalHasPendingIO() const { return SkToBool(fPendingWrites | fPendingReads); }

    bool internalHasRef() const { return SkToBool(fRefCnt); }
    bool internalHasUniqueRef() const { return fRefCnt == 1; }

private:
    // This is for a unit test.
    template <typename T>
    friend void testingOnly_getIORefCnts(const T*, int* refCnt, int* readCnt, int* writeCnt);

    void addPendingRead() const {
        this->validate();
        ++fPendingReads;
    }

    void completedRead() const {
        this->validate();
        if (fPendingReads == 1 && !fPendingWrites && !fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
        }
        --fPendingReads;
        this->didRemoveRefOrPendingIO(kPendingRead_CntType);
    }

    void addPendingWrite() const {
        this->validate();
        ++fPendingWrites;
    }

    void completedWrite() const {
        this->validate();
        if (fPendingWrites == 1 && !fPendingReads && !fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsWillBeZero();
        }
        --fPendingWrites;
        this->didRemoveRefOrPendingIO(kPendingWrite_CntType);
    }

    void didRemoveRefOrPendingIO(CntType cntTypeRemoved) const {
        if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
            static_cast<const DERIVED*>(this)->notifyAllCntsAreZero(cntTypeRemoved);
        }
    }

    mutable int32_t fRefCnt;
    mutable int32_t fPendingReads;
    mutable int32_t fPendingWrites;

    friend class GrIORefProxy;     // needs to forward on wrapped IO calls
    friend class GrResourceCache;  // to check IO ref counts.

    template <typename, GrIOType> friend class GrPendingIOResource;
};
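
// For illustration only: because ref() and unref() mirror SkRefCnt's signatures, smart pointers
// such as sk_sp can manage the normal ref count. A hedged sketch, assuming some concrete
// resource type (the name SomeResource is invented for the example):
//
//     sk_sp<SomeResource> resource(new SomeResource(gpu));  // adopts the constructor's ref
//     sk_sp<SomeResource> alias = resource;                 // ref()
//     alias.reset();                                        // unref()
//     resource.reset();                                     // unref(); once no refs or pending
//                                                           // IO remain, the notify hooks fire
//
// The pending read/write counts, by contrast, are adjusted only by Gr core code (e.g. via
// GrPendingIOResource), never directly by Skia clients.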

/**
 * Base class for objects that can be kept in the GrResourceCache.
 */
class SK_API GrGpuResource : public GrIORef<GrGpuResource> {
public:
    /**
     * Tests whether an object has been abandoned or released. All objects will
     * be in this state after their creating GrContext is destroyed or has
     * contextLost called. It's up to the client to test wasDestroyed() before
     * attempting to use an object if it holds refs on objects across
     * ~GrContext, freeResources with the force flag, or contextLost.
     *
     * @return true if the object has been released or abandoned,
     *         false otherwise.
     */
    bool wasDestroyed() const { return nullptr == fGpu; }

    /**
     * Retrieves the context that owns the object. Note that it is possible for
     * this to return NULL. When objects have been release()ed or abandon()ed
     * they no longer have an owning context. Destroying a GrContext
     * automatically releases all its resources.
     */
    const GrContext* getContext() const;
    GrContext* getContext();

    /**
     * Retrieves the amount of GPU memory used by this resource in bytes. It is
     * approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    class UniqueID {
    public:
        UniqueID() = default;

        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const { return fID == other.fID; }
        bool operator!=(const UniqueID& other) const { return !(*this == other); }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return fID == SK_InvalidUniqueID; }

    protected:
        uint32_t fID = SK_InvalidUniqueID;
    };

    /**
     * Gets an id that is unique for this GrGpuResource object. It is static in that it does
     * not change when the content of the GrGpuResource object changes. This will never return
     * 0.
     */
    UniqueID uniqueID() const { return fUniqueID; }

    /** Returns the current unique key for the resource. It will be invalid if the resource has no
        associated unique key. */
    const GrUniqueKey& getUniqueKey() const { return fUniqueKey; }

    /**
     * Internal-only helper class used for manipulations of the resource by the cache.
     */
    class CacheAccess;
    inline CacheAccess cacheAccess();
    inline const CacheAccess cacheAccess() const;

    /**
     * Internal-only helper class used for manipulations of the resource by internal code.
     */
    class ResourcePriv;
    inline ResourcePriv resourcePriv();
    inline const ResourcePriv resourcePriv() const;

    /**
     * Dumps memory usage information for this GrGpuResource to traceMemoryDump.
     * Typically, subclasses should not need to override this, and should only
     * need to override setMemoryBacking.
     **/
    virtual void dumpMemoryStatistics(SkTraceMemoryDump* traceMemoryDump) const;

    /**
     * Describes the type of GPU resource that is represented by the implementing
     * class (e.g. texture, buffer object, stencil). This data is used for diagnostic
     * purposes by dumpMemoryStatistics().
     *
     * The value returned is expected to be long-lived and will not be copied by the caller.
     */
    virtual const char* getResourceType() const = 0;

    static uint32_t CreateUniqueID();
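
    // For illustration only: a hedged sketch of a hypothetical subclass showing the minimum a
    // concrete resource typically provides. The class name, member, and type string below are
    // invented for the example.
    //
    //     class ExampleBuffer : public GrGpuResource {
    //     public:
    //         ExampleBuffer(GrGpu* gpu, size_t sizeInBytes)
    //                 : GrGpuResource(gpu), fSizeInBytes(sizeInBytes) {
    //             // Non-wrapped resources register once they are fully initialized.
    //             this->registerWithCache(SkBudgeted::kYes);
    //         }
    //         const char* getResourceType() const override { return "Example Buffer"; }
    //
    //     private:
    //         void onRelease() override { /* free the backend object */ }
    //         void onAbandon() override { /* drop handles; make no backend API calls */ }
    //         size_t onGpuMemorySize() const override { return fSizeInBytes; }
    //
    //         size_t fSizeInBytes;
    //     };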

protected:
    // This must be called by every non-wrapped GrGpuResource. It should be called once the object
    // is fully initialized (i.e. only from the constructors of the final class).
    void registerWithCache(SkBudgeted);

    // This must be called by every GrGpuResource that references any wrapped backend objects. It
    // should be called once the object is fully initialized (i.e. only from the constructors of
    // the final class).
    void registerWithCacheWrapped(GrWrapCacheable);

    GrGpuResource(GrGpu*);
    virtual ~GrGpuResource();

    GrGpu* getGpu() const { return fGpu; }

    /** Overridden to free GPU resources in the backend API. */
    virtual void onRelease() { }
    /** Overridden to abandon any internal handles, ptrs, etc. to backend API resources.
        This may be called when the underlying 3D context is no longer valid and so no
        backend API calls should be made. */
    virtual void onAbandon() { }

    /**
     * Allows subclasses to add additional backing information to the SkTraceMemoryDump.
     **/
    virtual void setMemoryBacking(SkTraceMemoryDump*, const SkString&) const {}

    /**
     * Returns a string that uniquely identifies this resource.
     */
    SkString getResourceName() const;

    /**
     * A helper for subclasses that override dumpMemoryStatistics(). This method uses a format
     * consistent with the default implementation of dumpMemoryStatistics() but allows the caller
     * to customize various inputs.
     */
    void dumpMemoryStatisticsPriv(SkTraceMemoryDump* traceMemoryDump, const SkString& resourceName,
                                  const char* type, size_t size) const;
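
    // For illustration only: a hedged sketch of how a subclass that owns a backend object might
    // implement setMemoryBacking() for the memory dumps above. The "gl_buffer" backing type and
    // the fGLBufferID member are invented for the example.
    //
    //     void setMemoryBacking(SkTraceMemoryDump* traceMemoryDump,
    //                           const SkString& dumpName) const override {
    //         SkString bufferID;
    //         bufferID.appendU32(fGLBufferID);
    //         traceMemoryDump->setMemoryBacking(dumpName.c_str(), "gl_buffer", bufferID.c_str());
    //     }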

private:
    bool isPurgeable() const;
    bool hasRefOrPendingIO() const;

    /**
     * Called by registerWithCache if the resource is available to be used as scratch.
     * Resource subclasses should override this if the instances should be recycled as scratch
     * resources and populate the scratchKey with the key.
     * By default resources are not recycled as scratch.
     **/
    virtual void computeScratchKey(GrScratchKey*) const {}

    /**
     * Removes references to objects in the underlying 3D API without freeing them.
     * Called by CacheAccess.
     */
    void abandon();

    /**
     * Frees the object in the underlying 3D API. Called by CacheAccess.
     */
    void release();

    virtual size_t onGpuMemorySize() const = 0;

    /**
     * Called by GrResourceCache when a resource loses its last ref or pending IO.
     */
    virtual void willRemoveLastRefOrPendingIO() {}

    // See comments in CacheAccess and ResourcePriv.
    void setUniqueKey(const GrUniqueKey&);
    void removeUniqueKey();
    void notifyAllCntsWillBeZero() const;
    void notifyAllCntsAreZero(CntType) const;
    bool notifyRefCountIsZero() const;
    void removeScratchKey();
    void makeBudgeted();
    void makeUnbudgeted();

#ifdef SK_DEBUG
    friend class GrGpu;  // for assert in GrGpu to access getGpu
#endif

    // An index into a heap when this resource is purgeable or an array when not. This is
    // maintained by the cache.
    int fCacheArrayIndex;
    // This value reflects how recently this resource was accessed in the cache. This is
    // maintained by the cache.
    uint32_t fTimestamp;
    GrStdSteadyClock::time_point fTimeWhenBecamePurgeable;

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    GrScratchKey fScratchKey;
    GrUniqueKey fUniqueKey;

    // This is not ref'ed but abandon() or release() will be called before the GrGpu object
    // is destroyed. Those calls will set this to NULL.
    GrGpu* fGpu;
    mutable size_t fGpuMemorySize = kInvalidGpuMemorySize;

    GrBudgetedType fBudgetedType = GrBudgetedType::kUnbudgetedUncacheable;
    bool fRefsWrappedObjects = false;
    const UniqueID fUniqueID;

    typedef GrIORef<GrGpuResource> INHERITED;
    friend class GrIORef<GrGpuResource>;  // to access notifyAllCntsAreZero and notifyRefCountIsZero.
};

#endif