/*
 * Copyright 2016 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrSurfaceProxy_DEFINED
#define GrSurfaceProxy_DEFINED

#include "GrGpuResource.h"
#include "GrSurface.h"

#include "SkRect.h"

class GrBackendTexture;
class GrCaps;
class GrOpList;
class GrProxyProvider;
class GrRenderTargetOpList;
class GrRenderTargetProxy;
class GrResourceProvider;
class GrSurfaceContext;
class GrSurfaceProxyPriv;
class GrTextureOpList;
class GrTextureProxy;

// This class replicates the functionality of GrIORef<GrSurface> but tracks the
// utilization for later resource allocation (for the deferred case) and
// forwards on the utilization in the wrapped case.
class GrIORefProxy : public SkNoncopyable {
public:
    void ref() const {
        this->validate();

        ++fRefCnt;
        if (fTarget) {
            fTarget->ref();
        }
    }

    void unref() const {
        this->validate();

        if (fTarget) {
            fTarget->unref();
        }

        --fRefCnt;
        this->didRemoveRefOrPendingIO();
    }

#ifdef SK_DEBUG
    bool isUnique_debugOnly() const {   // For asserts.
        SkASSERT(fRefCnt >= 0 && fPendingWrites >= 0 && fPendingReads >= 0);
        return 1 == fRefCnt + fPendingWrites + fPendingReads;
    }
#endif

    void validate() const {
#ifdef SK_DEBUG
        SkASSERT(fRefCnt >= 0);
        SkASSERT(fPendingReads >= 0);
        SkASSERT(fPendingWrites >= 0);
        SkASSERT(fRefCnt + fPendingReads + fPendingWrites >= 1);

        if (fTarget) {
            // The backing GrSurface can have more refs than the proxy if the proxy
            // started off wrapping an external resource (that came in with refs).
            // The GrSurface should never have fewer refs than the proxy however.
            SkASSERT(fTarget->fRefCnt >= fRefCnt);
            SkASSERT(fTarget->fPendingReads >= fPendingReads);
            SkASSERT(fTarget->fPendingWrites >= fPendingWrites);
        }
#endif
    }

    int32_t getProxyRefCnt_TestOnly() const;
    int32_t getBackingRefCnt_TestOnly() const;
    int32_t getPendingReadCnt_TestOnly() const;
    int32_t getPendingWriteCnt_TestOnly() const;

    void addPendingRead() const {
        this->validate();

        ++fPendingReads;
        if (fTarget) {
            fTarget->addPendingRead();
        }
    }

    void completedRead() const {
        this->validate();

        if (fTarget) {
            fTarget->completedRead();
        }

        --fPendingReads;
        this->didRemoveRefOrPendingIO();
    }

    void addPendingWrite() const {
        this->validate();

        ++fPendingWrites;
        if (fTarget) {
            fTarget->addPendingWrite();
        }
    }

    void completedWrite() const {
        this->validate();

        if (fTarget) {
            fTarget->completedWrite();
        }

        --fPendingWrites;
        this->didRemoveRefOrPendingIO();
    }

protected:
    GrIORefProxy() : fTarget(nullptr), fRefCnt(1), fPendingReads(0), fPendingWrites(0) {}
    GrIORefProxy(sk_sp<GrSurface> surface) : fRefCnt(1), fPendingReads(0), fPendingWrites(0) {
        // Since we're manually forwarding on refs & unrefs we don't want sk_sp doing
        // anything extra.
        fTarget = surface.release();
    }
    virtual ~GrIORefProxy() {
        // We don't unref 'fTarget' here since the 'unref' method will already
        // have forwarded on the unref call that got us here.
    }
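
    // Worked example of the ref bookkeeping below (illustrative numbers only, not
    // an API guarantee): a deferred proxy holding fRefCnt == 3 that instantiates a
    // brand-new GrSurface (which starts with one creation ref) leaves the surface
    // at 1 + (3 - 1) == 3 refs after transferRefs(). The proxy's own creation ref
    // is not forwarded; the surface's existing creation ref, now held via fTarget,
    // stands in for it.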

    // This GrIORefProxy was deferred before but has just been instantiated. To
    // make all the reffing & unreffing work out we now need to transfer any deferred
    // refs & unrefs to the new GrSurface.
    void transferRefs() {
        SkASSERT(fTarget);

        SkASSERT(fTarget->fRefCnt > 0);
        fTarget->fRefCnt += (fRefCnt-1); // don't xfer the proxy's creation ref
        fTarget->fPendingReads += fPendingReads;
        fTarget->fPendingWrites += fPendingWrites;
    }

    bool internalHasPendingIO() const {
        if (fTarget) {
            return fTarget->internalHasPendingIO();
        }

        return SkToBool(fPendingWrites | fPendingReads);
    }

    bool internalHasPendingWrite() const {
        if (fTarget) {
            return fTarget->internalHasPendingWrite();
        }

        return SkToBool(fPendingWrites);
    }

    // For deferred proxies this will be null. For wrapped proxies it will point to the
    // wrapped resource.
    GrSurface* fTarget;

private:
    // This class is used to manage conversion of refs to pending reads/writes.
    friend class GrSurfaceProxyRef;
    template <typename, GrIOType> friend class GrPendingIOResource;

    void didRemoveRefOrPendingIO() const {
        if (0 == fPendingReads && 0 == fPendingWrites && 0 == fRefCnt) {
            delete this;
        }
    }

    mutable int32_t fRefCnt;
    mutable int32_t fPendingReads;
    mutable int32_t fPendingWrites;
};

class GrSurfaceProxy : public GrIORefProxy {
public:
    enum class LazyState {
        kNot,       // The proxy is instantiated or does not have a lazy callback.
        kPartially, // The proxy has a lazy callback but knows basic information about itself.
        kFully,     // The proxy has a lazy callback and also doesn't know its width, height, etc.
    };

    LazyState lazyInstantiationState() const {
        if (fTarget || !SkToBool(fLazyInstantiateCallback)) {
            return LazyState::kNot;
        } else {
            if (fWidth <= 0) {
                SkASSERT(fHeight <= 0);
                return LazyState::kFully;
            } else {
                SkASSERT(fHeight > 0);
                return LazyState::kPartially;
            }
        }
    }

    GrPixelConfig config() const { return fConfig; }
    int width() const {
        SkASSERT(LazyState::kFully != this->lazyInstantiationState());
        return fWidth;
    }
    int height() const {
        SkASSERT(LazyState::kFully != this->lazyInstantiationState());
        return fHeight;
    }
    int worstCaseWidth() const;
    int worstCaseHeight() const;
    GrSurfaceOrigin origin() const {
        SkASSERT(LazyState::kFully != this->lazyInstantiationState());
        SkASSERT(kTopLeft_GrSurfaceOrigin == fOrigin || kBottomLeft_GrSurfaceOrigin == fOrigin);
        return fOrigin;
    }

    class UniqueID {
    public:
        static UniqueID InvalidID() {
            return UniqueID(uint32_t(SK_InvalidUniqueID));
        }

        // wrapped
        explicit UniqueID(const GrGpuResource::UniqueID& id) : fID(id.asUInt()) { }
        // deferred and lazy-callback
        UniqueID() : fID(GrGpuResource::CreateUniqueID()) { }

        uint32_t asUInt() const { return fID; }

        bool operator==(const UniqueID& other) const {
            return fID == other.fID;
        }
        bool operator!=(const UniqueID& other) const {
            return !(*this == other);
        }

        void makeInvalid() { fID = SK_InvalidUniqueID; }
        bool isInvalid() const { return SK_InvalidUniqueID == fID; }

    private:
        explicit UniqueID(uint32_t id) : fID(id) {}

        uint32_t fID;
    };
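
    // A small usage sketch for the ID types above (hypothetical 'proxy' value,
    // not part of this header):
    //
    //   GrSurfaceProxy::UniqueID proxyID   = proxy->uniqueID();
    //   GrSurfaceProxy::UniqueID backingID = proxy->underlyingUniqueID();
    //
    // For a wrapped proxy the two IDs match; for a deferred proxy they diverge
    // once a backing GrSurface has been instantiated (see the contract below).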

    /*
     * The contract for the uniqueID is:
     *   for wrapped resources:
     *      the uniqueID will match that of the wrapped resource
     *
     *   for deferred resources:
     *      the uniqueID will be different from that of the real resource once it is allocated
     *      the proxy's uniqueID will not change across the instantiate call
     *
     *   the uniqueIDs of the proxies and the resources draw from the same pool
     *
     * What this boils down to is that the uniqueID of a proxy can be used to consistently
     * track/identify a proxy but should never be used to distinguish between
     * resources and proxies - beware!
     */
    UniqueID uniqueID() const { return fUniqueID; }

    UniqueID underlyingUniqueID() const {
        if (fTarget) {
            return UniqueID(fTarget->uniqueID());
        }

        return fUniqueID;
    }

    virtual bool instantiate(GrResourceProvider* resourceProvider) = 0;

    /**
     * Helper that gets the width and height of the surface as a bounding rectangle.
     */
    SkRect getBoundsRect() const {
        SkASSERT(LazyState::kFully != this->lazyInstantiationState());
        return SkRect::MakeIWH(this->width(), this->height());
    }

    /**
     * @return the texture proxy associated with the surface proxy, may be NULL.
     */
    virtual GrTextureProxy* asTextureProxy() { return nullptr; }
    virtual const GrTextureProxy* asTextureProxy() const { return nullptr; }

    /**
     * @return the render target proxy associated with the surface proxy, may be NULL.
     */
    virtual GrRenderTargetProxy* asRenderTargetProxy() { return nullptr; }
    virtual const GrRenderTargetProxy* asRenderTargetProxy() const { return nullptr; }

    /**
     * Does the resource count against the resource budget?
     */
    SkBudgeted isBudgeted() const { return fBudgeted; }

    void setLastOpList(GrOpList* opList);
    GrOpList* getLastOpList() { return fLastOpList; }

    GrRenderTargetOpList* getLastRenderTargetOpList();
    GrTextureOpList* getLastTextureOpList();

    /**
     * Retrieves the amount of GPU memory that will be or currently is used by this resource
     * in bytes. It is approximate since we aren't aware of additional padding or copies made
     * by the driver.
     *
     * @return the amount of GPU memory used in bytes
     */
    size_t gpuMemorySize() const {
        SkASSERT(LazyState::kFully != this->lazyInstantiationState());
        if (fTarget) {
            return fTarget->gpuMemorySize();
        }
        if (kInvalidGpuMemorySize == fGpuMemorySize) {
            fGpuMemorySize = this->onUninstantiatedGpuMemorySize();
            SkASSERT(kInvalidGpuMemorySize != fGpuMemorySize);
        }
        return fGpuMemorySize;
    }

    // Helper function that creates a temporary SurfaceContext to perform the copy.
    // It always returns a kExact-backed proxy because it is used when converting an
    // SkSpecialImage to an SkImage. The copy is not a render target and not multisampled.
    static sk_sp<GrTextureProxy> Copy(GrContext*, GrSurfaceProxy* src, GrMipMapped,
                                      SkIRect srcRect, SkBudgeted);

    // Copies the entire 'src'.
    // It always returns a kExact-backed proxy because it is used in SkGpuDevice::snapSpecial.
    static sk_sp<GrTextureProxy> Copy(GrContext* context, GrSurfaceProxy* src, GrMipMapped,
                                      SkBudgeted budgeted);
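
    // Illustrative call of the whole-surface overload above (hypothetical
    // 'context' and 'srcProxy' values, not part of this header):
    //
    //   sk_sp<GrTextureProxy> copy = GrSurfaceProxy::Copy(context, srcProxy,
    //                                                     GrMipMapped::kNo,
    //                                                     SkBudgeted::kYes);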

    // Test-only entry point - should decrease in use as proxies propagate
    static sk_sp<GrSurfaceContext> TestCopy(GrContext* context, const GrSurfaceDesc& dstDesc,
                                            GrSurfaceProxy* srcProxy);

    bool isWrapped_ForTesting() const;

    SkDEBUGCODE(void validate(GrContext*) const;)

    // Provides access to functions that aren't part of the public API.
    inline GrSurfaceProxyPriv priv();
    inline const GrSurfaceProxyPriv priv() const;

protected:
    // Deferred version
    GrSurfaceProxy(const GrSurfaceDesc& desc, SkBackingFit fit, SkBudgeted budgeted, uint32_t flags)
            : GrSurfaceProxy(nullptr, desc, fit, budgeted, flags) {
        // Note: this ctor pulls a new uniqueID from the same pool as the GrGpuResources.
    }

    using LazyInstantiateCallback = std::function<sk_sp<GrTexture>(GrResourceProvider*,
                                                                   GrSurfaceOrigin* outOrigin)>;
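
    // Shape of a lazy-instantiation callback (a minimal sketch only; real callbacks
    // are supplied by the code that creates lazy proxies, and the texture-creation
    // logic is elided here):
    //
    //   LazyInstantiateCallback cb = [](GrResourceProvider* provider,
    //                                   GrSurfaceOrigin* outOrigin) {
    //       *outOrigin = kTopLeft_GrSurfaceOrigin;
    //       return sk_sp<GrTexture>(nullptr);  // would create or look up the texture
    //   };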

    // Lazy-callback version
    GrSurfaceProxy(LazyInstantiateCallback&& callback, const GrSurfaceDesc& desc,
                   SkBackingFit fit, SkBudgeted budgeted, uint32_t flags);

    // Wrapped version
    GrSurfaceProxy(sk_sp<GrSurface> surface, GrSurfaceOrigin origin, SkBackingFit fit);

    virtual ~GrSurfaceProxy();

    friend class GrSurfaceProxyPriv;

    // Methods made available via GrSurfaceProxyPriv
    bool hasPendingIO() const {
        return this->internalHasPendingIO();
    }

    bool hasPendingWrite() const {
        return this->internalHasPendingWrite();
    }

    void computeScratchKey(GrScratchKey*) const;

    virtual sk_sp<GrSurface> createSurface(GrResourceProvider*) const = 0;
    void assign(sk_sp<GrSurface> surface);

    sk_sp<GrSurface> createSurfaceImpl(GrResourceProvider*, int sampleCnt, bool needsStencil,
                                       GrSurfaceFlags flags, GrMipMapped mipMapped,
                                       SkDestinationSurfaceColorMode mipColorMode) const;

    bool instantiateImpl(GrResourceProvider* resourceProvider, int sampleCnt, bool needsStencil,
                         GrSurfaceFlags flags, GrMipMapped mipMapped,
                         SkDestinationSurfaceColorMode mipColorMode, const GrUniqueKey*);

private:
    // For wrapped resources, 'fConfig', 'fWidth', 'fHeight', and 'fOrigin' will always be
    // filled in from the wrapped resource.
    GrPixelConfig        fConfig;
    int                  fWidth;
    int                  fHeight;
    GrSurfaceOrigin      fOrigin;
    SkBackingFit         fFit;      // always kApprox for lazy-callback resources
                                    // always kExact for wrapped resources
    mutable SkBudgeted   fBudgeted; // always kYes for lazy-callback resources
                                    // set from the backing resource for wrapped resources
                                    // mutable because of SkSurface/SkImage wishy-washiness
    const uint32_t       fFlags;

    const UniqueID       fUniqueID; // set from the backing resource for wrapped resources

    LazyInstantiateCallback fLazyInstantiateCallback;
    SkDEBUGCODE(virtual void validateLazyTexture(const GrTexture*) = 0;)

    static const size_t kInvalidGpuMemorySize = ~static_cast<size_t>(0);
    SkDEBUGCODE(size_t getRawGpuMemorySize_debugOnly() const { return fGpuMemorySize; })

    virtual size_t onUninstantiatedGpuMemorySize() const = 0;

    bool                 fNeedsClear;

    // This entry is lazily evaluated so, when the proxy wraps a resource, the resource
    // will be queried but, when the proxy is deferred, it will compute the answer itself.
    // If the proxy computes its own answer that answer is checked (in debug mode) in
    // the instantiation method.
    mutable size_t       fGpuMemorySize;

    // The last opList that wrote to or is currently going to write to this surface.
    // The opList can be closed (e.g., no surface context is currently bound
    // to this proxy).
    // This back-pointer is required so that we can add a dependency between
    // the opList used to create the current contents of this surface
    // and the opList of a destination surface to which this one is being drawn or copied.
    // This pointer is unreffed. OpLists own a ref on their surface proxies.
    GrOpList*            fLastOpList;

    typedef GrIORefProxy INHERITED;
};

#endif