/*
 * Copyright 2011 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#ifndef GrGpu_DEFINED
#define GrGpu_DEFINED

#include "GrGpuCommandBuffer.h"
#include "GrProgramDesc.h"
#include "GrSwizzle.h"
#include "GrAllocator.h"
#include "GrTextureProducer.h"
#include "GrTypes.h"
#include "GrXferProcessor.h"
#include "SkPath.h"
#include "SkTArray.h"
#include <map>

class GrBuffer;
class GrContext;
struct GrContextOptions;
class GrGLContext;
class GrMesh;
class GrNonInstancedVertices;
class GrPath;
class GrPathRange;
class GrPathRenderer;
class GrPathRendererChain;
class GrPathRendering;
class GrPipeline;
class GrPrimitiveProcessor;
class GrRenderTarget;
class GrSemaphore;
class GrStencilAttachment;
class GrStencilSettings;
class GrSurface;
class GrTexture;

namespace gr_instanced { class InstancedRendering; }

class GrGpu : public SkRefCnt {
public:
    /**
     * Create an instance of GrGpu that matches the specified backend. If the requested backend is
     * not supported (at compile-time or run-time) this returns nullptr. The context will not be
     * fully constructed and should not be used by GrGpu until after this function returns.
     */
    static GrGpu* Create(GrBackend, GrBackendContext, const GrContextOptions&, GrContext* context);

    ////////////////////////////////////////////////////////////////////////////

    GrGpu(GrContext* context);
    ~GrGpu() override;

    GrContext* getContext() { return fContext; }
    const GrContext* getContext() const { return fContext; }

    /**
     * Gets the capabilities of the draw target.
     */
    const GrCaps* caps() const { return fCaps.get(); }

    GrPathRendering* pathRendering() { return fPathRendering.get(); }

    enum class DisconnectType {
        // No cleanup should be attempted, immediately cease making backend API calls.
        kAbandon,
        // Free allocated resources (not known by GrResourceCache) before returning and
        // ensure no backend 3D API calls will be made after disconnect() returns.
        kCleanup,
    };

    // Called by GrContext when the underlying backend context is already or will be destroyed
    // before GrContext.
    virtual void disconnect(DisconnectType);

    /**
     * The GrGpu object normally assumes that no outsider is setting state
     * within the underlying 3D API's context/device/whatever. This call informs
     * the GrGpu that the state was modified and it shouldn't make assumptions
     * about the state.
     */
    void markContextDirty(uint32_t state = kAll_GrBackendState) { fResetBits |= state; }

    /**
     * Creates a texture object. If kRenderTarget_GrSurfaceFlag is set the texture can
     * be used as a render target by calling GrTexture::asRenderTarget(). Not all
     * pixel configs can be used as render targets. Support for configs as textures
     * or render targets can be checked using GrCaps.
     *
     * @param desc      describes the texture to be created.
     * @param budgeted  does this texture count against the resource cache budget?
     * @param texels    array of mipmap levels containing texel data to load.
     *                  Each level begins with full-size palette data for paletted textures.
     *                  For compressed formats the level contains the compressed pixel data.
     *                  Otherwise, it contains width*height texels. If there is only one
     *                  element and it contains nullptr fPixels, texture data is
     *                  uninitialized.
     * @return  The texture object if successful, otherwise nullptr.
     */
    GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted,
                             const SkTArray<GrMipLevel>& texels);
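
    // Illustrative usage sketch (not part of the API): a caller with raw RGBA pixel data
    // might create a budgeted texture roughly as follows. The names `gpu`, `pixelData`, and
    // `rowBytes` are assumed for the example.
    //
    //   GrSurfaceDesc desc;
    //   desc.fFlags  = kNone_GrSurfaceFlags;
    //   desc.fWidth  = 64;
    //   desc.fHeight = 64;
    //   desc.fConfig = kRGBA_8888_GrPixelConfig;
    //   GrTexture* texture = gpu->createTexture(desc, SkBudgeted::kYes, pixelData, rowBytes);
    //   if (!texture) {
    //       // Creation failed, e.g. unsupported config; fall back or report an error.
    //   }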

    /**
     * Simplified createTexture() interface for when there is no initial texel data to upload.
     */
    GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted) {
        return this->createTexture(desc, budgeted, SkTArray<GrMipLevel>());
    }

    /** Simplified createTexture() interface for when there is only a base level. */
    GrTexture* createTexture(const GrSurfaceDesc& desc, SkBudgeted budgeted, const void* level0Data,
                             size_t rowBytes) {
        SkASSERT(level0Data);
        GrMipLevel level = { level0Data, rowBytes };
        SkSTArray<1, GrMipLevel> array;
        array.push_back() = level;
        return this->createTexture(desc, budgeted, array);
    }

    /**
     * Implements GrResourceProvider::wrapBackendTexture
     */
    sk_sp<GrTexture> wrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership);

    /**
     * Implements GrResourceProvider::wrapBackendRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendRenderTarget(const GrBackendRenderTargetDesc&);

    /**
     * Implements GrResourceProvider::wrapBackendTextureAsRenderTarget
     */
    sk_sp<GrRenderTarget> wrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&);

    /**
     * Creates a buffer in GPU memory. For a client-side buffer use GrBuffer::CreateCPUBacked.
     *
     * @param size           size of buffer to create.
     * @param intendedType   hint to the graphics subsystem about what the buffer will be used for.
     * @param accessPattern  hint to the graphics subsystem about how the data will be accessed.
     * @param data           optional data with which to initialize the buffer.
     *
     * @return the buffer if successful, otherwise nullptr.
     */
    GrBuffer* createBuffer(size_t size, GrBufferType intendedType, GrAccessPattern accessPattern,
                           const void* data = nullptr);

    /**
     * Creates an instanced rendering object if it is supported on this platform.
     */
    gr_instanced::InstancedRendering* createInstancedRendering();

    /**
     * Resolves MSAA.
     */
    void resolveRenderTarget(GrRenderTarget* target);
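
    // Illustrative sketch (hypothetical values, not part of the API): uploading vertex data into
    // a GPU-memory buffer via createBuffer(). The names `gpu`, `vertices`, and `vertexCount` are
    // assumed for the example.
    //
    //   size_t size = vertexCount * sizeof(SkPoint);
    //   GrBuffer* vertexBuffer = gpu->createBuffer(size, kVertex_GrBufferType,
    //                                              kStatic_GrAccessPattern, vertices);
    //   if (!vertexBuffer) {
    //       // Allocation failed; the caller must handle this (e.g. skip the draw).
    //   }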

    /** Info struct returned by getReadPixelsInfo about performing intermediate draws before
        reading pixels for performance or correctness. */
    struct ReadPixelTempDrawInfo {
        /** If the GrGpu is requesting that the caller do a draw to an intermediate surface then
            this is the descriptor for the temp surface. The draw should always be a rect with
            dst 0,0,w,h. */
        GrSurfaceDesc   fTempSurfaceDesc;
        /** Indicates whether there is a performance advantage to using an exact match texture
            (in terms of width and height) for the intermediate texture instead of approximate. */
        SkBackingFit    fTempSurfaceFit;
        /** Swizzle to apply during the draw. This is used to compensate for either feature or
            performance limitations in the underlying 3D API. */
        GrSwizzle       fSwizzle;
        /** The config that should be used to read from the temp surface after the draw. This may
            be different than the original read config in order to compensate for swizzling. The
            read data will effectively be in the original read config. */
        GrPixelConfig   fReadConfig;
    };

    /** Describes why an intermediate draw must/should be performed before readPixels. */
    enum DrawPreference {
        /** On input means that the caller would proceed without a draw if the GrGpu doesn't
            request one.
            On output means that the GrGpu is not requesting a draw. */
        kNoDraw_DrawPreference,
        /** Means that the client would prefer a draw for performance of the readback but
            can satisfy a straight readPixels call on the inputs without an intermediate draw.
            getReadPixelsInfo will never set the draw preference to this value but may leave
            it set. */
        kCallerPrefersDraw_DrawPreference,
        /** On output means that GrGpu would prefer a draw for performance of the readback but
            can satisfy a straight readPixels call on the inputs without an intermediate draw. The
            caller of getReadPixelsInfo should never specify this on input. */
        kGpuPrefersDraw_DrawPreference,
        /** On input means that the caller requires a draw to do a transformation and there is no
            CPU fallback.
            On output means that GrGpu can only satisfy the readPixels request if the intermediate
            draw is performed. */
        kRequireDraw_DrawPreference
    };

    /**
     * Used to negotiate whether and how an intermediate draw should or must be performed before
     * a readPixels call. If this returns false then GrGpu could not deduce an intermediate draw
     * that would allow a successful readPixels call. The passed width, height, and rowBytes
     * must be non-zero and already reflect clipping to the src bounds.
     */
    bool getReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight, size_t rowBytes,
                           GrPixelConfig readConfig, DrawPreference*, ReadPixelTempDrawInfo*);

    /** Info struct returned by getWritePixelsInfo about performing an intermediate draw in order
        to write pixels to a GrSurface for either performance or correctness reasons. */
    struct WritePixelTempDrawInfo {
        /** If the GrGpu is requesting that the caller upload to an intermediate surface and draw
            that to the dst then this is the descriptor for the intermediate surface. The caller
            should upload the pixels such that the upper left pixel of the upload rect is at 0,0 in
            the intermediate surface. */
        GrSurfaceDesc   fTempSurfaceDesc;
        /** Swizzle to apply during the draw. This is used to compensate for either feature or
            performance limitations in the underlying 3D API. */
        GrSwizzle       fSwizzle;
        /** The config that should be specified when uploading the *original* data to the temp
            surface before the draw. This may be different than the original src data config in
            order to compensate for swizzling that will occur when drawing. */
        GrPixelConfig   fWriteConfig;
    };

    /**
     * Used to negotiate whether and how an intermediate surface should be used to write pixels to
     * a GrSurface. If this returns false then GrGpu could not deduce an intermediate draw
     * that would allow a successful transfer of the src pixels to the dst. The passed width,
     * height, and rowBytes must be non-zero and already reflect clipping to the dst bounds.
     */
    bool getWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                            GrPixelConfig srcConfig, DrawPreference*, WritePixelTempDrawInfo*);
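
    // Illustrative sketch of the negotiation (hypothetical caller-side logic; `gpu`, `src`, `w`,
    // `h`, `rowBytes`, and `config` are assumed for the example):
    //
    //   GrGpu::DrawPreference drawPreference = GrGpu::kNoDraw_DrawPreference;
    //   GrGpu::ReadPixelTempDrawInfo tempDrawInfo;
    //   if (!gpu->getReadPixelsInfo(src, w, h, rowBytes, config, &drawPreference, &tempDrawInfo)) {
    //       return false;  // No readback path exists for this src/config combination.
    //   }
    //   if (GrGpu::kNoDraw_DrawPreference != drawPreference) {
    //       // Create a surface from tempDrawInfo.fTempSurfaceDesc, draw src into it applying
    //       // tempDrawInfo.fSwizzle, then read back using tempDrawInfo.fReadConfig.
    //   }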

    /**
     * Reads a rectangle of pixels from a render target.
     *
     * @param surface   The surface to read from
     * @param left      left edge of the rectangle to read (inclusive)
     * @param top       top edge of the rectangle to read (inclusive)
     * @param width     width of rectangle to read in pixels.
     * @param height    height of rectangle to read in pixels.
     * @param config    the pixel config of the destination buffer
     * @param buffer    memory to read the rectangle into.
     * @param rowBytes  the number of bytes between consecutive rows. Zero
     *                  means rows are tightly packed.
     * @param invertY   buffer should be populated bottom-to-top as opposed
     *                  to top-to-bottom (skia's usual order)
     *
     * @return true if the read succeeded, false if not. The read can fail
     *         because of an unsupported pixel config or because no render
     *         target is currently set.
     */
    bool readPixels(GrSurface* surface,
                    int left, int top, int width, int height,
                    GrPixelConfig config, void* buffer, size_t rowBytes);

    /**
     * Updates the pixels in a rectangle of a surface.
     *
     * @param surface   The surface to write to.
     * @param left      left edge of the rectangle to write (inclusive)
     * @param top       top edge of the rectangle to write (inclusive)
     * @param width     width of rectangle to write in pixels.
     * @param height    height of rectangle to write in pixels.
     * @param config    the pixel config of the source buffer
     * @param texels    array of mipmap levels containing texture data
     */
    bool writePixels(GrSurface* surface,
                     int left, int top, int width, int height,
                     GrPixelConfig config,
                     const SkTArray<GrMipLevel>& texels);

    /**
     * This function is a shim which creates a SkTArray<GrMipLevel> of size 1.
     * It then calls writePixels with that SkTArray.
     *
     * @param buffer    memory to read pixels from.
     * @param rowBytes  number of bytes between consecutive rows. Zero
     *                  means rows are tightly packed.
     */
    bool writePixels(GrSurface* surface,
                     int left, int top, int width, int height,
                     GrPixelConfig config, const void* buffer,
                     size_t rowBytes);

    /**
     * Updates the pixels in a rectangle of a surface using a buffer.
     *
     * @param surface         The surface to write to.
     * @param left            left edge of the rectangle to write (inclusive)
     * @param top             top edge of the rectangle to write (inclusive)
     * @param width           width of rectangle to write in pixels.
     * @param height          height of rectangle to write in pixels.
     * @param config          the pixel config of the source buffer
     * @param transferBuffer  GrBuffer to read pixels from (type must be "kCpuToGpu")
     * @param offset          offset from the start of the buffer
     * @param rowBytes        number of bytes between consecutive rows. Zero
     *                        means rows are tightly packed.
     */
    bool transferPixels(GrSurface* surface,
                        int left, int top, int width, int height,
                        GrPixelConfig config, GrBuffer* transferBuffer,
                        size_t offset, size_t rowBytes, GrFence* fence);
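
    // Illustrative sketch (hypothetical caller; `gpu` and `surface` are assumed for the example):
    // reading back a 32x32 region with tightly packed rows.
    //
    //   std::vector<uint32_t> pixels(32 * 32);
    //   bool ok = gpu->readPixels(surface, 0, 0, 32, 32,
    //                             kRGBA_8888_GrPixelConfig, pixels.data(),
    //                             0 /* rowBytes == 0 means tightly packed */);
    //   if (!ok) {
    //       // Readback may require an intermediate draw; see getReadPixelsInfo() above.
    //   }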

    // After the client interacts directly with the 3D context state, the GrGpu
    // must resync its internal state and assumptions about the 3D context state.
    // Each time this occurs the GrGpu bumps a timestamp.
    // At 10 resets / frame and 60fps a 64bit timestamp will overflow in about
    // a billion years.
    typedef uint64_t ResetTimestamp;

    // This timestamp is always older than the current timestamp.
    static const ResetTimestamp kExpiredTimestamp = 0;
    // Returns a timestamp based on the number of times the context was reset.
    // This timestamp can be used to lazily detect when cached 3D context state
    // is dirty.
    ResetTimestamp getResetTimestamp() const { return fResetTimestamp; }

    // Called to perform a surface to surface copy. Fallbacks to issuing a draw from the src to dst
    // take place at the GrOpList level and this function implements faster copy paths. The rect
    // and point are pre-clipped. The src rect and implied dst rect are guaranteed to be within the
    // src/dst bounds and non-empty.
    bool copySurface(GrSurface* dst,
                     GrSurface* src,
                     const SkIRect& srcRect,
                     const SkIPoint& dstPoint);

    struct MultisampleSpecs {
        MultisampleSpecs(uint8_t uniqueID, int effectiveSampleCnt, const SkPoint* locations)
            : fUniqueID(uniqueID),
              fEffectiveSampleCnt(effectiveSampleCnt),
              fSampleLocations(locations) {}

        // Nonzero ID that uniquely identifies these multisample specs.
        uint8_t fUniqueID;
        // The actual number of samples the GPU will run. NOTE: this value can be greater than
        // the render target's sample count.
        int fEffectiveSampleCnt;
        // If sample locations are supported, points to the subpixel locations at which the GPU
        // will sample. Pixel center is at (.5, .5), and (0, 0) indicates the top left corner.
        const SkPoint* fSampleLocations;
    };

    // Finds a render target's multisample specs. The pipeline is only needed in case we need to
    // flush the draw state prior to querying multisample info. The pipeline is not expected to
    // affect the multisample information itself.
    const MultisampleSpecs& queryMultisampleSpecs(const GrPipeline&);

    // Finds the multisample specs with a given unique id.
    const MultisampleSpecs& getMultisampleSpecs(uint8_t uniqueID) {
        SkASSERT(uniqueID > 0 && uniqueID < fMultisampleSpecs.count());
        return fMultisampleSpecs[uniqueID];
    }

    // Creates a GrGpuCommandBuffer to which the GrOpList can send draw commands instead of
    // sending them directly to the Gpu object. This currently does not take a GrRenderTarget. The
    // command buffer is expected to infer the render target from the first draw, clear, or
    // discard. This is an awkward workaround that goes away after MDB is complete and the render
    // target is known from the GrRenderTargetOpList.
    virtual GrGpuCommandBuffer* createCommandBuffer(
            const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
            const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) = 0;

    // Called by GrOpList when flushing.
    // Provides a hook for post-flush actions (e.g. Vulkan command buffer submits).
    virtual void finishOpList() {}

    virtual GrFence SK_WARN_UNUSED_RESULT insertFence() = 0;
    virtual bool waitFence(GrFence, uint64_t timeout = 1000) = 0;
    virtual void deleteFence(GrFence) const = 0;

    virtual sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT makeSemaphore() = 0;
    virtual void insertSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
    virtual void waitSemaphore(sk_sp<GrSemaphore> semaphore) = 0;
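
    // Illustrative sketch (hypothetical backend-agnostic caller; `gpu` is assumed for the
    // example): fencing a batch of GPU work and waiting for it to complete.
    //
    //   GrFence fence = gpu->insertFence();
    //   gpu->flush();
    //   if (!gpu->waitFence(fence, 1000 /* timeout */)) {
    //       // Timed out; the GPU work has not finished yet.
    //   }
    //   gpu->deleteFence(fence);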

    // Ensures that all queued up driver-level commands have been sent to the GPU. For example, on
    // OpenGL, this calls glFlush.
    virtual void flush() = 0;

    ///////////////////////////////////////////////////////////////////////////
    // Debugging and Stats

    class Stats {
    public:
#if GR_GPU_STATS
        Stats() { this->reset(); }

        void reset() {
            fRenderTargetBinds = 0;
            fShaderCompilations = 0;
            fTextureCreates = 0;
            fTextureUploads = 0;
            fTransfersToTexture = 0;
            fStencilAttachmentCreates = 0;
            fNumDraws = 0;
            fNumFailedDraws = 0;
        }

        int renderTargetBinds() const { return fRenderTargetBinds; }
        void incRenderTargetBinds() { fRenderTargetBinds++; }
        int shaderCompilations() const { return fShaderCompilations; }
        void incShaderCompilations() { fShaderCompilations++; }
        int textureCreates() const { return fTextureCreates; }
        void incTextureCreates() { fTextureCreates++; }
        int textureUploads() const { return fTextureUploads; }
        void incTextureUploads() { fTextureUploads++; }
        int transfersToTexture() const { return fTransfersToTexture; }
        void incTransfersToTexture() { fTransfersToTexture++; }
        void incStencilAttachmentCreates() { fStencilAttachmentCreates++; }
        void incNumDraws() { fNumDraws++; }
        void incNumFailedDraws() { ++fNumFailedDraws; }
        void dump(SkString*);
        void dumpKeyValuePairs(SkTArray<SkString>* keys, SkTArray<double>* values);
        int numDraws() const { return fNumDraws; }
        int numFailedDraws() const { return fNumFailedDraws; }
    private:
        int fRenderTargetBinds;
        int fShaderCompilations;
        int fTextureCreates;
        int fTextureUploads;
        int fTransfersToTexture;
        int fStencilAttachmentCreates;
        int fNumDraws;
        int fNumFailedDraws;
#else
        void dump(SkString*) {}
        void dumpKeyValuePairs(SkTArray<SkString>*, SkTArray<double>*) {}
        void incRenderTargetBinds() {}
        void incShaderCompilations() {}
        void incTextureCreates() {}
        void incTextureUploads() {}
        void incTransfersToTexture() {}
        void incStencilAttachmentCreates() {}
        void incNumDraws() {}
        void incNumFailedDraws() {}
#endif
    };

    Stats* stats() { return &fStats; }

    /** Creates a texture directly in the backend API without wrapping it in a GrTexture. This is
        only to be used for testing (particularly for testing the methods that import an externally
        created texture into Skia). Must be matched with a call to deleteTestingOnlyTexture(). */
    virtual GrBackendObject createTestingOnlyBackendTexture(void* pixels, int w, int h,
                                                            GrPixelConfig config,
                                                            bool isRenderTarget = false) = 0;
    /** Checks that a handle represents an actual texture in the backend API that has not been
        freed. */
    virtual bool isTestingOnlyBackendTexture(GrBackendObject) const = 0;
    /** If ownership of the backend texture has been transferred pass true for abandonTexture. This
        will do any necessary cleanup of the handle without freeing the texture in the backend
        API. */
    virtual void deleteTestingOnlyBackendTexture(GrBackendObject,
                                                 bool abandonTexture = false) = 0;
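
    // Illustrative sketch (hypothetical debugging hook; `gpu` is assumed for the example):
    // dumping the per-GrGpu counters when GR_GPU_STATS is enabled.
    //
    //   SkTArray<SkString> keys;
    //   SkTArray<double> values;
    //   gpu->stats()->dumpKeyValuePairs(&keys, &values);
    //   for (int i = 0; i < keys.count(); ++i) {
    //       SkDebugf("%s: %f\n", keys[i].c_str(), values[i]);
    //   }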

    // width and height may be larger than rt (if underlying API allows it).
    // Returns nullptr if a compatible stencil attachment could not be created, otherwise the
    // caller owns the ref on the GrStencilAttachment.
    virtual GrStencilAttachment* createStencilAttachmentForRenderTarget(const GrRenderTarget*,
                                                                        int width,
                                                                        int height) = 0;
    // Clears the target's entire stencil buffer to 0.
    virtual void clearStencil(GrRenderTarget* target) = 0;

    // Draws an outline rectangle for debugging/visualization purposes.
    virtual void drawDebugWireRect(GrRenderTarget*, const SkIRect&, GrColor) = 0;

    // Determines whether a texture will need to be rescaled in order to be used with the
    // GrSamplerParams. This variation is called when the caller will create a new texture using
    // the resource provider from a non-texture src (cpu-backed image, ...).
    bool isACopyNeededForTextureParams(int width, int height, const GrSamplerParams&,
                                       GrTextureProducer::CopyParams*,
                                       SkScalar scaleAdjust[2]) const;

    // Like the above but this variation should be called when the caller is not creating the
    // original texture but rather was handed the original texture. It adds additional checks
    // relevant to original textures that were created external to Skia via
    // GrResourceProvider::wrap methods.
    bool isACopyNeededForTextureParams(GrTextureProxy* proxy, const GrSamplerParams& params,
                                       GrTextureProducer::CopyParams* copyParams,
                                       SkScalar scaleAdjust[2]) const {
        if (this->isACopyNeededForTextureParams(proxy->width(), proxy->height(), params,
                                                copyParams, scaleAdjust)) {
            return true;
        }
        return this->onIsACopyNeededForTextureParams(proxy, params, copyParams, scaleAdjust);
    }

    // This is only to be used in GL-specific tests.
    virtual const GrGLContext* glContextForTesting() const { return nullptr; }

    // This is only to be used by testing code.
    virtual void resetShaderCacheForTesting() const {}

    void handleDirtyContext() {
        if (fResetBits) {
            this->resetContext();
        }
    }

protected:
    static void ElevateDrawPreference(GrGpu::DrawPreference* preference,
                                      GrGpu::DrawPreference elevation) {
        GR_STATIC_ASSERT(GrGpu::kCallerPrefersDraw_DrawPreference > GrGpu::kNoDraw_DrawPreference);
        GR_STATIC_ASSERT(GrGpu::kGpuPrefersDraw_DrawPreference >
                         GrGpu::kCallerPrefersDraw_DrawPreference);
        GR_STATIC_ASSERT(GrGpu::kRequireDraw_DrawPreference >
                         GrGpu::kGpuPrefersDraw_DrawPreference);
        *preference = SkTMax(*preference, elevation);
    }

    // Handles cases where a surface will be updated without a call to flushRenderTarget.
    void didWriteToSurface(GrSurface* surface, const SkIRect* bounds, uint32_t mipLevels = 1) const;

    Stats                            fStats;
    std::unique_ptr<GrPathRendering> fPathRendering;
    // Subclass must initialize this in its constructor.
    sk_sp<const GrCaps>              fCaps;

    typedef SkTArray<SkPoint, true> SamplePattern;

private:
    // Called when the 3D context state is unknown. Subclass should emit any
    // assumed 3D context state and dirty any state cache.
    virtual void onResetContext(uint32_t resetBits) = 0;
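
    // Illustrative sketch (hypothetical backend subclass, not actual backend code): an
    // onGetReadPixelsInfo() override might escalate the negotiated preference with
    // ElevateDrawPreference() when the requested read config cannot be read back directly.
    // `GrHypotheticalGpu` and `configCanBeReadDirectly()` are invented for the example.
    //
    //   bool GrHypotheticalGpu::onGetReadPixelsInfo(GrSurface* src, int w, int h, size_t rowBytes,
    //                                               GrPixelConfig readConfig,
    //                                               DrawPreference* drawPreference,
    //                                               ReadPixelTempDrawInfo* tempDrawInfo) {
    //       if (!this->configCanBeReadDirectly(readConfig)) {  // hypothetical helper
    //           ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    //           tempDrawInfo->fTempSurfaceDesc.fConfig = kRGBA_8888_GrPixelConfig;
    //           tempDrawInfo->fReadConfig = kRGBA_8888_GrPixelConfig;
    //       }
    //       return true;
    //   }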

    // Called before certain draws in order to guarantee coherent results from dst reads.
    virtual void xferBarrier(GrRenderTarget*, GrXferBarrierType) = 0;

    // Overridden by backend-specific derived class to create objects.
    // Texture size and sample size will have already been validated in the base class before
    // onCreateTexture/CompressedTexture are called.
    virtual GrTexture* onCreateTexture(const GrSurfaceDesc& desc,
                                       SkBudgeted budgeted,
                                       const SkTArray<GrMipLevel>& texels) = 0;
    virtual GrTexture* onCreateCompressedTexture(const GrSurfaceDesc& desc,
                                                 SkBudgeted budgeted,
                                                 const SkTArray<GrMipLevel>& texels) = 0;

    virtual sk_sp<GrTexture> onWrapBackendTexture(const GrBackendTextureDesc&, GrWrapOwnership) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendRenderTarget(const GrBackendRenderTargetDesc&) = 0;
    virtual sk_sp<GrRenderTarget> onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc&)=0;
    virtual GrBuffer* onCreateBuffer(size_t size, GrBufferType intendedType, GrAccessPattern,
                                     const void* data) = 0;

    virtual gr_instanced::InstancedRendering* onCreateInstancedRendering() = 0;

    virtual bool onIsACopyNeededForTextureParams(GrTextureProxy* proxy, const GrSamplerParams&,
                                                 GrTextureProducer::CopyParams*,
                                                 SkScalar scaleAdjust[2]) const {
        return false;
    }

    virtual bool onGetReadPixelsInfo(GrSurface* srcSurface, int readWidth, int readHeight,
                                     size_t rowBytes, GrPixelConfig readConfig, DrawPreference*,
                                     ReadPixelTempDrawInfo*) = 0;
    virtual bool onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                      GrPixelConfig srcConfig, DrawPreference*,
                                      WritePixelTempDrawInfo*) = 0;

    // Overridden by backend-specific derived class to perform the surface read.
    virtual bool onReadPixels(GrSurface*,
                              int left, int top,
                              int width, int height,
                              GrPixelConfig,
                              void* buffer,
                              size_t rowBytes) = 0;

    // Overridden by backend-specific derived class to perform the surface write.
    virtual bool onWritePixels(GrSurface*,
                               int left, int top, int width, int height,
                               GrPixelConfig config,
                               const SkTArray<GrMipLevel>& texels) = 0;

    // Overridden by backend-specific derived class to perform the buffer-to-surface transfer.
    virtual bool onTransferPixels(GrSurface*,
                                  int left, int top, int width, int height,
                                  GrPixelConfig config, GrBuffer* transferBuffer,
                                  size_t offset, size_t rowBytes) = 0;

    // Overridden by backend-specific derived class to perform the resolve.
    virtual void onResolveRenderTarget(GrRenderTarget* target) = 0;

    // Overridden by backend-specific derived class to perform the copy surface.
    virtual bool onCopySurface(GrSurface* dst,
                               GrSurface* src,
                               const SkIRect& srcRect,
                               const SkIPoint& dstPoint) = 0;

    // Overridden by backend-specific derived class to perform the multisample queries.
    virtual void onQueryMultisampleSpecs(GrRenderTarget*, const GrStencilSettings&,
                                         int* effectiveSampleCnt, SamplePattern*) = 0;

    void resetContext() {
        this->onResetContext(fResetBits);
        fResetBits = 0;
        ++fResetTimestamp;
    }
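
    // Illustrative sketch (hypothetical backend subclass): implementations typically call
    // handleDirtyContext() on entry so that any state marked dirty via markContextDirty() is
    // resynced (through onResetContext()) before further backend API calls are made.
    // `GrHypotheticalGpu` is invented for the example.
    //
    //   GrBuffer* GrHypotheticalGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
    //                                               GrAccessPattern accessPattern,
    //                                               const void* data) {
    //       this->handleDirtyContext();
    //       // ... backend-specific buffer allocation and initial data upload ...
    //   }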

    struct SamplePatternComparator {
        bool operator()(const SamplePattern&, const SamplePattern&) const;
    };

    typedef std::map<SamplePattern, uint8_t, SamplePatternComparator> MultisampleSpecsIdMap;

    ResetTimestamp                         fResetTimestamp;
    uint32_t                               fResetBits;
    MultisampleSpecsIdMap                  fMultisampleSpecsIdMap;
    SkSTArray<1, MultisampleSpecs, true>   fMultisampleSpecs;
    // The context owns us, not vice-versa, so this ptr is not ref'ed by Gpu.
    GrContext*                             fContext;

    friend class GrPathRendering;
    friend class gr_instanced::InstancedRendering;
    typedef SkRefCnt INHERITED;
};

#endif