1 /*
2  * Copyright 2011 Google Inc.
3  *
4  * Use of this source code is governed by a BSD-style license that can be
5  * found in the LICENSE file.
6  */
7 
8 
9 #include "GrGLGpu.h"
10 #include "GrGLStencilAttachment.h"
11 #include "GrGLTextureRenderTarget.h"
12 #include "GrGpuResourcePriv.h"
13 #include "GrPipeline.h"
14 #include "GrRenderTargetPriv.h"
15 #include "GrSurfacePriv.h"
16 #include "GrTemplates.h"
17 #include "GrTexturePriv.h"
18 #include "GrTypes.h"
19 #include "GrVertices.h"
20 #include "SkStrokeRec.h"
21 #include "SkTemplates.h"
22 
23 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
24 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
25 
26 #define SKIP_CACHE_CHECK    true
27 
28 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
29     #define CLEAR_ERROR_BEFORE_ALLOC(iface)   GrGLClearErr(iface)
30     #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL_NOERRCHECK(iface, call)
31     #define CHECK_ALLOC_ERROR(iface)          GR_GL_GET_ERROR(iface)
32 #else
33     #define CLEAR_ERROR_BEFORE_ALLOC(iface)
34     #define GL_ALLOC_CALL(iface, call)        GR_GL_CALL(iface, call)
35     #define CHECK_ALLOC_ERROR(iface)          GR_GL_NO_ERROR
36 #endif
37 
38 
39 ///////////////////////////////////////////////////////////////////////////////
40 
41 
42 static const GrGLenum gXfermodeEquation2Blend[] = {
43     // Basic OpenGL blend equations.
44     GR_GL_FUNC_ADD,
45     GR_GL_FUNC_SUBTRACT,
46     GR_GL_FUNC_REVERSE_SUBTRACT,
47 
48     // GL_KHR_blend_equation_advanced.
49     GR_GL_SCREEN,
50     GR_GL_OVERLAY,
51     GR_GL_DARKEN,
52     GR_GL_LIGHTEN,
53     GR_GL_COLORDODGE,
54     GR_GL_COLORBURN,
55     GR_GL_HARDLIGHT,
56     GR_GL_SOFTLIGHT,
57     GR_GL_DIFFERENCE,
58     GR_GL_EXCLUSION,
59     GR_GL_MULTIPLY,
60     GR_GL_HSL_HUE,
61     GR_GL_HSL_SATURATION,
62     GR_GL_HSL_COLOR,
63     GR_GL_HSL_LUMINOSITY
64 };
65 GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
66 GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
67 GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
68 GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
69 GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
70 GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
71 GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
72 GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
73 GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
74 GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
75 GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
76 GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
77 GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
78 GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
79 GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
80 GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
81 GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
82 GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
83 GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
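// Illustrative note (not itself used at this point in the file): the table above is
// designed to be indexed directly by a GrBlendEquation value, e.g. something along
// the lines of
//     GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
// and the GR_STATIC_ASSERTs verify that the enum order and the table stay in sync.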
84 
85 static const GrGLenum gXfermodeCoeff2Blend[] = {
86     GR_GL_ZERO,
87     GR_GL_ONE,
88     GR_GL_SRC_COLOR,
89     GR_GL_ONE_MINUS_SRC_COLOR,
90     GR_GL_DST_COLOR,
91     GR_GL_ONE_MINUS_DST_COLOR,
92     GR_GL_SRC_ALPHA,
93     GR_GL_ONE_MINUS_SRC_ALPHA,
94     GR_GL_DST_ALPHA,
95     GR_GL_ONE_MINUS_DST_ALPHA,
96     GR_GL_CONSTANT_COLOR,
97     GR_GL_ONE_MINUS_CONSTANT_COLOR,
98     GR_GL_CONSTANT_ALPHA,
99     GR_GL_ONE_MINUS_CONSTANT_ALPHA,
100 
101     // extended blend coeffs
102     GR_GL_SRC1_COLOR,
103     GR_GL_ONE_MINUS_SRC1_COLOR,
104     GR_GL_SRC1_ALPHA,
105     GR_GL_ONE_MINUS_SRC1_ALPHA,
106 };
107 
108 bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
109     static const bool gCoeffReferencesBlendConst[] = {
110         false,
111         false,
112         false,
113         false,
114         false,
115         false,
116         false,
117         false,
118         false,
119         false,
120         true,
121         true,
122         true,
123         true,
124 
125         // extended blend coeffs
126         false,
127         false,
128         false,
129         false,
130     };
131     return gCoeffReferencesBlendConst[coeff];
132     GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
133 
134     GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
135     GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
136     GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
137     GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
138     GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
139     GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
140     GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
141     GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
142     GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
143     GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
144     GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
145     GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
146     GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
147     GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
148 
149     GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
150     GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
151     GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
152     GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
153 
154     // the assertion for gXfermodeCoeff2Blend has to be in GrGLGpu scope
155     GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
156 }
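// Illustrative sketch of how the helper above is typically consumed when flushing
// blend state (hedged: the real call site lives elsewhere in this file and may
// differ in detail):
//
//     if (BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff)) {
//         // only then does the blend constant need to be programmed
//         GrGLfloat c[4];
//         GrColorToRGBAFloat(blendConst, c);
//         GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
//     }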
157 
158 ///////////////////////////////////////////////////////////////////////////////
159 
160 static bool gPrintStartupSpew;
161 
162 GrGLGpu::GrGLGpu(const GrGLContext& ctx, GrContext* context)
163     : GrGpu(context)
164     , fGLContext(ctx) {
165 
166     SkASSERT(ctx.isInitialized());
167     fCaps.reset(SkRef(ctx.caps()));
168 
169     fHWBoundTextureUniqueIDs.reset(this->glCaps().maxFragmentTextureUnits());
170 
171     GrGLClearErr(fGLContext.interface());
172     if (gPrintStartupSpew) {
173         const GrGLubyte* vendor;
174         const GrGLubyte* renderer;
175         const GrGLubyte* version;
176         GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
177         GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
178         GL_CALL_RET(version, GetString(GR_GL_VERSION));
179         SkDebugf("------------------------- create GrGLGpu %p --------------\n",
180                  this);
181         SkDebugf("------ VENDOR %s\n", vendor);
182         SkDebugf("------ RENDERER %s\n", renderer);
183         SkDebugf("------ VERSION %s\n",  version);
184         SkDebugf("------ EXTENSIONS\n");
185         ctx.extensions().print();
186         SkDebugf("\n");
187         SkDebugf("%s", this->glCaps().dump().c_str());
188     }
189 
190     fProgramCache = SkNEW_ARGS(ProgramCache, (this));
191 
192     SkASSERT(this->glCaps().maxVertexAttributes() >= GrGeometryProcessor::kMaxVertexAttribs);
193 
194     fLastSuccessfulStencilFmtIdx = 0;
195     fHWProgramID = 0;
196     fTempSrcFBOID = 0;
197     fTempDstFBOID = 0;
198     fStencilClearFBOID = 0;
199 
200     if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
201         fPathRendering.reset(new GrGLPathRendering(this));
202     }
203 }
204 
205 GrGLGpu::~GrGLGpu() {
206     if (0 != fHWProgramID) {
207         // detach the current program so there is no confusion on OpenGL's part
208         // that we want it to be deleted
209         SkASSERT(fHWProgramID == fCurrentProgram->programID());
210         GL_CALL(UseProgram(0));
211     }
212 
213     if (0 != fTempSrcFBOID) {
214         GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
215     }
216     if (0 != fTempDstFBOID) {
217         GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
218     }
219     if (0 != fStencilClearFBOID) {
220         GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
221     }
222 
223     delete fProgramCache;
224 }
225 
226 void GrGLGpu::contextAbandoned() {
227     INHERITED::contextAbandoned();
228     fProgramCache->abandon();
229     fHWProgramID = 0;
230     fTempSrcFBOID = 0;
231     fTempDstFBOID = 0;
232     fStencilClearFBOID = 0;
233     if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
234         this->glPathRendering()->abandonGpuResources();
235     }
236 }
237 
238 ///////////////////////////////////////////////////////////////////////////////
239 GrPixelConfig GrGLGpu::preferredReadPixelsConfig(GrPixelConfig readConfig,
240                                                  GrPixelConfig surfaceConfig) const {
241     if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == readConfig) {
242         return kBGRA_8888_GrPixelConfig;
243     } else if (this->glContext().isMesa() &&
244                GrBytesPerPixel(readConfig) == 4 &&
245                GrPixelConfigSwapRAndB(readConfig) == surfaceConfig) {
246         // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice versa.
247         // Perhaps this should be guarded by some compile-time or runtime check.
248         return surfaceConfig;
249     } else if (readConfig == kBGRA_8888_GrPixelConfig
250             && !this->glCaps().readPixelsSupported(
251                 this->glInterface(),
252                 GR_GL_BGRA,
253                 GR_GL_UNSIGNED_BYTE,
254                 surfaceConfig
255             )) {
256         return kRGBA_8888_GrPixelConfig;
257     } else {
258         return readConfig;
259     }
260 }
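// Worked example (assumed configs, for illustration): on a Mesa driver, asking to read
// kRGBA_8888 pixels back from a kBGRA_8888 surface returns kBGRA_8888 here, so the
// caller performs the R/B swizzle on the CPU rather than sending Mesa down its slow
// swapped-readback path.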
261 
262 GrPixelConfig GrGLGpu::preferredWritePixelsConfig(GrPixelConfig writeConfig,
263                                                   GrPixelConfig surfaceConfig) const {
264     if (GR_GL_RGBA_8888_PIXEL_OPS_SLOW && kRGBA_8888_GrPixelConfig == writeConfig) {
265         return kBGRA_8888_GrPixelConfig;
266     } else {
267         return writeConfig;
268     }
269 }
270 
271 bool GrGLGpu::canWriteTexturePixels(const GrTexture* texture, GrPixelConfig srcConfig) const {
272     if (kIndex_8_GrPixelConfig == srcConfig || kIndex_8_GrPixelConfig == texture->config()) {
273         return false;
274     }
275     if (srcConfig != texture->config() && kGLES_GrGLStandard == this->glStandard()) {
276         // In general ES2 requires the internal format of the texture and the format of the src
277         // pixels to match. However, it may or may not be possible to upload BGRA data to an RGBA
278         // texture. It depends upon which extension added BGRA. The Apple extension allows it
279         // (BGRA's internal format is RGBA) while the EXT extension does not (BGRA is its own
280         // internal format).
281         if (this->glCaps().isConfigTexturable(kBGRA_8888_GrPixelConfig) &&
282             !this->glCaps().bgraIsInternalFormat() &&
283             kBGRA_8888_GrPixelConfig == srcConfig &&
284             kRGBA_8888_GrPixelConfig == texture->config()) {
285             return true;
286         } else {
287             return false;
288         }
289     } else {
290         return true;
291     }
292 }
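// For illustration: on an ES2 context exposing the Apple BGRA extension (where BGRA
// shares the RGBA internal format), writing kBGRA_8888 pixels into a kRGBA_8888
// texture is allowed; with only the EXT flavor of the extension it is not.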
293 
294 bool GrGLGpu::fullReadPixelsIsFasterThanPartial() const {
295     return SkToBool(GR_GL_FULL_READPIXELS_FASTER_THAN_PARTIAL);
296 }
297 
298 void GrGLGpu::onResetContext(uint32_t resetBits) {
299     // we don't use the depth (z) buffer at all
300     if (resetBits & kMisc_GrGLBackendState) {
301         GL_CALL(Disable(GR_GL_DEPTH_TEST));
302         GL_CALL(DepthMask(GR_GL_FALSE));
303 
304         fHWDrawFace = GrPipelineBuilder::kInvalid_DrawFace;
305         fHWDitherEnabled = kUnknown_TriState;
306 
307         if (kGL_GrGLStandard == this->glStandard()) {
308             // Desktop-only state that we never change
309             if (!this->glCaps().isCoreProfile()) {
310                 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
311                 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
312                 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
313                 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
314                 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
315                 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
316             }
317             // The Windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
318             // core profile. This seems like a bug since the core spec removes any mention of
319             // GL_ARB_imaging.
320             if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
321                 GL_CALL(Disable(GR_GL_COLOR_TABLE));
322             }
323             GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
324             // Since ES doesn't support glPointSize at all we always use the VS to
325             // set the point size
326             GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
327 
328             // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
329             // currently part of our gl interface. There are probably others as
330             // well.
331         }
332 
333         if (kGLES_GrGLStandard == this->glStandard() &&
334                 fGLContext.hasExtension("GL_ARM_shader_framebuffer_fetch")) {
335             // The ARM extension requires specifically enabling MSAA fetching per sample.
336             // On some devices this may have a perf hit. Also, multiple render targets are disabled.
337             GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
338         }
339         fHWWriteToColor = kUnknown_TriState;
340         // we only ever use lines in hairline mode
341         GL_CALL(LineWidth(1));
342     }
343 
344     if (resetBits & kMSAAEnable_GrGLBackendState) {
345         fMSAAEnabled = kUnknown_TriState;
346     }
347 
348     fHWActiveTextureUnitIdx = -1; // invalid
349 
350     if (resetBits & kTextureBinding_GrGLBackendState) {
351         for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
352             fHWBoundTextureUniqueIDs[s] = SK_InvalidUniqueID;
353         }
354     }
355 
356     if (resetBits & kBlend_GrGLBackendState) {
357         fHWBlendState.invalidate();
358     }
359 
360     if (resetBits & kView_GrGLBackendState) {
361         fHWScissorSettings.invalidate();
362         fHWViewport.invalidate();
363     }
364 
365     if (resetBits & kStencil_GrGLBackendState) {
366         fHWStencilSettings.invalidate();
367         fHWStencilTestEnabled = kUnknown_TriState;
368     }
369 
370     // Vertex
371     if (resetBits & kVertex_GrGLBackendState) {
372         fHWGeometryState.invalidate();
373     }
374 
375     if (resetBits & kRenderTarget_GrGLBackendState) {
376         fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
377     }
378 
379     if (resetBits & kPathRendering_GrGLBackendState) {
380         if (this->caps()->shaderCaps()->pathRenderingSupport()) {
381             this->glPathRendering()->resetContext();
382         }
383     }
384 
385     // we assume these values
386     if (resetBits & kPixelStore_GrGLBackendState) {
387         if (this->glCaps().unpackRowLengthSupport()) {
388             GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
389         }
390         if (this->glCaps().packRowLengthSupport()) {
391             GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
392         }
393         if (this->glCaps().unpackFlipYSupport()) {
394             GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
395         }
396         if (this->glCaps().packFlipYSupport()) {
397             GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
398         }
399     }
400 
401     if (resetBits & kProgram_GrGLBackendState) {
402         fHWProgramID = 0;
403     }
404 }
405 
406 static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
407     // By default, GrRenderTargets are in GL's normal orientation so that they
408     // can be drawn to by the outside world without the client having
409     // to render upside down.
410     if (kDefault_GrSurfaceOrigin == origin) {
411         return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
412     } else {
413         return origin;
414     }
415 }
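// For illustration: resolve_origin(kDefault_GrSurfaceOrigin, true) yields
// kBottomLeft_GrSurfaceOrigin (GL's normal orientation for render targets), while any
// explicit origin is returned unchanged.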
416 
417 GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc) {
418     if (!this->configToGLFormats(desc.fConfig, false, NULL, NULL, NULL)) {
419         return NULL;
420     }
421 
422     if (0 == desc.fTextureHandle) {
423         return NULL;
424     }
425 
426     int maxSize = this->caps()->maxTextureSize();
427     if (desc.fWidth > maxSize || desc.fHeight > maxSize) {
428         return NULL;
429     }
430 
431     GrGLTexture::IDDesc idDesc;
432     GrSurfaceDesc surfDesc;
433 
434     idDesc.fTextureID = static_cast<GrGLuint>(desc.fTextureHandle);
435     idDesc.fLifeCycle = GrGpuResource::kWrapped_LifeCycle;
436 
437     // next line relies on GrBackendTextureDesc's flags matching GrTexture's
438     surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
439     surfDesc.fWidth = desc.fWidth;
440     surfDesc.fHeight = desc.fHeight;
441     surfDesc.fConfig = desc.fConfig;
442     surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
443     bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
444     // FIXME:  this should be calling resolve_origin(), but Chrome code is currently
445     // assuming the old behaviour, which is that backend textures are always
446     // BottomLeft, even for non-RT's.  Once Chrome is fixed, change this to:
447     // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
448     if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
449         surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
450     } else {
451         surfDesc.fOrigin = desc.fOrigin;
452     }
453 
454     GrGLTexture* texture = NULL;
455     if (renderTarget) {
456         GrGLRenderTarget::IDDesc rtIDDesc;
457         if (!this->createRenderTargetObjects(surfDesc, GrGpuResource::kUncached_LifeCycle,
458                                              idDesc.fTextureID, &rtIDDesc)) {
459             return NULL;
460         }
461         texture = SkNEW_ARGS(GrGLTextureRenderTarget, (this, surfDesc, idDesc, rtIDDesc));
462     } else {
463         texture = SkNEW_ARGS(GrGLTexture, (this, surfDesc, idDesc));
464     }
465     if (NULL == texture) {
466         return NULL;
467     }
468 
469     return texture;
470 }
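// A minimal usage sketch for the wrapping path above, kept inside #if 0 because it is
// illustrative only: the texture id, the dimensions, and the 'gpu' pointer are
// assumptions, not anything exercised by this file.
#if 0
    GrBackendTextureDesc backendDesc;
    backendDesc.fFlags = kRenderTarget_GrBackendTextureFlag;
    backendDesc.fOrigin = kDefault_GrSurfaceOrigin;  // resolves to bottom-left (see above)
    backendDesc.fWidth = 256;
    backendDesc.fHeight = 256;
    backendDesc.fConfig = kRGBA_8888_GrPixelConfig;
    backendDesc.fSampleCnt = 0;
    backendDesc.fTextureHandle = 12345;              // an already-created GL texture id
    SkAutoTUnref<GrTexture> wrapped(gpu->wrapBackendTexture(backendDesc));
#endif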
471 
472 GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc) {
473     GrGLRenderTarget::IDDesc idDesc;
474     idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
475     idDesc.fMSColorRenderbufferID = 0;
476     idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
477     idDesc.fLifeCycle = GrGpuResource::kWrapped_LifeCycle;
478 
479     GrSurfaceDesc desc;
480     desc.fConfig = wrapDesc.fConfig;
481     desc.fFlags = kCheckAllocation_GrSurfaceFlag;
482     desc.fWidth = wrapDesc.fWidth;
483     desc.fHeight = wrapDesc.fHeight;
484     desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
485     desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true);
486 
487     GrRenderTarget* tgt = SkNEW_ARGS(GrGLRenderTarget, (this, desc, idDesc));
488     if (wrapDesc.fStencilBits) {
489         GrGLStencilAttachment::IDDesc sbDesc;
490         GrGLStencilAttachment::Format format;
491         format.fInternalFormat = GrGLStencilAttachment::kUnknownInternalFormat;
492         format.fPacked = false;
493         format.fStencilBits = wrapDesc.fStencilBits;
494         format.fTotalBits = wrapDesc.fStencilBits;
495         GrGLStencilAttachment* sb = SkNEW_ARGS(GrGLStencilAttachment,
496                                            (this,
497                                             sbDesc,
498                                             desc.fWidth,
499                                             desc.fHeight,
500                                             desc.fSampleCnt,
501                                             format));
502         tgt->renderTargetPriv().didAttachStencilAttachment(sb);
503         sb->unref();
504     }
505     return tgt;
506 }
507 
508 ////////////////////////////////////////////////////////////////////////////////
509 
510 bool GrGLGpu::onWriteTexturePixels(GrTexture* texture,
511                                    int left, int top, int width, int height,
512                                    GrPixelConfig config, const void* buffer,
513                                    size_t rowBytes) {
514     if (NULL == buffer) {
515         return false;
516     }
517     GrGLTexture* glTex = static_cast<GrGLTexture*>(texture);
518 
519     this->setScratchTextureUnit();
520     GL_CALL(BindTexture(GR_GL_TEXTURE_2D, glTex->textureID()));
521 
522     bool success = false;
523     if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
524         // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
525         SkASSERT(config == glTex->desc().fConfig);
526         success = this->uploadCompressedTexData(glTex->desc(), buffer, false, left, top, width,
527                                                 height);
528     } else {
529         success = this->uploadTexData(glTex->desc(), false, left, top, width, height, config,
530                                       buffer, rowBytes);
531     }
532 
533     if (success) {
534         texture->texturePriv().dirtyMipMaps(true);
535         return true;
536     }
537 
538     return false;
539 }
540 
541 static bool adjust_pixel_ops_params(int surfaceWidth,
542                                     int surfaceHeight,
543                                     size_t bpp,
544                                     int* left, int* top, int* width, int* height,
545                                     const void** data,
546                                     size_t* rowBytes) {
547     if (!*rowBytes) {
548         *rowBytes = *width * bpp;
549     }
550 
551     SkIRect subRect = SkIRect::MakeXYWH(*left, *top, *width, *height);
552     SkIRect bounds = SkIRect::MakeWH(surfaceWidth, surfaceHeight);
553 
554     if (!subRect.intersect(bounds)) {
555         return false;
556     }
557     *data = reinterpret_cast<const void*>(reinterpret_cast<intptr_t>(*data) +
558           (subRect.fTop - *top) * *rowBytes + (subRect.fLeft - *left) * bpp);
559 
560     *left = subRect.fLeft;
561     *top = subRect.fTop;
562     *width = subRect.width();
563     *height = subRect.height();
564     return true;
565 }
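// Worked example (illustrative numbers): for a 64x64 surface with bpp = 4, a requested
// rect of left = -8, top = -8, width = 32, height = 32 and rowBytes = 0 first gets
// rowBytes = 32 * 4 = 128, is clipped to (0, 0, 24, 24), and *data is advanced by
// 8 * 128 + 8 * 4 = 1056 bytes so it points at the first pixel inside the surface.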
566 
567 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
568                                          const GrGLInterface* interface) {
569     if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
570         return GR_GL_GET_ERROR(interface);
571     } else {
572         return CHECK_ALLOC_ERROR(interface);
573     }
574 }
575 
576 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
577                             bool isNewTexture,
578                             int left, int top, int width, int height,
579                             GrPixelConfig dataConfig,
580                             const void* data,
581                             size_t rowBytes) {
582     SkASSERT(data || isNewTexture);
583 
584     // If we're uploading compressed data then we should be using uploadCompressedTexData
585     SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
586 
587     size_t bpp = GrBytesPerPixel(dataConfig);
588     if (!adjust_pixel_ops_params(desc.fWidth, desc.fHeight, bpp, &left, &top,
589                                  &width, &height, &data, &rowBytes)) {
590         return false;
591     }
592     size_t trimRowBytes = width * bpp;
593 
594     // in case we need a temporary, trimmed copy of the src pixels
595     GrAutoMalloc<128 * 128> tempStorage;
596 
597     // We currently lazily create MIPMAPs when we see a draw with
598     // GrTextureParams::kMipMap_FilterMode. Using texture storage requires that the
599     // MIP levels are all created when the texture is created. So for now we don't use
600     // texture storage.
601     bool useTexStorage = false &&
602                          isNewTexture &&
603                          this->glCaps().texStorageSupport();
604 
605     if (useTexStorage && kGL_GrGLStandard == this->glStandard()) {
606         // 565 is not a sized internal format on desktop GL. So on desktop with
607         // 565 we always use an unsized internal format to let the system pick
608         // the best sized format to convert the 565 data to. Since TexStorage
609         // only allows sized internal formats we will instead use TexImage2D.
610         useTexStorage = desc.fConfig != kRGB_565_GrPixelConfig;
611     }
612 
613     GrGLenum internalFormat = 0x0; // suppress warning
614     GrGLenum externalFormat = 0x0; // suppress warning
615     GrGLenum externalType = 0x0;   // suppress warning
616 
617     // glTexStorage requires sized internal formats on both desktop and ES. ES2 requires an unsized
618     // format for glTexImage, unlike ES3 and desktop.
619     bool useSizedFormat = useTexStorage;
620     if (kGL_GrGLStandard == this->glStandard() ||
621         (this->glVersion() >= GR_GL_VER(3, 0) &&
622          // ES3 only works with the sized BGRA8 format if GL_APPLE_texture_format_BGRA8888 is enabled
623          (kBGRA_8888_GrPixelConfig != dataConfig || !this->glCaps().bgraIsInternalFormat())))  {
624         useSizedFormat = true;
625     }
626 
627     if (!this->configToGLFormats(dataConfig, useSizedFormat, &internalFormat,
628                                  &externalFormat, &externalType)) {
629         return false;
630     }
631 
632     /*
633      *  check whether to allocate a temporary buffer for flipping y or
634      *  because our srcData has extra bytes past each row. If so, we need
635      *  to trim those off here, since GL ES may not let us specify
636      *  GL_UNPACK_ROW_LENGTH.
637      */
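    /*
     *  For illustration (assumed numbers): uploading a 128-pixel-wide RGBA_8888 image
     *  whose rows are padded to rowBytes = 520 gives trimRowBytes = 512. If
     *  GL_UNPACK_ROW_LENGTH is supported we simply set it to 520 / 4 = 130; otherwise
     *  the first 512 bytes of each row are copied into tempStorage below.
     */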
638     bool restoreGLRowLength = false;
639     bool swFlipY = false;
640     bool glFlipY = false;
641     if (data) {
642         if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
643             if (this->glCaps().unpackFlipYSupport()) {
644                 glFlipY = true;
645             } else {
646                 swFlipY = true;
647             }
648         }
649         if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
650             // can't use this for flipping, only non-neg values allowed. :(
651             if (rowBytes != trimRowBytes) {
652                 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
653                 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
654                 restoreGLRowLength = true;
655             }
656         } else {
657             if (trimRowBytes != rowBytes || swFlipY) {
658                 // copy data into our new storage, skipping the trailing bytes
659                 size_t trimSize = height * trimRowBytes;
660                 const char* src = (const char*)data;
661                 if (swFlipY) {
662                     src += (height - 1) * rowBytes;
663                 }
664                 char* dst = (char*)tempStorage.reset(trimSize);
665                 for (int y = 0; y < height; y++) {
666                     memcpy(dst, src, trimRowBytes);
667                     if (swFlipY) {
668                         src -= rowBytes;
669                     } else {
670                         src += rowBytes;
671                     }
672                     dst += trimRowBytes;
673                 }
674                 // now point data to our copied version
675                 data = tempStorage.get();
676             }
677         }
678         if (glFlipY) {
679             GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
680         }
681         GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT,
682               static_cast<GrGLint>(GrUnpackAlignment(dataConfig))));
683     }
684     bool succeeded = true;
685     if (isNewTexture &&
686         0 == left && 0 == top &&
687         desc.fWidth == width && desc.fHeight == height) {
688         CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
689         if (useTexStorage) {
690             // We never resize or change formats of textures.
691             GL_ALLOC_CALL(this->glInterface(),
692                           TexStorage2D(GR_GL_TEXTURE_2D,
693                                        1, // levels
694                                        internalFormat,
695                                        desc.fWidth, desc.fHeight));
696         } else {
697             GL_ALLOC_CALL(this->glInterface(),
698                           TexImage2D(GR_GL_TEXTURE_2D,
699                                      0, // level
700                                      internalFormat,
701                                      desc.fWidth, desc.fHeight,
702                                      0, // border
703                                      externalFormat, externalType,
704                                      data));
705         }
706         GrGLenum error = check_alloc_error(desc, this->glInterface());
707         if (error != GR_GL_NO_ERROR) {
708             succeeded = false;
709         } else {
710             // if we have data and we used TexStorage to create the texture, we
711             // now upload with TexSubImage.
712             if (data && useTexStorage) {
713                 GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
714                                       0, // level
715                                       left, top,
716                                       width, height,
717                                       externalFormat, externalType,
718                                       data));
719             }
720         }
721     } else {
722         if (swFlipY || glFlipY) {
723             top = desc.fHeight - (top + height);
724         }
725         GL_CALL(TexSubImage2D(GR_GL_TEXTURE_2D,
726                               0, // level
727                               left, top,
728                               width, height,
729                               externalFormat, externalType, data));
730     }
731 
732     if (restoreGLRowLength) {
733         SkASSERT(this->glCaps().unpackRowLengthSupport());
734         GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
735     }
736     if (glFlipY) {
737         GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
738     }
739     return succeeded;
740 }
741 
742 // TODO: This function uses some wonky semantics, e.g. if width == -1 then width
743 // is set to desc.fWidth. A better way to do it might be to create a
744 // CompressedTexData struct that takes a desc/ptr and figures out the proper
745 // upload semantics. Then callers can construct that struct however they see fit
746 // if they want to go against the "standard" way to do it.
747 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
748                                       const void* data,
749                                       bool isNewTexture,
750                                       int left, int top, int width, int height) {
751     SkASSERT(data || isNewTexture);
752 
753     // No support for software flip y, yet...
754     SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
755 
756     if (-1 == width) {
757         width = desc.fWidth;
758     }
759 #ifdef SK_DEBUG
760     else {
761         SkASSERT(width <= desc.fWidth);
762     }
763 #endif
764 
765     if (-1 == height) {
766         height = desc.fHeight;
767     }
768 #ifdef SK_DEBUG
769     else {
770         SkASSERT(height <= desc.fHeight);
771     }
772 #endif
773 
774     // Make sure that the width and height that we pass to OpenGL
775     // are multiples of the block size.
776     size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);
777 
778     // We only need the internal format for compressed 2D textures.
779     GrGLenum internalFormat = 0;
780     if (!this->configToGLFormats(desc.fConfig, false, &internalFormat, NULL, NULL)) {
781         return false;
782     }
783 
784     if (isNewTexture) {
785         CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
786         GL_ALLOC_CALL(this->glInterface(),
787                       CompressedTexImage2D(GR_GL_TEXTURE_2D,
788                                            0, // level
789                                            internalFormat,
790                                            width, height,
791                                            0, // border
792                                            SkToInt(dataSize),
793                                            data));
794         GrGLenum error = check_alloc_error(desc, this->glInterface());
795         if (error != GR_GL_NO_ERROR) {
796             return false;
797         }
798     } else {
799         // Paletted textures can't be updated.
800         if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
801             return false;
802         }
803         GL_CALL(CompressedTexSubImage2D(GR_GL_TEXTURE_2D,
804                                         0, // level
805                                         left, top,
806                                         width, height,
807                                         internalFormat,
808                                         SkToInt(dataSize),
809                                         data));
810     }
811 
812     return true;
813 }
814 
815 static bool renderbuffer_storage_msaa(GrGLContext& ctx,
816                                       int sampleCount,
817                                       GrGLenum format,
818                                       int width, int height) {
819     CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
820     SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
821     switch (ctx.caps()->msFBOType()) {
822         case GrGLCaps::kDesktop_ARB_MSFBOType:
823         case GrGLCaps::kDesktop_EXT_MSFBOType:
824         case GrGLCaps::kES_3_0_MSFBOType:
825             GL_ALLOC_CALL(ctx.interface(),
826                             RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
827                                                             sampleCount,
828                                                             format,
829                                                             width, height));
830             break;
831         case GrGLCaps::kES_Apple_MSFBOType:
832             GL_ALLOC_CALL(ctx.interface(),
833                             RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
834                                                                     sampleCount,
835                                                                     format,
836                                                                     width, height));
837             break;
838         case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
839         case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
840             GL_ALLOC_CALL(ctx.interface(),
841                             RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
842                                                                 sampleCount,
843                                                                 format,
844                                                                 width, height));
845             break;
846         case GrGLCaps::kNone_MSFBOType:
847             SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
848             break;
849     }
850     return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
851 }
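// For illustration: renderbuffer_storage_msaa(fGLContext, 4, GR_GL_RGBA8, 256, 256)
// allocates 4-sample RGBA8 storage for the currently bound renderbuffer and returns
// false only if the allocation raised a GL error (when allocation error checking is
// compiled in).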
852 
853 bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
854                                         GrGpuResource::LifeCycle lifeCycle,
855                                         GrGLuint texID,
856                                         GrGLRenderTarget::IDDesc* idDesc) {
857     idDesc->fMSColorRenderbufferID = 0;
858     idDesc->fRTFBOID = 0;
859     idDesc->fTexFBOID = 0;
860     idDesc->fLifeCycle = lifeCycle;
861 
862     GrGLenum status;
863 
864     GrGLenum msColorFormat = 0; // suppress warning
865 
866     if (desc.fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
867         goto FAILED;
868     }
869 
870     GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
871     if (!idDesc->fTexFBOID) {
872         goto FAILED;
873     }
874 
875 
876     // If we are using multisampling we will create two FBOs. We render to one and then resolve to
877     // the texture bound to the other. The exception is the IMG multisample extension. With this
878     // extension the texture is multisampled when rendered to and then auto-resolves when it is
879     // rendered from.
880     if (desc.fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
881         GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
882         GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
883         if (!idDesc->fRTFBOID ||
884             !idDesc->fMSColorRenderbufferID ||
885             !this->configToGLFormats(desc.fConfig,
886                                      // ES2 and ES3 require sized internal formats for rb storage.
887                                      kGLES_GrGLStandard == this->glStandard(),
888                                      &msColorFormat,
889                                      NULL,
890                                      NULL)) {
891             goto FAILED;
892         }
893     } else {
894         idDesc->fRTFBOID = idDesc->fTexFBOID;
895     }
896 
897     // below here we may bind the FBO
898     fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
899     if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
900         SkASSERT(desc.fSampleCnt > 0);
901         GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
902         if (!renderbuffer_storage_msaa(fGLContext,
903                                        desc.fSampleCnt,
904                                        msColorFormat,
905                                        desc.fWidth, desc.fHeight)) {
906             goto FAILED;
907         }
908         fStats.incRenderTargetBinds();
909         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID));
910         GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
911                                       GR_GL_COLOR_ATTACHMENT0,
912                                       GR_GL_RENDERBUFFER,
913                                       idDesc->fMSColorRenderbufferID));
914         if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
915             !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
916             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
917             if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
918                 goto FAILED;
919             }
920             fGLContext.caps()->markConfigAsValidColorAttachment(desc.fConfig);
921         }
922     }
923     fStats.incRenderTargetBinds();
924     GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID));
925 
926     if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 0) {
927         GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
928                                                 GR_GL_COLOR_ATTACHMENT0,
929                                                 GR_GL_TEXTURE_2D,
930                                                 texID, 0, desc.fSampleCnt));
931     } else {
932         GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
933                                      GR_GL_COLOR_ATTACHMENT0,
934                                      GR_GL_TEXTURE_2D,
935                                      texID, 0));
936     }
937     if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
938         !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
939         GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
940         if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
941             goto FAILED;
942         }
943         fGLContext.caps()->markConfigAsValidColorAttachment(desc.fConfig);
944     }
945 
946     return true;
947 
948 FAILED:
949     if (idDesc->fMSColorRenderbufferID) {
950         GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
951     }
952     if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
953         GL_CALL(DeleteFramebuffers(1, &idDesc->fRTFBOID));
954     }
955     if (idDesc->fTexFBOID) {
956         GL_CALL(DeleteFramebuffers(1, &idDesc->fTexFBOID));
957     }
958     return false;
959 }
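// For illustration: with desc.fSampleCnt = 4 on the renderbuffer-based MSAA path the
// result is two FBOs -- fRTFBOID with an MSAA color renderbuffer attached and
// fTexFBOID with texID attached -- and rendering later resolves the former into the
// latter. With zero samples, or with an auto-resolving extension, the two IDs match.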
960 
961 // good to set a breakpoint here to know when createTexture fails
962 static GrTexture* return_null_texture() {
963 //    SkDEBUGFAIL("null texture");
964     return NULL;
965 }
966 
967 #if 0 && defined(SK_DEBUG)
968 static size_t as_size_t(int x) {
969     return x;
970 }
971 #endif
972 
973 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
974                                     GrGpuResource::LifeCycle lifeCycle,
975                                     const void* srcData, size_t rowBytes) {
976     // We fail if MSAA was requested and is not available.
977     if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
978         //SkDebugf("MSAA RT requested but not supported on this platform.");
979         return return_null_texture();
980     }
981 
982     bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
983 
984     GrGLTexture::IDDesc idDesc;
985     GL_CALL(GenTextures(1, &idDesc.fTextureID));
986     idDesc.fLifeCycle = lifeCycle;
987 
988     if (!idDesc.fTextureID) {
989         return return_null_texture();
990     }
991 
992     this->setScratchTextureUnit();
993     GL_CALL(BindTexture(GR_GL_TEXTURE_2D, idDesc.fTextureID));
994 
995     if (renderTarget && this->glCaps().textureUsageSupport()) {
996         // provides a hint about how this texture will be used
997         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
998                               GR_GL_TEXTURE_USAGE,
999                               GR_GL_FRAMEBUFFER_ATTACHMENT));
1000     }
1001 
1002     // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1003     // drivers have a bug where an FBO won't be complete if it includes a
1004     // texture that is not mipmap complete (considering the filter in use).
1005     GrGLTexture::TexParams initialTexParams;
1006     // we only set a subset here so invalidate first
1007     initialTexParams.invalidate();
1008     initialTexParams.fMinFilter = GR_GL_NEAREST;
1009     initialTexParams.fMagFilter = GR_GL_NEAREST;
1010     initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
1011     initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
1012     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1013                           GR_GL_TEXTURE_MAG_FILTER,
1014                           initialTexParams.fMagFilter));
1015     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1016                           GR_GL_TEXTURE_MIN_FILTER,
1017                           initialTexParams.fMinFilter));
1018     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1019                           GR_GL_TEXTURE_WRAP_S,
1020                           initialTexParams.fWrapS));
1021     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1022                           GR_GL_TEXTURE_WRAP_T,
1023                           initialTexParams.fWrapT));
1024     if (!this->uploadTexData(desc, true, 0, 0,
1025                              desc.fWidth, desc.fHeight,
1026                              desc.fConfig, srcData, rowBytes)) {
1027         GL_CALL(DeleteTextures(1, &idDesc.fTextureID));
1028         return return_null_texture();
1029     }
1030 
1031     GrGLTexture* tex;
1032     if (renderTarget) {
1033         // unbind the texture from the texture unit before binding it to the frame buffer
1034         GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1035         GrGLRenderTarget::IDDesc rtIDDesc;
1036 
1037         if (!this->createRenderTargetObjects(desc, lifeCycle, idDesc.fTextureID, &rtIDDesc)) {
1038             GL_CALL(DeleteTextures(1, &idDesc.fTextureID));
1039             return return_null_texture();
1040         }
1041         tex = SkNEW_ARGS(GrGLTextureRenderTarget, (this, desc, idDesc, rtIDDesc));
1042     } else {
1043         tex = SkNEW_ARGS(GrGLTexture, (this, desc, idDesc));
1044     }
1045     tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1046 #ifdef TRACE_TEXTURE_CREATION
1047     SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
1048              idDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
1049 #endif
1050     return tex;
1051 }
1052 
1053 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
1054                                               GrGpuResource::LifeCycle lifeCycle,
1055                                               const void* srcData) {
1056     // Make sure that we're not flipping Y.
1057     if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
1058         return return_null_texture();
1059     }
1060 
1061     GrGLTexture::IDDesc idDesc;
1062     GL_CALL(GenTextures(1, &idDesc.fTextureID));
1063     idDesc.fLifeCycle = lifeCycle;
1064 
1065     if (!idDesc.fTextureID) {
1066         return return_null_texture();
1067     }
1068 
1069     this->setScratchTextureUnit();
1070     GL_CALL(BindTexture(GR_GL_TEXTURE_2D, idDesc.fTextureID));
1071 
1072     // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1073     // drivers have a bug where an FBO won't be complete if it includes a
1074     // texture that is not mipmap complete (considering the filter in use).
1075     GrGLTexture::TexParams initialTexParams;
1076     // we only set a subset here so invalidate first
1077     initialTexParams.invalidate();
1078     initialTexParams.fMinFilter = GR_GL_NEAREST;
1079     initialTexParams.fMagFilter = GR_GL_NEAREST;
1080     initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
1081     initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
1082     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1083                           GR_GL_TEXTURE_MAG_FILTER,
1084                           initialTexParams.fMagFilter));
1085     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1086                           GR_GL_TEXTURE_MIN_FILTER,
1087                           initialTexParams.fMinFilter));
1088     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1089                           GR_GL_TEXTURE_WRAP_S,
1090                           initialTexParams.fWrapS));
1091     GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1092                           GR_GL_TEXTURE_WRAP_T,
1093                           initialTexParams.fWrapT));
1094 
1095     if (!this->uploadCompressedTexData(desc, srcData)) {
1096         GL_CALL(DeleteTextures(1, &idDesc.fTextureID));
1097         return return_null_texture();
1098     }
1099 
1100     GrGLTexture* tex;
1101     tex = SkNEW_ARGS(GrGLTexture, (this, desc, idDesc));
1102     tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1103 #ifdef TRACE_TEXTURE_CREATION
1104     SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
1105              idDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
1106 #endif
1107     return tex;
1108 }
1109 
1110 namespace {
1111 
1112 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
1113 
1114 void inline get_stencil_rb_sizes(const GrGLInterface* gl,
1115                                  GrGLStencilAttachment::Format* format) {
1116 
1117     // we shouldn't ever know one size and not the other
1118     SkASSERT((kUnknownBitCount == format->fStencilBits) ==
1119              (kUnknownBitCount == format->fTotalBits));
1120     if (kUnknownBitCount == format->fStencilBits) {
1121         GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1122                                          GR_GL_RENDERBUFFER_STENCIL_SIZE,
1123                                          (GrGLint*)&format->fStencilBits);
1124         if (format->fPacked) {
1125             GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1126                                              GR_GL_RENDERBUFFER_DEPTH_SIZE,
1127                                              (GrGLint*)&format->fTotalBits);
1128             format->fTotalBits += format->fStencilBits;
1129         } else {
1130             format->fTotalBits = format->fStencilBits;
1131         }
1132     }
1133 }
1134 }
1135 
1136 bool GrGLGpu::createStencilAttachmentForRenderTarget(GrRenderTarget* rt, int width, int height) {
1137     // All internally created RTs are also textures. We don't create
1138     // SBs for a client's standalone RT (that is, an RT that isn't also a texture).
1139     SkASSERT(rt->asTexture());
1140     SkASSERT(width >= rt->width());
1141     SkASSERT(height >= rt->height());
1142 
1143     int samples = rt->numSamples();
1144     GrGLStencilAttachment::IDDesc sbDesc;
1145 
1146     int stencilFmtCnt = this->glCaps().stencilFormats().count();
1147     for (int i = 0; i < stencilFmtCnt; ++i) {
1148         if (!sbDesc.fRenderbufferID) {
1149             GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
1150         }
1151         if (!sbDesc.fRenderbufferID) {
1152             return false;
1153         }
1154         GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
1155         // we start with the last stencil format that succeeded in hopes
1156         // that we won't go through this loop more than once after the
1157         // first (painful) stencil creation.
1158         int sIdx = (i + fLastSuccessfulStencilFmtIdx) % stencilFmtCnt;
1159         const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
1160         CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1161         // we do this "if" so that we don't call the multisample
1162         // version on a GL that doesn't have an MSAA extension.
1163         bool created;
1164         if (samples > 0) {
1165             created = renderbuffer_storage_msaa(fGLContext,
1166                                                 samples,
1167                                                 sFmt.fInternalFormat,
1168                                                 width, height);
1169         } else {
1170             GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
1171                                                                    sFmt.fInternalFormat,
1172                                                                    width, height));
1173             created = (GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
1174         }
1175         if (created) {
1176             fStats.incStencilAttachmentCreates();
1177             // After sized formats we attempt an unsized format and take
1178             // whatever sizes GL gives us. In that case we query for the size.
1179             GrGLStencilAttachment::Format format = sFmt;
1180             get_stencil_rb_sizes(this->glInterface(), &format);
1181             SkAutoTUnref<GrGLStencilAttachment> sb(SkNEW_ARGS(GrGLStencilAttachment,
1182                                                   (this, sbDesc, width, height, samples, format)));
1183             if (this->attachStencilAttachmentToRenderTarget(sb, rt)) {
1184                 fLastSuccessfulStencilFmtIdx = sIdx;
1185                 rt->renderTargetPriv().didAttachStencilAttachment(sb);
1186 // This workaround is currently breaking on the Windows 7 HD2000 bot when we bind a color buffer
1187 #if 0
1188                 // Clear the stencil buffer. We use a special purpose FBO for this so that the
1189                 // entire stencil buffer is cleared, even if it is attached to an FBO with a
1190                 // smaller color target.
1191                 if (0 == fStencilClearFBOID) {
1192                     GL_CALL(GenFramebuffers(1, &fStencilClearFBOID));
1193                 }
1194 
1195                 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fStencilClearFBOID));
1196                 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1197                 fStats.incRenderTargetBinds();
1198                 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1199                                                 GR_GL_STENCIL_ATTACHMENT,
1200                                                 GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
1201                 if (sFmt.fPacked) {
1202                     GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1203                                                     GR_GL_DEPTH_ATTACHMENT,
1204                                                     GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
1205                 }
1206 
1207                 GL_CALL(ClearStencil(0));
1208                 // Many GL implementations seem to have trouble with clearing an FBO with only
1209                 // a stencil buffer.
1210                 GrGLuint tempRB;
1211                 GL_CALL(GenRenderbuffers(1, &tempRB));
1212                 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, tempRB));
1213                 if (samples > 0) {
1214                     renderbuffer_storage_msaa(fGLContext, samples, GR_GL_RGBA8, width, height);
1215                 } else {
1216                     GL_CALL(RenderbufferStorage(GR_GL_RENDERBUFFER, GR_GL_RGBA8, width, height));
1217                 }
1218                 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1219                                                 GR_GL_COLOR_ATTACHMENT0,
1220                                                 GR_GL_RENDERBUFFER, tempRB));
1221 
1222                 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
1223 
1224                 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1225                                                 GR_GL_COLOR_ATTACHMENT0,
1226                                                 GR_GL_RENDERBUFFER, 0));
1227                 GL_CALL(DeleteRenderbuffers(1, &tempRB));
1228 
1229                 // Unbind the SB from the FBO so that we don't keep it alive.
1230                 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1231                                                 GR_GL_STENCIL_ATTACHMENT,
1232                                                 GR_GL_RENDERBUFFER, 0));
1233                 if (sFmt.fPacked) {
1234                     GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1235                                                     GR_GL_DEPTH_ATTACHMENT,
1236                                                     GR_GL_RENDERBUFFER, 0));
1237                 }
1238 #endif
1239                 return true;
1240             }
1241             // Remove the scratch key from this resource so we don't grab it from the cache ever
1242             // again.
1243             sb->resourcePriv().removeScratchKey();
1244             // Set this to 0 since we handed the valid ID off to the failed stencil buffer resource.
1245             sbDesc.fRenderbufferID = 0;
1246         }
1247     }
1248     GL_CALL(DeleteRenderbuffers(1, &sbDesc.fRenderbufferID));
1249     return false;
1250 }
1251 
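     // Attaches (or, when sb is NULL, detaches) the stencil attachment's renderbuffer to the render
     // target's FBO, binding it to the depth attachment as well for packed depth-stencil formats.
     // For config/stencil-format pairs not yet verified, framebuffer completeness is checked and the
     // attachment is undone on failure.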
1252 bool GrGLGpu::attachStencilAttachmentToRenderTarget(GrStencilAttachment* sb, GrRenderTarget* rt) {
1253     GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
1254 
1255     GrGLuint fbo = glrt->renderFBOID();
1256 
1257     if (NULL == sb) {
1258         if (rt->renderTargetPriv().getStencilAttachment()) {
1259             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1260                                             GR_GL_STENCIL_ATTACHMENT,
1261                                             GR_GL_RENDERBUFFER, 0));
1262             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1263                                             GR_GL_DEPTH_ATTACHMENT,
1264                                             GR_GL_RENDERBUFFER, 0));
1265 #ifdef SK_DEBUG
1266             GrGLenum status;
1267             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1268             SkASSERT(GR_GL_FRAMEBUFFER_COMPLETE == status);
1269 #endif
1270         }
1271         return true;
1272     } else {
1273         GrGLStencilAttachment* glsb = static_cast<GrGLStencilAttachment*>(sb);
1274         GrGLuint rb = glsb->renderbufferID();
1275 
1276         fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1277         fStats.incRenderTargetBinds();
1278         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fbo));
1279         GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1280                                         GR_GL_STENCIL_ATTACHMENT,
1281                                         GR_GL_RENDERBUFFER, rb));
1282         if (glsb->format().fPacked) {
1283             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1284                                             GR_GL_DEPTH_ATTACHMENT,
1285                                             GR_GL_RENDERBUFFER, rb));
1286         } else {
1287             GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1288                                             GR_GL_DEPTH_ATTACHMENT,
1289                                             GR_GL_RENDERBUFFER, 0));
1290         }
1291 
1292         GrGLenum status;
1293         if (!this->glCaps().isColorConfigAndStencilFormatVerified(rt->config(), glsb->format())) {
1294             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1295             if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1296                 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1297                                               GR_GL_STENCIL_ATTACHMENT,
1298                                               GR_GL_RENDERBUFFER, 0));
1299                 if (glsb->format().fPacked) {
1300                     GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1301                                                   GR_GL_DEPTH_ATTACHMENT,
1302                                                   GR_GL_RENDERBUFFER, 0));
1303                 }
1304                 return false;
1305             } else {
1306                 fGLContext.caps()->markColorConfigAndStencilFormatAsVerified(
1307                     rt->config(),
1308                     glsb->format());
1309             }
1310         }
1311         return true;
1312     }
1313 }
1314 
1315 ////////////////////////////////////////////////////////////////////////////////
1316 
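     // Creates a GL vertex buffer. When dynamic data is better handled without VBOs on this platform,
     // no buffer object is generated (fID stays 0); otherwise the VBO's storage is allocated up front
     // and creation fails if the driver reports an allocation error.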
1317 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
1318     GrGLVertexBuffer::Desc desc;
1319     desc.fDynamic = dynamic;
1320     desc.fSizeInBytes = size;
1321 
1322     if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
1323         desc.fID = 0;
1324         GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
1325         return vertexBuffer;
1326     } else {
1327         GL_CALL(GenBuffers(1, &desc.fID));
1328         if (desc.fID) {
1329             fHWGeometryState.setVertexBufferID(this, desc.fID);
1330             CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1331             // make sure driver can allocate memory for this buffer
1332             GL_ALLOC_CALL(this->glInterface(),
1333                           BufferData(GR_GL_ARRAY_BUFFER,
1334                                      (GrGLsizeiptr) desc.fSizeInBytes,
1335                                      NULL,   // data ptr
1336                                      desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
1337             if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1338                 GL_CALL(DeleteBuffers(1, &desc.fID));
1339                 this->notifyVertexBufferDelete(desc.fID);
1340                 return NULL;
1341             }
1342             GrGLVertexBuffer* vertexBuffer = SkNEW_ARGS(GrGLVertexBuffer, (this, desc));
1343             return vertexBuffer;
1344         }
1345         return NULL;
1346     }
1347 }
1348 
1349 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
1350     GrGLIndexBuffer::Desc desc;
1351     desc.fDynamic = dynamic;
1352     desc.fSizeInBytes = size;
1353 
1354     if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && desc.fDynamic) {
1355         desc.fID = 0;
1356         GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
1357         return indexBuffer;
1358     } else {
1359         GL_CALL(GenBuffers(1, &desc.fID));
1360         if (desc.fID) {
1361             fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
1362             CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1363             // make sure driver can allocate memory for this buffer
1364             GL_ALLOC_CALL(this->glInterface(),
1365                           BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
1366                                      (GrGLsizeiptr) desc.fSizeInBytes,
1367                                      NULL,  // data ptr
1368                                      desc.fDynamic ? GR_GL_DYNAMIC_DRAW : GR_GL_STATIC_DRAW));
1369             if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1370                 GL_CALL(DeleteBuffers(1, &desc.fID));
1371                 this->notifyIndexBufferDelete(desc.fID);
1372                 return NULL;
1373             }
1374             GrIndexBuffer* indexBuffer = SkNEW_ARGS(GrGLIndexBuffer, (this, desc));
1375             return indexBuffer;
1376         }
1377         return NULL;
1378     }
1379 }
1380 
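     // Converts the scissor state into a GL scissor rect relative to the render target's viewport and
     // enables GR_GL_SCISSOR_TEST, or disables it when the scissor is off or covers the whole viewport.
     // Redundant GL calls are skipped by comparing against the cached HW scissor state.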
1381 void GrGLGpu::flushScissor(const GrScissorState& scissorState,
1382                            const GrGLIRect& rtViewport,
1383                            GrSurfaceOrigin rtOrigin) {
1384     if (scissorState.enabled()) {
1385         GrGLIRect scissor;
1386         scissor.setRelativeTo(rtViewport,
1387                               scissorState.rect().fLeft,
1388                               scissorState.rect().fTop,
1389                               scissorState.rect().width(),
1390                               scissorState.rect().height(),
1391                               rtOrigin);
1392         // if the scissor fully contains the viewport then we fall through and
1393         // disable the scissor test.
1394         if (!scissor.contains(rtViewport)) {
1395             if (fHWScissorSettings.fRect != scissor) {
1396                 scissor.pushToGLScissor(this->glInterface());
1397                 fHWScissorSettings.fRect = scissor;
1398             }
1399             if (kYes_TriState != fHWScissorSettings.fEnabled) {
1400                 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1401                 fHWScissorSettings.fEnabled = kYes_TriState;
1402             }
1403             return;
1404         }
1405     }
1406 
1407     // See fall through note above
1408     this->disableScissor();
1409 }
1410 
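     // Flushes all draw state to GL: program, blend, stencil, scissor, MSAA, color write, draw face,
     // and finally the render target binding. Returns false if a program could not be built.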
1411 bool GrGLGpu::flushGLState(const DrawArgs& args) {
1412     GrXferProcessor::BlendInfo blendInfo;
1413     const GrPipeline& pipeline = *args.fPipeline;
1414     args.fPipeline->getXferProcessor()->getBlendInfo(&blendInfo);
1415 
1416     this->flushDither(pipeline.isDitherState());
1417     this->flushColorWrite(blendInfo.fWriteColor);
1418     this->flushDrawFace(pipeline.getDrawFace());
1419 
1420     fCurrentProgram.reset(fProgramCache->getProgram(args));
1421     if (NULL == fCurrentProgram.get()) {
1422         GrContextDebugf(this->getContext(), "Failed to create program!\n");
1423         return false;
1424     }
1425 
1426     fCurrentProgram.get()->ref();
1427 
1428     GrGLuint programID = fCurrentProgram->programID();
1429     if (fHWProgramID != programID) {
1430         GL_CALL(UseProgram(programID));
1431         fHWProgramID = programID;
1432     }
1433 
1434     if (blendInfo.fWriteColor) {
1435         this->flushBlend(blendInfo);
1436     }
1437 
1438     fCurrentProgram->setData(*args.fPrimitiveProcessor, pipeline, *args.fBatchTracker);
1439 
1440     GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.getRenderTarget());
1441     this->flushStencil(pipeline.getStencil());
1442     this->flushScissor(pipeline.getScissorState(), glRT->getViewport(), glRT->origin());
1443     this->flushHWAAState(glRT, pipeline.isHWAntialiasState());
1444 
1445     // This must come after textures are flushed because a texture may need
1446     // to be msaa-resolved (which will modify bound FBO state).
1447     this->flushRenderTarget(glRT, NULL);
1448 
1449     return true;
1450 }
1451 
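     // Binds the vertex (and optional index) buffer and configures one vertex attrib pointer per
     // attribute of the primitive processor. startVertex is folded into the attrib pointer offsets,
     // so the draw calls themselves always start at vertex 0.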
1452 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
1453                             const GrNonInstancedVertices& vertices,
1454                             size_t* indexOffsetInBytes) {
1455     GrGLVertexBuffer* vbuf;
1456     vbuf = (GrGLVertexBuffer*) vertices.vertexBuffer();
1457 
1458     SkASSERT(vbuf);
1459     SkASSERT(!vbuf->isMapped());
1460 
1461     GrGLIndexBuffer* ibuf = NULL;
1462     if (vertices.isIndexed()) {
1463         SkASSERT(indexOffsetInBytes);
1464 
1465         *indexOffsetInBytes = 0;
1466         ibuf = (GrGLIndexBuffer*)vertices.indexBuffer();
1467 
1468         SkASSERT(ibuf);
1469         SkASSERT(!ibuf->isMapped());
1470         *indexOffsetInBytes += ibuf->baseOffset();
1471     }
1472     GrGLAttribArrayState* attribState =
1473         fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf);
1474 
1475     int vaCount = primProc.numAttribs();
1476     if (vaCount > 0) {
1477 
1478         GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride());
1479 
1480         size_t vertexOffsetInBytes = stride * vertices.startVertex();
1481 
1482         vertexOffsetInBytes += vbuf->baseOffset();
1483 
1484         uint32_t usedAttribArraysMask = 0;
1485         size_t offset = 0;
1486 
1487         for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
1488             const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
1489             usedAttribArraysMask |= (1 << attribIndex);
1490             GrVertexAttribType attribType = attrib.fType;
1491             attribState->set(this,
1492                              attribIndex,
1493                              vbuf,
1494                              GrGLAttribTypeToLayout(attribType).fCount,
1495                              GrGLAttribTypeToLayout(attribType).fType,
1496                              GrGLAttribTypeToLayout(attribType).fNormalized,
1497                              stride,
1498                              reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset));
1499             offset += attrib.fOffset;
1500         }
1501         attribState->disableUnusedArrays(this, usedAttribArraysMask);
1502     }
1503 }
1504 
1505 void GrGLGpu::buildProgramDesc(GrProgramDesc* desc,
1506                                const GrPrimitiveProcessor& primProc,
1507                                const GrPipeline& pipeline,
1508                                const GrBatchTracker& batchTracker) const {
1509     if (!GrGLProgramDescBuilder::Build(desc, primProc, pipeline, this, batchTracker)) {
1510         SkDEBUGFAIL("Failed to generate GL program descriptor");
1511     }
1512 }
1513 
1514 void GrGLGpu::disableScissor() {
1515     if (kNo_TriState != fHWScissorSettings.fEnabled) {
1516         GL_CALL(Disable(GR_GL_SCISSOR_TEST));
1517         fHWScissorSettings.fEnabled = kNo_TriState;
1518         return;
1519     }
1520 }
1521 
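     // Clears the color buffer of 'target'. If rect is NULL, or the caller allows ignoring it and a
     // full clear is free, the whole target is cleared; otherwise the clear is restricted to the rect
     // via the scissor.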
1522 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect* rect, GrColor color,
1523                       bool canIgnoreRect) {
1524     // parent class should never let us get here with no RT
1525     SkASSERT(target);
1526     GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
1527 
1528     if (canIgnoreRect && this->glCaps().fullClearIsFree()) {
1529         rect = NULL;
1530     }
1531 
1532     SkIRect clippedRect;
1533     if (rect) {
1534         // flushScissor expects rect to be clipped to the target.
1535         clippedRect = *rect;
1536         SkIRect rtRect = SkIRect::MakeWH(target->width(), target->height());
1537         if (clippedRect.intersect(rtRect)) {
1538             rect = &clippedRect;
1539         } else {
1540             return;
1541         }
1542     }
1543 
1544     this->flushRenderTarget(glRT, rect);
1545     GrScissorState scissorState;
1546     if (rect) {
1547         scissorState.set(*rect);
1548     }
1549     this->flushScissor(scissorState, glRT->getViewport(), glRT->origin());
1550 
1551     GrGLfloat r, g, b, a;
1552     static const GrGLfloat scale255 = 1.f / 255.f;
1553     a = GrColorUnpackA(color) * scale255;
1554     GrGLfloat scaleRGB = scale255;
1555     r = GrColorUnpackR(color) * scaleRGB;
1556     g = GrColorUnpackG(color) * scaleRGB;
1557     b = GrColorUnpackB(color) * scaleRGB;
1558 
1559     GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
1560     fHWWriteToColor = kYes_TriState;
1561     GL_CALL(ClearColor(r, g, b, a));
1562     GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
1563 }
1564 
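     // Discards the render target's color contents as a hint to the driver, using whichever of
     // InvalidateFramebuffer or DiscardFramebuffer the GL caps report as supported.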
1565 void GrGLGpu::discard(GrRenderTarget* renderTarget) {
1566     SkASSERT(renderTarget);
1567     if (!this->caps()->discardRenderTargetSupport()) {
1568         return;
1569     }
1570 
1571     GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
1572     if (renderTarget->getUniqueID() != fHWBoundRenderTargetUniqueID) {
1573         fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1574         fStats.incRenderTargetBinds();
1575         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID()));
1576     }
1577     switch (this->glCaps().invalidateFBType()) {
1578         case GrGLCaps::kNone_InvalidateFBType:
1579             SkFAIL("Should never get here.");
1580             break;
1581         case GrGLCaps::kInvalidate_InvalidateFBType:
1582             if (0 == glRT->renderFBOID()) {
1583                 //  When rendering to the default framebuffer the legal values for attachments
1584                 //  are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
1585                 //  types.
1586                 static const GrGLenum attachments[] = { GR_GL_COLOR };
1587                 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
1588                         attachments));
1589             } else {
1590                 static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
1591                 GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
1592                         attachments));
1593             }
1594             break;
1595         case GrGLCaps::kDiscard_InvalidateFBType: {
1596             if (0 == glRT->renderFBOID()) {
1597                 //  When rendering to the default framebuffer the legal values for attachments
1598                 //  are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
1599                 //  types. See glDiscardFramebuffer() spec.
1600                 static const GrGLenum attachments[] = { GR_GL_COLOR };
1601                 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
1602                         attachments));
1603             } else {
1604                 static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
1605                 GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
1606                         attachments));
1607             }
1608             break;
1609         }
1610     }
1611     renderTarget->flagAsResolved();
1612 }
1613 
1614 
1615 void GrGLGpu::clearStencil(GrRenderTarget* target) {
1616     if (NULL == target) {
1617         return;
1618     }
1619     GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
1620     this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
1621 
1622     this->disableScissor();
1623 
1624     GL_CALL(StencilMask(0xffffffff));
1625     GL_CALL(ClearStencil(0));
1626     GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
1627     fHWStencilSettings.invalidate();
1628 }
1629 
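     // Clears the stencil clip bit within 'rect' to the inside- or outside-clip value. See the note
     // inside on why the full stencil mask is cleared rather than just the clip bit.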
1630 void GrGLGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
1631     SkASSERT(target);
1632 
1633     GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
1634     // this should only be called internally when we know we have a
1635     // stencil buffer.
1636     SkASSERT(sb);
1637     GrGLint stencilBitCount =  sb->bits();
1638 #if 0
1639     SkASSERT(stencilBitCount > 0);
1640     GrGLint clipStencilMask  = (1 << (stencilBitCount - 1));
1641 #else
1642     // we could just clear the clip bit but when we go through
1643     // ANGLE a partial stencil mask will cause clears to be
1644     // turned into draws. Our contract on GrDrawTarget says that
1645     // changing the clip between stencil passes may or may not
1646     // zero the client's clip bits. So we just clear the whole thing.
1647     static const GrGLint clipStencilMask  = ~0;
1648 #endif
1649     GrGLint value;
1650     if (insideClip) {
1651         value = (1 << (stencilBitCount - 1));
1652     } else {
1653         value = 0;
1654     }
1655     GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
1656     this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
1657 
1658     GrScissorState scissorState;
1659     scissorState.set(rect);
1660     this->flushScissor(scissorState, glRT->getViewport(), glRT->origin());
1661 
1662     GL_CALL(StencilMask((uint32_t) clipStencilMask));
1663     GL_CALL(ClearStencil(value));
1664     GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
1665     fHWStencilSettings.invalidate();
1666 }
1667 
1668 bool GrGLGpu::readPixelsWillPayForYFlip(GrRenderTarget* renderTarget,
1669                                         int left, int top,
1670                                         int width, int height,
1671                                         GrPixelConfig config,
1672                                         size_t rowBytes) const {
1673     // If this render target is already TopLeft, we don't need to flip.
1674     if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
1675         return false;
1676     }
1677 
1678     // if GL can do the flip then we'll never pay for it.
1679     if (this->glCaps().packFlipYSupport()) {
1680         return false;
1681     }
1682 
1683     // If we have to do a memcpy to handle non-tight rowBytes then we
1684     // get the flip for free. Otherwise it costs.
1685     if (this->glCaps().packRowLengthSupport()) {
1686         return true;
1687     }
1688     // If we have to do memcpys to handle rowBytes then y-flip is free
1689     // Note the rowBytes might be tight to the passed in data, but if data
1690     // gets clipped in x to the target the rowBytes will no longer be tight.
1691     if (left >= 0 && (left + width) < renderTarget->width()) {
1692         return 0 == rowBytes ||
1693                GrBytesPerPixel(config) * width == rowBytes;
1694     } else {
1695         return false;
1696     }
1697 }
1698 
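     // Reads back a rect of pixels from the render target, resolving MSAA first when necessary.
     // GL returns rows bottom-to-top, so rows are flipped afterwards: in place when GL can write the
     // caller's buffer directly, or while copying out of a scratch buffer otherwise.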
1699 bool GrGLGpu::onReadPixels(GrRenderTarget* target,
1700                            int left, int top,
1701                            int width, int height,
1702                            GrPixelConfig config,
1703                            void* buffer,
1704                            size_t rowBytes) {
1705     // We cannot read pixels into a compressed buffer
1706     if (GrPixelConfigIsCompressed(config)) {
1707         return false;
1708     }
1709 
1710     GrGLenum format = 0;
1711     GrGLenum type = 0;
1712     bool flipY = kBottomLeft_GrSurfaceOrigin == target->origin();
1713     if (!this->configToGLFormats(config, false, NULL, &format, &type)) {
1714         return false;
1715     }
1716     size_t bpp = GrBytesPerPixel(config);
1717     if (!adjust_pixel_ops_params(target->width(), target->height(), bpp,
1718                                  &left, &top, &width, &height,
1719                                  const_cast<const void**>(&buffer),
1720                                  &rowBytes)) {
1721         return false;
1722     }
1723 
1724     // resolve the render target if necessary
1725     GrGLRenderTarget* tgt = static_cast<GrGLRenderTarget*>(target);
1726     switch (tgt->getResolveType()) {
1727         case GrGLRenderTarget::kCantResolve_ResolveType:
1728             return false;
1729         case GrGLRenderTarget::kAutoResolves_ResolveType:
1730             this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target), &SkIRect::EmptyIRect());
1731             break;
1732         case GrGLRenderTarget::kCanResolve_ResolveType:
1733             this->onResolveRenderTarget(tgt);
1734             // we don't track the state of the READ FBO ID.
1735             fStats.incRenderTargetBinds();
1736             GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER,
1737                                     tgt->textureFBOID()));
1738             break;
1739         default:
1740             SkFAIL("Unknown resolve type");
1741     }
1742 
1743     const GrGLIRect& glvp = tgt->getViewport();
1744 
1745     // the read rect is viewport-relative
1746     GrGLIRect readRect;
1747     readRect.setRelativeTo(glvp, left, top, width, height, target->origin());
1748 
1749     size_t tightRowBytes = bpp * width;
1750     if (0 == rowBytes) {
1751         rowBytes = tightRowBytes;
1752     }
1753     size_t readDstRowBytes = tightRowBytes;
1754     void* readDst = buffer;
1755 
1756     // determine if GL can read using the passed rowBytes or if we need
1757     // a scratch buffer.
1758     GrAutoMalloc<32 * sizeof(GrColor)> scratch;
1759     if (rowBytes != tightRowBytes) {
1760         if (this->glCaps().packRowLengthSupport()) {
1761             SkASSERT(!(rowBytes % sizeof(GrColor)));
1762             GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
1763                                 static_cast<GrGLint>(rowBytes / sizeof(GrColor))));
1764             readDstRowBytes = rowBytes;
1765         } else {
1766             scratch.reset(tightRowBytes * height);
1767             readDst = scratch.get();
1768         }
1769     }
1770     if (flipY && this->glCaps().packFlipYSupport()) {
1771         GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
1772     }
1773     GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
1774                        readRect.fWidth, readRect.fHeight,
1775                        format, type, readDst));
1776     if (readDstRowBytes != tightRowBytes) {
1777         SkASSERT(this->glCaps().packRowLengthSupport());
1778         GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
1779     }
1780     if (flipY && this->glCaps().packFlipYSupport()) {
1781         GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
1782         flipY = false;
1783     }
1784 
1785     // now reverse the order of the rows, since GL's are bottom-to-top, but our
1786     // API presents top-to-bottom. We must preserve the padding contents. Note
1787     // that the above readPixels did not overwrite the padding.
1788     if (readDst == buffer) {
1789         SkASSERT(rowBytes == readDstRowBytes);
1790         if (flipY) {
1791             scratch.reset(tightRowBytes);
1792             void* tmpRow = scratch.get();
1793             // flip y in-place by rows
1794             const int halfY = height >> 1;
1795             char* top = reinterpret_cast<char*>(buffer);
1796             char* bottom = top + (height - 1) * rowBytes;
1797             for (int y = 0; y < halfY; y++) {
1798                 memcpy(tmpRow, top, tightRowBytes);
1799                 memcpy(top, bottom, tightRowBytes);
1800                 memcpy(bottom, tmpRow, tightRowBytes);
1801                 top += rowBytes;
1802                 bottom -= rowBytes;
1803             }
1804         }
1805     } else {
1806         SkASSERT(readDst != buffer);
             SkASSERT(rowBytes != tightRowBytes);
1807         // copy from readDst to buffer while flipping y
1808         // const int halfY = height >> 1;
1809         const char* src = reinterpret_cast<const char*>(readDst);
1810         char* dst = reinterpret_cast<char*>(buffer);
1811         if (flipY) {
1812             dst += (height-1) * rowBytes;
1813         }
1814         for (int y = 0; y < height; y++) {
1815             memcpy(dst, src, tightRowBytes);
1816             src += readDstRowBytes;
1817             if (!flipY) {
1818                 dst += rowBytes;
1819             } else {
1820                 dst -= rowBytes;
1821             }
1822         }
1823     }
1824     return true;
1825 }
1826 
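     // Binds the target's FBO and updates the GL viewport if either differs from the cached HW state.
     // Also flags the target as needing an MSAA resolve and dirties the mip maps of any backing texture.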
1827 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bound) {
1828 
1829     SkASSERT(target);
1830 
1831     uint32_t rtID = target->getUniqueID();
1832     if (fHWBoundRenderTargetUniqueID != rtID) {
1833         fStats.incRenderTargetBinds();
1834         GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()));
1835 #ifdef SK_DEBUG
1836         // don't do this check in Chromium -- this is causing
1837         // lots of repeated command buffer flushes when the compositor is
1838         // rendering with Ganesh, which is really slow; even too slow for
1839         // Debug mode.
1840         if (!this->glContext().isChromium()) {
1841             GrGLenum status;
1842             GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1843             if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1844                 SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
1845             }
1846         }
1847 #endif
1848         fHWBoundRenderTargetUniqueID = rtID;
1849         const GrGLIRect& vp = target->getViewport();
1850         if (fHWViewport != vp) {
1851             vp.pushToGLViewport(this->glInterface());
1852             fHWViewport = vp;
1853         }
1854     }
1855     if (NULL == bound || !bound->isEmpty()) {
1856         target->flagAsNeedingResolve(bound);
1857     }
1858 
1859     GrTexture *texture = target->asTexture();
1860     if (texture) {
1861         texture->texturePriv().dirtyMipMaps(true);
1862     }
1863 }
1864 
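     // Maps GrPrimitiveType values to GL draw modes; indexed directly by vertices.primitiveType()
     // in onDraw() below.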
1865 GrGLenum gPrimitiveType2GLMode[] = {
1866     GR_GL_TRIANGLES,
1867     GR_GL_TRIANGLE_STRIP,
1868     GR_GL_TRIANGLE_FAN,
1869     GR_GL_POINTS,
1870     GR_GL_LINES,
1871     GR_GL_LINE_STRIP
1872 };
1873 
1874 #define SWAP_PER_DRAW 0
1875 
1876 #if SWAP_PER_DRAW
1877     #if defined(SK_BUILD_FOR_MAC)
1878         #include <AGL/agl.h>
1879     #elif defined(SK_BUILD_FOR_WIN32)
1880         #include <gl/GL.h>
1881         void SwapBuf() {
1882             DWORD procID = GetCurrentProcessId();
1883             HWND hwnd = GetTopWindow(GetDesktopWindow());
1884             while(hwnd) {
1885                 DWORD wndProcID = 0;
1886                 GetWindowThreadProcessId(hwnd, &wndProcID);
1887                 if(wndProcID == procID) {
1888                     SwapBuffers(GetDC(hwnd));
1889                 }
1890                 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
1891             }
1892          }
1893     #endif
1894 #endif
1895 
1896 void GrGLGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
1897     if (!this->flushGLState(args)) {
1898         return;
1899     }
1900 
1901     size_t indexOffsetInBytes = 0;
1902     this->setupGeometry(*args.fPrimitiveProcessor, vertices, &indexOffsetInBytes);
1903 
1904     SkASSERT((size_t)vertices.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode));
1905 
1906     if (vertices.isIndexed()) {
1907         GrGLvoid* indices =
1908             reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) *
1909                                         vertices.startIndex());
1910         // info.startVertex() was accounted for by setupGeometry.
1911         GL_CALL(DrawElements(gPrimitiveType2GLMode[vertices.primitiveType()],
1912                              vertices.indexCount(),
1913                              GR_GL_UNSIGNED_SHORT,
1914                              indices));
1915     } else {
1916         // Pass 0 for the 'first' parameter. We have to adjust glVertexAttribPointer() to account for
1917         // startVertex in the DrawElements case. So we always rely on setupGeometry to have
1918         // accounted for startVertex.
1919         GL_CALL(DrawArrays(gPrimitiveType2GLMode[vertices.primitiveType()], 0,
1920                            vertices.vertexCount()));
1921     }
1922 #if SWAP_PER_DRAW
1923     glFlush();
1924     #if defined(SK_BUILD_FOR_MAC)
1925         aglSwapBuffers(aglGetCurrentContext());
1926         int set_a_break_pt_here = 9;
1927         aglSwapBuffers(aglGetCurrentContext());
1928     #elif defined(SK_BUILD_FOR_WIN32)
1929         SwapBuf();
1930         int set_a_break_pt_here = 9;
1931         SwapBuf();
1932     #endif
1933 #endif
1934 }
1935 
1936 void GrGLGpu::onStencilPath(const GrPath* path, const StencilPathState& state) {
1937     this->flushColorWrite(false);
1938     this->flushDrawFace(GrPipelineBuilder::kBoth_DrawFace);
1939 
1940     GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(state.fRenderTarget);
1941     SkISize size = SkISize::Make(rt->width(), rt->height());
1942     this->glPathRendering()->setProjectionMatrix(*state.fViewMatrix, size, rt->origin());
1943     this->flushScissor(*state.fScissor, rt->getViewport(), rt->origin());
1944     this->flushHWAAState(rt, state.fUseHWAA);
1945     this->flushRenderTarget(rt, NULL);
1946 
1947     fPathRendering->stencilPath(path, *state.fStencil);
1948 }
1949 
1950 void GrGLGpu::onDrawPath(const DrawArgs& args, const GrPath* path,
1951                          const GrStencilSettings& stencil) {
1952     if (!this->flushGLState(args)) {
1953         return;
1954     }
1955     fPathRendering->drawPath(path, stencil);
1956 }
1957 
1958 void GrGLGpu::onDrawPaths(const DrawArgs& args,
1959                           const GrPathRange* pathRange,
1960                           const void* indices,
1961                           GrDrawTarget::PathIndexType indexType,
1962                           const float transformValues[],
1963                           GrDrawTarget::PathTransformType transformType,
1964                           int count,
1965                           const GrStencilSettings& stencil) {
1966     if (!this->flushGLState(args)) {
1967         return;
1968     }
1969     fPathRendering->drawPaths(pathRange, indices, indexType, transformValues,
1970                               transformType, count, stencil);
1971 }
1972 
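     // Resolves a multisampled render target into its texture FBO, either via Apple's
     // ResolveMultisampleFramebuffer extension (which uses the scissor as the blit bounds) or via
     // BlitFramebuffer restricted to the target's dirty rect.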
1973 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
1974     GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
1975     if (rt->needsResolve()) {
1976         // Some extensions automatically resolve the texture when it is read.
1977         if (this->glCaps().usesMSAARenderBuffers()) {
1978             SkASSERT(rt->textureFBOID() != rt->renderFBOID());
1979             fStats.incRenderTargetBinds();
1980             fStats.incRenderTargetBinds();
1981             GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
1982             GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
1983             // make sure we go through flushRenderTarget() since we've modified
1984             // the bound DRAW FBO ID.
1985             fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1986             const GrGLIRect& vp = rt->getViewport();
1987             const SkIRect dirtyRect = rt->getResolveRect();
1988 
1989             if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
1990                 // Apple's extension uses the scissor as the blit bounds.
1991                 GrScissorState scissorState;
1992                 scissorState.set(dirtyRect);
1993                 this->flushScissor(scissorState, vp, rt->origin());
1994                 GL_CALL(ResolveMultisampleFramebuffer());
1995             } else {
1996                 GrGLIRect r;
1997                 r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
1998                                 dirtyRect.width(), dirtyRect.height(), target->origin());
1999 
2000                 int right = r.fLeft + r.fWidth;
2001                 int top = r.fBottom + r.fHeight;
2002 
2003                 // BlitFrameBuffer respects the scissor, so disable it.
2004                 this->disableScissor();
2005                 GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
2006                                         r.fLeft, r.fBottom, right, top,
2007                                         GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2008             }
2009         }
2010         rt->flagAsResolved();
2011     }
2012 }
2013 
2014 namespace {
2015 
2017 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2018     static const GrGLenum gTable[] = {
2019         GR_GL_KEEP,        // kKeep_StencilOp
2020         GR_GL_REPLACE,     // kReplace_StencilOp
2021         GR_GL_INCR_WRAP,   // kIncWrap_StencilOp
2022         GR_GL_INCR,        // kIncClamp_StencilOp
2023         GR_GL_DECR_WRAP,   // kDecWrap_StencilOp
2024         GR_GL_DECR,        // kDecClamp_StencilOp
2025         GR_GL_ZERO,        // kZero_StencilOp
2026         GR_GL_INVERT,      // kInvert_StencilOp
2027     };
2028     GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount);
2029     GR_STATIC_ASSERT(0 == kKeep_StencilOp);
2030     GR_STATIC_ASSERT(1 == kReplace_StencilOp);
2031     GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
2032     GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
2033     GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
2034     GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
2035     GR_STATIC_ASSERT(6 == kZero_StencilOp);
2036     GR_STATIC_ASSERT(7 == kInvert_StencilOp);
2037     SkASSERT((unsigned) op < kStencilOpCount);
2038     return gTable[op];
2039 }
2040 
2041 void set_gl_stencil(const GrGLInterface* gl,
2042                     const GrStencilSettings& settings,
2043                     GrGLenum glFace,
2044                     GrStencilSettings::Face grFace) {
2045     GrGLenum glFunc = GrToGLStencilFunc(settings.func(grFace));
2046     GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
2047     GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));
2048 
2049     GrGLint ref = settings.funcRef(grFace);
2050     GrGLint mask = settings.funcMask(grFace);
2051     GrGLint writeMask = settings.writeMask(grFace);
2052 
2053     if (GR_GL_FRONT_AND_BACK == glFace) {
2054         // we call the combined func just in case separate stencil is not
2055         // supported.
2056         GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2057         GR_GL_CALL(gl, StencilMask(writeMask));
2058         GR_GL_CALL(gl, StencilOp(glFailOp, glPassOp, glPassOp));
2059     } else {
2060         GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2061         GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2062         GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, glPassOp, glPassOp));
2063     }
2064 }
2065 }
2066 
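     // Applies stencil settings, toggling GR_GL_STENCIL_TEST and using separate front/back state when
     // two-sided stencil is supported; no GL calls are made if the settings are unchanged.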
2067 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) {
2068     if (fHWStencilSettings != stencilSettings) {
2069         if (stencilSettings.isDisabled()) {
2070             if (kNo_TriState != fHWStencilTestEnabled) {
2071                 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2072                 fHWStencilTestEnabled = kNo_TriState;
2073             }
2074         } else {
2075             if (kYes_TriState != fHWStencilTestEnabled) {
2076                 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2077                 fHWStencilTestEnabled = kYes_TriState;
2078             }
2079         }
2080         if (!stencilSettings.isDisabled()) {
2081             if (this->caps()->twoSidedStencilSupport()) {
2082                 set_gl_stencil(this->glInterface(),
2083                                stencilSettings,
2084                                GR_GL_FRONT,
2085                                GrStencilSettings::kFront_Face);
2086                 set_gl_stencil(this->glInterface(),
2087                                stencilSettings,
2088                                GR_GL_BACK,
2089                                GrStencilSettings::kBack_Face);
2090             } else {
2091                 set_gl_stencil(this->glInterface(),
2092                                stencilSettings,
2093                                GR_GL_FRONT_AND_BACK,
2094                                GrStencilSettings::kFront_Face);
2095             }
2096         }
2097         fHWStencilSettings = stencilSettings;
2098     }
2099 }
2100 
2101 void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA) {
2102     SkASSERT(!useHWAA || rt->isMultisampled());
2103 
2104     if (kGL_GrGLStandard == this->glStandard()) {
2105         if (useHWAA) {
2106             if (kYes_TriState != fMSAAEnabled) {
2107                 GL_CALL(Enable(GR_GL_MULTISAMPLE));
2108                 fMSAAEnabled = kYes_TriState;
2109             }
2110         } else {
2111             if (kNo_TriState != fMSAAEnabled) {
2112                 GL_CALL(Disable(GR_GL_MULTISAMPLE));
2113                 fMSAAEnabled = kNo_TriState;
2114             }
2115         }
2116     }
2117 }
2118 
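     // Applies blend state: blending is disabled entirely for the trivial add/subtract with
     // (one, zero) coeffs; otherwise the equation, coefficients, and blend constant (when referenced)
     // are updated if they differ from the cached values.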
2119 void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo) {
2120     // Any optimization to disable blending should have already been applied and
2121     // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2122 
2123     GrBlendEquation equation = blendInfo.fEquation;
2124     GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
2125     GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
2126     bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
2127                     kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
2128     if (blendOff) {
2129         if (kNo_TriState != fHWBlendState.fEnabled) {
2130             GL_CALL(Disable(GR_GL_BLEND));
2131             fHWBlendState.fEnabled = kNo_TriState;
2132         }
2133         return;
2134     }
2135 
2136     if (kYes_TriState != fHWBlendState.fEnabled) {
2137         GL_CALL(Enable(GR_GL_BLEND));
2138         fHWBlendState.fEnabled = kYes_TriState;
2139     }
2140 
2141     if (fHWBlendState.fEquation != equation) {
2142         GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
2143         fHWBlendState.fEquation = equation;
2144     }
2145 
2146     if (GrBlendEquationIsAdvanced(equation)) {
2147         SkASSERT(this->caps()->advancedBlendEquationSupport());
2148         // Advanced equations have no other blend state.
2149         return;
2150     }
2151 
2152     if (fHWBlendState.fSrcCoeff != srcCoeff ||
2153         fHWBlendState.fDstCoeff != dstCoeff) {
2154         GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
2155                           gXfermodeCoeff2Blend[dstCoeff]));
2156         fHWBlendState.fSrcCoeff = srcCoeff;
2157         fHWBlendState.fDstCoeff = dstCoeff;
2158     }
2159 
2160     GrColor blendConst = blendInfo.fBlendConstant;
2161     if ((BlendCoeffReferencesConstant(srcCoeff) ||
2162          BlendCoeffReferencesConstant(dstCoeff)) &&
2163         (!fHWBlendState.fConstColorValid ||
2164          fHWBlendState.fConstColor != blendConst)) {
2165         GrGLfloat c[4];
2166         GrColorToRGBAFloat(blendConst, c);
2167         GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
2168         fHWBlendState.fConstColor = blendConst;
2169         fHWBlendState.fConstColorValid = true;
2170     }
2171 }
2172 
2173 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
2174     static const GrGLenum gWrapModes[] = {
2175         GR_GL_CLAMP_TO_EDGE,
2176         GR_GL_REPEAT,
2177         GR_GL_MIRRORED_REPEAT
2178     };
2179     GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
2180     GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
2181     GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
2182     GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
2183     return gWrapModes[tm];
2184 }
2185 
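     // Binds 'texture' to the given texture unit, resolving it first if it is also a render target,
     // and updates only those texture parameters (filter, wrap, swizzle) that differ from the params
     // cached on the texture, or all of them when the cached params are out of date.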
2186 void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
2187     SkASSERT(texture);
2188 
2189     // If we created a rt/tex and rendered to it without using a texture and now we're texturing
2190     // from the rt it will still be the last bound texture, but it needs resolving. So keep this
2191     // out of the "last != next" check.
2192     GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
2193     if (texRT) {
2194         this->onResolveRenderTarget(texRT);
2195     }
2196 
2197     uint32_t textureID = texture->getUniqueID();
2198     if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
2199         this->setTextureUnit(unitIdx);
2200         GL_CALL(BindTexture(GR_GL_TEXTURE_2D, texture->textureID()));
2201         fHWBoundTextureUniqueIDs[unitIdx] = textureID;
2202     }
2203 
2204     ResetTimestamp timestamp;
2205     const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
2206     bool setAll = timestamp < this->getResetTimestamp();
2207     GrGLTexture::TexParams newTexParams;
2208 
2209     static GrGLenum glMinFilterModes[] = {
2210         GR_GL_NEAREST,
2211         GR_GL_LINEAR,
2212         GR_GL_LINEAR_MIPMAP_LINEAR
2213     };
2214     static GrGLenum glMagFilterModes[] = {
2215         GR_GL_NEAREST,
2216         GR_GL_LINEAR,
2217         GR_GL_LINEAR
2218     };
2219     GrTextureParams::FilterMode filterMode = params.filterMode();
2220 
2221     if (GrTextureParams::kMipMap_FilterMode == filterMode) {
2222         if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
2223             filterMode = GrTextureParams::kBilerp_FilterMode;
2224         }
2225     }
2226 
2227     newTexParams.fMinFilter = glMinFilterModes[filterMode];
2228     newTexParams.fMagFilter = glMagFilterModes[filterMode];
2229 
2230     if (GrTextureParams::kMipMap_FilterMode == filterMode &&
2231         texture->texturePriv().mipMapsAreDirty()) {
2232         GL_CALL(GenerateMipmap(GR_GL_TEXTURE_2D));
2233         texture->texturePriv().dirtyMipMaps(false);
2234     }
2235 
2236     newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
2237     newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
2238     memcpy(newTexParams.fSwizzleRGBA,
2239            GrGLShaderBuilder::GetTexParamSwizzle(texture->config(), this->glCaps()),
2240            sizeof(newTexParams.fSwizzleRGBA));
2241     if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
2242         this->setTextureUnit(unitIdx);
2243         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2244                               GR_GL_TEXTURE_MAG_FILTER,
2245                               newTexParams.fMagFilter));
2246     }
2247     if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
2248         this->setTextureUnit(unitIdx);
2249         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2250                               GR_GL_TEXTURE_MIN_FILTER,
2251                               newTexParams.fMinFilter));
2252     }
2253     if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
2254         this->setTextureUnit(unitIdx);
2255         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2256                               GR_GL_TEXTURE_WRAP_S,
2257                               newTexParams.fWrapS));
2258     }
2259     if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
2260         this->setTextureUnit(unitIdx);
2261         GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
2262                               GR_GL_TEXTURE_WRAP_T,
2263                               newTexParams.fWrapT));
2264     }
2265     if (this->glCaps().textureSwizzleSupport() &&
2266         (setAll || memcmp(newTexParams.fSwizzleRGBA,
2267                           oldTexParams.fSwizzleRGBA,
2268                           sizeof(newTexParams.fSwizzleRGBA)))) {
2269         this->setTextureUnit(unitIdx);
2270         if (this->glStandard() == kGLES_GrGLStandard) {
2271             // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
2272             const GrGLenum* swizzle = newTexParams.fSwizzleRGBA;
2273             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
2274             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
2275             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
2276             GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
2277         } else {
2278             GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint));
2279             const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA);
2280             GL_CALL(TexParameteriv(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
2281         }
2282     }
2283     texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
2284 }
2285 
2286 void GrGLGpu::flushDither(bool dither) {
2287     if (dither) {
2288         if (kYes_TriState != fHWDitherEnabled) {
2289             GL_CALL(Enable(GR_GL_DITHER));
2290             fHWDitherEnabled = kYes_TriState;
2291         }
2292     } else {
2293         if (kNo_TriState != fHWDitherEnabled) {
2294             GL_CALL(Disable(GR_GL_DITHER));
2295             fHWDitherEnabled = kNo_TriState;
2296         }
2297     }
2298 }
2299 
2300 void GrGLGpu::flushColorWrite(bool writeColor) {
2301     if (!writeColor) {
2302         if (kNo_TriState != fHWWriteToColor) {
2303             GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
2304                               GR_GL_FALSE, GR_GL_FALSE));
2305             fHWWriteToColor = kNo_TriState;
2306         }
2307     } else {
2308         if (kYes_TriState != fHWWriteToColor) {
2309             GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
2310             fHWWriteToColor = kYes_TriState;
2311         }
2312     }
2313 }
2314 
2315 void GrGLGpu::flushDrawFace(GrPipelineBuilder::DrawFace face) {
2316     if (fHWDrawFace != face) {
2317         switch (face) {
2318             case GrPipelineBuilder::kCCW_DrawFace:
2319                 GL_CALL(Enable(GR_GL_CULL_FACE));
2320                 GL_CALL(CullFace(GR_GL_BACK));
2321                 break;
2322             case GrPipelineBuilder::kCW_DrawFace:
2323                 GL_CALL(Enable(GR_GL_CULL_FACE));
2324                 GL_CALL(CullFace(GR_GL_FRONT));
2325                 break;
2326             case GrPipelineBuilder::kBoth_DrawFace:
2327                 GL_CALL(Disable(GR_GL_CULL_FACE));
2328                 break;
2329             default:
2330                 SkFAIL("Unknown draw face.");
2331         }
2332         fHWDrawFace = face;
2333     }
2334 }
2335 
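     // Maps a GrPixelConfig to the GL internal format, external format, and external type used for
     // texture allocation and pixel uploads. Returns false for configs that aren't texturable on this
     // context or aren't handled here.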
2336 bool GrGLGpu::configToGLFormats(GrPixelConfig config,
2337                                 bool getSizedInternalFormat,
2338                                 GrGLenum* internalFormat,
2339                                 GrGLenum* externalFormat,
2340                                 GrGLenum* externalType) {
2341     GrGLenum dontCare;
2342     if (NULL == internalFormat) {
2343         internalFormat = &dontCare;
2344     }
2345     if (NULL == externalFormat) {
2346         externalFormat = &dontCare;
2347     }
2348     if (NULL == externalType) {
2349         externalType = &dontCare;
2350     }
2351 
2352     if(!this->glCaps().isConfigTexturable(config)) {
2353         return false;
2354     }
2355 
2356     switch (config) {
2357         case kRGBA_8888_GrPixelConfig:
2358             *internalFormat = GR_GL_RGBA;
2359             *externalFormat = GR_GL_RGBA;
2360             if (getSizedInternalFormat) {
2361                 *internalFormat = GR_GL_RGBA8;
2362             } else {
2363                 *internalFormat = GR_GL_RGBA;
2364             }
2365             *externalType = GR_GL_UNSIGNED_BYTE;
2366             break;
2367         case kBGRA_8888_GrPixelConfig:
2368             if (this->glCaps().bgraIsInternalFormat()) {
2369                 if (getSizedInternalFormat) {
2370                     *internalFormat = GR_GL_BGRA8;
2371                 } else {
2372                     *internalFormat = GR_GL_BGRA;
2373                 }
2374             } else {
2375                 if (getSizedInternalFormat) {
2376                     *internalFormat = GR_GL_RGBA8;
2377                 } else {
2378                     *internalFormat = GR_GL_RGBA;
2379                 }
2380             }
2381             *externalFormat = GR_GL_BGRA;
2382             *externalType = GR_GL_UNSIGNED_BYTE;
2383             break;
2384         case kSRGBA_8888_GrPixelConfig:
2385             *internalFormat = GR_GL_SRGB_ALPHA;
2386             *externalFormat = GR_GL_SRGB_ALPHA;
2387             if (getSizedInternalFormat) {
2388                 *internalFormat = GR_GL_SRGB8_ALPHA8;
2389             } else {
2390                 *internalFormat = GR_GL_SRGB_ALPHA;
2391             }
2392             *externalType = GR_GL_UNSIGNED_BYTE;
2393             break;
2394         case kRGB_565_GrPixelConfig:
2395             *internalFormat = GR_GL_RGB;
2396             *externalFormat = GR_GL_RGB;
2397             if (getSizedInternalFormat) {
2398                 if (!this->glCaps().ES2CompatibilitySupport()) {
2399                     *internalFormat = GR_GL_RGB5;
2400                 } else {
2401                     *internalFormat = GR_GL_RGB565;
2402                 }
2403             } else {
2404                 *internalFormat = GR_GL_RGB;
2405             }
2406             *externalType = GR_GL_UNSIGNED_SHORT_5_6_5;
2407             break;
2408         case kRGBA_4444_GrPixelConfig:
2409             *internalFormat = GR_GL_RGBA;
2410             *externalFormat = GR_GL_RGBA;
2411             if (getSizedInternalFormat) {
2412                 *internalFormat = GR_GL_RGBA4;
2413             } else {
2414                 *internalFormat = GR_GL_RGBA;
2415             }
2416             *externalType = GR_GL_UNSIGNED_SHORT_4_4_4_4;
2417             break;
2418         case kIndex_8_GrPixelConfig:
2419             // no sized/unsized internal format distinction here
2420             *internalFormat = GR_GL_PALETTE8_RGBA8;
2421             break;
2422         case kAlpha_8_GrPixelConfig:
2423             if (this->glCaps().textureRedSupport()) {
2424                 *internalFormat = GR_GL_RED;
2425                 *externalFormat = GR_GL_RED;
2426                 if (getSizedInternalFormat) {
2427                     *internalFormat = GR_GL_R8;
2428                 } else {
2429                     *internalFormat = GR_GL_RED;
2430                 }
2431                 *externalType = GR_GL_UNSIGNED_BYTE;
2432             } else {
2433                 *internalFormat = GR_GL_ALPHA;
2434                 *externalFormat = GR_GL_ALPHA;
2435                 if (getSizedInternalFormat) {
2436                     *internalFormat = GR_GL_ALPHA8;
2437                 } else {
2438                     *internalFormat = GR_GL_ALPHA;
2439                 }
2440                 *externalType = GR_GL_UNSIGNED_BYTE;
2441             }
2442             break;
2443         case kETC1_GrPixelConfig:
2444             *internalFormat = GR_GL_COMPRESSED_ETC1_RGB8;
2445             break;
2446         case kLATC_GrPixelConfig:
2447             switch(this->glCaps().latcAlias()) {
2448                 case GrGLCaps::kLATC_LATCAlias:
2449                     *internalFormat = GR_GL_COMPRESSED_LUMINANCE_LATC1;
2450                     break;
2451                 case GrGLCaps::kRGTC_LATCAlias:
2452                     *internalFormat = GR_GL_COMPRESSED_RED_RGTC1;
2453                     break;
2454                 case GrGLCaps::k3DC_LATCAlias:
2455                     *internalFormat = GR_GL_COMPRESSED_3DC_X;
2456                     break;
2457             }
2458             break;
2459         case kR11_EAC_GrPixelConfig:
2460             *internalFormat = GR_GL_COMPRESSED_R11_EAC;
2461             break;
2462 
2463         case kASTC_12x12_GrPixelConfig:
2464             *internalFormat = GR_GL_COMPRESSED_RGBA_ASTC_12x12_KHR;
2465             break;
2466 
2467         case kRGBA_float_GrPixelConfig:
2468             *internalFormat = GR_GL_RGBA32F;
2469             *externalFormat = GR_GL_RGBA;
2470             *externalType = GR_GL_FLOAT;
2471             break;
2472 
2473         case kAlpha_half_GrPixelConfig:
2474             if (this->glCaps().textureRedSupport()) {
2475                 if (getSizedInternalFormat) {
2476                     *internalFormat = GR_GL_R16F;
2477                 } else {
2478                     *internalFormat = GR_GL_RED;
2479                 }
2480                 *externalFormat = GR_GL_RED;
2481             } else {
2482                 if (getSizedInternalFormat) {
2483                     *internalFormat = GR_GL_ALPHA16F;
2484                 } else {
2485                     *internalFormat = GR_GL_ALPHA;
2486                 }
2487                 *externalFormat = GR_GL_ALPHA;
2488             }
2489             if (kGL_GrGLStandard == this->glStandard() || this->glVersion() >= GR_GL_VER(3, 0)) {
2490                 *externalType = GR_GL_HALF_FLOAT;
2491             } else {
2492                 *externalType = GR_GL_HALF_FLOAT_OES;
2493             }
2494             break;
2495 
2496         default:
2497             return false;
2498     }
2499     return true;
2500 }
2501 
void GrGLGpu::setTextureUnit(int unit) {
    SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
    if (unit != fHWActiveTextureUnitIdx) {
        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
        fHWActiveTextureUnitIdx = unit;
    }
}
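
// Binds the last texture unit for one-off texture work (e.g. the CopyTexSubImage2D path in
// copySurface) so that bindings made by GrGLProgram on lower units are left undisturbed.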
void GrGLGpu::setScratchTextureUnit() {
    // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
    int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
    if (lastUnitIdx != fHWActiveTextureUnitIdx) {
        GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
        fHWActiveTextureUnitIdx = lastUnitIdx;
    }
    // Clear out this field so that if a program does use this unit it will rebind the correct
    // texture.
    fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID;
}
namespace {
// Determines whether glBlitFramebuffer could be used between src and dst.
inline bool can_blit_framebuffer(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGLGpu* gpu) {
    if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
        gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        gpu->glCaps().usesMSAARenderBuffers()) {
        // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
        // or the rects are not the same (not just the same size but have the same edges).
        if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
            (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
            return false;
        }
        return true;
    } else {
        return false;
    }
}
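
// Determines whether CopyTexSubImage could be used to copy src into dst's texture.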
inline bool can_copy_texsubimage(const GrSurface* dst,
                                 const GrSurface* src,
                                 const GrGLGpu* gpu) {
    // Table 3.9 of the ES2 spec indicates the supported formats for CopyTexSubImage, and BGRA
    // isn't among them. There doesn't appear to be any extension that adds it. Perhaps many
    // drivers would allow it to work, but ANGLE does not.
    if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
        (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
        return false;
    }
    const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
    // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
    // then we don't want to copy to the texture but to the MSAA buffer.
    if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
        return false;
    }
    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    // If the src is multisampled (and uses an extension where there is a separate MSAA
    // renderbuffer) then it is an invalid operation to call CopyTexSubImage.
    if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        return false;
    }
    if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
        dst->asTexture() &&
        dst->origin() == src->origin() &&
        !GrPixelConfigIsCompressed(src->config())) {
        return true;
    } else {
        return false;
    }
}

}  // namespace

// If a temporary FBO was created, its non-zero ID is returned; otherwise the return value is 0.
// The viewport that the copy rect is relative to is written to *viewport.
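// Temporary FBO IDs are cached in fTempSrcFBOID / fTempDstFBOID; they are generated the first
// time they are needed and reused on subsequent calls.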
GrGLuint GrGLGpu::bindSurfaceAsFBO(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
                                   TempFBOTarget tempFBOTarget) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    if (NULL == rt) {
        SkASSERT(surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GrGLuint* tempFBOID;
        tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;

        if (0 == *tempFBOID) {
            GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
        }

        fStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, *tempFBOID));
        GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                             GR_GL_COLOR_ATTACHMENT0,
                                                             GR_GL_TEXTURE_2D,
                                                             texID,
                                                             0));
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
        return *tempFBOID;
    } else {
        GrGLuint tempFBOID = 0;
        fStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
        return tempFBOID;
    }
}
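
// Detaches whatever texture is bound to GR_GL_COLOR_ATTACHMENT0 of the FBO currently bound to
// fboTarget. Used to undo the attachment made by bindSurfaceAsFBO.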
void GrGLGpu::unbindTextureFromFBO(GrGLenum fboTarget) {
    GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                         GR_GL_COLOR_ATTACHMENT0,
                                                         GR_GL_TEXTURE_2D,
                                                         0,
                                                         0));
}
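
// Fills in desc so that a surface created from it can serve as the destination of a copy from
// src using either CopyTexSubImage or an fbo blit. Returns false if the caller must instead
// fall back to a draw (render-to-texture) copy.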
bool GrGLGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) {
    // Here we look for opportunities to use CopyTexSubImage or an fbo blit. If neither is
    // possible, we return false and fall back to creating a render target dst for
    // render-to-texture. This code prefers CopyTexSubImage to fbo blit and avoids triggering
    // temporary fbo creation. It isn't clear that avoiding temporary fbo creation is actually
    // optimal.

    // Check for format issues with glCopyTexSubImage2D.
    if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
        kBGRA_8888_GrPixelConfig == src->config()) {
        // glCopyTexSubImage2D doesn't work with this config. If BGRA can be used with fbo blit
        // then we set up for that; otherwise fail.
        if (this->caps()->isConfigRenderable(kBGRA_8888_GrPixelConfig, false)) {
            desc->fOrigin = kDefault_GrSurfaceOrigin;
            desc->fFlags = kRenderTarget_GrSurfaceFlag;
            desc->fConfig = kBGRA_8888_GrPixelConfig;
            return true;
        }
        return false;
    } else if (NULL == src->asRenderTarget()) {
        // CopyTexSubImage2D or fbo blit would require creating a temp fbo for the src.
        return false;
    }

    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. Set up for FBO blit or
        // fail.
        if (this->caps()->isConfigRenderable(src->config(), false)) {
            desc->fOrigin = kDefault_GrSurfaceOrigin;
            desc->fFlags = kRenderTarget_GrSurfaceFlag;
            desc->fConfig = src->config();
            return true;
        }
        return false;
    }

    // We'll do a CopyTexSubImage. Make the dst a plain old texture.
    desc->fConfig = src->config();
    desc->fOrigin = src->origin();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}
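
// Copies srcRect from src into dst at dstPoint. Prefers CopyTexSubImage2D when the copy is
// legal for the surfaces involved; otherwise falls back to BlitFramebuffer when the caps allow
// it. Rects are converted to GL's bottom-up framebuffer space, and the blit path y-mirrors
// when src and dst have different origins.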
bool GrGLGpu::copySurface(GrSurface* dst,
                          GrSurface* src,
                          const SkIRect& srcRect,
                          const SkIPoint& dstPoint) {
    bool copied = false;
    if (can_copy_texsubimage(dst, src, this)) {
        GrGLuint srcFBO;
        GrGLIRect srcVP;
        srcFBO = this->bindSurfaceAsFBO(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
        GrGLTexture* dstTex = static_cast<GrGLTexture*>(dst->asTexture());
        SkASSERT(dstTex);
        // We modified the bound FBO
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        GrGLIRect srcGLRect;
        srcGLRect.setRelativeTo(srcVP,
                                srcRect.fLeft,
                                srcRect.fTop,
                                srcRect.width(),
                                srcRect.height(),
                                src->origin());

        this->setScratchTextureUnit();
        GL_CALL(BindTexture(GR_GL_TEXTURE_2D, dstTex->textureID()));
        GrGLint dstY;
        if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
            dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
        } else {
            dstY = dstPoint.fY;
        }
        GL_CALL(CopyTexSubImage2D(GR_GL_TEXTURE_2D, 0,
                                  dstPoint.fX, dstY,
                                  srcGLRect.fLeft, srcGLRect.fBottom,
                                  srcGLRect.fWidth, srcGLRect.fHeight));
        copied = true;
        if (srcFBO) {
            this->unbindTextureFromFBO(GR_GL_FRAMEBUFFER);
        }
    } else if (can_blit_framebuffer(dst, src, this)) {
        SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                            srcRect.width(), srcRect.height());
        bool selfOverlap = false;
        if (dst == src) {
            selfOverlap = SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect);
        }

        if (!selfOverlap) {
            GrGLuint dstFBO;
            GrGLuint srcFBO;
            GrGLIRect dstVP;
            GrGLIRect srcVP;
            dstFBO = this->bindSurfaceAsFBO(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP,
                                            kDst_TempFBOTarget);
            srcFBO = this->bindSurfaceAsFBO(src, GR_GL_READ_FRAMEBUFFER, &srcVP,
                                            kSrc_TempFBOTarget);
            // We modified the bound FBO
            fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
            GrGLIRect srcGLRect;
            GrGLIRect dstGLRect;
            srcGLRect.setRelativeTo(srcVP,
                                    srcRect.fLeft,
                                    srcRect.fTop,
                                    srcRect.width(),
                                    srcRect.height(),
                                    src->origin());
            dstGLRect.setRelativeTo(dstVP,
                                    dstRect.fLeft,
                                    dstRect.fTop,
                                    dstRect.width(),
                                    dstRect.height(),
                                    dst->origin());

            // BlitFramebuffer respects the scissor, so disable it.
            this->disableScissor();

            GrGLint srcY0;
            GrGLint srcY1;
            // Does the blit need to y-mirror or not?
            if (src->origin() == dst->origin()) {
                srcY0 = srcGLRect.fBottom;
                srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
            } else {
                srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
                srcY1 = srcGLRect.fBottom;
            }
            GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
                                    srcY0,
                                    srcGLRect.fLeft + srcGLRect.fWidth,
                                    srcY1,
                                    dstGLRect.fLeft,
                                    dstGLRect.fBottom,
                                    dstGLRect.fLeft + dstGLRect.fWidth,
                                    dstGLRect.fBottom + dstGLRect.fHeight,
                                    GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            if (dstFBO) {
                this->unbindTextureFromFBO(GR_GL_DRAW_FRAMEBUFFER);
            }
            if (srcFBO) {
                this->unbindTextureFromFBO(GR_GL_READ_FRAMEBUFFER);
            }
            copied = true;
        }
    }
    return copied;
}
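
// Returns true if copySurface above would succeed without falling back to a draw, i.e. if
// either the CopyTexSubImage or the BlitFramebuffer path applies (and, for a self-copy, the
// rects do not overlap).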
bool GrGLGpu::canCopySurface(const GrSurface* dst,
                             const GrSurface* src,
                             const SkIRect& srcRect,
                             const SkIPoint& dstPoint) {
    // This mirrors the logic in copySurface.
    if (can_copy_texsubimage(dst, src, this)) {
        return true;
    }
    if (can_blit_framebuffer(dst, src, this)) {
        if (dst == src) {
            SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                                srcRect.width(), srcRect.height());
            if (!SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
                return true;
            }
        } else {
            return true;
        }
    }
    return false;
}
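
// Issues the GL barrier required before a draw that reads the destination. A texture barrier
// is needed when the render target and the texture being sampled share storage; a blend
// barrier is needed between overlapping draws that use advanced (KHR) blend equations.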
void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
    switch (type) {
        case kTexture_GrXferBarrierType: {
            GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
            if (glrt->textureFBOID() != glrt->renderFBOID()) {
                // The render target uses separate storage so no need for glTextureBarrier.
                // FIXME: The render target will resolve automatically when its texture is bound,
                // but we could resolve only the bounds that will be read if we do it here instead.
                return;
            }
            SkASSERT(this->caps()->textureBarrierSupport());
            GL_CALL(TextureBarrier());
            return;
        }
        case kBlend_GrXferBarrierType:
            SkASSERT(GrDrawTargetCaps::kAdvanced_BlendEquationSupport ==
                     this->caps()->blendEquationSupport());
            GL_CALL(BlendBarrier());
            return;
    }
}
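
// Emits the most recently added trace marker, either to the debug log (when
// GR_FORCE_GPU_TRACE_DEBUGGING is set) or as a GL group marker.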
void GrGLGpu::didAddGpuTraceMarker() {
    if (this->caps()->gpuTracingSupport()) {
        const GrTraceMarkerSet& markerArray = this->getActiveTraceMarkers();
        SkString markerString = markerArray.toStringLast();
#if GR_FORCE_GPU_TRACE_DEBUGGING
        SkDebugf("%s\n", markerString.c_str());
#else
        GL_CALL(PushGroupMarker(0, markerString.c_str()));
#endif
    }
}
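
// Pops the group marker pushed by didAddGpuTraceMarker (or logs the pop when
// GR_FORCE_GPU_TRACE_DEBUGGING is set).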
void GrGLGpu::didRemoveGpuTraceMarker() {
    if (this->caps()->gpuTracingSupport()) {
#if GR_FORCE_GPU_TRACE_DEBUGGING
        SkDebugf("Pop trace marker.\n");
#else
        GL_CALL(PopGroupMarker());
#endif
    }
}

///////////////////////////////////////////////////////////////////////////////
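
// Chooses between the cached VBO-backed vertex array object and the default vertex array for
// the upcoming draw, binds it along with the index buffer, and returns the attrib state the
// caller should use to set up vertex attributes.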
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
                                                GrGLGpu* gpu,
                                                const GrGLVertexBuffer* vbuffer,
                                                const GrGLIndexBuffer* ibuffer) {
    SkASSERT(vbuffer);
    GrGLAttribArrayState* attribState;

    // We use a vertex array if we're on a core profile and the verts are in a VBO.
    if (gpu->glCaps().isCoreProfile() && !vbuffer->isCPUBacked()) {
        if (!fVBOVertexArray) {
            GrGLuint arrayID;
            GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
            int attrCount = gpu->glCaps().maxVertexAttributes();
            fVBOVertexArray = SkNEW_ARGS(GrGLVertexArray, (arrayID, attrCount));
        }
        attribState = fVBOVertexArray->bindWithIndexBuffer(gpu, ibuffer);
    } else {
        if (ibuffer) {
            this->setIndexBufferIDOnDefaultVertexArray(gpu, ibuffer->bufferID());
        } else {
            this->setVertexArrayID(gpu, 0);
        }
        int attrCount = gpu->glCaps().maxVertexAttributes();
        if (fDefaultVertexArrayAttribState.count() != attrCount) {
            fDefaultVertexArrayAttribState.resize(attrCount);
        }
        attribState = &fDefaultVertexArrayAttribState;
    }
    return attribState;
}