1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrGLGpu.h"
9 #include "GrGLGLSL.h"
10 #include "GrGLStencilAttachment.h"
11 #include "GrGLTextureRenderTarget.h"
12 #include "GrGpuResourcePriv.h"
13 #include "GrPipeline.h"
14 #include "GrPLSGeometryProcessor.h"
15 #include "GrRenderTargetPriv.h"
16 #include "GrSurfacePriv.h"
17 #include "GrTexturePriv.h"
18 #include "GrTypes.h"
19 #include "GrVertices.h"
20 #include "builders/GrGLShaderStringBuilder.h"
21 #include "glsl/GrGLSL.h"
22 #include "glsl/GrGLSLCaps.h"
23 #include "glsl/GrGLSLPLSPathRendering.h"
24 #include "SkStrokeRec.h"
25 #include "SkTemplates.h"
26
27 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
28 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
29
30 #define SKIP_CACHE_CHECK true
31
32 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
33 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
34 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
35 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
36 #else
37 #define CLEAR_ERROR_BEFORE_ALLOC(iface)
38 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
39 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
40 #endif
41
42 ///////////////////////////////////////////////////////////////////////////////
43
44
// Maps GrBlendEquation values to the corresponding GL blend-equation enums.
// The table is indexed directly by GrBlendEquation, so entry order must match
// the enum's declaration order; the static asserts below pin that contract.
static const GrGLenum gXfermodeEquation2Blend[] = {
    // Basic OpenGL blend equations.
    GR_GL_FUNC_ADD,
    GR_GL_FUNC_SUBTRACT,
    GR_GL_FUNC_REVERSE_SUBTRACT,

    // GL_KHR_blend_equation_advanced.
    GR_GL_SCREEN,
    GR_GL_OVERLAY,
    GR_GL_DARKEN,
    GR_GL_LIGHTEN,
    GR_GL_COLORDODGE,
    GR_GL_COLORBURN,
    GR_GL_HARDLIGHT,
    GR_GL_SOFTLIGHT,
    GR_GL_DIFFERENCE,
    GR_GL_EXCLUSION,
    GR_GL_MULTIPLY,
    GR_GL_HSL_HUE,
    GR_GL_HSL_SATURATION,
    GR_GL_HSL_COLOR,
    GR_GL_HSL_LUMINOSITY
};
// Compile-time verification that the table order matches GrBlendEquation.
GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
87
// Maps GrBlendCoeff values to the corresponding GL blend-factor enums.
// Indexed directly by GrBlendCoeff; the ordering static asserts live inside
// GrGLGpu::BlendCoeffReferencesConstant below (they need GrGpu scope).
static const GrGLenum gXfermodeCoeff2Blend[] = {
    GR_GL_ZERO,
    GR_GL_ONE,
    GR_GL_SRC_COLOR,
    GR_GL_ONE_MINUS_SRC_COLOR,
    GR_GL_DST_COLOR,
    GR_GL_ONE_MINUS_DST_COLOR,
    GR_GL_SRC_ALPHA,
    GR_GL_ONE_MINUS_SRC_ALPHA,
    GR_GL_DST_ALPHA,
    GR_GL_ONE_MINUS_DST_ALPHA,
    GR_GL_CONSTANT_COLOR,
    GR_GL_ONE_MINUS_CONSTANT_COLOR,
    GR_GL_CONSTANT_ALPHA,
    GR_GL_ONE_MINUS_CONSTANT_ALPHA,

    // extended blend coeffs
    GR_GL_SRC1_COLOR,
    GR_GL_ONE_MINUS_SRC1_COLOR,
    GR_GL_SRC1_ALPHA,
    GR_GL_ONE_MINUS_SRC1_ALPHA,
};
110
BlendCoeffReferencesConstant(GrBlendCoeff coeff)111 bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
112 static const bool gCoeffReferencesBlendConst[] = {
113 false,
114 false,
115 false,
116 false,
117 false,
118 false,
119 false,
120 false,
121 false,
122 false,
123 true,
124 true,
125 true,
126 true,
127
128 // extended blend coeffs
129 false,
130 false,
131 false,
132 false,
133 };
134 return gCoeffReferencesBlendConst[coeff];
135 GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
136
137 GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
138 GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
139 GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
140 GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
141 GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
142 GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
143 GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
144 GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
145 GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
146 GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
147 GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
148 GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
149 GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
150 GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
151
152 GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
153 GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
154 GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
155 GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
156
157 // assertion for gXfermodeCoeff2Blend have to be in GrGpu scope
158 GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
159 }
160
161 ///////////////////////////////////////////////////////////////////////////////
162
163
Create(GrBackendContext backendContext,const GrContextOptions & options,GrContext * context)164 GrGpu* GrGLGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
165 GrContext* context) {
166 SkAutoTUnref<const GrGLInterface> glInterface(
167 reinterpret_cast<const GrGLInterface*>(backendContext));
168 if (!glInterface) {
169 glInterface.reset(GrGLDefaultInterface());
170 } else {
171 glInterface->ref();
172 }
173 if (!glInterface) {
174 return nullptr;
175 }
176 GrGLContext* glContext = GrGLContext::Create(glInterface, options);
177 if (glContext) {
178 return new GrGLGpu(glContext, context);
179 }
180 return nullptr;
181 }
182
// When true, the GrGLGpu constructor dumps vendor/renderer/version/extension
// info to the debug output. Toggle manually (e.g. from a debugger).
static bool gPrintStartupSpew;
184
// Constructs a GrGLGpu over an already-created GrGLContext (ownership of
// 'ctx' is taken via fGLContext). Sets up the program cache, zeroes all
// cached GL object ids, and creates the optional path-rendering and
// pixel-local-storage helpers when the caps report support.
GrGLGpu::GrGLGpu(GrGLContext* ctx, GrContext* context)
    : GrGpu(context)
    , fGLContext(ctx) {
    SkASSERT(ctx);
    fCaps.reset(SkRef(ctx->caps()));

    // One cached texture-binding slot per fragment texture unit.
    fHWBoundTextureUniqueIDs.reset(this->glCaps().maxFragmentTextureUnits());

    // Don't let a stale GL error from before our creation confuse later checks.
    GrGLClearErr(this->glInterface());
    if (gPrintStartupSpew) {
        const GrGLubyte* vendor;
        const GrGLubyte* renderer;
        const GrGLubyte* version;
        GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
        GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
        GL_CALL_RET(version, GetString(GR_GL_VERSION));
        SkDebugf("------------------------- create GrGLGpu %p --------------\n",
                 this);
        SkDebugf("------ VENDOR %s\n", vendor);
        SkDebugf("------ RENDERER %s\n", renderer);
        SkDebugf("------ VERSION %s\n", version);
        SkDebugf("------ EXTENSIONS\n");
        this->glContext().extensions().print();
        SkDebugf("\n");
        SkDebugf("%s", this->glCaps().dump().c_str());
    }

    fProgramCache = new ProgramCache(this);

    SkASSERT(this->glCaps().maxVertexAttributes() >= GrGeometryProcessor::kMaxVertexAttribs);

    // 0 is never a valid GL object id; it marks "not yet created".
    fHWProgramID = 0;
    fTempSrcFBOID = 0;
    fTempDstFBOID = 0;
    fStencilClearFBOID = 0;

    if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
        fPathRendering.reset(new GrGLPathRendering(this));
    }
    this->createCopyPrograms();
    fWireRectProgram.fProgram = 0;
    fWireRectArrayBuffer = 0;
    if (this->glCaps().shaderCaps()->plsPathRenderingSupport()) {
        this->createPLSSetupProgram();
    }
    else {
        // Leave all PLS program/uniform ids zeroed when PLS is unavailable.
        memset(&fPLSSetupProgram, 0, sizeof(fPLSSetupProgram));
    }
    fHWPLSEnabled = false;
    fPLSHasBeenUsed = false;
}
236
// Destructor: releases all GL objects this GrGLGpu created. Ordering matters:
// path rendering is torn down first (it needs a working gpu object), and every
// GL delete is guarded on a non-zero id since 0 means "never created".
GrGLGpu::~GrGLGpu() {
    // Delete the path rendering explicitly, since it will need working gpu object to release the
    // resources the object itself holds.
    fPathRendering.reset();

    if (0 != fHWProgramID) {
        // detach the current program so there is no confusion on OpenGL's part
        // that we want it to be deleted
        GL_CALL(UseProgram(0));
    }

    if (0 != fTempSrcFBOID) {
        GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
    }
    if (0 != fTempDstFBOID) {
        GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
    }
    if (0 != fStencilClearFBOID) {
        GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
    }

    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        if (0 != fCopyPrograms[i].fProgram) {
            GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
        }
    }

    if (0 != fCopyProgramArrayBuffer) {
        GL_CALL(DeleteBuffers(1, &fCopyProgramArrayBuffer));
    }

    if (0 != fWireRectProgram.fProgram) {
        GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
    }

    if (0 != fWireRectArrayBuffer) {
        GL_CALL(DeleteBuffers(1, &fWireRectArrayBuffer));
    }

    if (0 != fPLSSetupProgram.fArrayBuffer) {
        GL_CALL(DeleteBuffers(1, &fPLSSetupProgram.fArrayBuffer));
    }

    if (0 != fPLSSetupProgram.fProgram) {
        GL_CALL(DeleteProgram(fPLSSetupProgram.fProgram));
    }

    delete fProgramCache;
}
286
createPLSSetupProgram()287 void GrGLGpu::createPLSSetupProgram() {
288 const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
289 const char* version = glslCaps->versionDeclString();
290
291 GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
292 GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
293 GrShaderVar::kUniform_TypeModifier);
294 GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
295 GrGLSLShaderVar uTexture("u_texture", kSampler2D_GrSLType, GrShaderVar::kUniform_TypeModifier);
296 GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType, GrShaderVar::kVaryingOut_TypeModifier);
297
298 SkString vshaderTxt(version);
299 if (glslCaps->noperspectiveInterpolationSupport()) {
300 if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
301 vshaderTxt.appendf("#extension %s : require\n", extension);
302 }
303 vTexCoord.addModifier("noperspective");
304 }
305 aVertex.appendDecl(glslCaps, &vshaderTxt);
306 vshaderTxt.append(";");
307 uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
308 vshaderTxt.append(";");
309 uPosXform.appendDecl(glslCaps, &vshaderTxt);
310 vshaderTxt.append(";");
311 vTexCoord.appendDecl(glslCaps, &vshaderTxt);
312 vshaderTxt.append(";");
313
314 vshaderTxt.append(
315 "// PLS Setup Program VS\n"
316 "void main() {"
317 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
318 " gl_Position.zw = vec2(0, 1);"
319 "}"
320 );
321
322 SkString fshaderTxt(version);
323 if (glslCaps->noperspectiveInterpolationSupport()) {
324 if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
325 fshaderTxt.appendf("#extension %s : require\n", extension);
326 }
327 }
328 fshaderTxt.append("#extension ");
329 fshaderTxt.append(glslCaps->fbFetchExtensionString());
330 fshaderTxt.append(" : require\n");
331 fshaderTxt.append("#extension GL_EXT_shader_pixel_local_storage : require\n");
332 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps, &fshaderTxt);
333 vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
334 vTexCoord.appendDecl(glslCaps, &fshaderTxt);
335 fshaderTxt.append(";");
336 uTexture.appendDecl(glslCaps, &fshaderTxt);
337 fshaderTxt.append(";");
338
339 fshaderTxt.appendf(
340 "// PLS Setup Program FS\n"
341 GR_GL_PLS_PATH_DATA_DECL
342 "void main() {\n"
343 " " GR_GL_PLS_DSTCOLOR_NAME " = gl_LastFragColorARM;\n"
344 " pls.windings = ivec4(0, 0, 0, 0);\n"
345 "}"
346 );
347 GL_CALL_RET(fPLSSetupProgram.fProgram, CreateProgram());
348 const char* str;
349 GrGLint length;
350
351 str = vshaderTxt.c_str();
352 length = SkToInt(vshaderTxt.size());
353 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fPLSSetupProgram.fProgram,
354 GR_GL_VERTEX_SHADER, &str, &length, 1, &fStats);
355
356 str = fshaderTxt.c_str();
357 length = SkToInt(fshaderTxt.size());
358 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fPLSSetupProgram.fProgram,
359 GR_GL_FRAGMENT_SHADER, &str, &length, 1, &fStats);
360
361 GL_CALL(LinkProgram(fPLSSetupProgram.fProgram));
362
363 GL_CALL_RET(fPLSSetupProgram.fPosXformUniform, GetUniformLocation(fPLSSetupProgram.fProgram,
364 "u_posXform"));
365
366 GL_CALL(BindAttribLocation(fPLSSetupProgram.fProgram, 0, "a_vertex"));
367
368 GL_CALL(DeleteShader(vshader));
369 GL_CALL(DeleteShader(fshader));
370
371 GL_CALL(GenBuffers(1, &fPLSSetupProgram.fArrayBuffer));
372 fHWGeometryState.setVertexBufferID(this, fPLSSetupProgram.fArrayBuffer);
373 static const GrGLfloat vdata[] = {
374 0, 0,
375 0, 1,
376 1, 0,
377 1, 1
378 };
379 GL_ALLOC_CALL(this->glInterface(),
380 BufferData(GR_GL_ARRAY_BUFFER,
381 (GrGLsizeiptr) sizeof(vdata),
382 vdata, // data ptr
383 GR_GL_STATIC_DRAW));
384 }
385
contextAbandoned()386 void GrGLGpu::contextAbandoned() {
387 INHERITED::contextAbandoned();
388 fProgramCache->abandon();
389 fHWProgramID = 0;
390 fTempSrcFBOID = 0;
391 fTempDstFBOID = 0;
392 fStencilClearFBOID = 0;
393 fCopyProgramArrayBuffer = 0;
394 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
395 fCopyPrograms[i].fProgram = 0;
396 }
397 fWireRectProgram.fProgram = 0;
398 fWireRectArrayBuffer = 0;
399 if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
400 this->glPathRendering()->abandonGpuResources();
401 }
402 }
403
404 ///////////////////////////////////////////////////////////////////////////////
405
// Re-syncs Skia's cached GL state for the categories named in 'resetBits' (a
// mask of Gr*_GrGLBackendState flags), after third-party code may have touched
// the context. State we control is re-specified on the context; state we track
// lazily is invalidated so the next flush re-sends it.
void GrGLGpu::onResetContext(uint32_t resetBits) {
    // we don't use the zb at all
    if (resetBits & kMisc_GrGLBackendState) {
        GL_CALL(Disable(GR_GL_DEPTH_TEST));
        GL_CALL(DepthMask(GR_GL_FALSE));

        fHWDrawFace = GrPipelineBuilder::kInvalid_DrawFace;

        if (kGL_GrGLStandard == this->glStandard()) {
            // Desktop-only state that we never change
            if (!this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_POINT_SMOOTH));
                GL_CALL(Disable(GR_GL_LINE_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
                GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
                GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
                GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
            }
            // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
            // core profile. This seems like a bug since the core spec removes any mention of
            // GL_ARB_imaging.
            if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
                GL_CALL(Disable(GR_GL_COLOR_TABLE));
            }
            GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
            // Since ES doesn't support glPointSize at all we always use the VS to
            // set the point size
            GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));

            // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
            // currently part of our gl interface. There are probably others as
            // well.
        }

        if (kGLES_GrGLStandard == this->glStandard() &&
                this->hasExtension("GL_ARM_shader_framebuffer_fetch")) {
            // The arm extension requires specifically enabling MSAA fetching per sample.
            // On some devices this may have a perf hit.  Also multiple render targets are disabled
            GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
        }
        fHWWriteToColor = kUnknown_TriState;
        // we only ever use lines in hairline mode
        GL_CALL(LineWidth(1));
        GL_CALL(Disable(GR_GL_DITHER));
    }

    if (resetBits & kMSAAEnable_GrGLBackendState) {
        fMSAAEnabled = kUnknown_TriState;

        if (this->caps()->usesMixedSamples()) {
            if (0 != this->caps()->maxRasterSamples()) {
                fHWRasterMultisampleEnabled = kUnknown_TriState;
                fHWNumRasterSamples = 0;
            }

            // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
            // modulation. This state has no effect when not rendering to a mixed sampled target.
            GL_CALL(CoverageModulation(GR_GL_RGBA));
        }
    }

    // Always forget the active texture unit; it is cheap to re-set.
    fHWActiveTextureUnitIdx = -1; // invalid

    if (resetBits & kTextureBinding_GrGLBackendState) {
        for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
            fHWBoundTextureUniqueIDs[s] = SK_InvalidUniqueID;
        }
    }

    if (resetBits & kBlend_GrGLBackendState) {
        fHWBlendState.invalidate();
    }

    if (resetBits & kView_GrGLBackendState) {
        fHWScissorSettings.invalidate();
        fHWViewport.invalidate();
    }

    if (resetBits & kStencil_GrGLBackendState) {
        fHWStencilSettings.invalidate();
        fHWStencilTestEnabled = kUnknown_TriState;
    }

    // Vertex
    if (resetBits & kVertex_GrGLBackendState) {
        fHWGeometryState.invalidate();
    }

    if (resetBits & kRenderTarget_GrGLBackendState) {
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        fHWSRGBFramebuffer = kUnknown_TriState;
    }

    if (resetBits & kPathRendering_GrGLBackendState) {
        if (this->caps()->shaderCaps()->pathRenderingSupport()) {
            this->glPathRendering()->resetContext();
        }
    }

    // we assume these values
    if (resetBits & kPixelStore_GrGLBackendState) {
        if (this->glCaps().unpackRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().packRowLengthSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
        }
        if (this->glCaps().unpackFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
        }
        if (this->glCaps().packFlipYSupport()) {
            GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
        }
    }

    if (resetBits & kProgram_GrGLBackendState) {
        fHWProgramID = 0;
    }
}
525
resolve_origin(GrSurfaceOrigin origin,bool renderTarget)526 static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
527 // By default, GrRenderTargets are GL's normal orientation so that they
528 // can be drawn to by the outside world without the client having
529 // to render upside down.
530 if (kDefault_GrSurfaceOrigin == origin) {
531 return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
532 } else {
533 return origin;
534 }
535 }
536
onWrapBackendTexture(const GrBackendTextureDesc & desc,GrWrapOwnership ownership)537 GrTexture* GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
538 GrWrapOwnership ownership) {
539 #ifdef SK_IGNORE_GL_TEXTURE_TARGET
540 if (!desc.fTextureHandle) {
541 return nullptr;
542 }
543 #else
544 const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
545 if (!info || !info->fID) {
546 return nullptr;
547 }
548 #endif
549
550 // next line relies on GrBackendTextureDesc's flags matching GrTexture's
551 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
552
553 GrGLTexture::IDDesc idDesc;
554 GrSurfaceDesc surfDesc;
555
556 #ifdef SK_IGNORE_GL_TEXTURE_TARGET
557 idDesc.fInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
558 // We only support GL_TEXTURE_2D at the moment.
559 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
560 #else
561 idDesc.fInfo = *info;
562 #endif
563
564 if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
565 if (renderTarget) {
566 // This combination is not supported.
567 return nullptr;
568 }
569 if (!this->glCaps().externalTextureSupport()) {
570 return nullptr;
571 }
572 } else if (GR_GL_TEXTURE_RECTANGLE == idDesc.fInfo.fTarget) {
573 if (!this->glCaps().rectangleTextureSupport()) {
574 return nullptr;
575 }
576 } else if (GR_GL_TEXTURE_2D != idDesc.fInfo.fTarget) {
577 return nullptr;
578 }
579
580 // Sample count is interpreted to mean the number of samples that Gr code should allocate
581 // for a render buffer that resolves to the texture. We don't support MSAA textures.
582 if (desc.fSampleCnt && !renderTarget) {
583 return nullptr;
584 }
585
586 switch (ownership) {
587 case kAdopt_GrWrapOwnership:
588 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle;
589 break;
590 case kBorrow_GrWrapOwnership:
591 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
592 break;
593 }
594
595 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
596 surfDesc.fWidth = desc.fWidth;
597 surfDesc.fHeight = desc.fHeight;
598 surfDesc.fConfig = desc.fConfig;
599 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
600 // FIXME: this should be calling resolve_origin(), but Chrome code is currently
601 // assuming the old behaviour, which is that backend textures are always
602 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
603 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
604 if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
605 surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
606 } else {
607 surfDesc.fOrigin = desc.fOrigin;
608 }
609
610 GrGLTexture* texture = nullptr;
611 if (renderTarget) {
612 GrGLRenderTarget::IDDesc rtIDDesc;
613 if (!this->createRenderTargetObjects(surfDesc, GrGpuResource::kUncached_LifeCycle,
614 idDesc.fInfo, &rtIDDesc)) {
615 return nullptr;
616 }
617 texture = new GrGLTextureRenderTarget(this, surfDesc, idDesc, rtIDDesc);
618 } else {
619 texture = new GrGLTexture(this, surfDesc, idDesc);
620 }
621 if (nullptr == texture) {
622 return nullptr;
623 }
624
625 return texture;
626 }
627
onWrapBackendRenderTarget(const GrBackendRenderTargetDesc & wrapDesc,GrWrapOwnership ownership)628 GrRenderTarget* GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc,
629 GrWrapOwnership ownership) {
630 GrGLRenderTarget::IDDesc idDesc;
631 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
632 idDesc.fMSColorRenderbufferID = 0;
633 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
634 switch (ownership) {
635 case kAdopt_GrWrapOwnership:
636 idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle;
637 break;
638 case kBorrow_GrWrapOwnership:
639 idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
640 break;
641 }
642 idDesc.fSampleConfig = GrRenderTarget::kUnified_SampleConfig;
643
644 GrSurfaceDesc desc;
645 desc.fConfig = wrapDesc.fConfig;
646 desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag;
647 desc.fWidth = wrapDesc.fWidth;
648 desc.fHeight = wrapDesc.fHeight;
649 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
650 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true);
651
652 return GrGLRenderTarget::CreateWrapped(this, desc, idDesc, wrapDesc.fStencilBits);
653 }
654
// Wraps a client GL texture as a render target only (no GrTexture is created
// for it). Only GL_TEXTURE_2D and GL_TEXTURE_RECTANGLE targets are accepted.
// Returns nullptr on invalid handle/target or if FBO creation fails.
GrRenderTarget* GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc,
                                                            GrWrapOwnership ownership) {
#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    if (!desc.fTextureHandle) {
        return nullptr;
    }
#else
    const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
    if (!info || !info->fID) {
        return nullptr;
    }
#endif

    GrGLTexture::IDDesc idDesc;
    GrSurfaceDesc surfDesc;

#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    idDesc.fInfo.fID = static_cast<GrGLuint>(desc.fTextureHandle);
    // We only support GL_TEXTURE_2D at the moment.
    idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
#else
    idDesc.fInfo = *info;
#endif

    if (GR_GL_TEXTURE_RECTANGLE != idDesc.fInfo.fTarget &&
        GR_GL_TEXTURE_2D != idDesc.fInfo.fTarget) {
        // Only texture rectangle and texture 2d are supported. We do not check whether texture
        // rectangle is supported by Skia - if the caller provided us with a texture rectangle,
        // we assume the necessary support exists.
        return nullptr;
    }

    switch (ownership) {
        case kAdopt_GrWrapOwnership:
            idDesc.fLifeCycle = GrGpuResource::kAdopted_LifeCycle;
            break;
        case kBorrow_GrWrapOwnership:
            idDesc.fLifeCycle = GrGpuResource::kBorrowed_LifeCycle;
            break;
    }

    surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
    surfDesc.fWidth = desc.fWidth;
    surfDesc.fHeight = desc.fHeight;
    surfDesc.fConfig = desc.fConfig;
    surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
    // FIXME:  this should be calling resolve_origin(), but Chrome code is currently
    // assuming the old behaviour, which is that backend textures are always
    // BottomLeft, even for non-RT's.  Once Chrome is fixed, change this to:
    // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
    if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
        surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
    } else {
        surfDesc.fOrigin = desc.fOrigin;
    }

    GrGLRenderTarget::IDDesc rtIDDesc;
    if (!this->createRenderTargetObjects(surfDesc, GrGpuResource::kUncached_LifeCycle,
                                         idDesc.fInfo, &rtIDDesc)) {
        return nullptr;
    }
    return GrGLRenderTarget::CreateWrapped(this, surfDesc, rtIDDesc, 0);
}
718
719 ////////////////////////////////////////////////////////////////////////////////
720
// Decides how a writePixels call should be serviced: directly, or via a draw
// from a temporary texture. Fills 'tempDrawInfo' with the temp surface setup
// and possibly elevates 'drawPreference'. Returns false if the write cannot be
// performed at all (indexed src config or compressed dst).
bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
                                   GrPixelConfig srcConfig,
                                   DrawPreference* drawPreference,
                                   WritePixelTempDrawInfo* tempDrawInfo) {
    if (kIndex_8_GrPixelConfig == srcConfig || GrPixelConfigIsCompressed(dstSurface->config())) {
        return false;
    }

    // This subclass only allows writes to textures. If the dst is not a texture we have to draw
    // into it. We could use glDrawPixels on GLs that have it, but we don't today.
    if (!dstSurface->asTexture()) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    } else {
        GrGLTexture* texture = static_cast<GrGLTexture*>(dstSurface->asTexture());
        if (GR_GL_TEXTURE_EXTERNAL == texture->target()) {
             // We don't currently support writing pixels to EXTERNAL textures.
             return false;
        }
    }

    // sRGB <-> linear conversions are not done by glTexSubImage, so route
    // through a draw when the configs disagree on sRGB-ness.
    if (GrPixelConfigIsSRGB(dstSurface->config()) != GrPixelConfigIsSRGB(srcConfig)) {
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    }

    // Start off assuming no swizzling
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
    tempDrawInfo->fWriteConfig = srcConfig;

    // These settings we will always want if a temp draw is performed. Initially set the config
    // to srcConfig, though that may be modified if we decide to do a R/G swap.
    tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.

    bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();

    // When src and dst differ only by an R/B swap, we can sometimes upload in
    // the dst's config and let a swizzling draw fix up the channels.
    if (configsAreRBSwaps) {
        if (!this->caps()->isConfigTexturable(srcConfig)) {
            // Can't even create a temp surface in srcConfig; must swap.
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        } else if (this->glCaps().rgba8888PixelsOpsAreSlow() &&
                   kRGBA_8888_GrPixelConfig == srcConfig) {
            // Swapping is faster than slow RGBA8888 ops; prefer but don't require it.
            ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        } else if (kGLES_GrGLStandard == this->glStandard() &&
                   this->glCaps().bgraIsInternalFormat()) {
            // The internal format and external formats must match texture uploads so we can't
            // swizzle while uploading when BGRA is a distinct internal format.
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
            tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fWriteConfig = dstSurface->config();
        }
    }

    // Without GL_UNPACK_FLIP_Y, a bottom-left dst needs a CPU y-flip; a draw
    // from a top-left temp avoids that.
    if (!this->glCaps().unpackFlipYSupport() &&
        kBottomLeft_GrSurfaceOrigin == dstSurface->origin()) {
        ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
    }

    return true;
}
790
check_write_and_transfer_input(GrGLTexture * glTex,GrSurface * surface,GrPixelConfig config)791 static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surface,
792 GrPixelConfig config) {
793 if (!glTex) {
794 return false;
795 }
796
797 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
798 if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
799 return false;
800 }
801
802 // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
803 if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
804 return false;
805 }
806
807 return true;
808 }
809
onWritePixels(GrSurface * surface,int left,int top,int width,int height,GrPixelConfig config,const void * buffer,size_t rowBytes)810 bool GrGLGpu::onWritePixels(GrSurface* surface,
811 int left, int top, int width, int height,
812 GrPixelConfig config, const void* buffer,
813 size_t rowBytes) {
814 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
815
816 if (!check_write_and_transfer_input(glTex, surface, config)) {
817 return false;
818 }
819
820 this->setScratchTextureUnit();
821 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
822
823 bool success = false;
824 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
825 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
826 SkASSERT(config == glTex->desc().fConfig);
827 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), buffer,
828 kWrite_UploadType, left, top, width, height);
829 } else {
830 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
831 left, top, width, height, config, buffer, rowBytes);
832 }
833
834 if (success) {
835 glTex->texturePriv().dirtyMipMaps(true);
836 return true;
837 }
838
839 return false;
840 }
841
// Uploads pixels into 'surface' from a GrTransferBuffer (a GL pixel-unpack
// buffer), avoiding a CPU copy. Compressed textures are not supported on this
// path. Marks mipmaps dirty on success.
bool GrGLGpu::onTransferPixels(GrSurface* surface,
                               int left, int top, int width, int height,
                               GrPixelConfig config, GrTransferBuffer* buffer,
                               size_t offset, size_t rowBytes) {
    GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());

    if (!check_write_and_transfer_input(glTex, surface, config)) {
        return false;
    }

    // For the moment, can't transfer compressed data
    if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
        return false;
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(glTex->target(), glTex->textureID()));

    SkASSERT(!buffer->isMapped());
    GrGLTransferBuffer* glBuffer = reinterpret_cast<GrGLTransferBuffer*>(buffer);
    // bind the transfer buffer
    SkASSERT(GR_GL_PIXEL_UNPACK_BUFFER == glBuffer->bufferType() ||
             GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM == glBuffer->bufferType());
    GL_CALL(BindBuffer(glBuffer->bufferType(), glBuffer->bufferID()));

    // NOTE(review): the 'offset' parameter is never used here -- the
    // GrTransferBuffer pointer itself is forwarded as uploadTexData's data
    // argument. With an unpack buffer bound, GL treats that pointer as a byte
    // offset into the buffer; verify that passing 'buffer' rather than
    // 'offset' is intentional.
    bool success = false;
    success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
                                  left, top, width, height, config, buffer, rowBytes);

    if (success) {
        glTex->texturePriv().dirtyMipMaps(true);
        return true;
    }

    return false;
}
878
879 // For GL_[UN]PACK_ALIGNMENT.
// Returns the byte alignment to pass to glPixelStorei(GL_[UN]PACK_ALIGNMENT)
// for a given uncompressed pixel config, or 0 for configs with no defined
// alignment here. Not valid for compressed configs.
static inline GrGLint config_alignment(GrPixelConfig config) {
    SkASSERT(!GrPixelConfigIsCompressed(config));
    switch (config) {
        case kAlpha_8_GrPixelConfig:
            return 1;
        case kRGB_565_GrPixelConfig:
        case kRGBA_4444_GrPixelConfig:
        case kAlpha_half_GrPixelConfig:
        case kRGBA_half_GrPixelConfig:
            return 2;
        case kRGBA_8888_GrPixelConfig:
        case kBGRA_8888_GrPixelConfig:
        case kSRGBA_8888_GrPixelConfig:
        case kRGBA_float_GrPixelConfig:
            return 4;
        default:
            return 0;
    }
}
899
check_alloc_error(const GrSurfaceDesc & desc,const GrGLInterface * interface)900 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
901 const GrGLInterface* interface) {
902 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
903 return GR_GL_GET_ERROR(interface);
904 } else {
905 return CHECK_ALLOC_ERROR(interface);
906 }
907 }
908
// Uploads (or, for kNewTexture, allocates and optionally uploads) pixel data
// for the texture currently bound to 'target'. 'dataOrOffset' is a client
// memory pointer, except for kTransfer uploads where an unpack buffer is bound
// and GL treats it as an offset into that buffer. Returns true on success.
bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
                            GrGLenum target,
                            UploadType uploadType,
                            int left, int top, int width, int height,
                            GrPixelConfig dataConfig,
                            const void* dataOrOffset,
                            size_t rowBytes) {
    SkASSERT(dataOrOffset || kNewTexture_UploadType == uploadType ||
             kTransfer_UploadType == uploadType);

    // If we're uploading compressed data then we should be using uploadCompressedTexData
    SkASSERT(!GrPixelConfigIsCompressed(dataConfig));

    SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));

    size_t bpp = GrBytesPerPixel(dataConfig);
    // Clips the update rect to the texture bounds and advances the data
    // pointer/rowBytes accordingly; fails if nothing is left to write.
    if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
                                               &width, &height, &dataOrOffset, &rowBytes)) {
        return false;
    }
    size_t trimRowBytes = width * bpp;

    // in case we need a temporary, trimmed copy of the src pixels
    SkAutoSMalloc<128 * 128> tempStorage;

    // Internal format comes from the texture desc.
    GrGLenum internalFormat;
    // External format and type come from the upload data.
    GrGLenum externalFormat;
    GrGLenum externalType;
    if (!this->glCaps().getTexImageFormats(desc.fConfig, dataConfig, &internalFormat,
                                           &externalFormat, &externalType)) {
        return false;
    }
    /*
     * Check whether to allocate a temporary buffer for flipping y or
     * because our srcData has extra bytes past each row. If so, we need
     * to trim those off here, since GL ES may not let us specify
     * GL_UNPACK_ROW_LENGTH.
     */
    bool restoreGLRowLength = false;
    bool swFlipY = false;
    bool glFlipY = false;
    if (dataOrOffset) {
        // Bottom-left origin surfaces need a y-flip: prefer the GL unpack
        // flip extension, fall back to flipping in software.
        if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
            if (this->glCaps().unpackFlipYSupport()) {
                glFlipY = true;
            } else {
                swFlipY = true;
            }
        }
        if (this->glCaps().unpackRowLengthSupport() && !swFlipY) {
            // can't use this for flipping, only non-neg values allowed. :(
            if (rowBytes != trimRowBytes) {
                GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
                GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
                restoreGLRowLength = true;
            }
        } else if (kTransfer_UploadType != uploadType) {
            if (trimRowBytes != rowBytes || swFlipY) {
                // copy data into our new storage, skipping the trailing bytes
                size_t trimSize = height * trimRowBytes;
                const char* src = (const char*)dataOrOffset;
                if (swFlipY) {
                    // Start at the last row and walk backwards to flip.
                    src += (height - 1) * rowBytes;
                }
                char* dst = (char*)tempStorage.reset(trimSize);
                for (int y = 0; y < height; y++) {
                    memcpy(dst, src, trimRowBytes);
                    if (swFlipY) {
                        src -= rowBytes;
                    } else {
                        src += rowBytes;
                    }
                    dst += trimRowBytes;
                }
                // now point data to our copied version
                dataOrOffset = tempStorage.get();
            }
        } else {
            // Transfer uploads read straight from the unpack buffer; we can't
            // make a trimmed CPU copy, so fail if trimming would be required.
            return false;
        }
        if (glFlipY) {
            GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
        }
        GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, config_alignment(dataConfig)));
    }
    bool succeeded = true;
    if (kNewTexture_UploadType == uploadType) {
        // A new texture's initial data must cover the whole surface.
        if (dataOrOffset &&
            !(0 == left && 0 == top && desc.fWidth == width && desc.fHeight == height)) {
            succeeded = false;
        } else {
            if (desc.fTextureStorageAllocator.fAllocateTextureStorage) {
                // Storage was allocated externally; only the pixel upload
                // remains (if there is data).
                if (dataOrOffset) {
                    GL_CALL(TexSubImage2D(target,
                                          0, // level
                                          left, top,
                                          width, height,
                                          externalFormat, externalType, dataOrOffset));
                }
            } else {
                CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
                GL_ALLOC_CALL(this->glInterface(), TexImage2D(
                    target, 0, internalFormat, desc.fWidth, desc.fHeight, 0, externalFormat,
                    externalType, dataOrOffset));
                GrGLenum error = check_alloc_error(desc, this->glInterface());
                if (error != GR_GL_NO_ERROR) {
                    succeeded = false;
                }
            }
        }
    } else {
        // Partial update of an existing texture. Account for y-flip by
        // mirroring the destination row.
        if (swFlipY || glFlipY) {
            top = desc.fHeight - (top + height);
        }
        GL_CALL(TexSubImage2D(target,
                              0, // level
                              left, top,
                              width, height,
                              externalFormat, externalType, dataOrOffset));
    }

    // Restore any unpack state we changed so later uploads see defaults.
    if (restoreGLRowLength) {
        SkASSERT(this->glCaps().unpackRowLengthSupport());
        GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
    }
    if (glFlipY) {
        GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
    }
    return succeeded;
}
1041
// TODO: This function is using a lot of wonky semantics like, if width == -1
// then set width = desc.fWdith ... blah. A better way to do it might be to
// create a CompressedTexData struct that takes a desc/ptr and figures out
// the proper upload semantics. Then users can construct this function how they
// see fit if they want to go against the "standard" way to do it.
//
// Allocates (kNewTexture) or updates a sub-rect of a compressed texture bound
// to 'target'. width/height of -1 mean "full surface dimension". Returns true
// on success.
bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
                                      GrGLenum target,
                                      const void* data,
                                      UploadType uploadType,
                                      int left, int top, int width, int height) {
    SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
    // Transfer-buffer uploads of compressed data are not supported, and a new
    // texture must come with initial data.
    SkASSERT(kTransfer_UploadType != uploadType &&
             (data || kNewTexture_UploadType != uploadType));

    // No support for software flip y, yet...
    SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);

    if (-1 == width) {
        width = desc.fWidth;
    }
#ifdef SK_DEBUG
    else {
        SkASSERT(width <= desc.fWidth);
    }
#endif

    if (-1 == height) {
        height = desc.fHeight;
    }
#ifdef SK_DEBUG
    else {
        SkASSERT(height <= desc.fHeight);
    }
#endif

    // Make sure that the width and height that we pass to OpenGL
    // is a multiple of the block size.
    size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, width, height);

    // We only need the internal format for compressed 2D textures.
    GrGLenum internalFormat;
    if (!this->glCaps().getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
        return false;
    }

    if (kNewTexture_UploadType == uploadType) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        GL_ALLOC_CALL(this->glInterface(),
                      CompressedTexImage2D(target,
                                           0, // level
                                           internalFormat,
                                           width, height,
                                           0, // border
                                           SkToInt(dataSize),
                                           data));
        GrGLenum error = check_alloc_error(desc, this->glInterface());
        if (error != GR_GL_NO_ERROR) {
            return false;
        }
    } else {
        // Paletted textures can't be updated.
        if (GR_GL_PALETTE8_RGBA8 == internalFormat) {
            return false;
        }
        GL_CALL(CompressedTexSubImage2D(target,
                                        0, // level
                                        left, top,
                                        width, height,
                                        internalFormat,
                                        SkToInt(dataSize),
                                        data));
    }

    return true;
}
1117
// Allocates multisampled storage for the currently-bound renderbuffer, using
// whichever MSAA FBO extension flavor the context supports. Returns true if
// GL reported no allocation error.
static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
                                      int sampleCount,
                                      GrGLenum format,
                                      int width, int height) {
    CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
    SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
    switch (ctx.caps()->msFBOType()) {
        // Core/standard multisampled renderbuffer entry point.
        case GrGLCaps::kDesktop_ARB_MSFBOType:
        case GrGLCaps::kDesktop_EXT_MSFBOType:
        case GrGLCaps::kMixedSamples_MSFBOType:
        case GrGLCaps::kES_3_0_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                            RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
                                                           sampleCount,
                                                           format,
                                                           width, height));
            break;
        // Apple's ES2 extension has its own entry point.
        case GrGLCaps::kES_Apple_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                            RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
                                                                   sampleCount,
                                                                   format,
                                                                   width, height));
            break;
        // EXT/IMG multisampled-render-to-texture variants share one entry point.
        case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
        case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
            GL_ALLOC_CALL(ctx.interface(),
                            RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
                                                                 sampleCount,
                                                                 format,
                                                                 width, height));
            break;
        case GrGLCaps::kNone_MSFBOType:
            SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
            break;
    }
    return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
}
1156
createRenderTargetObjects(const GrSurfaceDesc & desc,GrGpuResource::LifeCycle lifeCycle,const GrGLTextureInfo & texInfo,GrGLRenderTarget::IDDesc * idDesc)1157 bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
1158 GrGpuResource::LifeCycle lifeCycle,
1159 const GrGLTextureInfo& texInfo,
1160 GrGLRenderTarget::IDDesc* idDesc) {
1161 idDesc->fMSColorRenderbufferID = 0;
1162 idDesc->fRTFBOID = 0;
1163 idDesc->fTexFBOID = 0;
1164 idDesc->fLifeCycle = lifeCycle;
1165 idDesc->fSampleConfig = (GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType() &&
1166 desc.fSampleCnt > 0) ? GrRenderTarget::kStencil_SampleConfig :
1167 GrRenderTarget::kUnified_SampleConfig;
1168
1169 GrGLenum status;
1170
1171 GrGLenum colorRenderbufferFormat = 0; // suppress warning
1172
1173 if (desc.fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
1174 goto FAILED;
1175 }
1176
1177 GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
1178 if (!idDesc->fTexFBOID) {
1179 goto FAILED;
1180 }
1181
1182 // If we are using multisampling we will create two FBOS. We render to one and then resolve to
1183 // the texture bound to the other. The exception is the IMG multisample extension. With this
1184 // extension the texture is multisampled when rendered to and then auto-resolves it when it is
1185 // rendered from.
1186 if (desc.fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
1187 GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
1188 GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
1189 if (!idDesc->fRTFBOID ||
1190 !idDesc->fMSColorRenderbufferID) {
1191 goto FAILED;
1192 }
1193 if (!this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat)) {
1194 return false;
1195 }
1196 } else {
1197 idDesc->fRTFBOID = idDesc->fTexFBOID;
1198 }
1199
1200 // below here we may bind the FBO
1201 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1202 if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
1203 SkASSERT(desc.fSampleCnt > 0);
1204 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
1205 if (!renderbuffer_storage_msaa(*fGLContext,
1206 desc.fSampleCnt,
1207 colorRenderbufferFormat,
1208 desc.fWidth, desc.fHeight)) {
1209 goto FAILED;
1210 }
1211 fStats.incRenderTargetBinds();
1212 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID));
1213 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1214 GR_GL_COLOR_ATTACHMENT0,
1215 GR_GL_RENDERBUFFER,
1216 idDesc->fMSColorRenderbufferID));
1217 if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
1218 !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
1219 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1220 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1221 goto FAILED;
1222 }
1223 fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
1224 }
1225 }
1226 fStats.incRenderTargetBinds();
1227 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID));
1228
1229 if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 0) {
1230 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
1231 GR_GL_COLOR_ATTACHMENT0,
1232 texInfo.fTarget,
1233 texInfo.fID, 0, desc.fSampleCnt));
1234 } else {
1235 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1236 GR_GL_COLOR_ATTACHMENT0,
1237 texInfo.fTarget,
1238 texInfo.fID, 0));
1239 }
1240 if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
1241 !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
1242 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1243 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1244 goto FAILED;
1245 }
1246 fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
1247 }
1248
1249 return true;
1250
1251 FAILED:
1252 if (idDesc->fMSColorRenderbufferID) {
1253 GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
1254 }
1255 if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
1256 GL_CALL(DeleteFramebuffers(1, &idDesc->fRTFBOID));
1257 }
1258 if (idDesc->fTexFBOID) {
1259 GL_CALL(DeleteFramebuffers(1, &idDesc->fTexFBOID));
1260 }
1261 return false;
1262 }
1263
// Central failure exit for texture creation; good to set a break-point here to
// know when createTexture fails.
static GrTexture* return_null_texture() {
//    SkDEBUGFAIL("null texture");
    return nullptr;
}
1269
1270 #if 0 && defined(SK_DEBUG)
1271 static size_t as_size_t(int x) {
1272 return x;
1273 }
1274 #endif
1275
onCreateTexture(const GrSurfaceDesc & desc,GrGpuResource::LifeCycle lifeCycle,const void * srcData,size_t rowBytes)1276 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
1277 GrGpuResource::LifeCycle lifeCycle,
1278 const void* srcData, size_t rowBytes) {
1279 // We fail if the MSAA was requested and is not available.
1280 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
1281 //SkDebugf("MSAA RT requested but not supported on this platform.");
1282 return return_null_texture();
1283 }
1284
1285 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
1286
1287 GrGLTexture::IDDesc idDesc;
1288 idDesc.fLifeCycle = lifeCycle;
1289 GrGLTexture::TexParams initialTexParams;
1290 if (!this->createTextureImpl(desc, &idDesc.fInfo, renderTarget, srcData,
1291 &initialTexParams, rowBytes)) {
1292 return return_null_texture();
1293 }
1294
1295 GrGLTexture* tex;
1296 if (renderTarget) {
1297 // unbind the texture from the texture unit before binding it to the frame buffer
1298 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
1299 GrGLRenderTarget::IDDesc rtIDDesc;
1300
1301 if (!this->createRenderTargetObjects(desc, lifeCycle, idDesc.fInfo, &rtIDDesc)) {
1302 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1303 return return_null_texture();
1304 }
1305 tex = new GrGLTextureRenderTarget(this, desc, idDesc, rtIDDesc);
1306 } else {
1307 tex = new GrGLTexture(this, desc, idDesc);
1308 }
1309 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1310 #ifdef TRACE_TEXTURE_CREATION
1311 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
1312 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
1313 #endif
1314 return tex;
1315 }
1316
onCreateCompressedTexture(const GrSurfaceDesc & desc,GrGpuResource::LifeCycle lifeCycle,const void * srcData)1317 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
1318 GrGpuResource::LifeCycle lifeCycle,
1319 const void* srcData) {
1320 // Make sure that we're not flipping Y.
1321 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
1322 return return_null_texture();
1323 }
1324
1325 GrGLTexture::IDDesc idDesc;
1326 idDesc.fInfo.fID = 0;
1327 GL_CALL(GenTextures(1, &idDesc.fInfo.fID));
1328 idDesc.fLifeCycle = lifeCycle;
1329 // We only support GL_TEXTURE_2D at the moment.
1330 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
1331
1332 if (!idDesc.fInfo.fID) {
1333 return return_null_texture();
1334 }
1335
1336 this->setScratchTextureUnit();
1337 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
1338
1339 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1340 // drivers have a bug where an FBO won't be complete if it includes a
1341 // texture that is not mipmap complete (considering the filter in use).
1342 GrGLTexture::TexParams initialTexParams;
1343 // we only set a subset here so invalidate first
1344 initialTexParams.invalidate();
1345 initialTexParams.fMinFilter = GR_GL_NEAREST;
1346 initialTexParams.fMagFilter = GR_GL_NEAREST;
1347 initialTexParams.fWrapS = GR_GL_CLAMP_TO_EDGE;
1348 initialTexParams.fWrapT = GR_GL_CLAMP_TO_EDGE;
1349 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1350 GR_GL_TEXTURE_MAG_FILTER,
1351 initialTexParams.fMagFilter));
1352 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1353 GR_GL_TEXTURE_MIN_FILTER,
1354 initialTexParams.fMinFilter));
1355 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1356 GR_GL_TEXTURE_WRAP_S,
1357 initialTexParams.fWrapS));
1358 GL_CALL(TexParameteri(idDesc.fInfo.fTarget,
1359 GR_GL_TEXTURE_WRAP_T,
1360 initialTexParams.fWrapT));
1361
1362 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, srcData)) {
1363 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1364 return return_null_texture();
1365 }
1366
1367 GrGLTexture* tex;
1368 tex = new GrGLTexture(this, desc, idDesc);
1369 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1370 #ifdef TRACE_TEXTURE_CREATION
1371 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
1372 glTexDesc.fTextureID, desc.fWidth, desc.fHeight, desc.fConfig);
1373 #endif
1374 return tex;
1375 }
1376
namespace {

const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;

// Queries GL for the stencil (and, for packed formats, depth) bit counts of
// the currently-bound renderbuffer and fills them into 'format'. A no-op if
// the sizes are already known.
void inline get_stencil_rb_sizes(const GrGLInterface* gl,
                                 GrGLStencilAttachment::Format* format) {

    // we shouldn't ever know one size and not the other
    SkASSERT((kUnknownBitCount == format->fStencilBits) ==
             (kUnknownBitCount == format->fTotalBits));
    if (kUnknownBitCount == format->fStencilBits) {
        GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                         GR_GL_RENDERBUFFER_STENCIL_SIZE,
                                         (GrGLint*)&format->fStencilBits);
        if (format->fPacked) {
            // Packed depth-stencil: total is depth bits plus stencil bits.
            GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
                                             GR_GL_RENDERBUFFER_DEPTH_SIZE,
                                             (GrGLint*)&format->fTotalBits);
            format->fTotalBits += format->fStencilBits;
        } else {
            format->fTotalBits = format->fStencilBits;
        }
    }
}
}
1402
getCompatibleStencilIndex(GrPixelConfig config)1403 int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) {
1404 static const int kSize = 16;
1405 SkASSERT(this->caps()->isConfigRenderable(config, false));
1406 if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) {
1407 // Default to unsupported, set this if we find a stencil format that works.
1408 int firstWorkingStencilFormatIndex = -1;
1409 // Create color texture
1410 GrGLuint colorID = 0;
1411 GL_CALL(GenTextures(1, &colorID));
1412 this->setScratchTextureUnit();
1413 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID));
1414 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1415 GR_GL_TEXTURE_MAG_FILTER,
1416 GR_GL_NEAREST));
1417 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1418 GR_GL_TEXTURE_MIN_FILTER,
1419 GR_GL_NEAREST));
1420 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1421 GR_GL_TEXTURE_WRAP_S,
1422 GR_GL_CLAMP_TO_EDGE));
1423 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1424 GR_GL_TEXTURE_WRAP_T,
1425 GR_GL_CLAMP_TO_EDGE));
1426
1427 GrGLenum internalFormat;
1428 GrGLenum externalFormat;
1429 GrGLenum externalType;
1430 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
1431 &externalType)) {
1432 return false;
1433 }
1434 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1435 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D,
1436 0,
1437 internalFormat,
1438 kSize,
1439 kSize,
1440 0,
1441 externalFormat,
1442 externalType,
1443 NULL));
1444 if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) {
1445 GL_CALL(DeleteTextures(1, &colorID));
1446 return -1;
1447 }
1448
1449 // unbind the texture from the texture unit before binding it to the frame buffer
1450 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1451
1452 // Create Framebuffer
1453 GrGLuint fb = 0;
1454 GL_CALL(GenFramebuffers(1, &fb));
1455 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fb));
1456 fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
1457 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1458 GR_GL_COLOR_ATTACHMENT0,
1459 GR_GL_TEXTURE_2D,
1460 colorID,
1461 0));
1462 GrGLuint sbRBID = 0;
1463 GL_CALL(GenRenderbuffers(1, &sbRBID));
1464
1465 // look over formats till I find a compatible one
1466 int stencilFmtCnt = this->glCaps().stencilFormats().count();
1467 if (sbRBID) {
1468 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1469 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1470 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i];
1471 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1472 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
1473 sFmt.fInternalFormat,
1474 kSize, kSize));
1475 if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) {
1476 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1477 GR_GL_STENCIL_ATTACHMENT,
1478 GR_GL_RENDERBUFFER, sbRBID));
1479 if (sFmt.fPacked) {
1480 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1481 GR_GL_DEPTH_ATTACHMENT,
1482 GR_GL_RENDERBUFFER, sbRBID));
1483 } else {
1484 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1485 GR_GL_DEPTH_ATTACHMENT,
1486 GR_GL_RENDERBUFFER, 0));
1487 }
1488 GrGLenum status;
1489 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1490 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1491 firstWorkingStencilFormatIndex = i;
1492 break;
1493 }
1494 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1495 GR_GL_STENCIL_ATTACHMENT,
1496 GR_GL_RENDERBUFFER, 0));
1497 if (sFmt.fPacked) {
1498 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1499 GR_GL_DEPTH_ATTACHMENT,
1500 GR_GL_RENDERBUFFER, 0));
1501 }
1502 }
1503 }
1504 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1505 }
1506 GL_CALL(DeleteTextures(1, &colorID));
1507 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, 0));
1508 GL_CALL(DeleteFramebuffers(1, &fb));
1509 fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex);
1510 }
1511 return this->glCaps().getStencilFormatIndexForConfig(config);
1512 }
1513
// Allocates the GL texture object for 'desc' and uploads 'srcData' (if any).
// Fills in 'info' (ID and target) and 'initialTexParams' with the params set
// here. Delegates to the external allocator when the desc provides one.
// Returns false (deleting the texture) on failure.
bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
                                bool renderTarget, const void* srcData,
                                GrGLTexture::TexParams* initialTexParams, size_t rowBytes) {
    // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
    // drivers have a bug where an FBO won't be complete if it includes a
    // texture that is not mipmap complete (considering the filter in use).

    // we only set a subset here so invalidate first
    initialTexParams->invalidate();
    initialTexParams->fMinFilter = GR_GL_NEAREST;
    initialTexParams->fMagFilter = GR_GL_NEAREST;
    initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE;
    initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE;

    // Client supplied its own storage allocator; hand off entirely.
    if (desc.fTextureStorageAllocator.fAllocateTextureStorage) {
        return this->createTextureExternalAllocatorImpl(desc, info, srcData, rowBytes);
    }

    info->fID = 0;
    info->fTarget = GR_GL_TEXTURE_2D;
    GL_CALL(GenTextures(1, &(info->fID)));

    if (!info->fID) {
        return false;
    }

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(info->fTarget, info->fID));

    if (renderTarget && this->glCaps().textureUsageSupport()) {
        // provides a hint about how this texture will be used
        GL_CALL(TexParameteri(info->fTarget,
                              GR_GL_TEXTURE_USAGE,
                              GR_GL_FRAMEBUFFER_ATTACHMENT));
    }

    GL_CALL(TexParameteri(info->fTarget,
                          GR_GL_TEXTURE_MAG_FILTER,
                          initialTexParams->fMagFilter));
    GL_CALL(TexParameteri(info->fTarget,
                          GR_GL_TEXTURE_MIN_FILTER,
                          initialTexParams->fMinFilter));
    GL_CALL(TexParameteri(info->fTarget,
                          GR_GL_TEXTURE_WRAP_S,
                          initialTexParams->fWrapS));
    GL_CALL(TexParameteri(info->fTarget,
                          GR_GL_TEXTURE_WRAP_T,
                          initialTexParams->fWrapT));
    // Allocate storage (and upload srcData if provided); clean up on failure.
    if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
                             desc.fWidth, desc.fHeight,
                             desc.fConfig, srcData, rowBytes)) {
        GL_CALL(DeleteTextures(1, &(info->fID)));
        return false;
    }
    return true;
}
1570
createTextureExternalAllocatorImpl(const GrSurfaceDesc & desc,GrGLTextureInfo * info,const void * srcData,size_t rowBytes)1571 bool GrGLGpu::createTextureExternalAllocatorImpl(
1572 const GrSurfaceDesc& desc, GrGLTextureInfo* info, const void* srcData, size_t rowBytes) {
1573 switch (desc.fTextureStorageAllocator.fAllocateTextureStorage(
1574 desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info),
1575 desc.fWidth, desc.fHeight, desc.fConfig, srcData, desc.fOrigin)) {
1576 case GrTextureStorageAllocator::Result::kSucceededAndUploaded:
1577 return true;
1578 case GrTextureStorageAllocator::Result::kFailed:
1579 return false;
1580 case GrTextureStorageAllocator::Result::kSucceededWithoutUpload:
1581 break;
1582 }
1583
1584 if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
1585 desc.fWidth, desc.fHeight,
1586 desc.fConfig, srcData, rowBytes)) {
1587 desc.fTextureStorageAllocator.fDeallocateTextureStorage(
1588 desc.fTextureStorageAllocator.fCtx, reinterpret_cast<GrBackendObject>(info));
1589 return false;
1590 }
1591 return true;
1592 }
1593
// Creates a stencil renderbuffer of at least width x height that is
// format-compatible with 'rt', matching the RT's stencil sample count.
// Returns null if no compatible stencil format exists or GL allocation fails.
GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
                                                                     int width,
                                                                     int height) {
    // The caller may request a stencil buffer larger than the RT for reuse.
    SkASSERT(width >= rt->width());
    SkASSERT(height >= rt->height());

    int samples = rt->numStencilSamples();
    GrGLStencilAttachment::IDDesc sbDesc;

    int sIdx = this->getCompatibleStencilIndex(rt->config());
    if (sIdx < 0) {
        return nullptr;
    }

    // NOTE(review): sbDesc was just default-constructed, so this pre-Gen check
    // looks redundant — presumably IDDesc default-initializes the ID to 0;
    // confirm against its declaration.
    if (!sbDesc.fRenderbufferID) {
        GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
    }
    if (!sbDesc.fRenderbufferID) {
        return nullptr;
    }
    GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
    const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
    CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
    // we do this "if" so that we don't call the multisample
    // version on a GL that doesn't have an MSAA extension.
    if (samples > 0) {
        SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
                                                 samples,
                                                 sFmt.fInternalFormat,
                                                 width, height));
    } else {
        GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
                                                               sFmt.fInternalFormat,
                                                               width, height));
        SkASSERT(GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
    }
    fStats.incStencilAttachmentCreates();
    // After sized formats we attempt an unsized format and take
    // whatever sizes GL gives us. In that case we query for the size.
    GrGLStencilAttachment::Format format = sFmt;
    get_stencil_rb_sizes(this->glInterface(), &format);
    GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
                                                               sbDesc,
                                                               width,
                                                               height,
                                                               samples,
                                                               format);
    return stencil;
}
1643
1644 ////////////////////////////////////////////////////////////////////////////////
1645
1646 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
1647 // objects are implemented as client-side-arrays on tile-deferred architectures.
1648 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
1649
onCreateVertexBuffer(size_t size,bool dynamic)1650 GrVertexBuffer* GrGLGpu::onCreateVertexBuffer(size_t size, bool dynamic) {
1651 GrGLVertexBuffer::Desc desc;
1652 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
1653 desc.fSizeInBytes = size;
1654
1655 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
1656 desc.fID = 0;
1657 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
1658 return vertexBuffer;
1659 } else {
1660 desc.fID = 0;
1661 GL_CALL(GenBuffers(1, &desc.fID));
1662 if (desc.fID) {
1663 fHWGeometryState.setVertexBufferID(this, desc.fID);
1664 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1665 // make sure driver can allocate memory for this buffer
1666 GL_ALLOC_CALL(this->glInterface(),
1667 BufferData(GR_GL_ARRAY_BUFFER,
1668 (GrGLsizeiptr) desc.fSizeInBytes,
1669 nullptr, // data ptr
1670 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
1671 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1672 GL_CALL(DeleteBuffers(1, &desc.fID));
1673 this->notifyVertexBufferDelete(desc.fID);
1674 return nullptr;
1675 }
1676 GrGLVertexBuffer* vertexBuffer = new GrGLVertexBuffer(this, desc);
1677 return vertexBuffer;
1678 }
1679 return nullptr;
1680 }
1681 }
1682
onCreateIndexBuffer(size_t size,bool dynamic)1683 GrIndexBuffer* GrGLGpu::onCreateIndexBuffer(size_t size, bool dynamic) {
1684 GrGLIndexBuffer::Desc desc;
1685 desc.fUsage = dynamic ? GrGLBufferImpl::kDynamicDraw_Usage : GrGLBufferImpl::kStaticDraw_Usage;
1686 desc.fSizeInBytes = size;
1687
1688 if (this->glCaps().useNonVBOVertexAndIndexDynamicData() && dynamic) {
1689 desc.fID = 0;
1690 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
1691 return indexBuffer;
1692 } else {
1693 desc.fID = 0;
1694 GL_CALL(GenBuffers(1, &desc.fID));
1695 if (desc.fID) {
1696 fHWGeometryState.setIndexBufferIDOnDefaultVertexArray(this, desc.fID);
1697 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1698 // make sure driver can allocate memory for this buffer
1699 GL_ALLOC_CALL(this->glInterface(),
1700 BufferData(GR_GL_ELEMENT_ARRAY_BUFFER,
1701 (GrGLsizeiptr) desc.fSizeInBytes,
1702 nullptr, // data ptr
1703 dynamic ? DYNAMIC_USAGE_PARAM : GR_GL_STATIC_DRAW));
1704 if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
1705 GL_CALL(DeleteBuffers(1, &desc.fID));
1706 this->notifyIndexBufferDelete(desc.fID);
1707 return nullptr;
1708 }
1709 GrIndexBuffer* indexBuffer = new GrGLIndexBuffer(this, desc);
1710 return indexBuffer;
1711 }
1712 return nullptr;
1713 }
1714 }
1715
// Creates a transfer (pixel pack/unpack) buffer of 'size' bytes, using the
// PBO or Chromium transfer-buffer flavor the context supports. Returns null
// if transfer buffers are unsupported or allocation fails.
GrTransferBuffer* GrGLGpu::onCreateTransferBuffer(size_t size, TransferType xferType) {
    GrGLCaps::TransferBufferType xferBufferType = this->ctxInfo().caps()->transferBufferType();
    if (GrGLCaps::kNone_TransferBufferType == xferBufferType) {
        return nullptr;
    }

    GrGLTransferBuffer::Desc desc;
    bool toGpu = (kCpuToGpu_TransferType == xferType);
    // CPU->GPU transfers are written once and drawn from; GPU->CPU are read back.
    desc.fUsage = toGpu ? GrGLBufferImpl::kStreamDraw_Usage : GrGLBufferImpl::kStreamRead_Usage;

    desc.fSizeInBytes = size;
    desc.fID = 0;
    GL_CALL(GenBuffers(1, &desc.fID));
    if (desc.fID) {
        CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
        // make sure driver can allocate memory for this buffer
        GrGLenum target;
        if (GrGLCaps::kChromium_TransferBufferType == xferBufferType) {
            target = toGpu ? GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM
                           : GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
        } else {
            SkASSERT(GrGLCaps::kPBO_TransferBufferType == xferBufferType);
            target = toGpu ? GR_GL_PIXEL_UNPACK_BUFFER : GR_GL_PIXEL_PACK_BUFFER;
        }
        GL_CALL(BindBuffer(target, desc.fID));
        GL_ALLOC_CALL(this->glInterface(),
                      BufferData(target,
                                 (GrGLsizeiptr) desc.fSizeInBytes,
                                 nullptr,  // data ptr
                                 (toGpu ? GR_GL_STREAM_DRAW : GR_GL_STREAM_READ)));
        if (CHECK_ALLOC_ERROR(this->glInterface()) != GR_GL_NO_ERROR) {
            GL_CALL(DeleteBuffers(1, &desc.fID));
            return nullptr;
        }
        GrTransferBuffer* transferBuffer = new GrGLTransferBuffer(this, desc, target);
        return transferBuffer;
    }

    return nullptr;
}
1756
// Applies 'scissorState' for a render target with the given viewport and origin, updating
// the cached HW scissor state only when it actually changes. If the scissor rect fully
// contains the viewport the test would be a no-op, so we fall through and disable it.
void GrGLGpu::flushScissor(const GrScissorState& scissorState,
                           const GrGLIRect& rtViewport,
                           GrSurfaceOrigin rtOrigin) {
    if (scissorState.enabled()) {
        // Convert the scissor rect into GL's viewport-relative, bottom-up coordinates.
        GrGLIRect scissor;
        scissor.setRelativeTo(rtViewport,
                              scissorState.rect().fLeft,
                              scissorState.rect().fTop,
                              scissorState.rect().width(),
                              scissorState.rect().height(),
                              rtOrigin);
        // if the scissor fully contains the viewport then we fall through and
        // disable the scissor test.
        if (!scissor.contains(rtViewport)) {
            // Only touch GL when the cached rect or enable state is stale.
            if (fHWScissorSettings.fRect != scissor) {
                scissor.pushToGLScissor(this->glInterface());
                fHWScissorSettings.fRect = scissor;
            }
            if (kYes_TriState != fHWScissorSettings.fEnabled) {
                GL_CALL(Enable(GR_GL_SCISSOR_TEST));
                fHWScissorSettings.fEnabled = kYes_TriState;
            }
            return;
        }
    }

    // See fall through note above
    this->disableScissor();
}
1786
// Flushes all GL state required by the draw described in 'args': color write/draw face,
// program, blend, sampled textures, stencil, scissor, MSAA, and finally the render target
// binding. Returns false when a GL program could not be created for the draw.
bool GrGLGpu::flushGLState(const DrawArgs& args) {
    GrXferProcessor::BlendInfo blendInfo;
    const GrPipeline& pipeline = *args.fPipeline;
    args.fPipeline->getXferProcessor().getBlendInfo(&blendInfo);

    this->flushColorWrite(blendInfo.fWriteColor);
    this->flushDrawFace(pipeline.getDrawFace());

    // Look up (or build) the cached GL program for this draw.
    SkAutoTUnref<GrGLProgram> program(fProgramCache->refProgram(args));
    if (!program) {
        GrCapsDebugf(this->caps(), "Failed to create program!\n");
        return false;
    }

    GrGLuint programID = program->programID();
    if (fHWProgramID != programID) {
        GL_CALL(UseProgram(programID));
        fHWProgramID = programID;
    }

    if (blendInfo.fWriteColor) {
        // Swizzle the blend to match what the shader will output.
        const GrSwizzle& swizzle = this->glCaps().glslCaps()->configOutputSwizzle(
            args.fPipeline->getRenderTarget()->config());
        this->flushBlend(blendInfo, swizzle);
    }

    // Upload uniforms and collect the textures the program samples.
    SkSTArray<8, const GrTextureAccess*> textureAccesses;
    program->setData(*args.fPrimitiveProcessor, pipeline, &textureAccesses);

    int numTextureAccesses = textureAccesses.count();
    for (int i = 0; i < numTextureAccesses; i++) {
        this->bindTexture(i, textureAccesses[i]->getParams(),
                          static_cast<GrGLTexture*>(textureAccesses[i]->getTexture()));
    }

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.getRenderTarget());
    this->flushStencil(pipeline.getStencil());
    this->flushScissor(pipeline.getScissorState(), glRT->getViewport(), glRT->origin());
    this->flushHWAAState(glRT, pipeline.isHWAntialiasState(), !pipeline.getStencil().isDisabled());

    // This must come after textures are flushed because a texture may need
    // to be msaa-resolved (which will modify bound FBO state).
    this->flushRenderTarget(glRT, nullptr);

    return true;
}
1834
// Binds the vertex (and, for indexed draws, index) buffer for a draw and configures one
// GL vertex attribute per primitive-processor attribute. For indexed draws,
// *indexOffsetInBytes receives the byte offset of the first index in the bound buffer.
void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
                            const GrNonInstancedVertices& vertices,
                            size_t* indexOffsetInBytes) {
    GrGLVertexBuffer* vbuf;
    vbuf = (GrGLVertexBuffer*) vertices.vertexBuffer();

    SkASSERT(vbuf);
    SkASSERT(!vbuf->isMapped());

    GrGLIndexBuffer* ibuf = nullptr;
    if (vertices.isIndexed()) {
        SkASSERT(indexOffsetInBytes);

        *indexOffsetInBytes = 0;
        ibuf = (GrGLIndexBuffer*)vertices.indexBuffer();

        SkASSERT(ibuf);
        SkASSERT(!ibuf->isMapped());
        // The index buffer may be sub-allocated from a larger GL buffer object.
        *indexOffsetInBytes += ibuf->baseOffset();
    }
    GrGLAttribArrayState* attribState =
        fHWGeometryState.bindArrayAndBuffersToDraw(this, vbuf, ibuf);

    int vaCount = primProc.numAttribs();
    if (vaCount > 0) {

        GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride());

        // Byte offset of this draw's first vertex within the GL buffer object.
        size_t vertexOffsetInBytes = stride * vertices.startVertex();

        vertexOffsetInBytes += vbuf->baseOffset();

        uint32_t usedAttribArraysMask = 0;
        size_t offset = 0;

        for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
            const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
            usedAttribArraysMask |= (1 << attribIndex);
            GrVertexAttribType attribType = attrib.fType;
            attribState->set(this,
                             attribIndex,
                             vbuf->bufferID(),
                             attribType,
                             stride,
                             reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset));
            // NOTE(review): 'offset' is advanced by attrib.fOffset AFTER the attribute is
            // configured, i.e. fOffset is being treated as the attribute's size/advance —
            // confirm against GrGeometryProcessor::Attribute's definition of fOffset.
            offset += attrib.fOffset;
        }
        // Disable any attribute arrays a previous draw enabled that this one doesn't use.
        attribState->disableUnusedArrays(this, usedAttribArraysMask);
    }
}
1885
buildProgramDesc(GrProgramDesc * desc,const GrPrimitiveProcessor & primProc,const GrPipeline & pipeline) const1886 void GrGLGpu::buildProgramDesc(GrProgramDesc* desc,
1887 const GrPrimitiveProcessor& primProc,
1888 const GrPipeline& pipeline) const {
1889 if (!GrGLProgramDescBuilder::Build(desc, primProc, pipeline, *this->glCaps().glslCaps())) {
1890 SkDEBUGFAIL("Failed to generate GL program descriptor");
1891 }
1892 }
1893
bindBuffer(GrGLuint id,GrGLenum type)1894 void GrGLGpu::bindBuffer(GrGLuint id, GrGLenum type) {
1895 this->handleDirtyContext();
1896 if (GR_GL_ARRAY_BUFFER == type) {
1897 this->bindVertexBuffer(id);
1898 } else if (GR_GL_ELEMENT_ARRAY_BUFFER == type) {
1899 this->bindIndexBufferAndDefaultVertexArray(id);
1900 } else {
1901 GR_GL_CALL(this->glInterface(), BindBuffer(type, id));
1902 }
1903 }
1904
releaseBuffer(GrGLuint id,GrGLenum type)1905 void GrGLGpu::releaseBuffer(GrGLuint id, GrGLenum type) {
1906 this->handleDirtyContext();
1907 GL_CALL(DeleteBuffers(1, &id));
1908 if (GR_GL_ARRAY_BUFFER == type) {
1909 this->notifyVertexBufferDelete(id);
1910 } else if (GR_GL_ELEMENT_ARRAY_BUFFER == type) {
1911 this->notifyIndexBufferDelete(id);
1912 }
1913 }
1914
get_gl_usage(GrGLBufferImpl::Usage usage)1915 static GrGLenum get_gl_usage(GrGLBufferImpl::Usage usage) {
1916 static const GrGLenum grToGL[] = {
1917 GR_GL_STATIC_DRAW, // GrGLBufferImpl::kStaticDraw_Usage
1918 DYNAMIC_USAGE_PARAM, // GrGLBufferImpl::kDynamicDraw_Usage
1919 GR_GL_STREAM_DRAW, // GrGLBufferImpl::kStreamDraw_Usage
1920 GR_GL_STREAM_READ, // GrGLBufferImpl::kStreamRead_Usage
1921 };
1922 static_assert(SK_ARRAY_COUNT(grToGL) == GrGLBufferImpl::kUsageCount, "array_size_mismatch");
1923
1924 return grToGL[usage];
1925 }
1926
// Maps GL buffer 'id' for CPU access using whichever mapping mechanism the driver
// supports. 'currentSize' is the buffer's existing GL allocation; when it differs from
// 'requestedSize' the storage is reallocated before mapping. Returns nullptr when
// mapping is unsupported (kNone).
void* GrGLGpu::mapBuffer(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
                         size_t currentSize, size_t requestedSize) {
    void* mapPtr = nullptr;
    GrGLenum glUsage = get_gl_usage(usage);
    // Stream-read buffers are GPU->CPU transfers, so map those read-only.
    bool readOnly = (GrGLBufferImpl::kStreamRead_Usage == usage);

    // Handling dirty context is done in the bindBuffer call
    switch (this->glCaps().mapBufferType()) {
        case GrGLCaps::kNone_MapBufferType:
            break;  // mapping unsupported; caller receives nullptr
        case GrGLCaps::kMapBuffer_MapBufferType:
            this->bindBuffer(id, type);
            // Let driver know it can discard the old data
            if (GR_GL_USE_BUFFER_DATA_NULL_HINT || currentSize != requestedSize) {
                GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
            }
            GL_CALL_RET(mapPtr, MapBuffer(type, readOnly ? GR_GL_READ_ONLY : GR_GL_WRITE_ONLY));
            break;
        case GrGLCaps::kMapBufferRange_MapBufferType: {
            this->bindBuffer(id, type);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (currentSize != requestedSize) {
                GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
            }
            GrGLbitfield writeAccess = GR_GL_MAP_WRITE_BIT;
            // TODO: allow the client to specify invalidation in the stream draw case
            if (GrGLBufferImpl::kStreamDraw_Usage != usage) {
                writeAccess |= GR_GL_MAP_INVALIDATE_BUFFER_BIT;
            }
            GL_CALL_RET(mapPtr, MapBufferRange(type, 0, requestedSize, readOnly ?
                                                                       GR_GL_MAP_READ_BIT :
                                                                       writeAccess));
            break;
        }
        case GrGLCaps::kChromium_MapBufferType:
            this->bindBuffer(id, type);
            // Make sure the GL buffer size agrees with fDesc before mapping.
            if (currentSize != requestedSize) {
                GL_CALL(BufferData(type, requestedSize, nullptr, glUsage));
            }
            GL_CALL_RET(mapPtr, MapBufferSubData(type, 0, requestedSize, readOnly ?
                                                                         GR_GL_READ_ONLY :
                                                                         GR_GL_WRITE_ONLY));
            break;
    }
    return mapPtr;
}
1974
// Uploads 'srcSizeInBytes' bytes from 'src' into GL buffer 'id'. 'currentSize' is the
// buffer's existing allocation and must be at least as large as the source data.
void GrGLGpu::bufferData(GrGLuint id, GrGLenum type, GrGLBufferImpl::Usage usage,
                         size_t currentSize, const void* src, size_t srcSizeInBytes) {
    SkASSERT(srcSizeInBytes <= currentSize);
    // bindbuffer handles dirty context
    this->bindBuffer(id, type);
    GrGLenum glUsage = get_gl_usage(usage);

#if GR_GL_USE_BUFFER_DATA_NULL_HINT
    if (currentSize == srcSizeInBytes) {
        // Full update: a plain glBufferData replaces the whole allocation.
        GL_CALL(BufferData(type, (GrGLsizeiptr) srcSizeInBytes, src, glUsage));
    } else {
        // Before we call glBufferSubData we give the driver a hint using
        // glBufferData with nullptr. This makes the old buffer contents
        // inaccessible to future draws. The GPU may still be processing
        // draws that reference the old contents. With this hint it can
        // assign a different allocation for the new contents to avoid
        // flushing the gpu past draws consuming the old contents.
        // TODO I think we actually want to try calling bufferData here
        GL_CALL(BufferData(type, currentSize, nullptr, glUsage));
        GL_CALL(BufferSubData(type, 0, (GrGLsizeiptr) srcSizeInBytes, src));
    }
#else
    // Note that we're cheating on the size here. Currently no methods
    // allow a partial update that preserves contents of non-updated
    // portions of the buffer (map() does a glBufferData(..size, nullptr..))
    GL_CALL(BufferData(type, srcSizeInBytes, src, glUsage));
#endif
}
2003
unmapBuffer(GrGLuint id,GrGLenum type,void * mapPtr)2004 void GrGLGpu::unmapBuffer(GrGLuint id, GrGLenum type, void* mapPtr) {
2005 // bind buffer handles the dirty context
2006 switch (this->glCaps().mapBufferType()) {
2007 case GrGLCaps::kNone_MapBufferType:
2008 SkDEBUGFAIL("Shouldn't get here.");
2009 return;
2010 case GrGLCaps::kMapBuffer_MapBufferType: // fall through
2011 case GrGLCaps::kMapBufferRange_MapBufferType:
2012 this->bindBuffer(id, type);
2013 GL_CALL(UnmapBuffer(type));
2014 break;
2015 case GrGLCaps::kChromium_MapBufferType:
2016 this->bindBuffer(id, type);
2017 GL_CALL(UnmapBufferSubData(mapPtr));
2018 break;
2019 }
2020 }
2021
disableScissor()2022 void GrGLGpu::disableScissor() {
2023 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2024 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2025 fHWScissorSettings.fEnabled = kNo_TriState;
2026 return;
2027 }
2028 }
2029
onClear(GrRenderTarget * target,const SkIRect & rect,GrColor color)2030 void GrGLGpu::onClear(GrRenderTarget* target, const SkIRect& rect, GrColor color) {
2031 // parent class should never let us get here with no RT
2032 SkASSERT(target);
2033 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2034
2035 this->flushRenderTarget(glRT, &rect);
2036 GrScissorState scissorState;
2037 scissorState.set(rect);
2038 this->flushScissor(scissorState, glRT->getViewport(), glRT->origin());
2039
2040 GrGLfloat r, g, b, a;
2041 static const GrGLfloat scale255 = 1.f / 255.f;
2042 a = GrColorUnpackA(color) * scale255;
2043 GrGLfloat scaleRGB = scale255;
2044 r = GrColorUnpackR(color) * scaleRGB;
2045 g = GrColorUnpackG(color) * scaleRGB;
2046 b = GrColorUnpackB(color) * scaleRGB;
2047
2048 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
2049 fHWWriteToColor = kYes_TriState;
2050 GL_CALL(ClearColor(r, g, b, a));
2051 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
2052 }
2053
// Tells the driver the render target's current contents are no longer needed
// (glInvalidateFramebuffer / glDiscardFramebufferEXT), which can save tile loads and
// resolves on tiled GPUs. No-op when the capability is unsupported.
void GrGLGpu::discard(GrRenderTarget* renderTarget) {
    SkASSERT(renderTarget);
    if (!this->caps()->discardRenderTargetSupport()) {
        return;
    }

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(renderTarget);
    if (renderTarget->getUniqueID() != fHWBoundRenderTargetUniqueID) {
        // Bind the FBO directly instead of going through flushRenderTarget(); mark the
        // cached binding invalid so the next draw re-flushes viewport/sRGB state.
        fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
        fStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, glRT->renderFBOID()));
    }
    switch (this->glCaps().invalidateFBType()) {
        case GrGLCaps::kNone_InvalidateFBType:
            SkFAIL("Should never get here.");
            break;
        case GrGLCaps::kInvalidate_InvalidateFBType:
            if (0 == glRT->renderFBOID()) {
                // When rendering to the default framebuffer the legal values for attachments
                // are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
                // types.
                static const GrGLenum attachments[] = { GR_GL_COLOR };
                GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                        attachments));
            } else {
                static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
                GL_CALL(InvalidateFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                        attachments));
            }
            break;
        case GrGLCaps::kDiscard_InvalidateFBType: {
            if (0 == glRT->renderFBOID()) {
                // When rendering to the default framebuffer the legal values for attachments
                // are GL_COLOR, GL_DEPTH, GL_STENCIL, ... rather than the various FBO attachment
                // types. See glDiscardFramebuffer() spec.
                static const GrGLenum attachments[] = { GR_GL_COLOR };
                GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                        attachments));
            } else {
                static const GrGLenum attachments[] = { GR_GL_COLOR_ATTACHMENT0 };
                GL_CALL(DiscardFramebuffer(GR_GL_FRAMEBUFFER, SK_ARRAY_COUNT(attachments),
                        attachments));
            }
            break;
        }
    }
    // Contents are discarded, so there is nothing left to msaa-resolve.
    renderTarget->flagAsResolved();
}
2102
clearStencil(GrRenderTarget * target)2103 void GrGLGpu::clearStencil(GrRenderTarget* target) {
2104 if (nullptr == target) {
2105 return;
2106 }
2107 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2108 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
2109
2110 this->disableScissor();
2111
2112 GL_CALL(StencilMask(0xffffffff));
2113 GL_CALL(ClearStencil(0));
2114 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2115 fHWStencilSettings.invalidate();
2116 }
2117
// Clears the clip bit of the stencil buffer within 'rect'. 'insideClip' selects whether
// pixels inside the rect get the clip bit set (top stencil bit = 1) or cleared to 0.
void GrGLGpu::onClearStencilClip(GrRenderTarget* target, const SkIRect& rect, bool insideClip) {
    SkASSERT(target);

    GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    GrGLint stencilBitCount = sb->bits();
#if 0
    SkASSERT(stencilBitCount > 0);
    GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
#else
    // we could just clear the clip bit but when we go through
    // ANGLE a partial stencil mask will cause clears to be
    // turned into draws. Our contract on GrDrawTarget says that
    // changing the clip between stencil passes may or may not
    // zero the client's clip bits. So we just clear the whole thing.
    static const GrGLint clipStencilMask = ~0;
#endif
    // The value written inside 'rect': top bit set for "inside clip", all zero otherwise.
    GrGLint value;
    if (insideClip) {
        value = (1 << (stencilBitCount - 1));
    } else {
        value = 0;
    }
    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
    this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());

    // Restrict the clear to 'rect' via the scissor.
    GrScissorState scissorState;
    scissorState.set(rect);
    this->flushScissor(scissorState, glRT->getViewport(), glRT->origin());

    GL_CALL(StencilMask((uint32_t) clipStencilMask));
    GL_CALL(ClearStencil(value));
    GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
    // The stencil mask was changed behind the tracked settings' back.
    fHWStencilSettings.invalidate();
}
2155
read_pixels_pays_for_y_flip(GrRenderTarget * renderTarget,const GrGLCaps & caps,int width,int height,GrPixelConfig config,size_t rowBytes)2156 static bool read_pixels_pays_for_y_flip(GrRenderTarget* renderTarget, const GrGLCaps& caps,
2157 int width, int height, GrPixelConfig config,
2158 size_t rowBytes) {
2159 // If this render target is already TopLeft, we don't need to flip.
2160 if (kTopLeft_GrSurfaceOrigin == renderTarget->origin()) {
2161 return false;
2162 }
2163
2164 // If the read is really small or smaller than the min texture size, don't force a draw.
2165 static const int kMinSize = 32;
2166 if (width < kMinSize || height < kMinSize) {
2167 return false;
2168 }
2169
2170 // if GL can do the flip then we'll never pay for it.
2171 if (caps.packFlipYSupport()) {
2172 return false;
2173 }
2174
2175 // If we have to do memcpy to handle non-trim rowBytes then we
2176 // get the flip for free. Otherwise it costs.
2177 // Note that we're assuming that 0 rowBytes has already been handled and that the width has been
2178 // clipped.
2179 return caps.packRowLengthSupport() || GrBytesPerPixel(config) * width == rowBytes;
2180 }
2181
readPixelsSupported(GrRenderTarget * target,GrPixelConfig readConfig)2182 bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
2183 auto bindRenderTarget = [this, target]() -> bool {
2184 this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target), &SkIRect::EmptyIRect());
2185 return true;
2186 };
2187 auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
2188 GR_GL_GetIntegerv(this->glInterface(), query, value);
2189 };
2190 GrPixelConfig rtConfig = target->config();
2191 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget);
2192 }
2193
readPixelsSupported(GrPixelConfig rtConfig,GrPixelConfig readConfig)2194 bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
2195 auto bindRenderTarget = [this, rtConfig]() -> bool {
2196 GrTextureDesc desc;
2197 desc.fConfig = rtConfig;
2198 desc.fWidth = desc.fHeight = 16;
2199 desc.fFlags = kRenderTarget_GrSurfaceFlag;
2200 SkAutoTUnref<GrTexture> temp(this->createTexture(desc, SkBudgeted::kNo, nullptr, 0));
2201 if (!temp) {
2202 return false;
2203 }
2204 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
2205 this->flushRenderTarget(glrt, &SkIRect::EmptyIRect());
2206 return true;
2207 };
2208 auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
2209 GR_GL_GetIntegerv(this->glInterface(), query, value);
2210 };
2211 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget);
2212 }
2213
readPixelsSupported(GrSurface * surfaceForConfig,GrPixelConfig readConfig)2214 bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) {
2215 if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) {
2216 return this->readPixelsSupported(rt, readConfig);
2217 } else {
2218 GrPixelConfig config = surfaceForConfig->config();
2219 return this->readPixelsSupported(config, readConfig);
2220 }
2221 }
2222
requires_srgb_conversion(GrPixelConfig a,GrPixelConfig b)2223 static bool requires_srgb_conversion(GrPixelConfig a, GrPixelConfig b) {
2224 if (GrPixelConfigIsSRGB(a)) {
2225 return !GrPixelConfigIsSRGB(b) && !GrPixelConfigIsAlphaOnly(b);
2226 } else if (GrPixelConfigIsSRGB(b)) {
2227 return !GrPixelConfigIsSRGB(a) && !GrPixelConfigIsAlphaOnly(a);
2228 }
2229 return false;
2230 }
2231
// Decides how a readPixels of 'readConfig' from 'srcSurface' should be performed: directly,
// or via a temporary draw (with an optional swizzle / config change) described in
// 'tempDrawInfo'. Returns false when the read cannot be performed at all.
bool GrGLGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
                                  GrPixelConfig readConfig, DrawPreference* drawPreference,
                                  ReadPixelTempDrawInfo* tempDrawInfo) {
    GrPixelConfig srcConfig = srcSurface->config();

    // These settings we will always want if a temp draw is performed.
    tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag;
    tempDrawInfo->fTempSurfaceDesc.fWidth = width;
    tempDrawInfo->fTempSurfaceDesc.fHeight = height;
    tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
    tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
    tempDrawInfo->fUseExactScratch = this->glCaps().partialFBOReadIsSlow();

    // For now assume no swizzling, we may change that below.
    tempDrawInfo->fSwizzle = GrSwizzle::RGBA();

    // Depends on why we need/want a temp draw. Start off assuming no change, the surface we read
    // from will be srcConfig and we will read readConfig pixels from it.
    // Note that if we require a draw and return a non-renderable format for the temp surface the
    // base class will fail for us.
    tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
    tempDrawInfo->fReadConfig = readConfig;

    if (requires_srgb_conversion(srcConfig, readConfig)) {
        if (!this->readPixelsSupported(readConfig, readConfig)) {
            return false;
        }
        // Draw to do srgb to linear conversion or vice versa.
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
        tempDrawInfo->fReadConfig = readConfig;
        return true;
    }

    GrRenderTarget* srcAsRT = srcSurface->asRenderTarget();
    if (!srcAsRT) {
        // For now keep assuming the draw is not a format transformation, just a draw to get to a
        // RT. We may add additional transformations below.
        ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
    }
    if (this->glCaps().rgba8888PixelsOpsAreSlow() && kRGBA_8888_GrPixelConfig == readConfig &&
        this->readPixelsSupported(kBGRA_8888_GrPixelConfig, kBGRA_8888_GrPixelConfig)) {
        // RGBA reads are slow on this driver; prefer a swizzling draw to BGRA and read that.
        tempDrawInfo->fTempSurfaceDesc.fConfig = kBGRA_8888_GrPixelConfig;
        tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
        tempDrawInfo->fReadConfig = kBGRA_8888_GrPixelConfig;
        ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
    } else if (kMesa_GrGLDriver == this->glContext().driver() &&
               GrBytesPerPixel(readConfig) == 4 &&
               GrPixelConfigSwapRAndB(readConfig) == srcConfig &&
               this->readPixelsSupported(srcSurface, srcConfig)) {
        // Mesa 3D takes a slow path on when reading back BGRA from an RGBA surface and vice-versa.
        // Better to do a draw with a R/B swap and then read as the original config.
        tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
        tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
        tempDrawInfo->fReadConfig = srcConfig;
        ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
    } else if (!this->readPixelsSupported(srcSurface, readConfig)) {
        if (readConfig == kBGRA_8888_GrPixelConfig &&
            this->glCaps().isConfigRenderable(kRGBA_8888_GrPixelConfig, false) &&
            this->readPixelsSupported(kRGBA_8888_GrPixelConfig, kRGBA_8888_GrPixelConfig)) {
            // We're trying to read BGRA but it's not supported. If RGBA is renderable and
            // we can read it back, then do a swizzling draw to a RGBA and read it back (which
            // will effectively be BGRA).
            tempDrawInfo->fTempSurfaceDesc.fConfig = kRGBA_8888_GrPixelConfig;
            tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
            tempDrawInfo->fReadConfig = kRGBA_8888_GrPixelConfig;
            ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
        } else if (readConfig == kAlpha_8_GrPixelConfig) {
            // onReadPixels implements a fallback for cases where we want to read kAlpha_8,
            // it's unsupported, but 32bit RGBA reads are supported.
            // Don't attempt to do any srgb conversions since we only care about alpha.
            GrPixelConfig cpuTempConfig = kRGBA_8888_GrPixelConfig;
            if (GrPixelConfigIsSRGB(srcSurface->config())) {
                cpuTempConfig = kSRGBA_8888_GrPixelConfig;
            }
            if (!this->readPixelsSupported(srcSurface, cpuTempConfig)) {
                // If we can't read RGBA from the src try to draw to a kRGBA_8888 (or kSRGBA_8888)
                // first and then onReadPixels will read that to a 32bit temporary buffer.
                if (this->caps()->isConfigRenderable(cpuTempConfig, false)) {
                    ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
                    tempDrawInfo->fTempSurfaceDesc.fConfig = cpuTempConfig;
                    tempDrawInfo->fReadConfig = kAlpha_8_GrPixelConfig;
                } else {
                    return false;
                }
            } else {
                SkASSERT(tempDrawInfo->fTempSurfaceDesc.fConfig == srcConfig);
                SkASSERT(tempDrawInfo->fReadConfig == kAlpha_8_GrPixelConfig);
            }
        } else {
            return false;
        }
    }

    // Even if a direct read works, prefer a draw when a CPU y-flip would be costly.
    if (srcAsRT &&
        read_pixels_pays_for_y_flip(srcAsRT, this->glCaps(), width, height, readConfig, rowBytes)) {
        ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
    }

    return true;
}
2333
onReadPixels(GrSurface * surface,int left,int top,int width,int height,GrPixelConfig config,void * buffer,size_t rowBytes)2334 bool GrGLGpu::onReadPixels(GrSurface* surface,
2335 int left, int top,
2336 int width, int height,
2337 GrPixelConfig config,
2338 void* buffer,
2339 size_t rowBytes) {
2340 SkASSERT(surface);
2341
2342 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2343 if (!renderTarget) {
2344 return false;
2345 }
2346
2347 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
2348 if (requires_srgb_conversion(surface->config(), config)) {
2349 return false;
2350 }
2351
2352 // We have a special case fallback for reading eight bit alpha. We will read back all four 8
2353 // bit channels as RGBA and then extract A.
2354 if (!this->readPixelsSupported(renderTarget, config)) {
2355 // Don't attempt to do any srgb conversions since we only care about alpha.
2356 GrPixelConfig tempConfig = kRGBA_8888_GrPixelConfig;
2357 if (GrPixelConfigIsSRGB(renderTarget->config())) {
2358 tempConfig = kSRGBA_8888_GrPixelConfig;
2359 }
2360 if (kAlpha_8_GrPixelConfig == config &&
2361 this->readPixelsSupported(renderTarget, tempConfig)) {
2362 SkAutoTDeleteArray<uint32_t> temp(new uint32_t[width * height * 4]);
2363 if (this->onReadPixels(renderTarget, left, top, width, height, tempConfig, temp.get(),
2364 width*4)) {
2365 uint8_t* dst = reinterpret_cast<uint8_t*>(buffer);
2366 for (int j = 0; j < height; ++j) {
2367 for (int i = 0; i < width; ++i) {
2368 dst[j*rowBytes + i] = (0xFF000000U & temp[j*width+i]) >> 24;
2369 }
2370 }
2371 return true;
2372 }
2373 }
2374 return false;
2375 }
2376
2377 GrGLenum externalFormat;
2378 GrGLenum externalType;
2379 if (!this->glCaps().getReadPixelsFormat(renderTarget->config(), config, &externalFormat,
2380 &externalType)) {
2381 return false;
2382 }
2383 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
2384
2385 // resolve the render target if necessary
2386 switch (renderTarget->getResolveType()) {
2387 case GrGLRenderTarget::kCantResolve_ResolveType:
2388 return false;
2389 case GrGLRenderTarget::kAutoResolves_ResolveType:
2390 this->flushRenderTarget(renderTarget, &SkIRect::EmptyIRect());
2391 break;
2392 case GrGLRenderTarget::kCanResolve_ResolveType:
2393 this->onResolveRenderTarget(renderTarget);
2394 // we don't track the state of the READ FBO ID.
2395 fStats.incRenderTargetBinds();
2396 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID()));
2397 break;
2398 default:
2399 SkFAIL("Unknown resolve type");
2400 }
2401
2402 const GrGLIRect& glvp = renderTarget->getViewport();
2403
2404 // the read rect is viewport-relative
2405 GrGLIRect readRect;
2406 readRect.setRelativeTo(glvp, left, top, width, height, renderTarget->origin());
2407
2408 size_t bytesPerPixel = GrBytesPerPixel(config);
2409 size_t tightRowBytes = bytesPerPixel * width;
2410
2411 size_t readDstRowBytes = tightRowBytes;
2412 void* readDst = buffer;
2413
2414 // determine if GL can read using the passed rowBytes or if we need
2415 // a scratch buffer.
2416 SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
2417 if (rowBytes != tightRowBytes) {
2418 if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) {
2419 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
2420 static_cast<GrGLint>(rowBytes / bytesPerPixel)));
2421 readDstRowBytes = rowBytes;
2422 } else {
2423 scratch.reset(tightRowBytes * height);
2424 readDst = scratch.get();
2425 }
2426 }
2427 if (flipY && this->glCaps().packFlipYSupport()) {
2428 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
2429 }
2430 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(config)));
2431
2432 GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
2433 readRect.fWidth, readRect.fHeight,
2434 externalFormat, externalType, readDst));
2435 if (readDstRowBytes != tightRowBytes) {
2436 SkASSERT(this->glCaps().packRowLengthSupport());
2437 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2438 }
2439 if (flipY && this->glCaps().packFlipYSupport()) {
2440 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
2441 flipY = false;
2442 }
2443
2444 // now reverse the order of the rows, since GL's are bottom-to-top, but our
2445 // API presents top-to-bottom. We must preserve the padding contents. Note
2446 // that the above readPixels did not overwrite the padding.
2447 if (readDst == buffer) {
2448 SkASSERT(rowBytes == readDstRowBytes);
2449 if (flipY) {
2450 scratch.reset(tightRowBytes);
2451 void* tmpRow = scratch.get();
2452 // flip y in-place by rows
2453 const int halfY = height >> 1;
2454 char* top = reinterpret_cast<char*>(buffer);
2455 char* bottom = top + (height - 1) * rowBytes;
2456 for (int y = 0; y < halfY; y++) {
2457 memcpy(tmpRow, top, tightRowBytes);
2458 memcpy(top, bottom, tightRowBytes);
2459 memcpy(bottom, tmpRow, tightRowBytes);
2460 top += rowBytes;
2461 bottom -= rowBytes;
2462 }
2463 }
2464 } else {
2465 SkASSERT(readDst != buffer);
2466 SkASSERT(rowBytes != tightRowBytes);
2467 // copy from readDst to buffer while flipping y
2468 // const int halfY = height >> 1;
2469 const char* src = reinterpret_cast<const char*>(readDst);
2470 char* dst = reinterpret_cast<char*>(buffer);
2471 if (flipY) {
2472 dst += (height-1) * rowBytes;
2473 }
2474 for (int y = 0; y < height; y++) {
2475 memcpy(dst, src, tightRowBytes);
2476 src += readDstRowBytes;
2477 if (!flipY) {
2478 dst += rowBytes;
2479 } else {
2480 dst -= rowBytes;
2481 }
2482 }
2483 }
2484 return true;
2485 }
2486
// Called when a draw target finishes flushing. Applies the ARM pixel-local-storage
// driver-bug workaround: once PLS has been used, it must be exercised (trivially) on
// every subsequent frame.
void GrGLGpu::finishDrawTarget() {
    if (fPLSHasBeenUsed) {
        /* There is an ARM driver bug where if we use PLS, and then draw a frame which does not
         * use PLS, it leaves garbage all over the place. As a workaround, we use PLS in a
         * trivial way every frame. And since we use it every frame, there's never a point at which
         * it becomes safe to stop using this workaround once we start.
         */
        this->disableScissor();
        // using PLS in the presence of MSAA results in GL_INVALID_OPERATION
        this->flushHWAAState(nullptr, false, false);
        SkASSERT(!fHWPLSEnabled);
        SkASSERT(fMSAAEnabled != kYes_TriState);
        GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
        // Draw a tiny off-screen rect with the PLS setup program just to exercise PLS.
        this->stampRectUsingProgram(fPLSSetupProgram.fProgram,
                                    SkRect::MakeXYWH(-100.0f, -100.0f, 0.01f, 0.01f),
                                    fPLSSetupProgram.fPosXformUniform,
                                    fPLSSetupProgram.fArrayBuffer);
        GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
    }
}
2507
// Binds 'target' as the draw framebuffer if it isn't already bound, updating the
// viewport and sRGB-write enable to match, then marks 'bounds' of the surface as
// written (nullptr = whole surface; an empty rect = nothing written).
void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bounds) {
    SkASSERT(target);

    uint32_t rtID = target->getUniqueID();
    if (fHWBoundRenderTargetUniqueID != rtID) {
        fStats.incRenderTargetBinds();
        GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()));
#ifdef SK_DEBUG
        // don't do this check in Chromium -- this is causing
        // lots of repeated command buffer flushes when the compositor is
        // rendering with Ganesh, which is really slow; even too slow for
        // Debug mode.
        if (kChromium_GrGLDriver != this->glContext().driver()) {
            GrGLenum status;
            GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
            if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
                SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
            }
        }
#endif
        fHWBoundRenderTargetUniqueID = rtID;
        this->flushViewport(target->getViewport());
        if (this->glCaps().srgbWriteControl()) {
            // Enable GL's linear->sRGB write conversion only for sRGB-config targets.
            bool enableSRGBWrite = GrPixelConfigIsSRGB(target->config());
            if (enableSRGBWrite && kYes_TriState != fHWSRGBFramebuffer) {
                GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
                fHWSRGBFramebuffer = kYes_TriState;
            } else if (!enableSRGBWrite && kNo_TriState != fHWSRGBFramebuffer) {
                GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
                fHWSRGBFramebuffer = kNo_TriState;
            }
        }
    }
    this->didWriteToSurface(target, bounds);
}
2543
flushViewport(const GrGLIRect & viewport)2544 void GrGLGpu::flushViewport(const GrGLIRect& viewport) {
2545 if (fHWViewport != viewport) {
2546 viewport.pushToGLViewport(this->glInterface());
2547 fHWViewport = viewport;
2548 }
2549 }
2550
didWriteToSurface(GrSurface * surface,const SkIRect * bounds) const2551 void GrGLGpu::didWriteToSurface(GrSurface* surface, const SkIRect* bounds) const {
2552 SkASSERT(surface);
2553 // Mark any MIP chain and resolve buffer as dirty if and only if there is a non-empty bounds.
2554 if (nullptr == bounds || !bounds->isEmpty()) {
2555 if (GrRenderTarget* target = surface->asRenderTarget()) {
2556 target->flagAsNeedingResolve(bounds);
2557 }
2558 if (GrTexture* texture = surface->asTexture()) {
2559 texture->texturePriv().dirtyMipMaps(true);
2560 }
2561 }
2562 }
2563
// Maps a GrPrimitiveType (used as the array index) to the matching GL
// primitive mode. NOTE(review): no static asserts pin this ordering to the
// GrPrimitiveType enum values -- keep the two in sync by hand.
GrGLenum gPrimitiveType2GLMode[] = {
    GR_GL_TRIANGLES,
    GR_GL_TRIANGLE_STRIP,
    GR_GL_TRIANGLE_FAN,
    GR_GL_POINTS,
    GR_GL_LINES,
    GR_GL_LINE_STRIP
};
2572
// Debugging aid: when SWAP_PER_DRAW is 1, every draw flushes and swaps the
// window's buffers (twice, with a breakpoint spot in between) so individual
// draws can be observed on screen. Must remain 0 for normal builds.
#define SWAP_PER_DRAW 0

#if SWAP_PER_DRAW
#if defined(SK_BUILD_FOR_MAC)
#include <AGL/agl.h>
#elif defined(SK_BUILD_FOR_WIN32)
#include <gl/GL.h>
// Swaps the buffers of every top-level window owned by the current process.
void SwapBuf() {
    DWORD procID = GetCurrentProcessId();
    HWND hwnd = GetTopWindow(GetDesktopWindow());
    while(hwnd) {
        DWORD wndProcID = 0;
        GetWindowThreadProcessId(hwnd, &wndProcID);
        if(wndProcID == procID) {
            SwapBuffers(GetDC(hwnd));
        }
        hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
    }
}
#endif
#endif
2594
// Issues one non-instanced draw. Flushes pending GL state for |args|, manages
// the pixel-local-storage (PLS) enable/disable lifecycle around the draw, sets
// up vertex attributes, and issues glDrawElements or glDrawArrays.
void GrGLGpu::onDraw(const DrawArgs& args, const GrNonInstancedVertices& vertices) {
    if (!this->flushGLState(args)) {
        return;
    }

    GrPixelLocalStorageState plsState = args.fPrimitiveProcessor->getPixelLocalStorageState();
    // Lazily enable PLS the first time a PLS-using primitive processor draws.
    if (!fHWPLSEnabled && plsState !=
        GrPixelLocalStorageState::kDisabled_GrPixelLocalStorageState) {
        GL_CALL(Enable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
        this->setupPixelLocalStorage(args);
        fHWPLSEnabled = true;
    }
    // The final draw of a PLS sequence must run with stencil disabled.
    if (plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState) {
        GrStencilSettings stencil;
        stencil.setDisabled();
        this->flushStencil(stencil);
    }

    size_t indexOffsetInBytes = 0;
    this->setupGeometry(*args.fPrimitiveProcessor, vertices, &indexOffsetInBytes);

    SkASSERT((size_t)vertices.primitiveType() < SK_ARRAY_COUNT(gPrimitiveType2GLMode));

    if (vertices.isIndexed()) {
        // Indices are 16-bit; offset into the bound index buffer in bytes.
        GrGLvoid* indices =
            reinterpret_cast<GrGLvoid*>(indexOffsetInBytes + sizeof(uint16_t) *
                                        vertices.startIndex());
        // info.startVertex() was accounted for by setupGeometry.
        GL_CALL(DrawElements(gPrimitiveType2GLMode[vertices.primitiveType()],
                             vertices.indexCount(),
                             GR_GL_UNSIGNED_SHORT,
                             indices));
    } else {
        // Pass 0 for parameter first. We have to adjust glVertexAttribPointer() to account for
        // startVertex in the DrawElements case. So we always rely on setupGeometry to have
        // accounted for startVertex.
        GL_CALL(DrawArrays(gPrimitiveType2GLMode[vertices.primitiveType()], 0,
                           vertices.vertexCount()));
    }

    if (fHWPLSEnabled && plsState == GrPixelLocalStorageState::kFinish_GrPixelLocalStorageState) {
        // PLS draws always involve multiple draws, finishing up with a non-PLS
        // draw that writes to the color buffer. That draw ends up here; we wait
        // until after it is complete to actually disable PLS.
        GL_CALL(Disable(GR_GL_SHADER_PIXEL_LOCAL_STORAGE));
        fHWPLSEnabled = false;
        this->disableScissor();
    }

#if SWAP_PER_DRAW
    glFlush();
    #if defined(SK_BUILD_FOR_MAC)
        aglSwapBuffers(aglGetCurrentContext());
        int set_a_break_pt_here = 9;
        aglSwapBuffers(aglGetCurrentContext());
    #elif defined(SK_BUILD_FOR_WIN32)
        SwapBuf();
        int set_a_break_pt_here = 9;
        SwapBuf();
    #endif
#endif
}
2657
// Draws a rect with a stand-alone |program| (used by the PLS setup path),
// bypassing the normal program/geometry flush. Restores the previously bound
// program and the stencil-test enable before returning.
void GrGLGpu::stampRectUsingProgram(GrGLuint program, const SkRect& bounds, GrGLint posXformUniform,
                                    GrGLuint arrayBuffer) {
    GL_CALL(UseProgram(program));
    this->fHWGeometryState.setVertexArrayID(this, 0);

    // Bind |arrayBuffer| as a single vec2 position attribute at index 0.
    GrGLAttribArrayState* attribs =
        this->fHWGeometryState.bindArrayAndBufferToDraw(this, arrayBuffer);
    attribs->set(this, 0, arrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat), 0);
    attribs->disableUnusedArrays(this, 0x1);

    // The uniform packs (width, height, left, top) of the rect.
    GL_CALL(Uniform4f(posXformUniform, bounds.width(), bounds.height(), bounds.left(),
                      bounds.top()));

    GrXferProcessor::BlendInfo blendInfo;
    blendInfo.reset();
    this->flushBlend(blendInfo, GrSwizzle());
    this->flushColorWrite(true);
    this->flushDrawFace(GrPipelineBuilder::kBoth_DrawFace);
    // Temporarily turn off stencil testing if the cached settings have it on.
    if (!fHWStencilSettings.isDisabled()) {
        GL_CALL(Disable(GR_GL_STENCIL_TEST));
    }
    GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
    // Restore the previously flushed program and stencil enable state.
    GL_CALL(UseProgram(fHWProgramID));
    if (!fHWStencilSettings.isDisabled()) {
        GL_CALL(Enable(GR_GL_STENCIL_TEST));
    }
}
2685
// Initializes pixel local storage for a PLS draw: captures the current
// framebuffer color and zeroes the winding counts by stamping the processor's
// bounds (expanded by one pixel) with the PLS setup program.
void GrGLGpu::setupPixelLocalStorage(const DrawArgs& args) {
    fPLSHasBeenUsed = true;
    const SkRect& bounds =
        static_cast<const GrPLSGeometryProcessor*>(args.fPrimitiveProcessor)->getBounds();
    // setup pixel local storage -- this means capturing and storing the current framebuffer color
    // and initializing the winding counts to zero
    GrRenderTarget* rt = args.fPipeline->getRenderTarget();
    SkScalar width = SkIntToScalar(rt->width());
    SkScalar height = SkIntToScalar(rt->height());
    // dst rect edges in NDC (-1 to 1)
    // having some issues with rounding, just expand the bounds by 1 and trust the scissor to keep
    // it contained properly
    // Note the y-axis flip: NDC y points up while the bounds are in device space.
    GrGLfloat dx0 = 2.0f * (bounds.left() - 1) / width - 1.0f;
    GrGLfloat dx1 = 2.0f * (bounds.right() + 1) / width - 1.0f;
    GrGLfloat dy0 = -2.0f * (bounds.top() - 1) / height + 1.0f;
    GrGLfloat dy1 = -2.0f * (bounds.bottom() + 1) / height + 1.0f;
    SkRect deviceBounds = SkRect::MakeXYWH(dx0, dy0, dx1 - dx0, dy1 - dy0);

    GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
    this->stampRectUsingProgram(fPLSSetupProgram.fProgram, deviceBounds,
                                fPLSSetupProgram.fPosXformUniform, fPLSSetupProgram.fArrayBuffer);
}
2708
// Resolves a multisampled render target into its texture, using either the
// Apple resolve extension or glBlitFramebuffer, then clears the dirty flag.
void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
    if (rt->needsResolve()) {
        // Some extensions automatically resolve the texture when it is read,
        // in which case the render FBO and texture FBO are the same object.
        if (this->glCaps().usesMSAARenderBuffers()) {
            SkASSERT(rt->textureFBOID() != rt->renderFBOID());
            fStats.incRenderTargetBinds();
            fStats.incRenderTargetBinds();
            GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
            GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
            // make sure we go through flushRenderTarget() since we've modified
            // the bound DRAW FBO ID.
            fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
            const GrGLIRect& vp = rt->getViewport();
            const SkIRect dirtyRect = rt->getResolveRect();

            if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
                // Apple's extension uses the scissor as the blit bounds.
                GrScissorState scissorState;
                scissorState.set(dirtyRect);
                this->flushScissor(scissorState, vp, rt->origin());
                GL_CALL(ResolveMultisampleFramebuffer());
            } else {
                // Translate the dirty rect into FBO coordinates (origin-aware).
                GrGLIRect r;
                r.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
                                dirtyRect.width(), dirtyRect.height(), target->origin());

                int right = r.fLeft + r.fWidth;
                int top = r.fBottom + r.fHeight;

                // BlitFrameBuffer respects the scissor, so disable it.
                this->disableScissor();
                GL_CALL(BlitFramebuffer(r.fLeft, r.fBottom, right, top,
                                        r.fLeft, r.fBottom, right, top,
                                        GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
            }
        }
        rt->flagAsResolved();
    }
}
2749
namespace {

// Converts a GrStencilOp to the equivalent GL stencil op enum. The table is
// indexed by the Gr enum value; the static asserts pin that ordering.
GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
    static const GrGLenum gTable[] = {
        GR_GL_KEEP,        // kKeep_StencilOp
        GR_GL_REPLACE,     // kReplace_StencilOp
        GR_GL_INCR_WRAP,   // kIncWrap_StencilOp
        GR_GL_INCR,        // kIncClamp_StencilOp
        GR_GL_DECR_WRAP,   // kDecWrap_StencilOp
        GR_GL_DECR,        // kDecClamp_StencilOp
        GR_GL_ZERO,        // kZero_StencilOp
        GR_GL_INVERT,      // kInvert_StencilOp
    };
    GR_STATIC_ASSERT(SK_ARRAY_COUNT(gTable) == kStencilOpCount);
    GR_STATIC_ASSERT(0 == kKeep_StencilOp);
    GR_STATIC_ASSERT(1 == kReplace_StencilOp);
    GR_STATIC_ASSERT(2 == kIncWrap_StencilOp);
    GR_STATIC_ASSERT(3 == kIncClamp_StencilOp);
    GR_STATIC_ASSERT(4 == kDecWrap_StencilOp);
    GR_STATIC_ASSERT(5 == kDecClamp_StencilOp);
    GR_STATIC_ASSERT(6 == kZero_StencilOp);
    GR_STATIC_ASSERT(7 == kInvert_StencilOp);
    SkASSERT((unsigned) op < kStencilOpCount);
    return gTable[op];
}

// Applies |settings| for |grFace| to the GL face(s) selected by |glFace|,
// using either the combined or the separate-face stencil entry points.
// The depth-fail op is always KEEP (Ganesh does not use the depth buffer here).
void set_gl_stencil(const GrGLInterface* gl,
                    const GrStencilSettings& settings,
                    GrGLenum glFace,
                    GrStencilSettings::Face grFace) {
    GrGLenum glFunc = GrToGLStencilFunc(settings.func(grFace));
    GrGLenum glFailOp = gr_to_gl_stencil_op(settings.failOp(grFace));
    GrGLenum glPassOp = gr_to_gl_stencil_op(settings.passOp(grFace));

    GrGLint ref = settings.funcRef(grFace);
    GrGLint mask = settings.funcMask(grFace);
    GrGLint writeMask = settings.writeMask(grFace);

    if (GR_GL_FRONT_AND_BACK == glFace) {
        // we call the combined func just in case separate stencil is not
        // supported.
        GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMask(writeMask));
        GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
    } else {
        GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
        GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
        GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
    }
}
}  // namespace
2802
flushStencil(const GrStencilSettings & stencilSettings)2803 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) {
2804 if (fHWStencilSettings != stencilSettings) {
2805 if (stencilSettings.isDisabled()) {
2806 if (kNo_TriState != fHWStencilTestEnabled) {
2807 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2808 fHWStencilTestEnabled = kNo_TriState;
2809 }
2810 } else {
2811 if (kYes_TriState != fHWStencilTestEnabled) {
2812 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2813 fHWStencilTestEnabled = kYes_TriState;
2814 }
2815 }
2816 if (!stencilSettings.isDisabled()) {
2817 if (this->caps()->twoSidedStencilSupport()) {
2818 set_gl_stencil(this->glInterface(),
2819 stencilSettings,
2820 GR_GL_FRONT,
2821 GrStencilSettings::kFront_Face);
2822 set_gl_stencil(this->glInterface(),
2823 stencilSettings,
2824 GR_GL_BACK,
2825 GrStencilSettings::kBack_Face);
2826 } else {
2827 set_gl_stencil(this->glInterface(),
2828 stencilSettings,
2829 GR_GL_FRONT_AND_BACK,
2830 GrStencilSettings::kFront_Face);
2831 }
2832 }
2833 fHWStencilSettings = stencilSettings;
2834 }
2835 }
2836
// Flushes GL multisampling state. |rt| may be null only when |useHWAA| is
// false. |stencilEnabled| determines whether mixed-sample rendering needs
// explicit raster samples (EXT_raster_multisample) or can use the stencil
// buffer's samples.
void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled) {
    // rt is only optional if useHWAA is false.
    SkASSERT(rt || !useHWAA);
    SkASSERT(!useHWAA || rt->isStencilBufferMultisampled());

    if (this->glCaps().multisampleDisableSupport()) {
        if (useHWAA) {
            if (kYes_TriState != fMSAAEnabled) {
                GL_CALL(Enable(GR_GL_MULTISAMPLE));
                fMSAAEnabled = kYes_TriState;
            }
        } else {
            if (kNo_TriState != fMSAAEnabled) {
                GL_CALL(Disable(GR_GL_MULTISAMPLE));
                fMSAAEnabled = kNo_TriState;
            }
        }
    }

    if (0 != this->caps()->maxRasterSamples()) {
        if (useHWAA && rt->hasMixedSamples() && !stencilEnabled) {
            // Since stencil is disabled and we want more samples than are in the color buffer, we
            // need to tell the rasterizer explicitly how many to run.
            if (kYes_TriState != fHWRasterMultisampleEnabled) {
                GL_CALL(Enable(GR_GL_RASTER_MULTISAMPLE));
                fHWRasterMultisampleEnabled = kYes_TriState;
            }
            if (rt->numStencilSamples() != fHWNumRasterSamples) {
                SkASSERT(rt->numStencilSamples() <= this->caps()->maxRasterSamples());
                GL_CALL(RasterSamples(rt->numStencilSamples(), GR_GL_TRUE));
                fHWNumRasterSamples = rt->numStencilSamples();
            }
        } else {
            if (kNo_TriState != fHWRasterMultisampleEnabled) {
                GL_CALL(Disable(GR_GL_RASTER_MULTISAMPLE));
                fHWRasterMultisampleEnabled = kNo_TriState;
            }
        }
    } else {
        // Without raster-multisample support, mixed samples require stencil.
        SkASSERT(!useHWAA || !rt->hasMixedSamples() || stencilEnabled);
    }
}
2879
// Flushes blend equation/coefficients/constant to GL, tracking everything in
// fHWBlendState to avoid redundant calls. |swizzle| is applied to the blend
// constant so it matches the render target's channel ordering.
void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
    // Any optimization to disable blending should have already been applied and
    // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).

    GrBlendEquation equation = blendInfo.fEquation;
    GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
    GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
    // (1, 0) with a basic equation means blending is a no-op and can be off.
    bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
                    kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
    if (blendOff) {
        if (kNo_TriState != fHWBlendState.fEnabled) {
            GL_CALL(Disable(GR_GL_BLEND));

            // Workaround for the ARM KHR_blend_equation_advanced blacklist issue
            // https://code.google.com/p/skia/issues/detail?id=3943
            if (kARM_GrGLVendor == this->ctxInfo().vendor() &&
                GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
                SkASSERT(this->caps()->advancedBlendEquationSupport());
                // Set to any basic blending equation.
                GrBlendEquation blend_equation = kAdd_GrBlendEquation;
                GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
                fHWBlendState.fEquation = blend_equation;
            }

            fHWBlendState.fEnabled = kNo_TriState;
        }
        return;
    }

    if (kYes_TriState != fHWBlendState.fEnabled) {
        GL_CALL(Enable(GR_GL_BLEND));
        fHWBlendState.fEnabled = kYes_TriState;
    }

    if (fHWBlendState.fEquation != equation) {
        GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
        fHWBlendState.fEquation = equation;
    }

    if (GrBlendEquationIsAdvanced(equation)) {
        SkASSERT(this->caps()->advancedBlendEquationSupport());
        // Advanced equations have no other blend state.
        return;
    }

    if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
        GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
                          gXfermodeCoeff2Blend[dstCoeff]));
        fHWBlendState.fSrcCoeff = srcCoeff;
        fHWBlendState.fDstCoeff = dstCoeff;
    }

    // Only push the blend constant when a coefficient actually references it.
    if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) {
        GrColor blendConst = blendInfo.fBlendConstant;
        blendConst = swizzle.applyTo(blendConst);
        if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
            GrGLfloat c[4];
            GrColorToRGBAFloat(blendConst, c);
            GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
            fHWBlendState.fConstColor = blendConst;
            fHWBlendState.fConstColorValid = true;
        }
    }
}
2944
tile_to_gl_wrap(SkShader::TileMode tm)2945 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
2946 static const GrGLenum gWrapModes[] = {
2947 GR_GL_CLAMP_TO_EDGE,
2948 GR_GL_REPEAT,
2949 GR_GL_MIRRORED_REPEAT
2950 };
2951 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
2952 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
2953 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
2954 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
2955 return gWrapModes[tm];
2956 }
2957
get_component_enum_from_char(char component)2958 static GrGLenum get_component_enum_from_char(char component) {
2959 switch (component) {
2960 case 'r':
2961 return GR_GL_RED;
2962 case 'g':
2963 return GR_GL_GREEN;
2964 case 'b':
2965 return GR_GL_BLUE;
2966 case 'a':
2967 return GR_GL_ALPHA;
2968 default:
2969 SkFAIL("Unsupported component");
2970 return 0;
2971 }
2972 }
2973
/** If texture swizzling is available using tex parameters then it is preferred over mangling
    the generated shader code. This potentially allows greater reuse of cached shaders. */
// Fills |glSwizzle| (4 entries) with the GL channel enums for |config|'s swizzle.
static void get_tex_param_swizzle(GrPixelConfig config,
                                  const GrGLCaps& caps,
                                  GrGLenum* glSwizzle) {
    // Bind a reference first so the swizzle string stays alive for the loop.
    const GrSwizzle& swizzle = caps.configSwizzle(config);
    for (int i = 0; i < 4; ++i) {
        glSwizzle[i] = get_component_enum_from_char(swizzle.c_str()[i]);
    }
}
2984
// Binds |texture| to texture unit |unitIdx| and flushes any texture parameters
// (filter, wrap, swizzle) that changed since the last bind, resolving the
// texture's MSAA render target and regenerating dirty mips first when needed.
void GrGLGpu::bindTexture(int unitIdx, const GrTextureParams& params, GrGLTexture* texture) {
    SkASSERT(texture);

#ifdef SK_DEBUG
    // Verify that NPOT textures are only tiled when the caps allow it.
    if (!this->caps()->npotTextureTileSupport()) {
        const bool tileX = SkShader::kClamp_TileMode != params.getTileModeX();
        const bool tileY = SkShader::kClamp_TileMode != params.getTileModeY();
        if (tileX || tileY) {
            const int w = texture->width();
            const int h = texture->height();
            SkASSERT(SkIsPow2(w) && SkIsPow2(h));
        }
    }
#endif

    // If we created a rt/tex and rendered to it without using a texture and now we're texturing
    // from the rt it will still be the last bound texture, but it needs resolving. So keep this
    // out of the "last != next" check.
    GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
    if (texRT) {
        this->onResolveRenderTarget(texRT);
    }

    uint32_t textureID = texture->getUniqueID();
    GrGLenum target = texture->target();
    if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
        this->setTextureUnit(unitIdx);
        GL_CALL(BindTexture(target, texture->textureID()));
        fHWBoundTextureUniqueIDs[unitIdx] = textureID;
    }

    // If the texture's cached params predate the last context reset, all
    // parameters must be re-sent regardless of the cached values.
    ResetTimestamp timestamp;
    const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
    bool setAll = timestamp < this->getResetTimestamp();
    GrGLTexture::TexParams newTexParams;

    // Indexed by GrTextureParams::FilterMode (none, bilerp, mipmap).
    static GrGLenum glMinFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR_MIPMAP_LINEAR
    };
    static GrGLenum glMagFilterModes[] = {
        GR_GL_NEAREST,
        GR_GL_LINEAR,
        GR_GL_LINEAR
    };
    GrTextureParams::FilterMode filterMode = params.filterMode();

    // Fall back to bilerp when mips are unsupported or impossible (compressed).
    if (GrTextureParams::kMipMap_FilterMode == filterMode) {
        if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
            filterMode = GrTextureParams::kBilerp_FilterMode;
        }
    }

    newTexParams.fMinFilter = glMinFilterModes[filterMode];
    newTexParams.fMagFilter = glMagFilterModes[filterMode];

    // Regenerate the MIP chain if it is stale and will actually be sampled.
    if (GrTextureParams::kMipMap_FilterMode == filterMode &&
        texture->texturePriv().mipMapsAreDirty()) {
        GL_CALL(GenerateMipmap(target));
        texture->texturePriv().dirtyMipMaps(false);
    }

    newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
    newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
    get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizzleRGBA);
    // Only issue TexParameter calls for values that actually changed.
    if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newTexParams.fMagFilter));
    }
    if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newTexParams.fMinFilter));
    }
    if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newTexParams.fWrapS));
    }
    if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
        this->setTextureUnit(unitIdx);
        GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newTexParams.fWrapT));
    }
    if (this->glCaps().textureSwizzleSupport() &&
        (setAll || memcmp(newTexParams.fSwizzleRGBA,
                          oldTexParams.fSwizzleRGBA,
                          sizeof(newTexParams.fSwizzleRGBA)))) {
        this->setTextureUnit(unitIdx);
        if (this->glStandard() == kGLES_GrGLStandard) {
            // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
            const GrGLenum* swizzle = newTexParams.fSwizzleRGBA;
            GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
            GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
            GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
            GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
        } else {
            GR_STATIC_ASSERT(sizeof(newTexParams.fSwizzleRGBA[0]) == sizeof(GrGLint));
            const GrGLint* swizzle = reinterpret_cast<const GrGLint*>(newTexParams.fSwizzleRGBA);
            GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA, swizzle));
        }
    }
    texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
}
3087
flushColorWrite(bool writeColor)3088 void GrGLGpu::flushColorWrite(bool writeColor) {
3089 if (!writeColor) {
3090 if (kNo_TriState != fHWWriteToColor) {
3091 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
3092 GR_GL_FALSE, GR_GL_FALSE));
3093 fHWWriteToColor = kNo_TriState;
3094 }
3095 } else {
3096 if (kYes_TriState != fHWWriteToColor) {
3097 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
3098 fHWWriteToColor = kYes_TriState;
3099 }
3100 }
3101 }
3102
flushDrawFace(GrPipelineBuilder::DrawFace face)3103 void GrGLGpu::flushDrawFace(GrPipelineBuilder::DrawFace face) {
3104 if (fHWDrawFace != face) {
3105 switch (face) {
3106 case GrPipelineBuilder::kCCW_DrawFace:
3107 GL_CALL(Enable(GR_GL_CULL_FACE));
3108 GL_CALL(CullFace(GR_GL_BACK));
3109 break;
3110 case GrPipelineBuilder::kCW_DrawFace:
3111 GL_CALL(Enable(GR_GL_CULL_FACE));
3112 GL_CALL(CullFace(GR_GL_FRONT));
3113 break;
3114 case GrPipelineBuilder::kBoth_DrawFace:
3115 GL_CALL(Disable(GR_GL_CULL_FACE));
3116 break;
3117 default:
3118 SkFAIL("Unknown draw face.");
3119 }
3120 fHWDrawFace = face;
3121 }
3122 }
3123
setTextureUnit(int unit)3124 void GrGLGpu::setTextureUnit(int unit) {
3125 SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
3126 if (unit != fHWActiveTextureUnitIdx) {
3127 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
3128 fHWActiveTextureUnitIdx = unit;
3129 }
3130 }
3131
setScratchTextureUnit()3132 void GrGLGpu::setScratchTextureUnit() {
3133 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
3134 int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
3135 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
3136 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
3137 fHWActiveTextureUnitIdx = lastUnitIdx;
3138 }
3139 // clear out the this field so that if a program does use this unit it will rebind the correct
3140 // texture.
3141 fHWBoundTextureUniqueIDs[lastUnitIdx] = SK_InvalidUniqueID;
3142 }
3143
3144 // Determines whether glBlitFramebuffer could be used between src and dst.
can_blit_framebuffer(const GrSurface * dst,const GrSurface * src,const GrGLGpu * gpu)3145 static inline bool can_blit_framebuffer(const GrSurface* dst,
3146 const GrSurface* src,
3147 const GrGLGpu* gpu) {
3148 if (gpu->glCaps().isConfigRenderable(dst->config(), dst->desc().fSampleCnt > 0) &&
3149 gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0)) {
3150 switch (gpu->glCaps().blitFramebufferSupport()) {
3151 case GrGLCaps::kNone_BlitFramebufferSupport:
3152 return false;
3153 case GrGLCaps::kNoScalingNoMirroring_BlitFramebufferSupport:
3154 // Our copy surface doesn't support scaling so just check for mirroring.
3155 if (dst->origin() != src->origin()) {
3156 return false;
3157 }
3158 break;
3159 case GrGLCaps::kFull_BlitFramebufferSupport:
3160 break;
3161 }
3162 // ES3 doesn't allow framebuffer blits when the src has MSAA and the configs don't match
3163 // or the rects are not the same (not just the same size but have the same edges).
3164 if (GrGLCaps::kES_3_0_MSFBOType == gpu->glCaps().msFBOType() &&
3165 (src->desc().fSampleCnt > 0 || src->config() != dst->config())) {
3166 return false;
3167 }
3168 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3169 if (dstTex && dstTex->target() != GR_GL_TEXTURE_2D) {
3170 return false;
3171 }
3172 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(dst->asTexture());
3173 if (srcTex && srcTex->target() != GR_GL_TEXTURE_2D) {
3174 return false;
3175 }
3176 return true;
3177 } else {
3178 return false;
3179 }
3180 }
3181
can_copy_texsubimage(const GrSurface * dst,const GrSurface * src,const GrGLGpu * gpu)3182 static inline bool can_copy_texsubimage(const GrSurface* dst,
3183 const GrSurface* src,
3184 const GrGLGpu* gpu) {
3185 // Table 3.9 of the ES2 spec indicates the supported formats with CopyTexSubImage
3186 // and BGRA isn't in the spec. There doesn't appear to be any extension that adds it. Perhaps
3187 // many drivers would allow it to work, but ANGLE does not.
3188 if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
3189 (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
3190 return false;
3191 }
3192 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
3193 // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
3194 // then we don't want to copy to the texture but to the MSAA buffer.
3195 if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
3196 return false;
3197 }
3198 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
3199 // If the src is multisampled (and uses an extension where there is a separate MSAA
3200 // renderbuffer) then it is an invalid operation to call CopyTexSubImage
3201 if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
3202 return false;
3203 }
3204
3205 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3206 // CopyTex(Sub)Image writes to a texture and we have no way of dynamically wrapping a RT in a
3207 // texture.
3208 if (!dstTex) {
3209 return false;
3210 }
3211
3212 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3213
3214 // Check that we could wrap the source in an FBO, that the dst is TEXTURE_2D, that no mirroring
3215 // is required.
3216 if (gpu->glCaps().isConfigRenderable(src->config(), src->desc().fSampleCnt > 0) &&
3217 !GrPixelConfigIsCompressed(src->config()) &&
3218 (!srcTex || srcTex->target() == GR_GL_TEXTURE_2D) &&
3219 dstTex->target() == GR_GL_TEXTURE_2D &&
3220 dst->origin() == src->origin()) {
3221 return true;
3222 } else {
3223 return false;
3224 }
3225 }
3226
// Binds |surface| to |fboTarget| for a copy. If the surface is not a render
// target, a temporary FBO (fTempSrcFBOID or fTempDstFBOID, per
// |tempFBOTarget|) is lazily created and the surface's texture is attached to
// it. The viewport that the copy rect is relative to is output in |viewport|.
void GrGLGpu::bindSurfaceFBOForCopy(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
                                    TempFBOTarget tempFBOTarget) {
    GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
    if (!rt) {
        SkASSERT(surface->asTexture());
        GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
        GrGLenum target = static_cast<GrGLTexture*>(surface->asTexture())->target();
        GrGLuint* tempFBOID;
        tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;

        // Lazily create the temp FBO on first use.
        if (0 == *tempFBOID) {
            GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
        }

        fStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, *tempFBOID));
        GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
                                                             GR_GL_COLOR_ATTACHMENT0,
                                                             target,
                                                             texID,
                                                             0));
        // A wrapped texture's viewport is simply its full extent.
        viewport->fLeft = 0;
        viewport->fBottom = 0;
        viewport->fWidth = surface->width();
        viewport->fHeight = surface->height();
    } else {
        fStats.incRenderTargetBinds();
        GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
        *viewport = rt->getViewport();
    }
}
3260
unbindTextureFBOForCopy(GrGLenum fboTarget,GrSurface * surface)3261 void GrGLGpu::unbindTextureFBOForCopy(GrGLenum fboTarget, GrSurface* surface) {
3262 // bindSurfaceFBOForCopy temporarily binds textures that are not render targets to
3263 if (!surface->asRenderTarget()) {
3264 SkASSERT(surface->asTexture());
3265 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
3266 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3267 GR_GL_COLOR_ATTACHMENT0,
3268 textureTarget,
3269 0,
3270 0));
3271 }
3272 }
3273
// Fills in |desc| for a surface that could serve as the destination of a copy
// from |src|, choosing between a draw-based copy, FBO blit, or
// CopyTexSubImage. Returns false when no copy destination can be described
// (the caller then falls back to render-to-texture).
bool GrGLGpu::initCopySurfaceDstDesc(const GrSurface* src, GrSurfaceDesc* desc) const {
    // If the src is a texture, we can implement the blit as a draw assuming the config is
    // renderable.
    if (src->asTexture() && this->caps()->isConfigRenderable(src->config(), false)) {
        desc->fOrigin = kDefault_GrSurfaceOrigin;
        desc->fFlags = kRenderTarget_GrSurfaceFlag;
        desc->fConfig = src->config();
        return true;
    }

    const GrGLTexture* srcTexture = static_cast<const GrGLTexture*>(src->asTexture());
    if (srcTexture && srcTexture->target() != GR_GL_TEXTURE_2D) {
        // Not supported for FBO blit or CopyTexSubImage
        return false;
    }

    // We look for opportunities to use CopyTexSubImage, or fbo blit. If neither are
    // possible and we return false to fallback to creating a render target dst for render-to-
    // texture. This code prefers CopyTexSubImage to fbo blit and avoids triggering temporary fbo
    // creation. It isn't clear that avoiding temporary fbo creation is actually optimal.

    // Check for format issues with glCopyTexSubImage2D
    if (kGLES_GrGLStandard == this->glStandard() && this->glCaps().bgraIsInternalFormat() &&
        kBGRA_8888_GrPixelConfig == src->config()) {
        // glCopyTexSubImage2D doesn't work with this config. If the bgra can be used with fbo blit
        // then we set up for that, otherwise fail.
        if (this->caps()->isConfigRenderable(kBGRA_8888_GrPixelConfig, false)) {
            desc->fOrigin = kDefault_GrSurfaceOrigin;
            desc->fFlags = kRenderTarget_GrSurfaceFlag;
            desc->fConfig = kBGRA_8888_GrPixelConfig;
            return true;
        }
        return false;
    } else if (nullptr == src->asRenderTarget()) {
        // CopyTexSubImage2D or fbo blit would require creating a temp fbo for the src.
        return false;
    }

    const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
    if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
        // It's illegal to call CopyTexSubImage2D on a MSAA renderbuffer. Set up for FBO blit or
        // fail.
        if (this->caps()->isConfigRenderable(src->config(), false)) {
            desc->fOrigin = kDefault_GrSurfaceOrigin;
            desc->fFlags = kRenderTarget_GrSurfaceFlag;
            desc->fConfig = src->config();
            return true;
        }
        return false;
    }

    // We'll do a CopyTexSubImage. Make the dst a plain old texture.
    desc->fConfig = src->config();
    desc->fOrigin = src->origin();
    desc->fFlags = kNone_GrSurfaceFlags;
    return true;
}
3331
// Attempts the surface-to-surface copy using the cheapest applicable path, in
// order: draw (only when dst already owns an FBO), glCopyTexSubImage2D, FBO
// blit, then draw as a last resort (which may create a temporary FBO for dst).
// Returns false if no path can service this src/dst pair.
bool GrGLGpu::onCopySurface(GrSurface* dst,
                            GrSurface* src,
                            const SkIRect& srcRect,
                            const SkIPoint& dstPoint) {
    // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the
    // swizzle.
    if (this->glCaps().glslCaps()->configOutputSwizzle(src->config()) !=
        this->glCaps().glslCaps()->configOutputSwizzle(dst->config())) {
        return false;
    }
    // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
    bool preferCopy = SkToBool(dst->asRenderTarget());
    if (preferCopy && src->asTexture()) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_copy_texsubimage(dst, src, this)) {
        this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
        return true;
    }

    if (can_blit_framebuffer(dst, src, this)) {
        return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint);
    }

    // Last resort: draw even though dst lacks an FBO (one will be created).
    if (!preferCopy && src->asTexture()) {
        this->copySurfaceAsDraw(dst, src, srcRect, dstPoint);
        return true;
    }

    return false;
}
3365
// Builds the trio of "copy" GL programs used by copySurfaceAsDraw — one per
// sampler type (2D, external, rectangle) — plus a shared unit-quad vertex
// buffer. Programs for sampler types the GL context doesn't support are left
// with fProgram == 0. Each program samples u_texture with coordinates derived
// from the unit quad via two affine-transform uniforms.
void GrGLGpu::createCopyPrograms() {
    for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
        fCopyPrograms[i].fProgram = 0;
    }
    const GrGLSLCaps* glslCaps = this->glCaps().glslCaps();
    const char* version = glslCaps->versionDeclString();
    // Index order must match TextureTargetToCopyProgramIdx.
    static const GrSLType kSamplerTypes[3] = { kSampler2D_GrSLType, kSamplerExternal_GrSLType,
                                               kSampler2DRect_GrSLType };
    SkASSERT(3 == SK_ARRAY_COUNT(fCopyPrograms));
    for (int i = 0; i < 3; ++i) {
        // Skip sampler types the context cannot use; their program stays 0.
        if (kSamplerExternal_GrSLType == kSamplerTypes[i] &&
            !this->glCaps().externalTextureSupport()) {
            continue;
        }
        if (kSampler2DRect_GrSLType == kSamplerTypes[i] &&
            !this->glCaps().rectangleTextureSupport()) {
            continue;
        }
        GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
        GrGLSLShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
                                       GrShaderVar::kUniform_TypeModifier);
        GrGLSLShaderVar uPosXform("u_posXform", kVec4f_GrSLType,
                                  GrShaderVar::kUniform_TypeModifier);
        GrGLSLShaderVar uTexture("u_texture", kSamplerTypes[i],
                                 GrShaderVar::kUniform_TypeModifier);
        GrGLSLShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType,
                                  GrShaderVar::kVaryingOut_TypeModifier);
        GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType,
                                   GrShaderVar::kOut_TypeModifier);

        // ---- Assemble the vertex shader source. ----
        SkString vshaderTxt(version);
        if (glslCaps->noperspectiveInterpolationSupport()) {
            if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
                vshaderTxt.appendf("#extension %s : require\n", extension);
            }
            vTexCoord.addModifier("noperspective");
        }

        aVertex.appendDecl(glslCaps, &vshaderTxt);
        vshaderTxt.append(";");
        uTexCoordXform.appendDecl(glslCaps, &vshaderTxt);
        vshaderTxt.append(";");
        uPosXform.appendDecl(glslCaps, &vshaderTxt);
        vshaderTxt.append(";");
        vTexCoord.appendDecl(glslCaps, &vshaderTxt);
        vshaderTxt.append(";");

        // Both xform uniforms are (scale.xy, translate.zw) applied to the unit quad.
        vshaderTxt.append(
            "// Copy Program VS\n"
            "void main() {"
            "  v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
            "  gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
            "  gl_Position.zw = vec2(0, 1);"
            "}"
        );

        // ---- Assemble the fragment shader source. ----
        SkString fshaderTxt(version);
        if (glslCaps->noperspectiveInterpolationSupport()) {
            if (const char* extension = glslCaps->noperspectiveInterpolationExtensionString()) {
                fshaderTxt.appendf("#extension %s : require\n", extension);
            }
        }
        if (kSamplerTypes[i] == kSamplerExternal_GrSLType) {
            fshaderTxt.appendf("#extension %s : require\n",
                               glslCaps->externalTextureExtensionString());
        }
        GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *glslCaps,
                                                     &fshaderTxt);
        // The varying flips from "out" (VS) to "in" (FS).
        vTexCoord.setTypeModifier(GrShaderVar::kVaryingIn_TypeModifier);
        vTexCoord.appendDecl(glslCaps, &fshaderTxt);
        fshaderTxt.append(";");
        uTexture.appendDecl(glslCaps, &fshaderTxt);
        fshaderTxt.append(";");
        const char* fsOutName;
        if (glslCaps->mustDeclareFragmentShaderOutput()) {
            oFragColor.appendDecl(glslCaps, &fshaderTxt);
            fshaderTxt.append(";");
            fsOutName = oFragColor.c_str();
        } else {
            fsOutName = "gl_FragColor";
        }
        fshaderTxt.appendf(
            "// Copy Program FS\n"
            "void main() {"
            "  %s = %s(u_texture, v_texCoord);"
            "}",
            fsOutName,
            GrGLSLTexture2DFunctionName(kVec2f_GrSLType, kSamplerTypes[i], this->glslGeneration())
        );

        // ---- Compile, attach, and link. ----
        GL_CALL_RET(fCopyPrograms[i].fProgram, CreateProgram());
        const char* str;
        GrGLint length;

        str = vshaderTxt.c_str();
        length = SkToInt(vshaderTxt.size());
        GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[i].fProgram,
                                                      GR_GL_VERTEX_SHADER, &str, &length, 1,
                                                      &fStats);

        str = fshaderTxt.c_str();
        length = SkToInt(fshaderTxt.size());
        GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[i].fProgram,
                                                      GR_GL_FRAGMENT_SHADER, &str, &length, 1,
                                                      &fStats);

        GL_CALL(LinkProgram(fCopyPrograms[i].fProgram));

        // Cache uniform locations for use at draw time.
        GL_CALL_RET(fCopyPrograms[i].fTextureUniform,
                    GetUniformLocation(fCopyPrograms[i].fProgram, "u_texture"));
        GL_CALL_RET(fCopyPrograms[i].fPosXformUniform,
                    GetUniformLocation(fCopyPrograms[i].fProgram, "u_posXform"));
        GL_CALL_RET(fCopyPrograms[i].fTexCoordXformUniform,
                    GetUniformLocation(fCopyPrograms[i].fProgram, "u_texCoordXform"));

        GL_CALL(BindAttribLocation(fCopyPrograms[i].fProgram, 0, "a_vertex"));

        // Shaders are owned by the program once attached; drop our references.
        GL_CALL(DeleteShader(vshader));
        GL_CALL(DeleteShader(fshader));
    }
    // Shared unit-quad (triangle strip order) vertex buffer for all copy programs.
    fCopyProgramArrayBuffer = 0;
    GL_CALL(GenBuffers(1, &fCopyProgramArrayBuffer));
    fHWGeometryState.setVertexBufferID(this, fCopyProgramArrayBuffer);
    static const GrGLfloat vdata[] = {
        0, 0,
        0, 1,
        1, 0,
        1, 1
    };
    GL_ALLOC_CALL(this->glInterface(),
                  BufferData(GR_GL_ARRAY_BUFFER,
                             (GrGLsizeiptr) sizeof(vdata),
                             vdata,  // data ptr
                             GR_GL_STATIC_DRAW));
}
3501
// Builds the debug wire-rect program (lazily, see drawDebugWireRect) plus its
// unit-quad vertex buffer. The VS maps the unit square onto an NDC rectangle
// given by the u_rect uniform; the FS outputs the flat u_color uniform.
void GrGLGpu::createWireRectProgram() {
    SkASSERT(!fWireRectProgram.fProgram);
    GrGLSLShaderVar uColor("u_color", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
    GrGLSLShaderVar uRect("u_rect", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
    GrGLSLShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kAttribute_TypeModifier);
    const char* version = this->glCaps().glslCaps()->versionDeclString();

    // The rect uniform specifies the rectangle in NDC space as a vec4 (left,top,right,bottom). The
    // program is used with a vbo containing the unit square. Vertices are computed from the rect
    // uniform using the 4 vbo vertices.
    SkString vshaderTxt(version);
    aVertex.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
    vshaderTxt.append(";");
    uRect.appendDecl(this->glCaps().glslCaps(), &vshaderTxt);
    vshaderTxt.append(";");
    vshaderTxt.append(
        "// Wire Rect Program VS\n"
        "void main() {"
        "  gl_Position.x = u_rect.x + a_vertex.x * (u_rect.z - u_rect.x);"
        "  gl_Position.y = u_rect.y + a_vertex.y * (u_rect.w - u_rect.y);"
        "  gl_Position.zw = vec2(0, 1);"
        "}"
    );

    GrGLSLShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, GrShaderVar::kOut_TypeModifier);

    SkString fshaderTxt(version);
    GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision,
                                                 *this->glCaps().glslCaps(),
                                                 &fshaderTxt);
    uColor.appendDecl(this->glCaps().glslCaps(), &fshaderTxt);
    fshaderTxt.append(";");
    const char* fsOutName;
    if (this->glCaps().glslCaps()->mustDeclareFragmentShaderOutput()) {
        oFragColor.appendDecl(this->glCaps().glslCaps(), &fshaderTxt);
        fshaderTxt.append(";");
        fsOutName = oFragColor.c_str();
    } else {
        fsOutName = "gl_FragColor";
    }
    // NOTE(review): the embedded GLSL comment says "Write Rect" — a typo for
    // "Wire Rect". Harmless (it only appears in generated shader text), so the
    // string is left untouched here.
    fshaderTxt.appendf(
        "// Write Rect Program FS\n"
        "void main() {"
        "  %s = %s;"
        "}",
        fsOutName,
        uColor.c_str()
    );

    GL_CALL_RET(fWireRectProgram.fProgram, CreateProgram());
    const char* str;
    GrGLint length;

    str = vshaderTxt.c_str();
    length = SkToInt(vshaderTxt.size());
    GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram,
                                                  GR_GL_VERTEX_SHADER, &str, &length, 1,
                                                  &fStats);

    str = fshaderTxt.c_str();
    length = SkToInt(fshaderTxt.size());
    GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram,
                                                  GR_GL_FRAGMENT_SHADER, &str, &length, 1,
                                                  &fStats);

    GL_CALL(LinkProgram(fWireRectProgram.fProgram));

    // Cache uniform locations for drawDebugWireRect.
    GL_CALL_RET(fWireRectProgram.fColorUniform,
                GetUniformLocation(fWireRectProgram.fProgram, "u_color"));
    GL_CALL_RET(fWireRectProgram.fRectUniform,
                GetUniformLocation(fWireRectProgram.fProgram, "u_rect"));
    GL_CALL(BindAttribLocation(fWireRectProgram.fProgram, 0, "a_vertex"));

    // Shaders are owned by the linked program; drop our references.
    GL_CALL(DeleteShader(vshader));
    GL_CALL(DeleteShader(fshader));
    // Unit-square vertices in line-loop order (0,0 -> 0,1 -> 1,1 -> 1,0).
    GL_CALL(GenBuffers(1, &fWireRectArrayBuffer));
    fHWGeometryState.setVertexBufferID(this, fWireRectArrayBuffer);
    static const GrGLfloat vdata[] = {
        0, 0,
        0, 1,
        1, 1,
        1, 0,
    };
    GL_ALLOC_CALL(this->glInterface(),
                  BufferData(GR_GL_ARRAY_BUFFER,
                             (GrGLsizeiptr) sizeof(vdata),
                             vdata,  // data ptr
                             GR_GL_STATIC_DRAW));
}
3591
// Draws a 1-pixel wireframe rectangle (debug visualization) over 'rect' on
// 'rt' in the given color, bypassing the normal program/pipeline machinery.
void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor color) {
    // TODO: This should swizzle the output to match dst's config, though it is a debugging
    // visualization.

    this->handleDirtyContext();
    // The wire-rect program is created lazily on first use.
    if (!fWireRectProgram.fProgram) {
        this->createWireRectProgram();
    }

    int w = rt->width();
    int h = rt->height();

    // Compute the edges of the rectangle (top,left,right,bottom) in NDC space. Must consider
    // whether the render target is flipped or not.
    // The +/-0.5 offsets center the line on the pixel border.
    GrGLfloat edges[4];
    edges[0] = SkIntToScalar(rect.fLeft) + 0.5f;
    edges[2] = SkIntToScalar(rect.fRight) - 0.5f;
    if (kBottomLeft_GrSurfaceOrigin == rt->origin()) {
        edges[1] = h - (SkIntToScalar(rect.fTop) + 0.5f);
        edges[3] = h - (SkIntToScalar(rect.fBottom) - 0.5f);
    } else {
        edges[1] = SkIntToScalar(rect.fTop) + 0.5f;
        edges[3] = SkIntToScalar(rect.fBottom) - 0.5f;
    }
    // Map pixel coords to NDC [-1, 1].
    edges[0] = 2 * edges[0] / w - 1.0f;
    edges[1] = 2 * edges[1] / h - 1.0f;
    edges[2] = 2 * edges[2] / w - 1.0f;
    edges[3] = 2 * edges[3] / h - 1.0f;

    // Unpack the 8888 color into normalized float RGBA channels.
    GrGLfloat channels[4];
    static const GrGLfloat scale255 = 1.f / 255.f;
    channels[0] = GrColorUnpackR(color) * scale255;
    channels[1] = GrColorUnpackG(color) * scale255;
    channels[2] = GrColorUnpackB(color) * scale255;
    channels[3] = GrColorUnpackA(color) * scale255;

    GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(rt->asRenderTarget());
    this->flushRenderTarget(glRT, &rect);

    GL_CALL(UseProgram(fWireRectProgram.fProgram));
    fHWProgramID = fWireRectProgram.fProgram;

    fHWGeometryState.setVertexArrayID(this, 0);

    // Attribute 0: vec2 position from the unit-square buffer.
    GrGLAttribArrayState* attribs =
        fHWGeometryState.bindArrayAndBufferToDraw(this, fWireRectArrayBuffer);
    attribs->set(this, 0, fWireRectArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat),
                 0);
    attribs->disableUnusedArrays(this, 0x1);

    GL_CALL(Uniform4fv(fWireRectProgram.fRectUniform, 1, edges));
    GL_CALL(Uniform4fv(fWireRectProgram.fColorUniform, 1, channels));

    // Reset fixed-function state to a plain opaque overwrite.
    GrXferProcessor::BlendInfo blendInfo;
    blendInfo.reset();
    this->flushBlend(blendInfo, GrSwizzle::RGBA());
    this->flushColorWrite(true);
    this->flushDrawFace(GrPipelineBuilder::kBoth_DrawFace);
    this->flushHWAAState(glRT, false, false);
    this->disableScissor();
    GrStencilSettings stencil;
    stencil.setDisabled();
    this->flushStencil(stencil);

    GL_CALL(DrawArrays(GR_GL_LINE_LOOP, 0, 4));
}
3658
3659
// Copies srcRect from 'src' to 'dst' at dstPoint by drawing a textured quad
// with one of the programs built in createCopyPrograms(). Requires src to be
// a texture; dst is bound as the FBO color attachment.
void GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
                                GrSurface* src,
                                const SkIRect& srcRect,
                                const SkIPoint& dstPoint) {
    int w = srcRect.width();
    int h = srcRect.height();

    // Bind src to texture unit 0 with clamp/nearest sampling.
    GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
    GrTextureParams params(SkShader::kClamp_TileMode, GrTextureParams::kNone_FilterMode);
    this->bindTexture(0, params, srcTex);

    GrGLIRect dstVP;
    this->bindSurfaceFBOForCopy(dst, GR_GL_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
    this->flushViewport(dstVP);
    // We modified the bound FBO; invalidate the cached render target binding.
    fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;

    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);

    // Pick the copy program matching the src texture's target (2D/external/rect).
    int progIdx = TextureTargetToCopyProgramIdx(srcTex->target());

    GL_CALL(UseProgram(fCopyPrograms[progIdx].fProgram));
    fHWProgramID = fCopyPrograms[progIdx].fProgram;

    fHWGeometryState.setVertexArrayID(this, 0);

    // Attribute 0: vec2 position from the shared unit-quad buffer.
    GrGLAttribArrayState* attribs =
        fHWGeometryState.bindArrayAndBufferToDraw(this, fCopyProgramArrayBuffer);
    attribs->set(this, 0, fCopyProgramArrayBuffer, kVec2f_GrVertexAttribType, 2 * sizeof(GrGLfloat),
                 0);
    attribs->disableUnusedArrays(this, 0x1);

    // dst rect edges in NDC (-1 to 1)
    int dw = dst->width();
    int dh = dst->height();
    GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
    GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
    GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
    GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        // Bottom-left origin: mirror the y edges about NDC zero.
        dy0 = -dy0;
        dy1 = -dy1;
    }

    GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
    GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
    GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
    GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
    int sh = src->height();
    if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
        // Flip the src rows for bottom-left-origin textures.
        sy0 = sh - sy0;
        sy1 = sh - sy1;
    }
    // src rect edges in normalized texture space (0 to 1) unless we're using a RECTANGLE texture.
    GrGLenum srcTarget = srcTex->target();
    if (GR_GL_TEXTURE_RECTANGLE != srcTarget) {
        int sw = src->width();
        sx0 /= sw;
        sx1 /= sw;
        sy0 /= sh;
        sy1 /= sh;
    }

    // Xform uniforms are (scale.xy, translate.zw) applied to the unit quad.
    GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
    GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
                      sx1 - sx0, sy1 - sy0, sx0, sy0));
    GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));

    // Reset fixed-function state to a plain opaque overwrite.
    GrXferProcessor::BlendInfo blendInfo;
    blendInfo.reset();
    this->flushBlend(blendInfo, GrSwizzle::RGBA());
    this->flushColorWrite(true);
    this->flushDrawFace(GrPipelineBuilder::kBoth_DrawFace);
    this->flushHWAAState(nullptr, false, false);
    this->disableScissor();
    GrStencilSettings stencil;
    stencil.setDisabled();
    this->flushStencil(stencil);

    GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
    this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, dst);
    this->didWriteToSurface(dst, &dstRect);
}
3743
// Copies srcRect from 'src' into the texture of 'dst' at dstPoint using
// glCopyTexSubImage2D, with src bound as the read framebuffer. The caller
// must have verified the copy is legal via can_copy_texsubimage().
void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    SkASSERT(can_copy_texsubimage(dst, src, this));
    GrGLIRect srcVP;
    this->bindSurfaceFBOForCopy(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
    GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
    SkASSERT(dstTex);
    // We modified the bound FBO
    fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
    // Convert srcRect into GL window coordinates relative to the viewport,
    // accounting for src's origin.
    GrGLIRect srcGLRect;
    srcGLRect.setRelativeTo(srcVP,
                            srcRect.fLeft,
                            srcRect.fTop,
                            srcRect.width(),
                            srcRect.height(),
                            src->origin());

    this->setScratchTextureUnit();
    GL_CALL(BindTexture(dstTex->target(), dstTex->textureID()));
    // Flip the dst y coordinate for bottom-left-origin destinations.
    GrGLint dstY;
    if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
        dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
    } else {
        dstY = dstPoint.fY;
    }
    GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
                              dstPoint.fX, dstY,
                              srcGLRect.fLeft, srcGLRect.fBottom,
                              srcGLRect.fWidth, srcGLRect.fHeight));
    this->unbindTextureFBOForCopy(GR_GL_FRAMEBUFFER, src);
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    this->didWriteToSurface(dst, &dstRect);
}
3780
// Copies srcRect from 'src' to 'dst' at dstPoint via glBlitFramebuffer.
// Returns false only for an overlapping self-copy, which BlitFramebuffer
// does not define. The caller must have verified can_blit_framebuffer().
bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst,
                                           GrSurface* src,
                                           const SkIRect& srcRect,
                                           const SkIPoint& dstPoint) {
    SkASSERT(can_blit_framebuffer(dst, src, this));
    SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
                                        srcRect.width(), srcRect.height());
    if (dst == src) {
        // Overlapping blit within one surface is undefined behavior in GL.
        if (SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
            return false;
        }
    }

    GrGLIRect dstVP;
    GrGLIRect srcVP;
    this->bindSurfaceFBOForCopy(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
    this->bindSurfaceFBOForCopy(src, GR_GL_READ_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
    // We modified the bound FBO
    fHWBoundRenderTargetUniqueID = SK_InvalidUniqueID;
    // Convert both rects into GL window coordinates, honoring each surface's origin.
    GrGLIRect srcGLRect;
    GrGLIRect dstGLRect;
    srcGLRect.setRelativeTo(srcVP,
                            srcRect.fLeft,
                            srcRect.fTop,
                            srcRect.width(),
                            srcRect.height(),
                            src->origin());
    dstGLRect.setRelativeTo(dstVP,
                            dstRect.fLeft,
                            dstRect.fTop,
                            dstRect.width(),
                            dstRect.height(),
                            dst->origin());

    // BlitFrameBuffer respects the scissor, so disable it.
    this->disableScissor();

    GrGLint srcY0;
    GrGLint srcY1;
    // Does the blit need to y-mirror or not?
    if (src->origin() == dst->origin()) {
        srcY0 = srcGLRect.fBottom;
        srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
    } else {
        // Differing origins: swap the src y edges so the blit flips vertically.
        srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
        srcY1 = srcGLRect.fBottom;
    }
    GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
                            srcY0,
                            srcGLRect.fLeft + srcGLRect.fWidth,
                            srcY1,
                            dstGLRect.fLeft,
                            dstGLRect.fBottom,
                            dstGLRect.fLeft + dstGLRect.fWidth,
                            dstGLRect.fBottom + dstGLRect.fHeight,
                            GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
    this->unbindTextureFBOForCopy(GR_GL_DRAW_FRAMEBUFFER, dst);
    this->unbindTextureFBOForCopy(GR_GL_READ_FRAMEBUFFER, src);
    this->didWriteToSurface(dst, &dstRect);
    return true;
}
3842
// Issues the GL barrier required before a draw that reads what a prior draw
// wrote: glTextureBarrier for texture feedback, glBlendBarrier for advanced
// (KHR) blend equations.
void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
    SkASSERT(type);
    switch (type) {
        case kTexture_GrXferBarrierType: {
            GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
            if (glrt->textureFBOID() != glrt->renderFBOID()) {
                // The render target uses separate storage so no need for glTextureBarrier.
                // FIXME: The render target will resolve automatically when its texture is bound,
                // but we could resolve only the bounds that will be read if we do it here instead.
                return;
            }
            SkASSERT(this->caps()->textureBarrierSupport());
            GL_CALL(TextureBarrier());
            return;
        }
        case kBlend_GrXferBarrierType:
            // Only reachable when the KHR advanced-blend path is active.
            SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
                     this->caps()->blendEquationSupport());
            GL_CALL(BlendBarrier());
            return;
        default: break; // placate compiler warnings that kNone not handled
    }
}
3866
createTestingOnlyBackendTexture(void * pixels,int w,int h,GrPixelConfig config)3867 GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
3868 GrPixelConfig config) {
3869 if (!this->caps()->isConfigTexturable(config)) {
3870 return false;
3871 }
3872 GrGLTextureInfo* info = new GrGLTextureInfo;
3873 info->fTarget = GR_GL_TEXTURE_2D;
3874 info->fID = 0;
3875 GL_CALL(GenTextures(1, &info->fID));
3876 GL_CALL(ActiveTexture(GR_GL_TEXTURE0));
3877 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
3878 GL_CALL(BindTexture(info->fTarget, info->fID));
3879 fHWBoundTextureUniqueIDs[0] = 0;
3880 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST));
3881 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST));
3882 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
3883 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
3884
3885 GrGLenum internalFormat;
3886 GrGLenum externalFormat;
3887 GrGLenum externalType;
3888
3889 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
3890 &externalType)) {
3891 delete info;
3892 #ifdef SK_IGNORE_GL_TEXTURE_TARGET
3893 return 0;
3894 #else
3895 return reinterpret_cast<GrBackendObject>(nullptr);
3896 #endif
3897 }
3898
3899 GL_CALL(TexImage2D(info->fTarget, 0, internalFormat, w, h, 0, externalFormat,
3900 externalType, pixels));
3901
3902 #ifdef SK_IGNORE_GL_TEXTURE_TARGET
3903 GrGLuint id = info->fID;
3904 delete info;
3905 return id;
3906 #else
3907 return reinterpret_cast<GrBackendObject>(info);
3908 #endif
3909 }
3910
isTestingOnlyBackendTexture(GrBackendObject id) const3911 bool GrGLGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
3912 #ifdef SK_IGNORE_GL_TEXTURE_TARGET
3913 GrGLuint texID = (GrGLuint)id;
3914 #else
3915 GrGLuint texID = reinterpret_cast<const GrGLTextureInfo*>(id)->fID;
3916 #endif
3917
3918 GrGLboolean result;
3919 GL_CALL_RET(result, IsTexture(texID));
3920
3921 return (GR_GL_TRUE == result);
3922 }
3923
// Releases a handle produced by createTestingOnlyBackendTexture. When
// 'abandonTexture' is true the GL object is left alive (e.g. the context is
// gone) but the CPU-side info struct is still freed.
void GrGLGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) {
#ifdef SK_IGNORE_GL_TEXTURE_TARGET
    GrGLuint texID = (GrGLuint)id;
#else
    const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(id);
    GrGLuint texID = info->fID;
#endif

    if (!abandonTexture) {
        GL_CALL(DeleteTextures(1, &texID));
    }

#ifndef SK_IGNORE_GL_TEXTURE_TARGET
    delete info;
#endif
}
3940
resetShaderCacheForTesting() const3941 void GrGLGpu::resetShaderCacheForTesting() const {
3942 fProgramCache->abandon();
3943 }
3944
3945 ///////////////////////////////////////////////////////////////////////////////
bindArrayAndBuffersToDraw(GrGLGpu * gpu,const GrGLVertexBuffer * vbuffer,const GrGLIndexBuffer * ibuffer)3946 GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(
3947 GrGLGpu* gpu,
3948 const GrGLVertexBuffer* vbuffer,
3949 const GrGLIndexBuffer* ibuffer) {
3950 SkASSERT(vbuffer);
3951 GrGLuint vbufferID = vbuffer->bufferID();
3952 GrGLuint* ibufferIDPtr = nullptr;
3953 GrGLuint ibufferID;
3954 if (ibuffer) {
3955 ibufferID = ibuffer->bufferID();
3956 ibufferIDPtr = &ibufferID;
3957 }
3958 return this->internalBind(gpu, vbufferID, ibufferIDPtr);
3959 }
3960
// Binds only a vertex buffer (no index buffer) for a non-indexed draw.
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBufferToDraw(GrGLGpu* gpu,
                                                                         GrGLuint vbufferID) {
    return this->internalBind(gpu, vbufferID, nullptr);
}
3965
// Binds a vertex buffer and index buffer by raw GL id for an indexed draw.
GrGLAttribArrayState* GrGLGpu::HWGeometryState::bindArrayAndBuffersToDraw(GrGLGpu* gpu,
                                                                          GrGLuint vbufferID,
                                                                          GrGLuint ibufferID) {
    return this->internalBind(gpu, vbufferID, &ibufferID);
}
3971
// Binds either our lazily-created VAO (core profile + VBO) or GL's default
// vertex array, attaching the index buffer when 'ibufferID' is non-null, and
// returns the attrib-array state object that tracks attribute pointers for
// whichever array ended up bound.
GrGLAttribArrayState* GrGLGpu::HWGeometryState::internalBind(GrGLGpu* gpu,
                                                             GrGLuint vbufferID,
                                                             GrGLuint* ibufferID) {
    GrGLAttribArrayState* attribState;

    // Core profile forbids drawing with the default (zero) vertex array, so a
    // real VAO is required whenever a VBO is in use.
    if (gpu->glCaps().isCoreProfile() && 0 != vbufferID) {
        if (!fVBOVertexArray) {
            // Lazily create the single VAO we reuse for all VBO draws.
            GrGLuint arrayID;
            GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
            int attrCount = gpu->glCaps().maxVertexAttributes();
            fVBOVertexArray = new GrGLVertexArray(arrayID, attrCount);
        }
        if (ibufferID) {
            attribState = fVBOVertexArray->bindWithIndexBuffer(gpu, *ibufferID);
        } else {
            attribState = fVBOVertexArray->bind(gpu);
        }
    } else {
        if (ibufferID) {
            // Bind the index buffer on GL's default vertex array.
            this->setIndexBufferIDOnDefaultVertexArray(gpu, *ibufferID);
        } else {
            this->setVertexArrayID(gpu, 0);
        }
        // Size the default array's attrib state to the hardware limit.
        int attrCount = gpu->glCaps().maxVertexAttributes();
        if (fDefaultVertexArrayAttribState.count() != attrCount) {
            fDefaultVertexArrayAttribState.resize(attrCount);
        }
        attribState = &fDefaultVertexArrayAttribState;
    }
    return attribState;
}
4003
onMakeCopyForTextureParams(GrTexture * texture,const GrTextureParams & textureParams,GrTextureProducer::CopyParams * copyParams) const4004 bool GrGLGpu::onMakeCopyForTextureParams(GrTexture* texture, const GrTextureParams& textureParams,
4005 GrTextureProducer::CopyParams* copyParams) const {
4006 if (textureParams.isTiled() ||
4007 GrTextureParams::kMipMap_FilterMode == textureParams.filterMode()) {
4008 GrGLTexture* glTexture = static_cast<GrGLTexture*>(texture);
4009 if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() ||
4010 GR_GL_TEXTURE_RECTANGLE == glTexture->target()) {
4011 copyParams->fFilter = GrTextureParams::kNone_FilterMode;
4012 copyParams->fWidth = texture->width();
4013 copyParams->fHeight = texture->height();
4014 return true;
4015 }
4016 }
4017 return false;
4018 }
4019