1 /*
2 * Copyright 2011 Google Inc.
3 *
4 * Use of this source code is governed by a BSD-style license that can be
5 * found in the LICENSE file.
6 */
7
8 #include "GrGLGpu.h"
9
10 #include "../private/GrGLSL.h"
11 #include "GrFixedClip.h"
12 #include "GrGLBuffer.h"
13 #include "GrGLGpuCommandBuffer.h"
14 #include "GrGLSemaphore.h"
15 #include "GrGLStencilAttachment.h"
16 #include "GrGLTextureRenderTarget.h"
17 #include "GrGpuResourcePriv.h"
18 #include "GrMesh.h"
19 #include "GrPipeline.h"
20 #include "GrRenderTargetPriv.h"
21 #include "GrShaderCaps.h"
22 #include "GrSurfacePriv.h"
23 #include "GrSurfaceProxyPriv.h"
24 #include "GrTexturePriv.h"
25 #include "GrTypes.h"
26 #include "SkAutoMalloc.h"
27 #include "SkMakeUnique.h"
28 #include "SkMipMap.h"
29 #include "SkPixmap.h"
30 #include "SkSLCompiler.h"
31 #include "SkStrokeRec.h"
32 #include "SkTemplates.h"
33 #include "SkTypes.h"
34 #include "builders/GrGLShaderStringBuilder.h"
35 #include "instanced/GLInstancedRendering.h"
36
37 #define GL_CALL(X) GR_GL_CALL(this->glInterface(), X)
38 #define GL_CALL_RET(RET, X) GR_GL_CALL_RET(this->glInterface(), RET, X)
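// Example expansion (illustrative only): GL_CALL(Disable(GR_GL_DITHER)) becomes
// GR_GL_CALL(this->glInterface(), Disable(GR_GL_DITHER)), so every call made
// through these macros is routed to this GrGLGpu's GrGLInterface.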
39
40 #define SKIP_CACHE_CHECK true
41
42 #if GR_GL_CHECK_ALLOC_WITH_GET_ERROR
43 #define CLEAR_ERROR_BEFORE_ALLOC(iface) GrGLClearErr(iface)
44 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL_NOERRCHECK(iface, call)
45 #define CHECK_ALLOC_ERROR(iface) GR_GL_GET_ERROR(iface)
46 #else
47 #define CLEAR_ERROR_BEFORE_ALLOC(iface)
48 #define GL_ALLOC_CALL(iface, call) GR_GL_CALL(iface, call)
49 #define CHECK_ALLOC_ERROR(iface) GR_GL_NO_ERROR
50 #endif
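// A minimal sketch of how the allocation-check macros are meant to be combined
// (this mirrors the pattern used by the texture-allocation helpers later in this
// file; the TexImage2D arguments are elided):
//
//     CLEAR_ERROR_BEFORE_ALLOC(iface);
//     GL_ALLOC_CALL(iface, TexImage2D(...));
//     if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(iface)) {
//         // the allocation failed; unwind and report the error
//     }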
51
52 //#define USE_NSIGHT
53
54 ///////////////////////////////////////////////////////////////////////////////
55
56 using gr_instanced::InstancedRendering;
57 using gr_instanced::GLInstancedRendering;
58
59 static const GrGLenum gXfermodeEquation2Blend[] = {
60 // Basic OpenGL blend equations.
61 GR_GL_FUNC_ADD,
62 GR_GL_FUNC_SUBTRACT,
63 GR_GL_FUNC_REVERSE_SUBTRACT,
64
65 // GL_KHR_blend_equation_advanced.
66 GR_GL_SCREEN,
67 GR_GL_OVERLAY,
68 GR_GL_DARKEN,
69 GR_GL_LIGHTEN,
70 GR_GL_COLORDODGE,
71 GR_GL_COLORBURN,
72 GR_GL_HARDLIGHT,
73 GR_GL_SOFTLIGHT,
74 GR_GL_DIFFERENCE,
75 GR_GL_EXCLUSION,
76 GR_GL_MULTIPLY,
77 GR_GL_HSL_HUE,
78 GR_GL_HSL_SATURATION,
79 GR_GL_HSL_COLOR,
80 GR_GL_HSL_LUMINOSITY
81 };
82 GR_STATIC_ASSERT(0 == kAdd_GrBlendEquation);
83 GR_STATIC_ASSERT(1 == kSubtract_GrBlendEquation);
84 GR_STATIC_ASSERT(2 == kReverseSubtract_GrBlendEquation);
85 GR_STATIC_ASSERT(3 == kScreen_GrBlendEquation);
86 GR_STATIC_ASSERT(4 == kOverlay_GrBlendEquation);
87 GR_STATIC_ASSERT(5 == kDarken_GrBlendEquation);
88 GR_STATIC_ASSERT(6 == kLighten_GrBlendEquation);
89 GR_STATIC_ASSERT(7 == kColorDodge_GrBlendEquation);
90 GR_STATIC_ASSERT(8 == kColorBurn_GrBlendEquation);
91 GR_STATIC_ASSERT(9 == kHardLight_GrBlendEquation);
92 GR_STATIC_ASSERT(10 == kSoftLight_GrBlendEquation);
93 GR_STATIC_ASSERT(11 == kDifference_GrBlendEquation);
94 GR_STATIC_ASSERT(12 == kExclusion_GrBlendEquation);
95 GR_STATIC_ASSERT(13 == kMultiply_GrBlendEquation);
96 GR_STATIC_ASSERT(14 == kHSLHue_GrBlendEquation);
97 GR_STATIC_ASSERT(15 == kHSLSaturation_GrBlendEquation);
98 GR_STATIC_ASSERT(16 == kHSLColor_GrBlendEquation);
99 GR_STATIC_ASSERT(17 == kHSLLuminosity_GrBlendEquation);
100 GR_STATIC_ASSERT(SK_ARRAY_COUNT(gXfermodeEquation2Blend) == kGrBlendEquationCnt);
101
102 static const GrGLenum gXfermodeCoeff2Blend[] = {
103 GR_GL_ZERO,
104 GR_GL_ONE,
105 GR_GL_SRC_COLOR,
106 GR_GL_ONE_MINUS_SRC_COLOR,
107 GR_GL_DST_COLOR,
108 GR_GL_ONE_MINUS_DST_COLOR,
109 GR_GL_SRC_ALPHA,
110 GR_GL_ONE_MINUS_SRC_ALPHA,
111 GR_GL_DST_ALPHA,
112 GR_GL_ONE_MINUS_DST_ALPHA,
113 GR_GL_CONSTANT_COLOR,
114 GR_GL_ONE_MINUS_CONSTANT_COLOR,
115 GR_GL_CONSTANT_ALPHA,
116 GR_GL_ONE_MINUS_CONSTANT_ALPHA,
117
118 // extended blend coeffs
119 GR_GL_SRC1_COLOR,
120 GR_GL_ONE_MINUS_SRC1_COLOR,
121 GR_GL_SRC1_ALPHA,
122 GR_GL_ONE_MINUS_SRC1_ALPHA,
123 };
124
125 bool GrGLGpu::BlendCoeffReferencesConstant(GrBlendCoeff coeff) {
126 static const bool gCoeffReferencesBlendConst[] = {
127 false,
128 false,
129 false,
130 false,
131 false,
132 false,
133 false,
134 false,
135 false,
136 false,
137 true,
138 true,
139 true,
140 true,
141
142 // extended blend coeffs
143 false,
144 false,
145 false,
146 false,
147 };
148 return gCoeffReferencesBlendConst[coeff];
149 GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gCoeffReferencesBlendConst));
150
151 GR_STATIC_ASSERT(0 == kZero_GrBlendCoeff);
152 GR_STATIC_ASSERT(1 == kOne_GrBlendCoeff);
153 GR_STATIC_ASSERT(2 == kSC_GrBlendCoeff);
154 GR_STATIC_ASSERT(3 == kISC_GrBlendCoeff);
155 GR_STATIC_ASSERT(4 == kDC_GrBlendCoeff);
156 GR_STATIC_ASSERT(5 == kIDC_GrBlendCoeff);
157 GR_STATIC_ASSERT(6 == kSA_GrBlendCoeff);
158 GR_STATIC_ASSERT(7 == kISA_GrBlendCoeff);
159 GR_STATIC_ASSERT(8 == kDA_GrBlendCoeff);
160 GR_STATIC_ASSERT(9 == kIDA_GrBlendCoeff);
161 GR_STATIC_ASSERT(10 == kConstC_GrBlendCoeff);
162 GR_STATIC_ASSERT(11 == kIConstC_GrBlendCoeff);
163 GR_STATIC_ASSERT(12 == kConstA_GrBlendCoeff);
164 GR_STATIC_ASSERT(13 == kIConstA_GrBlendCoeff);
165
166 GR_STATIC_ASSERT(14 == kS2C_GrBlendCoeff);
167 GR_STATIC_ASSERT(15 == kIS2C_GrBlendCoeff);
168 GR_STATIC_ASSERT(16 == kS2A_GrBlendCoeff);
169 GR_STATIC_ASSERT(17 == kIS2A_GrBlendCoeff);
170
171 // assertions for gXfermodeCoeff2Blend have to be in GrGpu scope
172 GR_STATIC_ASSERT(kGrBlendCoeffCnt == SK_ARRAY_COUNT(gXfermodeCoeff2Blend));
173 }
174
175 ///////////////////////////////////////////////////////////////////////////////
176
177
178 GrGpu* GrGLGpu::Create(GrBackendContext backendContext, const GrContextOptions& options,
179 GrContext* context) {
180 sk_sp<const GrGLInterface> glInterface(
181 reinterpret_cast<const GrGLInterface*>(backendContext));
182 if (!glInterface) {
183 glInterface.reset(GrGLDefaultInterface());
184 } else {
185 glInterface->ref();
186 }
187 if (!glInterface) {
188 return nullptr;
189 }
190 GrGLContext* glContext = GrGLContext::Create(glInterface.get(), options);
191 if (glContext) {
192 return new GrGLGpu(glContext, context);
193 }
194 return nullptr;
195 }
196
197 static bool gPrintStartupSpew;
198
199 GrGLGpu::GrGLGpu(GrGLContext* ctx, GrContext* context)
200 : GrGpu(context)
201 , fGLContext(ctx)
202 , fProgramCache(new ProgramCache(this))
203 , fHWProgramID(0)
204 , fTempSrcFBOID(0)
205 , fTempDstFBOID(0)
206 , fStencilClearFBOID(0)
207 , fHWMaxUsedBufferTextureUnit(-1)
208 , fHWMinSampleShading(0.0) {
209 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
210 fCopyPrograms[i].fProgram = 0;
211 }
212 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
213 fMipmapPrograms[i].fProgram = 0;
214 }
215 fWireRectProgram.fProgram = 0;
216
217 SkASSERT(ctx);
218 fCaps.reset(SkRef(ctx->caps()));
219
220 fHWBoundTextureUniqueIDs.reset(this->caps()->shaderCaps()->maxCombinedSamplers());
221 fHWBoundImageStorages.reset(this->caps()->shaderCaps()->maxCombinedImageStorages());
222
223 fHWBufferState[kVertex_GrBufferType].fGLTarget = GR_GL_ARRAY_BUFFER;
224 fHWBufferState[kIndex_GrBufferType].fGLTarget = GR_GL_ELEMENT_ARRAY_BUFFER;
225 fHWBufferState[kTexel_GrBufferType].fGLTarget = GR_GL_TEXTURE_BUFFER;
226 fHWBufferState[kDrawIndirect_GrBufferType].fGLTarget = GR_GL_DRAW_INDIRECT_BUFFER;
227 if (GrGLCaps::kChromium_TransferBufferType == this->glCaps().transferBufferType()) {
228 fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget =
229 GR_GL_PIXEL_UNPACK_TRANSFER_BUFFER_CHROMIUM;
230 fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget =
231 GR_GL_PIXEL_PACK_TRANSFER_BUFFER_CHROMIUM;
232 } else {
233 fHWBufferState[kXferCpuToGpu_GrBufferType].fGLTarget = GR_GL_PIXEL_UNPACK_BUFFER;
234 fHWBufferState[kXferGpuToCpu_GrBufferType].fGLTarget = GR_GL_PIXEL_PACK_BUFFER;
235 }
236 GR_STATIC_ASSERT(6 == SK_ARRAY_COUNT(fHWBufferState));
237
238 if (this->caps()->shaderCaps()->texelBufferSupport()) {
239 fHWBufferTextures.reset(this->caps()->shaderCaps()->maxCombinedSamplers());
240 }
241
242 if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
243 fPathRendering.reset(new GrGLPathRendering(this));
244 }
245
246 GrGLClearErr(this->glInterface());
247 if (gPrintStartupSpew) {
248 const GrGLubyte* vendor;
249 const GrGLubyte* renderer;
250 const GrGLubyte* version;
251 GL_CALL_RET(vendor, GetString(GR_GL_VENDOR));
252 GL_CALL_RET(renderer, GetString(GR_GL_RENDERER));
253 GL_CALL_RET(version, GetString(GR_GL_VERSION));
254 SkDebugf("------------------------- create GrGLGpu %p --------------\n",
255 this);
256 SkDebugf("------ VENDOR %s\n", vendor);
257 SkDebugf("------ RENDERER %s\n", renderer);
258 SkDebugf("------ VERSION %s\n", version);
259 SkDebugf("------ EXTENSIONS\n");
260 this->glContext().extensions().print();
261 SkDebugf("\n");
262 SkDebugf("%s", this->glCaps().dump().c_str());
263 }
264 }
265
266 GrGLGpu::~GrGLGpu() {
267 // Ensure any GrGpuResource objects get deleted first, since they may require a working GrGLGpu
268 // to release the resources held by the objects themselves.
269 fPathRendering.reset();
270 fCopyProgramArrayBuffer.reset();
271 fMipmapProgramArrayBuffer.reset();
272 fWireRectArrayBuffer.reset();
273
274 if (0 != fHWProgramID) {
275 // detach the current program so there is no confusion on OpenGL's part
276 // that we want it to be deleted
277 GL_CALL(UseProgram(0));
278 }
279
280 if (0 != fTempSrcFBOID) {
281 GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
282 }
283 if (0 != fTempDstFBOID) {
284 GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
285 }
286 if (0 != fStencilClearFBOID) {
287 GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
288 }
289
290 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
291 if (0 != fCopyPrograms[i].fProgram) {
292 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
293 }
294 }
295
296 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
297 if (0 != fMipmapPrograms[i].fProgram) {
298 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
299 }
300 }
301
302 if (0 != fWireRectProgram.fProgram) {
303 GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
304 }
305
306 delete fProgramCache;
307 }
308
309 void GrGLGpu::disconnect(DisconnectType type) {
310 INHERITED::disconnect(type);
311 if (DisconnectType::kCleanup == type) {
312 if (fHWProgramID) {
313 GL_CALL(UseProgram(0));
314 }
315 if (fTempSrcFBOID) {
316 GL_CALL(DeleteFramebuffers(1, &fTempSrcFBOID));
317 }
318 if (fTempDstFBOID) {
319 GL_CALL(DeleteFramebuffers(1, &fTempDstFBOID));
320 }
321 if (fStencilClearFBOID) {
322 GL_CALL(DeleteFramebuffers(1, &fStencilClearFBOID));
323 }
324 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
325 if (fCopyPrograms[i].fProgram) {
326 GL_CALL(DeleteProgram(fCopyPrograms[i].fProgram));
327 }
328 }
329 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
330 if (fMipmapPrograms[i].fProgram) {
331 GL_CALL(DeleteProgram(fMipmapPrograms[i].fProgram));
332 }
333 }
334 if (fWireRectProgram.fProgram) {
335 GL_CALL(DeleteProgram(fWireRectProgram.fProgram));
336 }
337 } else {
338 if (fProgramCache) {
339 fProgramCache->abandon();
340 }
341 }
342
343 delete fProgramCache;
344 fProgramCache = nullptr;
345
346 fHWProgramID = 0;
347 fTempSrcFBOID = 0;
348 fTempDstFBOID = 0;
349 fStencilClearFBOID = 0;
350 fCopyProgramArrayBuffer.reset();
351 for (size_t i = 0; i < SK_ARRAY_COUNT(fCopyPrograms); ++i) {
352 fCopyPrograms[i].fProgram = 0;
353 }
354 fMipmapProgramArrayBuffer.reset();
355 for (size_t i = 0; i < SK_ARRAY_COUNT(fMipmapPrograms); ++i) {
356 fMipmapPrograms[i].fProgram = 0;
357 }
358 fWireRectProgram.fProgram = 0;
359 fWireRectArrayBuffer.reset();
360 if (this->glCaps().shaderCaps()->pathRenderingSupport()) {
361 this->glPathRendering()->disconnect(type);
362 }
363 }
364
365 ///////////////////////////////////////////////////////////////////////////////
366
367 void GrGLGpu::onResetContext(uint32_t resetBits) {
368 // we don't use the z-buffer (depth buffer) at all
369 if (resetBits & kMisc_GrGLBackendState) {
370 GL_CALL(Disable(GR_GL_DEPTH_TEST));
371 GL_CALL(DepthMask(GR_GL_FALSE));
372
373 fHWBufferState[kTexel_GrBufferType].invalidate();
374 fHWBufferState[kDrawIndirect_GrBufferType].invalidate();
375 fHWBufferState[kXferCpuToGpu_GrBufferType].invalidate();
376 fHWBufferState[kXferGpuToCpu_GrBufferType].invalidate();
377
378 fHWDrawFace = GrDrawFace::kInvalid;
379 if (kGL_GrGLStandard == this->glStandard()) {
380 #ifndef USE_NSIGHT
381 // Desktop-only state that we never change
382 if (!this->glCaps().isCoreProfile()) {
383 GL_CALL(Disable(GR_GL_POINT_SMOOTH));
384 GL_CALL(Disable(GR_GL_LINE_SMOOTH));
385 GL_CALL(Disable(GR_GL_POLYGON_SMOOTH));
386 GL_CALL(Disable(GR_GL_POLYGON_STIPPLE));
387 GL_CALL(Disable(GR_GL_COLOR_LOGIC_OP));
388 GL_CALL(Disable(GR_GL_INDEX_LOGIC_OP));
389 }
390 // The windows NVIDIA driver has GL_ARB_imaging in the extension string when using a
391 // core profile. This seems like a bug since the core spec removes any mention of
392 // GL_ARB_imaging.
393 if (this->glCaps().imagingSupport() && !this->glCaps().isCoreProfile()) {
394 GL_CALL(Disable(GR_GL_COLOR_TABLE));
395 }
396 GL_CALL(Disable(GR_GL_POLYGON_OFFSET_FILL));
397 #endif
398 // Since ES doesn't support glPointSize at all we always use the VS to
399 // set the point size
400 GL_CALL(Enable(GR_GL_VERTEX_PROGRAM_POINT_SIZE));
401
402 // We should set glPolygonMode(FRONT_AND_BACK,FILL) here, too. It isn't
403 // currently part of our gl interface. There are probably others as
404 // well.
405 }
406
407 if (kGLES_GrGLStandard == this->glStandard() &&
408 this->hasExtension("GL_ARM_shader_framebuffer_fetch")) {
409 // The arm extension requires specifically enabling MSAA fetching per sample.
410 // On some devices this may have a perf hit. Also multiple render targets are disabled
411 GL_CALL(Enable(GR_GL_FETCH_PER_SAMPLE_ARM));
412 }
413 fHWWriteToColor = kUnknown_TriState;
414 // we only ever use lines in hairline mode
415 GL_CALL(LineWidth(1));
416 GL_CALL(Disable(GR_GL_DITHER));
417 }
418
419 if (resetBits & kMSAAEnable_GrGLBackendState) {
420 fMSAAEnabled = kUnknown_TriState;
421
422 if (this->caps()->usesMixedSamples()) {
423 if (0 != this->caps()->maxRasterSamples()) {
424 fHWRasterMultisampleEnabled = kUnknown_TriState;
425 fHWNumRasterSamples = 0;
426 }
427
428 // The skia blend modes all use premultiplied alpha and therefore expect RGBA coverage
429 // modulation. This state has no effect when not rendering to a mixed sampled target.
430 GL_CALL(CoverageModulation(GR_GL_RGBA));
431 }
432 }
433
434 fHWActiveTextureUnitIdx = -1; // invalid
435
436 if (resetBits & kTextureBinding_GrGLBackendState) {
437 for (int s = 0; s < fHWBoundTextureUniqueIDs.count(); ++s) {
438 fHWBoundTextureUniqueIDs[s].makeInvalid();
439 }
440 for (int b = 0; b < fHWBufferTextures.count(); ++b) {
441 SkASSERT(this->caps()->shaderCaps()->texelBufferSupport());
442 fHWBufferTextures[b].fKnownBound = false;
443 }
444 for (int i = 0; i < fHWBoundImageStorages.count(); ++i) {
445 SkASSERT(this->caps()->shaderCaps()->imageLoadStoreSupport());
446 fHWBoundImageStorages[i].fTextureUniqueID.makeInvalid();
447 }
448 }
449
450 if (resetBits & kBlend_GrGLBackendState) {
451 fHWBlendState.invalidate();
452 }
453
454 if (resetBits & kView_GrGLBackendState) {
455 fHWScissorSettings.invalidate();
456 fHWWindowRectsState.invalidate();
457 fHWViewport.invalidate();
458 }
459
460 if (resetBits & kStencil_GrGLBackendState) {
461 fHWStencilSettings.invalidate();
462 fHWStencilTestEnabled = kUnknown_TriState;
463 }
464
465 // Vertex
466 if (resetBits & kVertex_GrGLBackendState) {
467 fHWVertexArrayState.invalidate();
468 fHWBufferState[kVertex_GrBufferType].invalidate();
469 fHWBufferState[kIndex_GrBufferType].invalidate();
470 }
471
472 if (resetBits & kRenderTarget_GrGLBackendState) {
473 fHWBoundRenderTargetUniqueID.makeInvalid();
474 fHWSRGBFramebuffer = kUnknown_TriState;
475 }
476
477 if (resetBits & kPathRendering_GrGLBackendState) {
478 if (this->caps()->shaderCaps()->pathRenderingSupport()) {
479 this->glPathRendering()->resetContext();
480 }
481 }
482
483 // we assume these values
484 if (resetBits & kPixelStore_GrGLBackendState) {
485 if (this->glCaps().unpackRowLengthSupport()) {
486 GL_CALL(PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
487 }
488 if (this->glCaps().packRowLengthSupport()) {
489 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
490 }
491 if (this->glCaps().unpackFlipYSupport()) {
492 GL_CALL(PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
493 }
494 if (this->glCaps().packFlipYSupport()) {
495 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, GR_GL_FALSE));
496 }
497 }
498
499 if (resetBits & kProgram_GrGLBackendState) {
500 fHWProgramID = 0;
501 }
502 }
503
504 static GrSurfaceOrigin resolve_origin(GrSurfaceOrigin origin, bool renderTarget) {
505 // By default, GrRenderTargets are GL's normal orientation so that they
506 // can be drawn to by the outside world without the client having
507 // to render upside down.
508 if (kDefault_GrSurfaceOrigin == origin) {
509 return renderTarget ? kBottomLeft_GrSurfaceOrigin : kTopLeft_GrSurfaceOrigin;
510 } else {
511 return origin;
512 }
513 }
514
515 sk_sp<GrTexture> GrGLGpu::onWrapBackendTexture(const GrBackendTextureDesc& desc,
516 GrWrapOwnership ownership) {
517 const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
518 if (!info || !info->fID) {
519 return nullptr;
520 }
521
522 // next line relies on GrBackendTextureDesc's flags matching GrTexture's
523 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrBackendTextureFlag);
524 SkASSERT(!renderTarget || kAdoptAndCache_GrWrapOwnership != ownership); // Not supported
525
526 GrGLTexture::IDDesc idDesc;
527 idDesc.fInfo = *info;
528
529 if (GR_GL_TEXTURE_EXTERNAL == idDesc.fInfo.fTarget) {
530 if (renderTarget) {
531 // This combination is not supported.
532 return nullptr;
533 }
534 if (!this->caps()->shaderCaps()->externalTextureSupport()) {
535 return nullptr;
536 }
537 } else if (GR_GL_TEXTURE_RECTANGLE == idDesc.fInfo.fTarget) {
538 if (!this->glCaps().rectangleTextureSupport()) {
539 return nullptr;
540 }
541 } else if (GR_GL_TEXTURE_2D != idDesc.fInfo.fTarget) {
542 return nullptr;
543 }
544
545 // Sample count is interpreted to mean the number of samples that Gr code should allocate
546 // for a render buffer that resolves to the texture. We don't support MSAA textures.
547 if (desc.fSampleCnt && !renderTarget) {
548 return nullptr;
549 }
550
551 if (kBorrow_GrWrapOwnership == ownership) {
552 idDesc.fOwnership = GrBackendObjectOwnership::kBorrowed;
553 } else {
554 idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
555 }
556
557 GrSurfaceDesc surfDesc;
558 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
559 surfDesc.fWidth = desc.fWidth;
560 surfDesc.fHeight = desc.fHeight;
561 surfDesc.fConfig = desc.fConfig;
562 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
563 // FIXME: this should be calling resolve_origin(), but Chrome code is currently
564 // assuming the old behaviour, which is that backend textures are always
565 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
566 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
567 if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
568 surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
569 } else {
570 surfDesc.fOrigin = desc.fOrigin;
571 }
572
573 if (renderTarget) {
574 GrGLRenderTarget::IDDesc rtIDDesc;
575 if (!this->createRenderTargetObjects(surfDesc, idDesc.fInfo, &rtIDDesc)) {
576 return nullptr;
577 }
578 return GrGLTextureRenderTarget::MakeWrapped(this, surfDesc, idDesc, rtIDDesc);
579 }
580
581 if (kAdoptAndCache_GrWrapOwnership == ownership) {
582 return sk_sp<GrTexture>(new GrGLTexture(this, SkBudgeted::kYes, surfDesc, idDesc));
583 } else {
584 return GrGLTexture::MakeWrapped(this, surfDesc, idDesc);
585 }
586 }
587
588 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendRenderTarget(const GrBackendRenderTargetDesc& wrapDesc){
589 GrGLRenderTarget::IDDesc idDesc;
590 idDesc.fRTFBOID = static_cast<GrGLuint>(wrapDesc.fRenderTargetHandle);
591 idDesc.fMSColorRenderbufferID = 0;
592 idDesc.fTexFBOID = GrGLRenderTarget::kUnresolvableFBOID;
593 idDesc.fRTFBOOwnership = GrBackendObjectOwnership::kBorrowed;
594 idDesc.fIsMixedSampled = false;
595
596 GrSurfaceDesc desc;
597 desc.fConfig = wrapDesc.fConfig;
598 desc.fFlags = kCheckAllocation_GrSurfaceFlag | kRenderTarget_GrSurfaceFlag;
599 desc.fWidth = wrapDesc.fWidth;
600 desc.fHeight = wrapDesc.fHeight;
601 desc.fSampleCnt = SkTMin(wrapDesc.fSampleCnt, this->caps()->maxSampleCount());
602 desc.fOrigin = resolve_origin(wrapDesc.fOrigin, true);
603
604 return GrGLRenderTarget::MakeWrapped(this, desc, idDesc, wrapDesc.fStencilBits);
605 }
606
607 sk_sp<GrRenderTarget> GrGLGpu::onWrapBackendTextureAsRenderTarget(const GrBackendTextureDesc& desc){
608 const GrGLTextureInfo* info = reinterpret_cast<const GrGLTextureInfo*>(desc.fTextureHandle);
609 if (!info || !info->fID) {
610 return nullptr;
611 }
612
613 GrGLTextureInfo texInfo;
614 texInfo = *info;
615
616 if (GR_GL_TEXTURE_RECTANGLE != texInfo.fTarget &&
617 GR_GL_TEXTURE_2D != texInfo.fTarget) {
618 // Only texture rectangle and texture 2d are supported. We do not check whether texture
619 // rectangle is supported by Skia - if the caller provided us with a texture rectangle,
620 // we assume the necessary support exists.
621 return nullptr;
622 }
623
624 GrSurfaceDesc surfDesc;
625 surfDesc.fFlags = (GrSurfaceFlags) desc.fFlags;
626 surfDesc.fWidth = desc.fWidth;
627 surfDesc.fHeight = desc.fHeight;
628 surfDesc.fConfig = desc.fConfig;
629 surfDesc.fSampleCnt = SkTMin(desc.fSampleCnt, this->caps()->maxSampleCount());
630 // FIXME: this should be calling resolve_origin(), but Chrome code is currently
631 // assuming the old behaviour, which is that backend textures are always
632 // BottomLeft, even for non-RT's. Once Chrome is fixed, change this to:
633 // glTexDesc.fOrigin = resolve_origin(desc.fOrigin, renderTarget);
634 if (kDefault_GrSurfaceOrigin == desc.fOrigin) {
635 surfDesc.fOrigin = kBottomLeft_GrSurfaceOrigin;
636 } else {
637 surfDesc.fOrigin = desc.fOrigin;
638 }
639
640 GrGLRenderTarget::IDDesc rtIDDesc;
641 if (!this->createRenderTargetObjects(surfDesc, texInfo, &rtIDDesc)) {
642 return nullptr;
643 }
644 return GrGLRenderTarget::MakeWrapped(this, surfDesc, rtIDDesc, 0);
645 }
646
647 ////////////////////////////////////////////////////////////////////////////////
648
649 bool GrGLGpu::onGetWritePixelsInfo(GrSurface* dstSurface, int width, int height,
650 GrPixelConfig srcConfig,
651 DrawPreference* drawPreference,
652 WritePixelTempDrawInfo* tempDrawInfo) {
653 if (GrPixelConfigIsCompressed(dstSurface->config())) {
654 return false;
655 }
656
657 // This subclass only allows writes to textures. If the dst is not a texture we have to draw
658 // into it. We could use glDrawPixels on GLs that have it, but we don't today.
659 if (!dstSurface->asTexture()) {
660 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
661 } else {
662 GrGLTexture* texture = static_cast<GrGLTexture*>(dstSurface->asTexture());
663 if (GR_GL_TEXTURE_EXTERNAL == texture->target()) {
664 // We don't currently support writing pixels to EXTERNAL textures.
665 return false;
666 }
667 }
668
669 if (GrPixelConfigIsSRGB(dstSurface->config()) != GrPixelConfigIsSRGB(srcConfig)) {
670 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
671 }
672
673 // Start off assuming no swizzling
674 tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
675 tempDrawInfo->fWriteConfig = srcConfig;
676
677 // These settings we will always want if a temp draw is performed. Initially set the config
678 // to srcConfig, though that may be modified if we decide to do a R/G swap.
679 tempDrawInfo->fTempSurfaceDesc.fFlags = kNone_GrSurfaceFlags;
680 tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
681 tempDrawInfo->fTempSurfaceDesc.fWidth = width;
682 tempDrawInfo->fTempSurfaceDesc.fHeight = height;
683 tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
684 tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
685
686 bool configsAreRBSwaps = GrPixelConfigSwapRAndB(srcConfig) == dstSurface->config();
687
688 if (configsAreRBSwaps) {
689 if (!this->caps()->isConfigTexturable(srcConfig)) {
690 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
691 tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
692 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
693 tempDrawInfo->fWriteConfig = dstSurface->config();
694 } else if (this->glCaps().rgba8888PixelsOpsAreSlow() &&
695 kRGBA_8888_GrPixelConfig == srcConfig) {
696 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
697 tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
698 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
699 tempDrawInfo->fWriteConfig = dstSurface->config();
700 } else if (kGLES_GrGLStandard == this->glStandard() &&
701 this->glCaps().bgraIsInternalFormat()) {
702 // The internal format and external formats must match texture uploads so we can't
703 // swizzle while uploading when BGRA is a distinct internal format.
704 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
705 tempDrawInfo->fTempSurfaceDesc.fConfig = dstSurface->config();
706 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
707 tempDrawInfo->fWriteConfig = dstSurface->config();
708 }
709 }
710
711 if (!this->glCaps().unpackFlipYSupport() &&
712 kBottomLeft_GrSurfaceOrigin == dstSurface->origin()) {
713 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
714 }
715
716 return true;
717 }
718
719 static bool check_write_and_transfer_input(GrGLTexture* glTex, GrSurface* surface,
720 GrPixelConfig config) {
721 if (!glTex) {
722 return false;
723 }
724
725 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
726 if (GrPixelConfigIsSRGB(surface->config()) != GrPixelConfigIsSRGB(config)) {
727 return false;
728 }
729
730 // Write or transfer of pixels is not implemented for TEXTURE_EXTERNAL textures
731 if (GR_GL_TEXTURE_EXTERNAL == glTex->target()) {
732 return false;
733 }
734
735 return true;
736 }
737
738 bool GrGLGpu::onWritePixels(GrSurface* surface,
739 int left, int top, int width, int height,
740 GrPixelConfig config,
741 const SkTArray<GrMipLevel>& texels) {
742 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
743
744 if (!check_write_and_transfer_input(glTex, surface, config)) {
745 return false;
746 }
747
748 this->setScratchTextureUnit();
749 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
750
751 bool success = false;
752 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
753 // We check that config == desc.fConfig in GrGLGpu::canWriteTexturePixels()
754 SkASSERT(config == glTex->desc().fConfig);
755 success = this->uploadCompressedTexData(glTex->desc(), glTex->target(), texels,
756 kWrite_UploadType, left, top, width, height);
757 } else {
758 success = this->uploadTexData(glTex->desc(), glTex->target(), kWrite_UploadType,
759 left, top, width, height, config, texels);
760 }
761
762 return success;
763 }
764
765 bool GrGLGpu::onTransferPixels(GrSurface* surface,
766 int left, int top, int width, int height,
767 GrPixelConfig config, GrBuffer* transferBuffer,
768 size_t offset, size_t rowBytes) {
769 GrGLTexture* glTex = static_cast<GrGLTexture*>(surface->asTexture());
770
771 if (!check_write_and_transfer_input(glTex, surface, config)) {
772 return false;
773 }
774
775 // For the moment, can't transfer compressed data
776 if (GrPixelConfigIsCompressed(glTex->desc().fConfig)) {
777 return false;
778 }
779
780 this->setScratchTextureUnit();
781 GL_CALL(BindTexture(glTex->target(), glTex->textureID()));
782
783 SkASSERT(!transferBuffer->isMapped());
784 SkASSERT(!transferBuffer->isCPUBacked());
785 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(transferBuffer);
786 this->bindBuffer(kXferCpuToGpu_GrBufferType, glBuffer);
787
788 bool success = false;
789 GrMipLevel mipLevel;
790 mipLevel.fPixels = transferBuffer;
791 mipLevel.fRowBytes = rowBytes;
792 SkSTArray<1, GrMipLevel> texels;
793 texels.push_back(mipLevel);
794 success = this->uploadTexData(glTex->desc(), glTex->target(), kTransfer_UploadType,
795 left, top, width, height, config, texels);
796 return success;
797 }
798
799 // For GL_[UN]PACK_ALIGNMENT.
800 static inline GrGLint config_alignment(GrPixelConfig config) {
801 SkASSERT(!GrPixelConfigIsCompressed(config));
802 switch (config) {
803 case kAlpha_8_GrPixelConfig:
804 case kGray_8_GrPixelConfig:
805 return 1;
806 case kRGB_565_GrPixelConfig:
807 case kRGBA_4444_GrPixelConfig:
808 case kAlpha_half_GrPixelConfig:
809 case kRGBA_half_GrPixelConfig:
810 return 2;
811 case kRGBA_8888_GrPixelConfig:
812 case kBGRA_8888_GrPixelConfig:
813 case kSRGBA_8888_GrPixelConfig:
814 case kSBGRA_8888_GrPixelConfig:
815 case kRGBA_8888_sint_GrPixelConfig:
816 case kRGBA_float_GrPixelConfig:
817 case kRG_float_GrPixelConfig:
818 return 4;
819 case kUnknown_GrPixelConfig:
820 case kETC1_GrPixelConfig:
821 return 0;
822 }
823 SkFAIL("Invalid pixel config");
824 return 0;
825 }
826
827 static inline GrGLenum check_alloc_error(const GrSurfaceDesc& desc,
828 const GrGLInterface* interface) {
829 if (SkToBool(desc.fFlags & kCheckAllocation_GrSurfaceFlag)) {
830 return GR_GL_GET_ERROR(interface);
831 } else {
832 return CHECK_ALLOC_ERROR(interface);
833 }
834 }
835
836 /**
837 * Creates storage space for the texture and fills it with texels.
838 *
839 * @param desc The surface descriptor for the texture being created.
840 * @param interface The GL interface in use.
841 * @param caps The capabilities of the GL device.
842 * @param internalFormat The data format used for the internal storage of the texture. May be sized.
843 * @param internalFormatForTexStorage The data format used for the TexStorage API. Must be sized.
844 * @param externalFormat The data format used for the external storage of the texture.
845 * @param externalType The type of the data used for the external storage of the texture.
846 * @param texels The texel data of the texture being created.
847 * @param baseWidth The width of the texture's base mipmap level
848 * @param baseHeight The height of the texture's base mipmap level
849 * @return true if allocating and populating the texture completed
850 * without error.
851 */
852 static bool allocate_and_populate_uncompressed_texture(const GrSurfaceDesc& desc,
853 const GrGLInterface& interface,
854 const GrGLCaps& caps,
855 GrGLenum target,
856 GrGLenum internalFormat,
857 GrGLenum internalFormatForTexStorage,
858 GrGLenum externalFormat,
859 GrGLenum externalType,
860 const SkTArray<GrMipLevel>& texels,
861 int baseWidth, int baseHeight) {
862 CLEAR_ERROR_BEFORE_ALLOC(&interface);
863
864 bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
865 // We can only use TexStorage if we know we will not later change the storage requirements.
866 // This means if we may later want to add mipmaps, we cannot use TexStorage.
867 // Right now, we cannot know if we will later add mipmaps or not.
868 // The only time we can use TexStorage is when we already have the
869 // mipmaps or are using a format incompatible with MIP maps.
870 useTexStorage &= texels.count() > 1 || GrPixelConfigIsSint(desc.fConfig);
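// Illustrative example (numbers assumed): a 16x16 texture uploaded with its full
// mip chain passes texels.count() == 5 (16x16, 8x8, 4x4, 2x2, 1x1), so TexStorage2D
// below is given 5 levels. A texture created with only the base level must instead
// take the TexImage2D path so mips can still be added later.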
871
872 if (useTexStorage) {
873 // We never resize or change formats of textures.
874 GL_ALLOC_CALL(&interface,
875 TexStorage2D(target,
876 SkTMax(texels.count(), 1),
877 internalFormatForTexStorage,
878 desc.fWidth, desc.fHeight));
879 GrGLenum error = check_alloc_error(desc, &interface);
880 if (error != GR_GL_NO_ERROR) {
881 return false;
882 } else {
883 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
884 const void* currentMipData = texels[currentMipLevel].fPixels;
885 if (currentMipData == nullptr) {
886 continue;
887 }
888 int twoToTheMipLevel = 1 << currentMipLevel;
889 int currentWidth = SkTMax(1, desc.fWidth / twoToTheMipLevel);
890 int currentHeight = SkTMax(1, desc.fHeight / twoToTheMipLevel);
891
892 GR_GL_CALL(&interface,
893 TexSubImage2D(target,
894 currentMipLevel,
895 0, // left
896 0, // top
897 currentWidth,
898 currentHeight,
899 externalFormat, externalType,
900 currentMipData));
901 }
902 return true;
903 }
904 } else {
905 if (texels.empty()) {
906 GL_ALLOC_CALL(&interface,
907 TexImage2D(target,
908 0,
909 internalFormat,
910 baseWidth,
911 baseHeight,
912 0, // border
913 externalFormat, externalType,
914 nullptr));
915 GrGLenum error = check_alloc_error(desc, &interface);
916 if (error != GR_GL_NO_ERROR) {
917 return false;
918 }
919 } else {
920 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
921 int twoToTheMipLevel = 1 << currentMipLevel;
922 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
923 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
924 const void* currentMipData = texels[currentMipLevel].fPixels;
925 // Even if currentMipData is nullptr, continue to call TexImage2D.
926 // This will allocate texture memory which we can later populate.
927 GL_ALLOC_CALL(&interface,
928 TexImage2D(target,
929 currentMipLevel,
930 internalFormat,
931 currentWidth,
932 currentHeight,
933 0, // border
934 externalFormat, externalType,
935 currentMipData));
936 GrGLenum error = check_alloc_error(desc, &interface);
937 if (error != GR_GL_NO_ERROR) {
938 return false;
939 }
940 }
941 }
942 }
943 return true;
944 }
945
946 /**
947 * Creates storage space for the texture and fills it with texels.
948 *
949 * @param desc The surface descriptor for the texture being created.
950 * @param interface The GL interface in use.
951 * @param caps The capabilities of the GL device.
952 * @param internalFormat The data format used for the internal storage of the texture.
953 * @param texels The texel data of the texture being created.
954 */
955 static bool allocate_and_populate_compressed_texture(const GrSurfaceDesc& desc,
956 const GrGLInterface& interface,
957 const GrGLCaps& caps,
958 GrGLenum target, GrGLenum internalFormat,
959 const SkTArray<GrMipLevel>& texels,
960 int baseWidth, int baseHeight) {
961 CLEAR_ERROR_BEFORE_ALLOC(&interface);
962
963 bool useTexStorage = caps.isConfigTexSupportEnabled(desc.fConfig);
964 // We can only use TexStorage if we know we will not later change the storage requirements.
965 // This means if we may later want to add mipmaps, we cannot use TexStorage.
966 // Right now, we cannot know if we will later add mipmaps or not.
967 // The only time we can use TexStorage is when we already have the
968 // mipmaps.
969 useTexStorage &= texels.count() > 1;
970
971 if (useTexStorage) {
972 // We never resize or change formats of textures.
973 GL_ALLOC_CALL(&interface,
974 TexStorage2D(target,
975 texels.count(),
976 internalFormat,
977 baseWidth, baseHeight));
978 GrGLenum error = check_alloc_error(desc, &interface);
979 if (error != GR_GL_NO_ERROR) {
980 return false;
981 } else {
982 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
983 const void* currentMipData = texels[currentMipLevel].fPixels;
984 if (currentMipData == nullptr) {
985 continue;
986 }
987
988 int twoToTheMipLevel = 1 << currentMipLevel;
989 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
990 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
991
992 // Make sure that the width and height that we pass to OpenGL
993 // is a multiple of the block size.
994 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
995 currentHeight);
996 GR_GL_CALL(&interface, CompressedTexSubImage2D(target,
997 currentMipLevel,
998 0, // left
999 0, // top
1000 currentWidth,
1001 currentHeight,
1002 internalFormat,
1003 SkToInt(dataSize),
1004 currentMipData));
1005 }
1006 }
1007 } else {
1008 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
1009 int twoToTheMipLevel = 1 << currentMipLevel;
1010 int currentWidth = SkTMax(1, baseWidth / twoToTheMipLevel);
1011 int currentHeight = SkTMax(1, baseHeight / twoToTheMipLevel);
1012
1013 // Make sure that the width and height that we pass to OpenGL
1014 // is a multiple of the block size.
1015 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, baseWidth, baseHeight);
1016
1017 GL_ALLOC_CALL(&interface,
1018 CompressedTexImage2D(target,
1019 currentMipLevel,
1020 internalFormat,
1021 currentWidth,
1022 currentHeight,
1023 0, // border
1024 SkToInt(dataSize),
1025 texels[currentMipLevel].fPixels));
1026
1027 GrGLenum error = check_alloc_error(desc, &interface);
1028 if (error != GR_GL_NO_ERROR) {
1029 return false;
1030 }
1031 }
1032 }
1033
1034 return true;
1035 }
1036
1037 /**
1038 * After a texture is created, any state which was altered during its creation
1039 * needs to be restored.
1040 *
1041 * @param interface The GL interface to use.
1042 * @param caps The capabilities of the GL device.
1043 * @param restoreGLRowLength Should the row length unpacking be restored?
1044 * @param glFlipY Did GL flip the texture vertically?
1045 */
1046 static void restore_pixelstore_state(const GrGLInterface& interface, const GrGLCaps& caps,
1047 bool restoreGLRowLength, bool glFlipY) {
1048 if (restoreGLRowLength) {
1049 SkASSERT(caps.unpackRowLengthSupport());
1050 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, 0));
1051 }
1052 if (glFlipY) {
1053 GR_GL_CALL(&interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_FALSE));
1054 }
1055 }
1056
1057 bool GrGLGpu::uploadTexData(const GrSurfaceDesc& desc,
1058 GrGLenum target,
1059 UploadType uploadType,
1060 int left, int top, int width, int height,
1061 GrPixelConfig dataConfig,
1062 const SkTArray<GrMipLevel>& texels) {
1063 // If we're uploading compressed data then we should be using uploadCompressedTexData
1064 SkASSERT(!GrPixelConfigIsCompressed(dataConfig));
1065
1066 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
1067
1068 // texels is const.
1069 // But we may need to flip the texture vertically to prepare it.
1070 // Rather than flip in place and alter the incoming data,
1071 // we allocate a new buffer to flip into.
1072 // This means we need to make a non-const shallow copy of texels.
1073 SkTArray<GrMipLevel> texelsShallowCopy(texels);
1074
1075 for (int currentMipLevel = texelsShallowCopy.count() - 1; currentMipLevel >= 0;
1076 currentMipLevel--) {
1077 SkASSERT(texelsShallowCopy[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
1078 }
1079
1080 const GrGLInterface* interface = this->glInterface();
1081 const GrGLCaps& caps = this->glCaps();
1082
1083 size_t bpp = GrBytesPerPixel(dataConfig);
1084
1085 if (width == 0 || height == 0) {
1086 return false;
1087 }
1088
1089 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
1090 int twoToTheMipLevel = 1 << currentMipLevel;
1091 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1092 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1093
1094 if (currentHeight > SK_MaxS32 ||
1095 currentWidth > SK_MaxS32) {
1096 return false;
1097 }
1098 if (!GrSurfacePriv::AdjustWritePixelParams(desc.fWidth, desc.fHeight, bpp, &left, &top,
1099 &currentWidth,
1100 &currentHeight,
1101 &texelsShallowCopy[currentMipLevel].fPixels,
1102 &texelsShallowCopy[currentMipLevel].fRowBytes)) {
1103 return false;
1104 }
1105 if (currentWidth < 0 || currentHeight < 0) {
1106 return false;
1107 }
1108 }
1109
1110 // Internal format comes from the texture desc.
1111 GrGLenum internalFormat;
1112 // External format and type come from the upload data.
1113 GrGLenum externalFormat;
1114 GrGLenum externalType;
1115 if (!this->glCaps().getTexImageFormats(desc.fConfig, dataConfig, &internalFormat,
1116 &externalFormat, &externalType)) {
1117 return false;
1118 }
1119 // TexStorage requires a sized format, and internalFormat may or may not be sized.
1120 GrGLenum internalFormatForTexStorage = this->glCaps().configSizedInternalFormat(desc.fConfig);
1121
1122 /*
1123 * Check whether to allocate a temporary buffer for flipping y or
1124 * because our srcData has extra bytes past each row. If so, we need
1125 * to trim those off here, since GL ES may not let us specify
1126 * GL_UNPACK_ROW_LENGTH.
1127 */
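// Worked example (figures are illustrative only): for a 100-pixel-wide RGBA_8888
// upload, bpp = 4 so trimRowBytes = 100 * 4 = 400. If the caller supplied
// rowBytes = 512, we either set GR_GL_UNPACK_ROW_LENGTH to 512 / 4 = 128 pixels
// (when supported) or copy the leading 400 bytes of each row into the tightly
// packed temporary buffer allocated below.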
1128 bool restoreGLRowLength = false;
1129 bool swFlipY = false;
1130 bool glFlipY = false;
1131
1132 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin && !texelsShallowCopy.empty()) {
1133 if (caps.unpackFlipYSupport()) {
1134 glFlipY = true;
1135 } else {
1136 swFlipY = true;
1137 }
1138 }
1139
1140 // in case we need a temporary, trimmed copy of the src pixels
1141 SkAutoSMalloc<128 * 128> tempStorage;
1142
1143 // find the combined size of all the mip levels and the relative offset of
1144 // each into the collective buffer
1145 size_t combined_buffer_size = 0;
1146 SkTArray<size_t> individual_mip_offsets(texelsShallowCopy.count());
1147 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
1148 int twoToTheMipLevel = 1 << currentMipLevel;
1149 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1150 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1151 const size_t trimmedSize = currentWidth * bpp * currentHeight;
1152 individual_mip_offsets.push_back(combined_buffer_size);
1153 combined_buffer_size += trimmedSize;
1154 }
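// Illustrative example (sizes assumed for clarity): a 4x4 RGBA_8888 texture with a
// full mip chain has levels 4x4, 2x2 and 1x1, giving trimmed sizes of 64, 16 and 4
// bytes, so individual_mip_offsets becomes {0, 64, 80} and combined_buffer_size is 84.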
1155 char* buffer = (char*)tempStorage.reset(combined_buffer_size);
1156
1157 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count(); currentMipLevel++) {
1158 int twoToTheMipLevel = 1 << currentMipLevel;
1159 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1160 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1161 const size_t trimRowBytes = currentWidth * bpp;
1162
1163 /*
1164 * check whether to allocate a temporary buffer for flipping y or
1165 * because our srcData has extra bytes past each row. If so, we need
1166 * to trim those off here, since GL ES may not let us specify
1167 * GL_UNPACK_ROW_LENGTH.
1168 */
1169 restoreGLRowLength = false;
1170
1171 const size_t rowBytes = texelsShallowCopy[currentMipLevel].fRowBytes;
1172
1173 // TODO: This optimization should be enabled with or without mips.
1174 // For use with mips, we must set GR_GL_UNPACK_ROW_LENGTH once per
1175 // mip level, before calling glTexImage2D.
1176 const bool usesMips = texelsShallowCopy.count() > 1;
1177 if (caps.unpackRowLengthSupport() && !swFlipY && !usesMips) {
1178 // can't use this for flipping, only non-neg values allowed. :(
1179 if (rowBytes != trimRowBytes) {
1180 GrGLint rowLength = static_cast<GrGLint>(rowBytes / bpp);
1181 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ROW_LENGTH, rowLength));
1182 restoreGLRowLength = true;
1183 }
1184 } else if (kTransfer_UploadType != uploadType) {
1185 if (trimRowBytes != rowBytes || swFlipY) {
1186 // copy data into our new storage, skipping the trailing bytes
1187 const char* src = (const char*)texelsShallowCopy[currentMipLevel].fPixels;
1188 if (swFlipY && currentHeight >= 1) {
1189 src += (currentHeight - 1) * rowBytes;
1190 }
1191 char* dst = buffer + individual_mip_offsets[currentMipLevel];
1192 for (int y = 0; y < currentHeight; y++) {
1193 memcpy(dst, src, trimRowBytes);
1194 if (swFlipY) {
1195 src -= rowBytes;
1196 } else {
1197 src += rowBytes;
1198 }
1199 dst += trimRowBytes;
1200 }
1201 // now point data to our copied version
1202 texelsShallowCopy[currentMipLevel].fPixels = buffer +
1203 individual_mip_offsets[currentMipLevel];
1204 texelsShallowCopy[currentMipLevel].fRowBytes = trimRowBytes;
1205 }
1206 } else {
1207 return false;
1208 }
1209 }
1210
1211 if (!texelsShallowCopy.empty()) {
1212 if (glFlipY) {
1213 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_FLIP_Y, GR_GL_TRUE));
1214 }
1215 GR_GL_CALL(interface, PixelStorei(GR_GL_UNPACK_ALIGNMENT,
1216 config_alignment(desc.fConfig)));
1217 }
1218
1219 bool succeeded = true;
1220 if (kNewTexture_UploadType == uploadType &&
1221 0 == left && 0 == top &&
1222 desc.fWidth == width && desc.fHeight == height) {
1223 succeeded = allocate_and_populate_uncompressed_texture(desc, *interface, caps, target,
1224 internalFormat,
1225 internalFormatForTexStorage,
1226 externalFormat, externalType,
1227 texelsShallowCopy, width, height);
1228 } else {
1229 if (swFlipY || glFlipY) {
1230 top = desc.fHeight - (top + height);
1231 }
1232 for (int currentMipLevel = 0; currentMipLevel < texelsShallowCopy.count();
1233 currentMipLevel++) {
1234 int twoToTheMipLevel = 1 << currentMipLevel;
1235 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1236 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1237
1238 GL_CALL(TexSubImage2D(target,
1239 currentMipLevel,
1240 left, top,
1241 currentWidth,
1242 currentHeight,
1243 externalFormat, externalType,
1244 texelsShallowCopy[currentMipLevel].fPixels));
1245 }
1246 }
1247
1248 restore_pixelstore_state(*interface, caps, restoreGLRowLength, glFlipY);
1249
1250 return succeeded;
1251 }
1252
1253 // TODO: This function is using a lot of wonky semantics like, if width == -1
1254 // then set width = desc.fWidth ... blah. A better way to do it might be to
1255 // create a CompressedTexData struct that takes a desc/ptr and figures out
1256 // the proper upload semantics. Then users can construct this function how they
1257 // see fit if they want to go against the "standard" way to do it.
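// One possible shape for such a struct (purely a sketch of the TODO above; the type
// and its members are hypothetical and do not exist in the codebase):
//
//     struct CompressedTexData {
//         CompressedTexData(const GrSurfaceDesc& desc, int width, int height)
//                 : fWidth(width < 0 ? desc.fWidth : width)
//                 , fHeight(height < 0 ? desc.fHeight : height) {}
//         int fWidth;
//         int fHeight;
//     };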
1258 bool GrGLGpu::uploadCompressedTexData(const GrSurfaceDesc& desc,
1259 GrGLenum target,
1260 const SkTArray<GrMipLevel>& texels,
1261 UploadType uploadType,
1262 int left, int top, int width, int height) {
1263 SkASSERT(this->caps()->isConfigTexturable(desc.fConfig));
1264
1265 // No support for software flip y, yet...
1266 SkASSERT(kBottomLeft_GrSurfaceOrigin != desc.fOrigin);
1267
1268 const GrGLInterface* interface = this->glInterface();
1269 const GrGLCaps& caps = this->glCaps();
1270
1271 if (-1 == width) {
1272 width = desc.fWidth;
1273 }
1274 #ifdef SK_DEBUG
1275 else {
1276 SkASSERT(width <= desc.fWidth);
1277 }
1278 #endif
1279
1280 if (-1 == height) {
1281 height = desc.fHeight;
1282 }
1283 #ifdef SK_DEBUG
1284 else {
1285 SkASSERT(height <= desc.fHeight);
1286 }
1287 #endif
1288
1289 // We only need the internal format for compressed 2D textures.
1290 GrGLenum internalFormat;
1291 if (!caps.getCompressedTexImageFormats(desc.fConfig, &internalFormat)) {
1292 return false;
1293 }
1294
1295 if (kNewTexture_UploadType == uploadType) {
1296 return allocate_and_populate_compressed_texture(desc, *interface, caps, target,
1297 internalFormat, texels, width, height);
1298 } else {
1299 for (int currentMipLevel = 0; currentMipLevel < texels.count(); currentMipLevel++) {
1300 SkASSERT(texels[currentMipLevel].fPixels || kTransfer_UploadType == uploadType);
1301
1302 int twoToTheMipLevel = 1 << currentMipLevel;
1303 int currentWidth = SkTMax(1, width / twoToTheMipLevel);
1304 int currentHeight = SkTMax(1, height / twoToTheMipLevel);
1305
1306 // Make sure that the width and height that we pass to OpenGL
1307 // is a multiple of the block size.
1308 size_t dataSize = GrCompressedFormatDataSize(desc.fConfig, currentWidth,
1309 currentHeight);
1310 GL_CALL(CompressedTexSubImage2D(target,
1311 currentMipLevel,
1312 left, top,
1313 currentWidth,
1314 currentHeight,
1315 internalFormat,
1316 SkToInt(dataSize),
1317 texels[currentMipLevel].fPixels));
1318 }
1319 }
1320
1321 return true;
1322 }
1323
1324 static bool renderbuffer_storage_msaa(const GrGLContext& ctx,
1325 int sampleCount,
1326 GrGLenum format,
1327 int width, int height) {
1328 CLEAR_ERROR_BEFORE_ALLOC(ctx.interface());
1329 SkASSERT(GrGLCaps::kNone_MSFBOType != ctx.caps()->msFBOType());
1330 switch (ctx.caps()->msFBOType()) {
1331 case GrGLCaps::kEXT_MSFBOType:
1332 case GrGLCaps::kStandard_MSFBOType:
1333 case GrGLCaps::kMixedSamples_MSFBOType:
1334 GL_ALLOC_CALL(ctx.interface(),
1335 RenderbufferStorageMultisample(GR_GL_RENDERBUFFER,
1336 sampleCount,
1337 format,
1338 width, height));
1339 break;
1340 case GrGLCaps::kES_Apple_MSFBOType:
1341 GL_ALLOC_CALL(ctx.interface(),
1342 RenderbufferStorageMultisampleES2APPLE(GR_GL_RENDERBUFFER,
1343 sampleCount,
1344 format,
1345 width, height));
1346 break;
1347 case GrGLCaps::kES_EXT_MsToTexture_MSFBOType:
1348 case GrGLCaps::kES_IMG_MsToTexture_MSFBOType:
1349 GL_ALLOC_CALL(ctx.interface(),
1350 RenderbufferStorageMultisampleES2EXT(GR_GL_RENDERBUFFER,
1351 sampleCount,
1352 format,
1353 width, height));
1354 break;
1355 case GrGLCaps::kNone_MSFBOType:
1356 SkFAIL("Shouldn't be here if we don't support multisampled renderbuffers.");
1357 break;
1358 }
1359 return (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(ctx.interface()));
1360 }
1361
1362 bool GrGLGpu::createRenderTargetObjects(const GrSurfaceDesc& desc,
1363 const GrGLTextureInfo& texInfo,
1364 GrGLRenderTarget::IDDesc* idDesc) {
1365 idDesc->fMSColorRenderbufferID = 0;
1366 idDesc->fRTFBOID = 0;
1367 idDesc->fRTFBOOwnership = GrBackendObjectOwnership::kOwned;
1368 idDesc->fTexFBOID = 0;
1369 SkASSERT((GrGLCaps::kMixedSamples_MSFBOType == this->glCaps().msFBOType()) ==
1370 this->caps()->usesMixedSamples());
1371 idDesc->fIsMixedSampled = desc.fSampleCnt > 0 && this->caps()->usesMixedSamples();
1372
1373 GrGLenum status;
1374
1375 GrGLenum colorRenderbufferFormat = 0; // suppress warning
1376
1377 if (desc.fSampleCnt > 0 && GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType()) {
1378 goto FAILED;
1379 }
1380
1381 GL_CALL(GenFramebuffers(1, &idDesc->fTexFBOID));
1382 if (!idDesc->fTexFBOID) {
1383 goto FAILED;
1384 }
1385
1386 // If we are using multisampling we will create two FBOS. We render to one and then resolve to
1387 // the texture bound to the other. The exception is the IMG multisample extension. With this
1388 // extension the texture is multisampled when rendered to and then auto-resolves it when it is
1389 // rendered from.
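// Sketch of the resulting FBO layout when two framebuffers are created (the resolve
// blit itself is performed elsewhere, not in this function):
//   fRTFBOID  - FBO whose color attachment is the multisampled renderbuffer
//               (this is what gets rendered into)
//   fTexFBOID - FBO whose color attachment is the texture (resolved into and
//               sampled from)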
1390 if (desc.fSampleCnt > 0 && this->glCaps().usesMSAARenderBuffers()) {
1391 GL_CALL(GenFramebuffers(1, &idDesc->fRTFBOID));
1392 GL_CALL(GenRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
1393 if (!idDesc->fRTFBOID ||
1394 !idDesc->fMSColorRenderbufferID) {
1395 goto FAILED;
1396 }
1397 if (!this->glCaps().getRenderbufferFormat(desc.fConfig, &colorRenderbufferFormat)) {
1398 return false;
1399 }
1400 } else {
1401 idDesc->fRTFBOID = idDesc->fTexFBOID;
1402 }
1403
1404 // below here we may bind the FBO
1405 fHWBoundRenderTargetUniqueID.makeInvalid();
1406 if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
1407 SkASSERT(desc.fSampleCnt > 0);
1408 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, idDesc->fMSColorRenderbufferID));
1409 if (!renderbuffer_storage_msaa(*fGLContext,
1410 desc.fSampleCnt,
1411 colorRenderbufferFormat,
1412 desc.fWidth, desc.fHeight)) {
1413 goto FAILED;
1414 }
1415 fStats.incRenderTargetBinds();
1416 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fRTFBOID));
1417 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1418 GR_GL_COLOR_ATTACHMENT0,
1419 GR_GL_RENDERBUFFER,
1420 idDesc->fMSColorRenderbufferID));
1421 if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
1422 !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
1423 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1424 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1425 goto FAILED;
1426 }
1427 fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
1428 }
1429 }
1430 fStats.incRenderTargetBinds();
1431 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, idDesc->fTexFBOID));
1432
1433 if (this->glCaps().usesImplicitMSAAResolve() && desc.fSampleCnt > 0) {
1434 GL_CALL(FramebufferTexture2DMultisample(GR_GL_FRAMEBUFFER,
1435 GR_GL_COLOR_ATTACHMENT0,
1436 texInfo.fTarget,
1437 texInfo.fID, 0, desc.fSampleCnt));
1438 } else {
1439 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1440 GR_GL_COLOR_ATTACHMENT0,
1441 texInfo.fTarget,
1442 texInfo.fID, 0));
1443 }
1444 if ((desc.fFlags & kCheckAllocation_GrSurfaceFlag) ||
1445 !this->glCaps().isConfigVerifiedColorAttachment(desc.fConfig)) {
1446 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1447 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
1448 goto FAILED;
1449 }
1450 fGLContext->caps()->markConfigAsValidColorAttachment(desc.fConfig);
1451 }
1452
1453 return true;
1454
1455 FAILED:
1456 if (idDesc->fMSColorRenderbufferID) {
1457 GL_CALL(DeleteRenderbuffers(1, &idDesc->fMSColorRenderbufferID));
1458 }
1459 if (idDesc->fRTFBOID != idDesc->fTexFBOID) {
1460 GL_CALL(DeleteFramebuffers(1, &idDesc->fRTFBOID));
1461 }
1462 if (idDesc->fTexFBOID) {
1463 GL_CALL(DeleteFramebuffers(1, &idDesc->fTexFBOID));
1464 }
1465 return false;
1466 }
1467
1468 // good to set a break-point here to know when createTexture fails
1469 static GrTexture* return_null_texture() {
1470 // SkDEBUGFAIL("null texture");
1471 return nullptr;
1472 }
1473
1474 #if 0 && defined(SK_DEBUG)
1475 static size_t as_size_t(int x) {
1476 return x;
1477 }
1478 #endif
1479
1480 static GrGLTexture::IDDesc generate_gl_texture(const GrGLInterface* interface) {
1481 GrGLTexture::IDDesc idDesc;
1482 idDesc.fInfo.fID = 0;
1483 GR_GL_CALL(interface, GenTextures(1, &idDesc.fInfo.fID));
1484 idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1485 // When we create the texture, we only
1486 // create GL_TEXTURE_2D at the moment.
1487 // External clients can do something different.
1488 idDesc.fInfo.fTarget = GR_GL_TEXTURE_2D;
1489 return idDesc;
1490 }
1491
1492 static void set_initial_texture_params(const GrGLInterface* interface,
1493 const GrGLTextureInfo& info,
1494 GrGLTexture::TexParams* initialTexParams) {
1495 // Some drivers like to know filter/wrap before seeing glTexImage2D. Some
1496 // drivers have a bug where an FBO won't be complete if it includes a
1497 // texture that is not mipmap complete (considering the filter in use).
1498 // we only set a subset here so invalidate first
1499 initialTexParams->invalidate();
1500 initialTexParams->fMinFilter = GR_GL_NEAREST;
1501 initialTexParams->fMagFilter = GR_GL_NEAREST;
1502 initialTexParams->fWrapS = GR_GL_CLAMP_TO_EDGE;
1503 initialTexParams->fWrapT = GR_GL_CLAMP_TO_EDGE;
1504 GR_GL_CALL(interface, TexParameteri(info.fTarget,
1505 GR_GL_TEXTURE_MAG_FILTER,
1506 initialTexParams->fMagFilter));
1507 GR_GL_CALL(interface, TexParameteri(info.fTarget,
1508 GR_GL_TEXTURE_MIN_FILTER,
1509 initialTexParams->fMinFilter));
1510 GR_GL_CALL(interface, TexParameteri(info.fTarget,
1511 GR_GL_TEXTURE_WRAP_S,
1512 initialTexParams->fWrapS));
1513 GR_GL_CALL(interface, TexParameteri(info.fTarget,
1514 GR_GL_TEXTURE_WRAP_T,
1515 initialTexParams->fWrapT));
1516 }
1517
1518 GrTexture* GrGLGpu::onCreateTexture(const GrSurfaceDesc& desc,
1519 SkBudgeted budgeted,
1520 const SkTArray<GrMipLevel>& texels) {
1521 // We fail if MSAA was requested and is not available.
1522 if (GrGLCaps::kNone_MSFBOType == this->glCaps().msFBOType() && desc.fSampleCnt) {
1523 //SkDebugf("MSAA RT requested but not supported on this platform.");
1524 return return_null_texture();
1525 }
1526
1527 bool renderTarget = SkToBool(desc.fFlags & kRenderTarget_GrSurfaceFlag);
1528
1529 GrGLTexture::IDDesc idDesc;
1530 idDesc.fOwnership = GrBackendObjectOwnership::kOwned;
1531 GrGLTexture::TexParams initialTexParams;
1532 if (!this->createTextureImpl(desc, &idDesc.fInfo, renderTarget, &initialTexParams, texels)) {
1533 return return_null_texture();
1534 }
1535
1536 bool wasMipMapDataProvided = false;
1537 if (texels.count() > 1) {
1538 wasMipMapDataProvided = true;
1539 }
1540
1541 GrGLTexture* tex;
1542 if (renderTarget) {
1543 // unbind the texture from the texture unit before binding it to the frame buffer
1544 GL_CALL(BindTexture(idDesc.fInfo.fTarget, 0));
1545 GrGLRenderTarget::IDDesc rtIDDesc;
1546
1547 if (!this->createRenderTargetObjects(desc, idDesc.fInfo, &rtIDDesc)) {
1548 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1549 return return_null_texture();
1550 }
1551 tex = new GrGLTextureRenderTarget(this, budgeted, desc, idDesc, rtIDDesc,
1552 wasMipMapDataProvided);
1553 } else {
1554 tex = new GrGLTexture(this, budgeted, desc, idDesc, wasMipMapDataProvided);
1555 }
1556 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1557 #ifdef TRACE_TEXTURE_CREATION
1558 SkDebugf("--- new texture [%d] size=(%d %d) config=%d\n",
1559 idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
1560 #endif
1561 return tex;
1562 }
1563
1564 GrTexture* GrGLGpu::onCreateCompressedTexture(const GrSurfaceDesc& desc,
1565 SkBudgeted budgeted,
1566 const SkTArray<GrMipLevel>& texels) {
1567 // Make sure that we're not flipping Y.
1568 if (kBottomLeft_GrSurfaceOrigin == desc.fOrigin) {
1569 return return_null_texture();
1570 }
1571
1572 GrGLTexture::IDDesc idDesc = generate_gl_texture(this->glInterface());
1573 if (!idDesc.fInfo.fID) {
1574 return return_null_texture();
1575 }
1576
1577 this->setScratchTextureUnit();
1578 GL_CALL(BindTexture(idDesc.fInfo.fTarget, idDesc.fInfo.fID));
1579
1580 GrGLTexture::TexParams initialTexParams;
1581 set_initial_texture_params(this->glInterface(), idDesc.fInfo, &initialTexParams);
1582
1583 if (!this->uploadCompressedTexData(desc, idDesc.fInfo.fTarget, texels)) {
1584 GL_CALL(DeleteTextures(1, &idDesc.fInfo.fID));
1585 return return_null_texture();
1586 }
1587
1588 GrGLTexture* tex;
1589 tex = new GrGLTexture(this, budgeted, desc, idDesc);
1590 tex->setCachedTexParams(initialTexParams, this->getResetTimestamp());
1591 #ifdef TRACE_TEXTURE_CREATION
1592 SkDebugf("--- new compressed texture [%d] size=(%d %d) config=%d\n",
1593 idDesc.fInfo.fID, desc.fWidth, desc.fHeight, desc.fConfig);
1594 #endif
1595 return tex;
1596 }
1597
1598 namespace {
1599
1600 const GrGLuint kUnknownBitCount = GrGLStencilAttachment::kUnknownBitCount;
1601
1602 void inline get_stencil_rb_sizes(const GrGLInterface* gl,
1603 GrGLStencilAttachment::Format* format) {
1604
1605 // we shouldn't ever know one size and not the other
1606 SkASSERT((kUnknownBitCount == format->fStencilBits) ==
1607 (kUnknownBitCount == format->fTotalBits));
1608 if (kUnknownBitCount == format->fStencilBits) {
1609 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1610 GR_GL_RENDERBUFFER_STENCIL_SIZE,
1611 (GrGLint*)&format->fStencilBits);
1612 if (format->fPacked) {
1613 GR_GL_GetRenderbufferParameteriv(gl, GR_GL_RENDERBUFFER,
1614 GR_GL_RENDERBUFFER_DEPTH_SIZE,
1615 (GrGLint*)&format->fTotalBits);
1616 format->fTotalBits += format->fStencilBits;
1617 } else {
1618 format->fTotalBits = format->fStencilBits;
1619 }
1620 }
1621 }
1622 }
1623
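// Finds a stencil format that can share an FBO with a color attachment of the given config. On
// the first query for a config we build a throwaway 16x16 texture/FBO and try each stencil
// format from the caps until CheckFramebufferStatus reports completeness; the winning index
// (or -1 if none works) is then cached in the caps.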
1624 int GrGLGpu::getCompatibleStencilIndex(GrPixelConfig config) {
1625 static const int kSize = 16;
1626 SkASSERT(this->caps()->isConfigRenderable(config, false));
1627 if (!this->glCaps().hasStencilFormatBeenDeterminedForConfig(config)) {
1628 // Default to unsupported, set this if we find a stencil format that works.
1629 int firstWorkingStencilFormatIndex = -1;
1630 // Create color texture
1631 GrGLuint colorID = 0;
1632 GL_CALL(GenTextures(1, &colorID));
1633 this->setScratchTextureUnit();
1634 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, colorID));
1635 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1636 GR_GL_TEXTURE_MAG_FILTER,
1637 GR_GL_NEAREST));
1638 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1639 GR_GL_TEXTURE_MIN_FILTER,
1640 GR_GL_NEAREST));
1641 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1642 GR_GL_TEXTURE_WRAP_S,
1643 GR_GL_CLAMP_TO_EDGE));
1644 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D,
1645 GR_GL_TEXTURE_WRAP_T,
1646 GR_GL_CLAMP_TO_EDGE));
1647
1648 GrGLenum internalFormat;
1649 GrGLenum externalFormat;
1650 GrGLenum externalType;
1651 if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
1652 &externalType)) {
1653 return false;
1654 }
1655 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1656 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D,
1657 0,
1658 internalFormat,
1659 kSize,
1660 kSize,
1661 0,
1662 externalFormat,
1663 externalType,
1664 NULL));
1665 if (GR_GL_NO_ERROR != CHECK_ALLOC_ERROR(this->glInterface())) {
1666 GL_CALL(DeleteTextures(1, &colorID));
1667 return -1;
1668 }
1669
1670 // unbind the texture from the texture unit before binding it to the frame buffer
1671 GL_CALL(BindTexture(GR_GL_TEXTURE_2D, 0));
1672
1673 // Create Framebuffer
1674 GrGLuint fb = 0;
1675 GL_CALL(GenFramebuffers(1, &fb));
1676 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fb));
1677 fHWBoundRenderTargetUniqueID.makeInvalid();
1678 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER,
1679 GR_GL_COLOR_ATTACHMENT0,
1680 GR_GL_TEXTURE_2D,
1681 colorID,
1682 0));
1683 GrGLuint sbRBID = 0;
1684 GL_CALL(GenRenderbuffers(1, &sbRBID));
1685
1686 // look over formats until we find a compatible one
1687 int stencilFmtCnt = this->glCaps().stencilFormats().count();
1688 if (sbRBID) {
1689 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbRBID));
1690 for (int i = 0; i < stencilFmtCnt && sbRBID; ++i) {
1691 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[i];
1692 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1693 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
1694 sFmt.fInternalFormat,
1695 kSize, kSize));
1696 if (GR_GL_NO_ERROR == CHECK_ALLOC_ERROR(this->glInterface())) {
1697 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1698 GR_GL_STENCIL_ATTACHMENT,
1699 GR_GL_RENDERBUFFER, sbRBID));
1700 if (sFmt.fPacked) {
1701 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1702 GR_GL_DEPTH_ATTACHMENT,
1703 GR_GL_RENDERBUFFER, sbRBID));
1704 } else {
1705 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1706 GR_GL_DEPTH_ATTACHMENT,
1707 GR_GL_RENDERBUFFER, 0));
1708 }
1709 GrGLenum status;
1710 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
1711 if (status == GR_GL_FRAMEBUFFER_COMPLETE) {
1712 firstWorkingStencilFormatIndex = i;
1713 break;
1714 }
1715 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1716 GR_GL_STENCIL_ATTACHMENT,
1717 GR_GL_RENDERBUFFER, 0));
1718 if (sFmt.fPacked) {
1719 GL_CALL(FramebufferRenderbuffer(GR_GL_FRAMEBUFFER,
1720 GR_GL_DEPTH_ATTACHMENT,
1721 GR_GL_RENDERBUFFER, 0));
1722 }
1723 }
1724 }
1725 GL_CALL(DeleteRenderbuffers(1, &sbRBID));
1726 }
1727 GL_CALL(DeleteTextures(1, &colorID));
1728 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, 0));
1729 GL_CALL(DeleteFramebuffers(1, &fb));
1730 fGLContext->caps()->setStencilFormatIndexForConfig(config, firstWorkingStencilFormatIndex);
1731 }
1732 return this->glCaps().getStencilFormatIndexForConfig(config);
1733 }
1734
1735 bool GrGLGpu::createTextureImpl(const GrSurfaceDesc& desc, GrGLTextureInfo* info,
1736 bool renderTarget, GrGLTexture::TexParams* initialTexParams,
1737 const SkTArray<GrMipLevel>& texels) {
1738 info->fID = 0;
1739 info->fTarget = GR_GL_TEXTURE_2D;
1740 GL_CALL(GenTextures(1, &(info->fID)));
1741
1742 if (!info->fID) {
1743 return false;
1744 }
1745
1746 this->setScratchTextureUnit();
1747 GL_CALL(BindTexture(info->fTarget, info->fID));
1748
1749 if (renderTarget && this->glCaps().textureUsageSupport()) {
1750 // provides a hint about how this texture will be used
1751 GL_CALL(TexParameteri(info->fTarget,
1752 GR_GL_TEXTURE_USAGE,
1753 GR_GL_FRAMEBUFFER_ATTACHMENT));
1754 }
1755
1756 if (info) {
1757 set_initial_texture_params(this->glInterface(), *info, initialTexParams);
1758 }
1759 if (!this->uploadTexData(desc, info->fTarget, kNewTexture_UploadType, 0, 0,
1760 desc.fWidth, desc.fHeight,
1761 desc.fConfig, texels)) {
1762 GL_CALL(DeleteTextures(1, &(info->fID)));
1763 return false;
1764 }
1765 return true;
1766 }
1767
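// Allocates a stencil renderbuffer covering the render target, using the stencil format picked
// by getCompatibleStencilIndex() for the target's config. Multisampled targets go through
// renderbuffer_storage_msaa(); single-sampled targets use plain RenderbufferStorage.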
1768 GrStencilAttachment* GrGLGpu::createStencilAttachmentForRenderTarget(const GrRenderTarget* rt,
1769 int width,
1770 int height) {
1771 SkASSERT(width >= rt->width());
1772 SkASSERT(height >= rt->height());
1773
1774 int samples = rt->numStencilSamples();
1775 GrGLStencilAttachment::IDDesc sbDesc;
1776
1777 int sIdx = this->getCompatibleStencilIndex(rt->config());
1778 if (sIdx < 0) {
1779 return nullptr;
1780 }
1781
1782 if (!sbDesc.fRenderbufferID) {
1783 GL_CALL(GenRenderbuffers(1, &sbDesc.fRenderbufferID));
1784 }
1785 if (!sbDesc.fRenderbufferID) {
1786 return nullptr;
1787 }
1788 GL_CALL(BindRenderbuffer(GR_GL_RENDERBUFFER, sbDesc.fRenderbufferID));
1789 const GrGLCaps::StencilFormat& sFmt = this->glCaps().stencilFormats()[sIdx];
1790 CLEAR_ERROR_BEFORE_ALLOC(this->glInterface());
1791 // we do this "if" so that we don't call the multisample
1792 // version on a GL that doesn't have an MSAA extension.
1793 if (samples > 0) {
1794 SkAssertResult(renderbuffer_storage_msaa(*fGLContext,
1795 samples,
1796 sFmt.fInternalFormat,
1797 width, height));
1798 } else {
1799 GL_ALLOC_CALL(this->glInterface(), RenderbufferStorage(GR_GL_RENDERBUFFER,
1800 sFmt.fInternalFormat,
1801 width, height));
1802 SkASSERT(GR_GL_NO_ERROR == check_alloc_error(rt->desc(), this->glInterface()));
1803 }
1804 fStats.incStencilAttachmentCreates();
1805 // After sized formats we attempt an unsized format and take
1806 // whatever sizes GL gives us. In that case we query for the size.
1807 GrGLStencilAttachment::Format format = sFmt;
1808 get_stencil_rb_sizes(this->glInterface(), &format);
1809 GrGLStencilAttachment* stencil = new GrGLStencilAttachment(this,
1810 sbDesc,
1811 width,
1812 height,
1813 samples,
1814 format);
1815 return stencil;
1816 }
1817
1818 ////////////////////////////////////////////////////////////////////////////////
1819
1820 // GL_STREAM_DRAW triggers an optimization in Chromium's GPU process where a client's vertex buffer
1821 // objects are implemented as client-side-arrays on tile-deferred architectures.
1822 #define DYNAMIC_USAGE_PARAM GR_GL_STREAM_DRAW
1823
1824 GrBuffer* GrGLGpu::onCreateBuffer(size_t size, GrBufferType intendedType,
1825 GrAccessPattern accessPattern, const void* data) {
1826 return GrGLBuffer::Create(this, size, intendedType, accessPattern, data);
1827 }
1828
1829 InstancedRendering* GrGLGpu::onCreateInstancedRendering() {
1830 return new GLInstancedRendering(this);
1831 }
1832
1833 void GrGLGpu::flushScissor(const GrScissorState& scissorState,
1834 const GrGLIRect& rtViewport,
1835 GrSurfaceOrigin rtOrigin) {
1836 if (scissorState.enabled()) {
1837 GrGLIRect scissor;
1838 scissor.setRelativeTo(rtViewport,
1839 scissorState.rect().fLeft,
1840 scissorState.rect().fTop,
1841 scissorState.rect().width(),
1842 scissorState.rect().height(),
1843 rtOrigin);
1844 // if the scissor fully contains the viewport then we fall through and
1845 // disable the scissor test.
1846 if (!scissor.contains(rtViewport)) {
1847 if (fHWScissorSettings.fRect != scissor) {
1848 scissor.pushToGLScissor(this->glInterface());
1849 fHWScissorSettings.fRect = scissor;
1850 }
1851 if (kYes_TriState != fHWScissorSettings.fEnabled) {
1852 GL_CALL(Enable(GR_GL_SCISSOR_TEST));
1853 fHWScissorSettings.fEnabled = kYes_TriState;
1854 }
1855 return;
1856 }
1857 }
1858
1859 // See fall through note above
1860 this->disableScissor();
1861 }
1862
1863 void GrGLGpu::flushWindowRectangles(const GrWindowRectsState& windowState,
1864 const GrGLRenderTarget* rt) {
1865 #ifndef USE_NSIGHT
1866 typedef GrWindowRectsState::Mode Mode;
1867 SkASSERT(!windowState.enabled() || rt->renderFBOID()); // Window rects can't be used on-screen.
1868 SkASSERT(windowState.numWindows() <= this->caps()->maxWindowRectangles());
1869
1870 if (!this->caps()->maxWindowRectangles() ||
1871 fHWWindowRectsState.knownEqualTo(rt->origin(), rt->getViewport(), windowState)) {
1872 return;
1873 }
1874
1875 // This is purely a workaround for a spurious warning generated by gcc. Otherwise the above
1876 // assert would be sufficient. https://gcc.gnu.org/bugzilla/show_bug.cgi?id=5912
1877 int numWindows = SkTMin(windowState.numWindows(), int(GrWindowRectangles::kMaxWindows));
1878 SkASSERT(windowState.numWindows() == numWindows);
1879
1880 GrGLIRect glwindows[GrWindowRectangles::kMaxWindows];
1881 const SkIRect* skwindows = windowState.windows().data();
1882 for (int i = 0; i < numWindows; ++i) {
1883 glwindows[i].setRelativeTo(rt->getViewport(), skwindows[i], rt->origin());
1884 }
1885
1886 GrGLenum glmode = (Mode::kExclusive == windowState.mode()) ? GR_GL_EXCLUSIVE : GR_GL_INCLUSIVE;
1887 GL_CALL(WindowRectangles(glmode, numWindows, glwindows->asInts()));
1888
1889 fHWWindowRectsState.set(rt->origin(), rt->getViewport(), windowState);
1890 #endif
1891 }
1892
1893 void GrGLGpu::disableWindowRectangles() {
1894 #ifndef USE_NSIGHT
1895 if (!this->caps()->maxWindowRectangles() || fHWWindowRectsState.knownDisabled()) {
1896 return;
1897 }
1898 GL_CALL(WindowRectangles(GR_GL_EXCLUSIVE, 0, nullptr));
1899 fHWWindowRectsState.setDisabled();
1900 #endif
1901 }
1902
1903 void GrGLGpu::flushMinSampleShading(float minSampleShading) {
1904 if (fHWMinSampleShading != minSampleShading) {
1905 if (minSampleShading > 0.0) {
1906 GL_CALL(Enable(GR_GL_SAMPLE_SHADING));
1907 GL_CALL(MinSampleShading(minSampleShading));
1908 }
1909 else {
1910 GL_CALL(Disable(GR_GL_SAMPLE_SHADING));
1911 }
1912 fHWMinSampleShading = minSampleShading;
1913 }
1914 }
1915
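// Fetches (or builds) the program for this pipeline/primitive-processor pair and flushes every
// piece of GL state it depends on: color write, draw face, sample shading, blend, stencil,
// scissor, window rectangles, HWAA, and finally the render target binding itself.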
1916 bool GrGLGpu::flushGLState(const GrPipeline& pipeline, const GrPrimitiveProcessor& primProc,
1917 bool willDrawPoints) {
1918 sk_sp<GrGLProgram> program(fProgramCache->refProgram(this, pipeline, primProc, willDrawPoints));
1919 if (!program) {
1920 GrCapsDebugf(this->caps(), "Failed to create program!\n");
1921 return false;
1922 }
1923
1924 program->generateMipmaps(primProc, pipeline);
1925
1926 GrXferProcessor::BlendInfo blendInfo;
1927 pipeline.getXferProcessor().getBlendInfo(&blendInfo);
1928
1929 this->flushColorWrite(blendInfo.fWriteColor);
1930 this->flushDrawFace(pipeline.getDrawFace());
1931 this->flushMinSampleShading(primProc.getSampleShading());
1932
1933 GrGLuint programID = program->programID();
1934 if (fHWProgramID != programID) {
1935 GL_CALL(UseProgram(programID));
1936 fHWProgramID = programID;
1937 }
1938
1939 if (blendInfo.fWriteColor) {
1940 // Swizzle the blend to match what the shader will output.
1941 const GrSwizzle& swizzle = this->caps()->shaderCaps()->configOutputSwizzle(
1942 pipeline.getRenderTarget()->config());
1943 this->flushBlend(blendInfo, swizzle);
1944 }
1945
1946 program->setData(primProc, pipeline);
1947
1948 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(pipeline.getRenderTarget());
1949 GrStencilSettings stencil;
1950 if (pipeline.isStencilEnabled()) {
1951 // TODO: attach stencil and create settings during render target flush.
1952 SkASSERT(glRT->renderTargetPriv().getStencilAttachment());
1953 stencil.reset(*pipeline.getUserStencil(), pipeline.hasStencilClip(),
1954 glRT->renderTargetPriv().numStencilBits());
1955 }
1956 this->flushStencil(stencil);
1957 this->flushScissor(pipeline.getScissorState(), glRT->getViewport(), glRT->origin());
1958 this->flushWindowRectangles(pipeline.getWindowRectsState(), glRT);
1959 this->flushHWAAState(glRT, pipeline.isHWAntialiasState(), !stencil.isDisabled());
1960
1961 // This must come after textures are flushed because a texture may need
1962 // to be msaa-resolved (which will modify bound FBO state).
1963 this->flushRenderTarget(glRT, nullptr, pipeline.getDisableOutputConversionToSRGB());
1964
1965 return true;
1966 }
1967
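// Binds the internal vertex array (with the mesh's index buffer, if indexed) and points each
// vertex attribute at the mesh's vertex buffer, folding the start vertex and the buffers' base
// offsets into the attribute pointers. Unused attribute arrays are disabled afterwards.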
1968 void GrGLGpu::setupGeometry(const GrPrimitiveProcessor& primProc,
1969 const GrNonInstancedMesh& mesh,
1970 size_t* indexOffsetInBytes) {
1971 const GrBuffer* vbuf = mesh.vertexBuffer();
1972 SkASSERT(vbuf);
1973 SkASSERT(!vbuf->isMapped());
1974
1975 GrGLAttribArrayState* attribState;
1976 if (mesh.isIndexed()) {
1977 SkASSERT(indexOffsetInBytes);
1978
1979 *indexOffsetInBytes = 0;
1980 const GrBuffer* ibuf = mesh.indexBuffer();
1981 SkASSERT(ibuf);
1982 SkASSERT(!ibuf->isMapped());
1983 *indexOffsetInBytes += ibuf->baseOffset();
1984 attribState = fHWVertexArrayState.bindInternalVertexArray(this, ibuf);
1985 } else {
1986 attribState = fHWVertexArrayState.bindInternalVertexArray(this);
1987 }
1988
1989 int vaCount = primProc.numAttribs();
1990 if (vaCount > 0) {
1991
1992 GrGLsizei stride = static_cast<GrGLsizei>(primProc.getVertexStride());
1993
1994 size_t vertexOffsetInBytes = stride * mesh.startVertex();
1995
1996 vertexOffsetInBytes += vbuf->baseOffset();
1997
1998 uint32_t usedAttribArraysMask = 0;
1999 size_t offset = 0;
2000
2001 for (int attribIndex = 0; attribIndex < vaCount; attribIndex++) {
2002 const GrGeometryProcessor::Attribute& attrib = primProc.getAttrib(attribIndex);
2003 usedAttribArraysMask |= (1 << attribIndex);
2004 GrVertexAttribType attribType = attrib.fType;
2005 attribState->set(this,
2006 attribIndex,
2007 vbuf,
2008 attribType,
2009 stride,
2010 reinterpret_cast<GrGLvoid*>(vertexOffsetInBytes + offset));
2011 offset += attrib.fOffset;
2012 }
2013 attribState->disableUnusedArrays(this, usedAttribArraysMask);
2014 }
2015 }
2016
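// Binds the buffer to the GL target tracked for its GrBufferType, skipping redundant binds via
// fHWBufferState. CPU-backed buffers bind buffer 0 instead. Index buffers first unbind the
// vertex array, since the element array binding is part of VAO state. Returns the GL target.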
2017 GrGLenum GrGLGpu::bindBuffer(GrBufferType type, const GrBuffer* buffer) {
2018 this->handleDirtyContext();
2019
2020 // Index buffer state is tied to the vertex array.
2021 if (kIndex_GrBufferType == type) {
2022 this->bindVertexArray(0);
2023 }
2024
2025 SkASSERT(type >= 0 && type <= kLast_GrBufferType);
2026 auto& bufferState = fHWBufferState[type];
2027
2028 if (buffer->uniqueID() != bufferState.fBoundBufferUniqueID) {
2029 if (buffer->isCPUBacked()) {
2030 if (!bufferState.fBufferZeroKnownBound) {
2031 GL_CALL(BindBuffer(bufferState.fGLTarget, 0));
2032 }
2033 } else {
2034 const GrGLBuffer* glBuffer = static_cast<const GrGLBuffer*>(buffer);
2035 GL_CALL(BindBuffer(bufferState.fGLTarget, glBuffer->bufferID()));
2036 }
2037 bufferState.fBufferZeroKnownBound = buffer->isCPUBacked();
2038 bufferState.fBoundBufferUniqueID = buffer->uniqueID();
2039 }
2040
2041 return bufferState.fGLTarget;
2042 }
2043
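// Called when a GrGLBuffer is about to be released. If the buffer was attached to any buffer
// textures, detach it from those units (TexBuffer with buffer 0) so the GL memory can be freed.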
2044 void GrGLGpu::notifyBufferReleased(const GrGLBuffer* buffer) {
2045 if (buffer->hasAttachedToTexture()) {
2046 // Detach this buffer from any textures to ensure the underlying memory is freed.
2047 GrGpuResource::UniqueID uniqueID = buffer->uniqueID();
2048 for (int i = fHWMaxUsedBufferTextureUnit; i >= 0; --i) {
2049 auto& buffTex = fHWBufferTextures[i];
2050 if (uniqueID != buffTex.fAttachedBufferUniqueID) {
2051 continue;
2052 }
2053 if (i == fHWMaxUsedBufferTextureUnit) {
2054 --fHWMaxUsedBufferTextureUnit;
2055 }
2056
2057 this->setTextureUnit(i);
2058 if (!buffTex.fKnownBound) {
2059 SkASSERT(buffTex.fTextureID);
2060 GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID));
2061 buffTex.fKnownBound = true;
2062 }
2063 GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER,
2064 this->glCaps().configSizedInternalFormat(buffTex.fTexelConfig), 0));
2065 }
2066 }
2067 }
2068
2069 void GrGLGpu::disableScissor() {
2070 if (kNo_TriState != fHWScissorSettings.fEnabled) {
2071 GL_CALL(Disable(GR_GL_SCISSOR_TEST));
2072 fHWScissorSettings.fEnabled = kNo_TriState;
2073 return;
2074 }
2075 }
2076
2077 void GrGLGpu::clear(const GrFixedClip& clip, GrColor color, GrRenderTarget* target) {
2078 this->handleDirtyContext();
2079
2080 // parent class should never let us get here with no RT
2081 SkASSERT(target);
2082 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2083
2084 this->flushRenderTarget(glRT, clip.scissorEnabled() ? &clip.scissorRect() : nullptr);
2085 this->flushScissor(clip.scissorState(), glRT->getViewport(), glRT->origin());
2086 this->flushWindowRectangles(clip.windowRectsState(), glRT);
2087
2088 GrGLfloat r, g, b, a;
2089 static const GrGLfloat scale255 = 1.f / 255.f;
2090 a = GrColorUnpackA(color) * scale255;
2091 GrGLfloat scaleRGB = scale255;
2092 r = GrColorUnpackR(color) * scaleRGB;
2093 g = GrColorUnpackG(color) * scaleRGB;
2094 b = GrColorUnpackB(color) * scaleRGB;
2095
2096 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
2097 fHWWriteToColor = kYes_TriState;
2098 GL_CALL(ClearColor(r, g, b, a));
2099 GL_CALL(Clear(GR_GL_COLOR_BUFFER_BIT));
2100 }
2101
2102 void GrGLGpu::clearStencil(GrRenderTarget* target) {
2103 if (nullptr == target) {
2104 return;
2105 }
2106 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2107 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
2108
2109 this->disableScissor();
2110 this->disableWindowRectangles();
2111
2112 GL_CALL(StencilMask(0xffffffff));
2113 GL_CALL(ClearStencil(0));
2114 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2115 fHWStencilSettings.invalidate();
2116 }
2117
2118 void GrGLGpu::clearStencilClip(const GrFixedClip& clip,
2119 bool insideStencilMask,
2120 GrRenderTarget* target) {
2121 SkASSERT(target);
2122 this->handleDirtyContext();
2123
2124 GrStencilAttachment* sb = target->renderTargetPriv().getStencilAttachment();
2125 // this should only be called internally when we know we have a
2126 // stencil buffer.
2127 SkASSERT(sb);
2128 GrGLint stencilBitCount = sb->bits();
2129 #if 0
2130 SkASSERT(stencilBitCount > 0);
2131 GrGLint clipStencilMask = (1 << (stencilBitCount - 1));
2132 #else
2133 // we could just clear the clip bit but when we go through
2134 // ANGLE a partial stencil mask will cause clears to be
2135 // turned into draws. Our contract on GrOpList says that
2136 // changing the clip between stencil passes may or may not
2137 // zero the client's clip bits. So we just clear the whole thing.
2138 static const GrGLint clipStencilMask = ~0;
2139 #endif
2140 GrGLint value;
2141 if (insideStencilMask) {
2142 value = (1 << (stencilBitCount - 1));
2143 } else {
2144 value = 0;
2145 }
2146 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(target);
2147 this->flushRenderTarget(glRT, &SkIRect::EmptyIRect());
2148
2149 this->flushScissor(clip.scissorState(), glRT->getViewport(), glRT->origin());
2150 this->flushWindowRectangles(clip.windowRectsState(), glRT);
2151
2152 GL_CALL(StencilMask((uint32_t) clipStencilMask));
2153 GL_CALL(ClearStencil(value));
2154 GL_CALL(Clear(GR_GL_STENCIL_BUFFER_BIT));
2155 fHWStencilSettings.invalidate();
2156 }
2157
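// Returns true when reading back from a bottom-up surface would cost us a CPU y-flip: tiny
// reads, GL-side pack-flip support, or a rowBytes mismatch that already forces a memcpy all
// make the flip effectively free.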
2158 static bool read_pixels_pays_for_y_flip(GrSurfaceOrigin origin, const GrGLCaps& caps,
2159 int width, int height, GrPixelConfig config,
2160 size_t rowBytes) {
2161 // If the surface is already TopLeft, we don't need to flip.
2162 if (kTopLeft_GrSurfaceOrigin == origin) {
2163 return false;
2164 }
2165
2166 // If the read is really small or smaller than the min texture size, don't force a draw.
2167 static const int kMinSize = 32;
2168 if (width < kMinSize || height < kMinSize) {
2169 return false;
2170 }
2171
2172 // if GL can do the flip then we'll never pay for it.
2173 if (caps.packFlipYSupport()) {
2174 return false;
2175 }
2176
2177 // If we have to do memcpy to handle non-trim rowBytes then we
2178 // get the flip for free. Otherwise it costs.
2179 // Note that we're assuming that 0 rowBytes has already been handled and that the width has been
2180 // clipped.
2181 return caps.packRowLengthSupport() || GrBytesPerPixel(config) * width == rowBytes;
2182 }
2183
2184 bool GrGLGpu::readPixelsSupported(GrRenderTarget* target, GrPixelConfig readConfig) {
2185 #ifdef SK_BUILD_FOR_MAC
2186 // Chromium may ask us to read back from locked IOSurfaces. Calling the command buffer's
2187 // glGetIntegerv() with GL_IMPLEMENTATION_COLOR_READ_FORMAT/_TYPE causes the command buffer
2188 // to make a call to check the framebuffer status which can hang the driver. So in Mac Chromium
2189 // we always use a temporary surface to test for read pixels support.
2190 // https://www.crbug.com/662802
2191 if (this->glContext().driver() == kChromium_GrGLDriver) {
2192 return this->readPixelsSupported(target->config(), readConfig);
2193 }
2194 #endif
2195 auto bindRenderTarget = [this, target]() -> bool {
2196 this->flushRenderTarget(static_cast<GrGLRenderTarget*>(target), &SkIRect::EmptyIRect());
2197 return true;
2198 };
2199 auto unbindRenderTarget = []{};
2200 auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
2201 GR_GL_GetIntegerv(this->glInterface(), query, value);
2202 };
2203 GrPixelConfig rtConfig = target->config();
2204 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
2205 unbindRenderTarget);
2206 }
2207
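// Asks GrGLCaps whether readConfig can be read from an rtConfig surface, supplying callbacks
// that bind (and later unbind) a temporary 16x16 render target or FBO so the driver's
// implementation read format/type can be queried against a real color attachment.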
2208 bool GrGLGpu::readPixelsSupported(GrPixelConfig rtConfig, GrPixelConfig readConfig) {
2209 sk_sp<GrTexture> temp;
2210 auto bindRenderTarget = [this, rtConfig, &temp]() -> bool {
2211 GrTextureDesc desc;
2212 desc.fConfig = rtConfig;
2213 desc.fWidth = desc.fHeight = 16;
2214 if (this->glCaps().isConfigRenderable(rtConfig, false)) {
2215 desc.fFlags = kRenderTarget_GrSurfaceFlag;
2216 temp.reset(this->createTexture(desc, SkBudgeted::kNo));
2217 if (!temp) {
2218 return false;
2219 }
2220 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(temp->asRenderTarget());
2221 this->flushRenderTarget(glrt, &SkIRect::EmptyIRect());
2222 return true;
2223 } else if (this->glCaps().canConfigBeFBOColorAttachment(rtConfig)) {
2224 temp.reset(this->createTexture(desc, SkBudgeted::kNo));
2225 if (!temp) {
2226 return false;
2227 }
2228 GrGLIRect vp;
2229 this->bindSurfaceFBOForPixelOps(temp.get(), GR_GL_FRAMEBUFFER, &vp, kDst_TempFBOTarget);
2230 fHWBoundRenderTargetUniqueID.makeInvalid();
2231 return true;
2232 }
2233 return false;
2234 };
2235 auto unbindRenderTarget = [this, &temp]() {
2236 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, temp.get());
2237 };
2238 auto getIntegerv = [this](GrGLenum query, GrGLint* value) {
2239 GR_GL_GetIntegerv(this->glInterface(), query, value);
2240 };
2241 return this->glCaps().readPixelsSupported(rtConfig, readConfig, getIntegerv, bindRenderTarget,
2242 unbindRenderTarget);
2243 }
2244
2245 bool GrGLGpu::readPixelsSupported(GrSurface* surfaceForConfig, GrPixelConfig readConfig) {
2246 if (GrRenderTarget* rt = surfaceForConfig->asRenderTarget()) {
2247 return this->readPixelsSupported(rt, readConfig);
2248 } else {
2249 GrPixelConfig config = surfaceForConfig->config();
2250 return this->readPixelsSupported(config, readConfig);
2251 }
2252 }
2253
2254 static bool requires_srgb_conversion(GrPixelConfig a, GrPixelConfig b) {
2255 if (GrPixelConfigIsSRGB(a)) {
2256 return !GrPixelConfigIsSRGB(b) && !GrPixelConfigIsAlphaOnly(b);
2257 } else if (GrPixelConfigIsSRGB(b)) {
2258 return !GrPixelConfigIsSRGB(a) && !GrPixelConfigIsAlphaOnly(a);
2259 }
2260 return false;
2261 }
2262
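// Decides how a readPixels request should be serviced: fills in tempDrawInfo with the surface
// desc, swizzle, and read config to use if a temp draw is performed, and elevates
// drawPreference when sRGB conversion, slow readback paths, R/B swaps, alpha-only reads, or a
// costly y-flip make drawing first preferable (or required).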
2263 bool GrGLGpu::onGetReadPixelsInfo(GrSurface* srcSurface, int width, int height, size_t rowBytes,
2264 GrPixelConfig readConfig, DrawPreference* drawPreference,
2265 ReadPixelTempDrawInfo* tempDrawInfo) {
2266 GrPixelConfig srcConfig = srcSurface->config();
2267
2268 // These settings we will always want if a temp draw is performed.
2269 tempDrawInfo->fTempSurfaceDesc.fFlags = kRenderTarget_GrSurfaceFlag;
2270 tempDrawInfo->fTempSurfaceDesc.fWidth = width;
2271 tempDrawInfo->fTempSurfaceDesc.fHeight = height;
2272 tempDrawInfo->fTempSurfaceDesc.fSampleCnt = 0;
2273 tempDrawInfo->fTempSurfaceDesc.fOrigin = kTopLeft_GrSurfaceOrigin; // no CPU y-flip for TL.
2274 tempDrawInfo->fTempSurfaceFit = this->glCaps().partialFBOReadIsSlow() ? SkBackingFit::kExact
2275 : SkBackingFit::kApprox;
2276 // For now assume no swizzling, we may change that below.
2277 tempDrawInfo->fSwizzle = GrSwizzle::RGBA();
2278
2279 // Depends on why we need/want a temp draw. Start off assuming no change, the surface we read
2280 // from will be srcConfig and we will read readConfig pixels from it.
2281 // Note that if we require a draw and return a non-renderable format for the temp surface the
2282 // base class will fail for us.
2283 tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
2284 tempDrawInfo->fReadConfig = readConfig;
2285
2286 if (requires_srgb_conversion(srcConfig, readConfig)) {
2287 if (!this->readPixelsSupported(readConfig, readConfig)) {
2288 return false;
2289 }
2290 // Draw to do srgb to linear conversion or vice versa.
2291 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
2292 tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
2293 tempDrawInfo->fReadConfig = readConfig;
2294 return true;
2295 }
2296
2297 if (this->glCaps().rgba8888PixelsOpsAreSlow() && kRGBA_8888_GrPixelConfig == readConfig &&
2298 this->readPixelsSupported(kBGRA_8888_GrPixelConfig, kBGRA_8888_GrPixelConfig)) {
2299 tempDrawInfo->fTempSurfaceDesc.fConfig = kBGRA_8888_GrPixelConfig;
2300 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
2301 tempDrawInfo->fReadConfig = kBGRA_8888_GrPixelConfig;
2302 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
2303 } else if (this->glCaps().rgbaToBgraReadbackConversionsAreSlow() &&
2304 GrBytesPerPixel(readConfig) == 4 &&
2305 GrPixelConfigSwapRAndB(readConfig) == srcConfig &&
2306 this->readPixelsSupported(srcSurface, srcConfig)) {
2307 // Mesa 3D takes a slow path when reading back BGRA from an RGBA surface and vice-versa.
2308 // Better to do a draw with a R/B swap and then read as the original config.
2309 tempDrawInfo->fTempSurfaceDesc.fConfig = srcConfig;
2310 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
2311 tempDrawInfo->fReadConfig = srcConfig;
2312 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
2313 } else if (!this->readPixelsSupported(srcSurface, readConfig)) {
2314 if (readConfig == kBGRA_8888_GrPixelConfig &&
2315 this->glCaps().canConfigBeFBOColorAttachment(kRGBA_8888_GrPixelConfig) &&
2316 this->readPixelsSupported(kRGBA_8888_GrPixelConfig, kRGBA_8888_GrPixelConfig)) {
2317 // We're trying to read BGRA but it's not supported. If RGBA is renderable and
2318 // we can read it back, then do a swizzling draw to a RGBA and read it back (which
2319 // will effectively be BGRA).
2320 tempDrawInfo->fTempSurfaceDesc.fConfig = kRGBA_8888_GrPixelConfig;
2321 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
2322 tempDrawInfo->fReadConfig = kRGBA_8888_GrPixelConfig;
2323 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
2324 } else if (readConfig == kSBGRA_8888_GrPixelConfig &&
2325 this->glCaps().canConfigBeFBOColorAttachment(kSRGBA_8888_GrPixelConfig) &&
2326 this->readPixelsSupported(kSRGBA_8888_GrPixelConfig, kSRGBA_8888_GrPixelConfig)) {
2327 // We're trying to read sBGRA but it's not supported. If sRGBA is renderable and
2328 // we can read it back, then do a swizzling draw to a sRGBA and read it back (which
2329 // will effectively be sBGRA).
2330 tempDrawInfo->fTempSurfaceDesc.fConfig = kSRGBA_8888_GrPixelConfig;
2331 tempDrawInfo->fSwizzle = GrSwizzle::BGRA();
2332 tempDrawInfo->fReadConfig = kSRGBA_8888_GrPixelConfig;
2333 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
2334 } else if (readConfig == kAlpha_8_GrPixelConfig) {
2335 // onReadPixels implements a fallback for cases where we want to read kAlpha_8, it's
2336 // unsupported, but 32bit RGBA reads are supported.
2337 // Don't attempt to do any srgb conversions since we only care about alpha.
2338 GrPixelConfig cpuTempConfig = kRGBA_8888_GrPixelConfig;
2339 if (GrPixelConfigIsSRGB(srcSurface->config())) {
2340 cpuTempConfig = kSRGBA_8888_GrPixelConfig;
2341 }
2342 if (!this->readPixelsSupported(srcSurface, cpuTempConfig)) {
2343 // If we can't read RGBA from the src try to draw to a kRGBA_8888 (or kSRGBA_8888)
2344 // first and then onReadPixels will read that to a 32bit temporary buffer.
2345 if (this->glCaps().canConfigBeFBOColorAttachment(cpuTempConfig)) {
2346 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
2347 tempDrawInfo->fTempSurfaceDesc.fConfig = cpuTempConfig;
2348 tempDrawInfo->fReadConfig = kAlpha_8_GrPixelConfig;
2349 } else {
2350 return false;
2351 }
2352 } else {
2353 SkASSERT(tempDrawInfo->fTempSurfaceDesc.fConfig == srcConfig);
2354 SkASSERT(tempDrawInfo->fReadConfig == kAlpha_8_GrPixelConfig);
2355 }
2356 } else if (this->glCaps().canConfigBeFBOColorAttachment(readConfig) &&
2357 this->readPixelsSupported(readConfig, readConfig)) {
2358 // Do a draw to convert from the src config to the read config.
2359 ElevateDrawPreference(drawPreference, kRequireDraw_DrawPreference);
2360 tempDrawInfo->fTempSurfaceDesc.fConfig = readConfig;
2361 tempDrawInfo->fReadConfig = readConfig;
2362 } else {
2363 return false;
2364 }
2365 }
2366
2367 if ((srcSurface->asRenderTarget() || this->glCaps().canConfigBeFBOColorAttachment(srcConfig)) &&
2368 read_pixels_pays_for_y_flip(srcSurface->origin(), this->glCaps(), width, height, readConfig,
2369 rowBytes)) {
2370 ElevateDrawPreference(drawPreference, kGpuPrefersDraw_DrawPreference);
2371 }
2372
2373 return true;
2374 }
2375
2376 bool GrGLGpu::onReadPixels(GrSurface* surface,
2377 int left, int top,
2378 int width, int height,
2379 GrPixelConfig config,
2380 void* buffer,
2381 size_t rowBytes) {
2382 SkASSERT(surface);
2383
2384 GrGLRenderTarget* renderTarget = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
2385 if (!renderTarget && !this->glCaps().canConfigBeFBOColorAttachment(surface->config())) {
2386 return false;
2387 }
2388
2389 // OpenGL doesn't do sRGB <-> linear conversions when reading and writing pixels.
2390 if (requires_srgb_conversion(surface->config(), config)) {
2391 return false;
2392 }
2393
2394 // We have a special case fallback for reading eight bit alpha. We will read back all four 8
2395 // bit channels as RGBA and then extract A.
2396 if (!this->readPixelsSupported(surface, config)) {
2397 // Don't attempt to do any srgb conversions since we only care about alpha.
2398 GrPixelConfig tempConfig = kRGBA_8888_GrPixelConfig;
2399 if (GrPixelConfigIsSRGB(surface->config())) {
2400 tempConfig = kSRGBA_8888_GrPixelConfig;
2401 }
2402 if (kAlpha_8_GrPixelConfig == config &&
2403 this->readPixelsSupported(surface, tempConfig)) {
2404 std::unique_ptr<uint32_t[]> temp(new uint32_t[width * height * 4]);
2405 if (this->onReadPixels(surface, left, top, width, height, tempConfig, temp.get(),
2406 width*4)) {
2407 uint8_t* dst = reinterpret_cast<uint8_t*>(buffer);
2408 for (int j = 0; j < height; ++j) {
2409 for (int i = 0; i < width; ++i) {
2410 dst[j*rowBytes + i] = (0xFF000000U & temp[j*width+i]) >> 24;
2411 }
2412 }
2413 return true;
2414 }
2415 }
2416 return false;
2417 }
2418
2419 GrGLenum externalFormat;
2420 GrGLenum externalType;
2421 if (!this->glCaps().getReadPixelsFormat(surface->config(), config, &externalFormat,
2422 &externalType)) {
2423 return false;
2424 }
2425 bool flipY = kBottomLeft_GrSurfaceOrigin == surface->origin();
2426
2427 GrGLIRect glvp;
2428 if (renderTarget) {
2429 // resolve the render target if necessary
2430 switch (renderTarget->getResolveType()) {
2431 case GrGLRenderTarget::kCantResolve_ResolveType:
2432 return false;
2433 case GrGLRenderTarget::kAutoResolves_ResolveType:
2434 this->flushRenderTarget(renderTarget, &SkIRect::EmptyIRect());
2435 break;
2436 case GrGLRenderTarget::kCanResolve_ResolveType:
2437 this->onResolveRenderTarget(renderTarget);
2438 // we don't track the state of the READ FBO ID.
2439 fStats.incRenderTargetBinds();
2440 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, renderTarget->textureFBOID()));
2441 break;
2442 default:
2443 SkFAIL("Unknown resolve type");
2444 }
2445 glvp = renderTarget->getViewport();
2446 } else {
2447 // Use a temporary FBO.
2448 this->bindSurfaceFBOForPixelOps(surface, GR_GL_FRAMEBUFFER, &glvp, kSrc_TempFBOTarget);
2449 fHWBoundRenderTargetUniqueID.makeInvalid();
2450 }
2451
2452 // the read rect is viewport-relative
2453 GrGLIRect readRect;
2454 readRect.setRelativeTo(glvp, left, top, width, height, surface->origin());
2455
2456 size_t bytesPerPixel = GrBytesPerPixel(config);
2457 size_t tightRowBytes = bytesPerPixel * width;
2458
2459 size_t readDstRowBytes = tightRowBytes;
2460 void* readDst = buffer;
2461
2462 // determine if GL can read using the passed rowBytes or if we need
2463 // a scratch buffer.
2464 SkAutoSMalloc<32 * sizeof(GrColor)> scratch;
2465 if (rowBytes != tightRowBytes) {
2466 if (this->glCaps().packRowLengthSupport() && !(rowBytes % bytesPerPixel)) {
2467 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH,
2468 static_cast<GrGLint>(rowBytes / bytesPerPixel)));
2469 readDstRowBytes = rowBytes;
2470 } else {
2471 scratch.reset(tightRowBytes * height);
2472 readDst = scratch.get();
2473 }
2474 }
2475 if (flipY && this->glCaps().packFlipYSupport()) {
2476 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 1));
2477 }
2478 GL_CALL(PixelStorei(GR_GL_PACK_ALIGNMENT, config_alignment(config)));
2479
2480 GL_CALL(ReadPixels(readRect.fLeft, readRect.fBottom,
2481 readRect.fWidth, readRect.fHeight,
2482 externalFormat, externalType, readDst));
2483 if (readDstRowBytes != tightRowBytes) {
2484 SkASSERT(this->glCaps().packRowLengthSupport());
2485 GL_CALL(PixelStorei(GR_GL_PACK_ROW_LENGTH, 0));
2486 }
2487 if (flipY && this->glCaps().packFlipYSupport()) {
2488 GL_CALL(PixelStorei(GR_GL_PACK_REVERSE_ROW_ORDER, 0));
2489 flipY = false;
2490 }
2491
2492 // now reverse the order of the rows, since GL's are bottom-to-top, but our
2493 // API presents top-to-bottom. We must preserve the padding contents. Note
2494 // that the above readPixels did not overwrite the padding.
2495 if (readDst == buffer) {
2496 SkASSERT(rowBytes == readDstRowBytes);
2497 if (flipY) {
2498 scratch.reset(tightRowBytes);
2499 void* tmpRow = scratch.get();
2500 // flip y in-place by rows
2501 const int halfY = height >> 1;
2502 char* top = reinterpret_cast<char*>(buffer);
2503 char* bottom = top + (height - 1) * rowBytes;
2504 for (int y = 0; y < halfY; y++) {
2505 memcpy(tmpRow, top, tightRowBytes);
2506 memcpy(top, bottom, tightRowBytes);
2507 memcpy(bottom, tmpRow, tightRowBytes);
2508 top += rowBytes;
2509 bottom -= rowBytes;
2510 }
2511 }
2512 } else {
2513 SkASSERT(readDst != buffer);
2514 SkASSERT(rowBytes != tightRowBytes);
2515 // copy from readDst to buffer while flipping y
2516 // const int halfY = height >> 1;
2517 const char* src = reinterpret_cast<const char*>(readDst);
2518 char* dst = reinterpret_cast<char*>(buffer);
2519 if (flipY) {
2520 dst += (height-1) * rowBytes;
2521 }
2522 for (int y = 0; y < height; y++) {
2523 memcpy(dst, src, tightRowBytes);
2524 src += readDstRowBytes;
2525 if (!flipY) {
2526 dst += rowBytes;
2527 } else {
2528 dst -= rowBytes;
2529 }
2530 }
2531 }
2532 if (!renderTarget) {
2533 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, surface);
2534 }
2535 return true;
2536 }
2537
2538 GrGpuCommandBuffer* GrGLGpu::createCommandBuffer(
2539 const GrGpuCommandBuffer::LoadAndStoreInfo& colorInfo,
2540 const GrGpuCommandBuffer::LoadAndStoreInfo& stencilInfo) {
2541 return new GrGLGpuCommandBuffer(this);
2542 }
2543
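// Makes the target's FBO the current GL_FRAMEBUFFER if it isn't already, updating the cached
// binding and viewport, and (where the driver supports write control) toggles FRAMEBUFFER_SRGB
// to match the target's config. Also records the dirtied bounds via didWriteToSurface().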
2544 void GrGLGpu::flushRenderTarget(GrGLRenderTarget* target, const SkIRect* bounds, bool disableSRGB) {
2545 SkASSERT(target);
2546
2547 GrGpuResource::UniqueID rtID = target->uniqueID();
2548 if (fHWBoundRenderTargetUniqueID != rtID) {
2549 fStats.incRenderTargetBinds();
2550 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, target->renderFBOID()));
2551 #ifdef SK_DEBUG
2552 // don't do this check in Chromium -- this is causing
2553 // lots of repeated command buffer flushes when the compositor is
2554 // rendering with Ganesh, which is really slow; even too slow for
2555 // Debug mode.
2556 if (kChromium_GrGLDriver != this->glContext().driver()) {
2557 GrGLenum status;
2558 GL_CALL_RET(status, CheckFramebufferStatus(GR_GL_FRAMEBUFFER));
2559 if (status != GR_GL_FRAMEBUFFER_COMPLETE) {
2560 SkDebugf("GrGLGpu::flushRenderTarget glCheckFramebufferStatus %x\n", status);
2561 }
2562 }
2563 #endif
2564 fHWBoundRenderTargetUniqueID = rtID;
2565 this->flushViewport(target->getViewport());
2566 }
2567
2568 if (this->glCaps().srgbWriteControl()) {
2569 this->flushFramebufferSRGB(GrPixelConfigIsSRGB(target->config()) && !disableSRGB);
2570 }
2571
2572 this->didWriteToSurface(target, bounds);
2573 }
2574
2575 void GrGLGpu::flushFramebufferSRGB(bool enable) {
2576 if (enable && kYes_TriState != fHWSRGBFramebuffer) {
2577 GL_CALL(Enable(GR_GL_FRAMEBUFFER_SRGB));
2578 fHWSRGBFramebuffer = kYes_TriState;
2579 } else if (!enable && kNo_TriState != fHWSRGBFramebuffer) {
2580 GL_CALL(Disable(GR_GL_FRAMEBUFFER_SRGB));
2581 fHWSRGBFramebuffer = kNo_TriState;
2582 }
2583 }
2584
2585 void GrGLGpu::flushViewport(const GrGLIRect& viewport) {
2586 if (fHWViewport != viewport) {
2587 viewport.pushToGLViewport(this->glInterface());
2588 fHWViewport = viewport;
2589 }
2590 }
2591
2592 GrGLenum gPrimitiveType2GLMode[] = {
2593 GR_GL_TRIANGLES,
2594 GR_GL_TRIANGLE_STRIP,
2595 GR_GL_TRIANGLE_FAN,
2596 GR_GL_POINTS,
2597 GR_GL_LINES,
2598 GR_GL_LINE_STRIP
2599 };
2600
2601 #define SWAP_PER_DRAW 0
2602
2603 #if SWAP_PER_DRAW
2604 #if defined(SK_BUILD_FOR_MAC)
2605 #include <AGL/agl.h>
2606 #elif defined(SK_BUILD_FOR_WIN32)
2607 #include <gl/GL.h>
2608 void SwapBuf() {
2609 DWORD procID = GetCurrentProcessId();
2610 HWND hwnd = GetTopWindow(GetDesktopWindow());
2611 while(hwnd) {
2612 DWORD wndProcID = 0;
2613 GetWindowThreadProcessId(hwnd, &wndProcID);
2614 if(wndProcID == procID) {
2615 SwapBuffers(GetDC(hwnd));
2616 }
2617 hwnd = GetNextWindow(hwnd, GW_HWNDNEXT);
2618 }
2619 }
2620 #endif
2621 #endif
2622
2623 void GrGLGpu::draw(const GrPipeline& pipeline,
2624 const GrPrimitiveProcessor& primProc,
2625 const GrMesh meshes[],
2626 int meshCount) {
2627 this->handleDirtyContext();
2628
2629 bool hasPoints = false;
2630 for (int i = 0; i < meshCount; ++i) {
2631 if (meshes[i].primitiveType() == kPoints_GrPrimitiveType) {
2632 hasPoints = true;
2633 break;
2634 }
2635 }
2636 if (!this->flushGLState(pipeline, primProc, hasPoints)) {
2637 return;
2638 }
2639
2640 for (int i = 0; i < meshCount; ++i) {
2641 if (GrXferBarrierType barrierType = pipeline.xferBarrierType(*this->caps())) {
2642 this->xferBarrier(pipeline.getRenderTarget(), barrierType);
2643 }
2644
2645 const GrMesh& mesh = meshes[i];
2646 GrMesh::Iterator iter;
2647 const GrNonInstancedMesh* nonInstMesh = iter.init(mesh);
2648 do {
2649 size_t indexOffsetInBytes = 0;
2650 this->setupGeometry(primProc, *nonInstMesh, &indexOffsetInBytes);
2651 if (nonInstMesh->isIndexed()) {
2652 GrGLvoid* indices =
2653 reinterpret_cast<GrGLvoid*>(indexOffsetInBytes +
2654 sizeof(uint16_t) * nonInstMesh->startIndex());
2655 // info.startVertex() was accounted for by setupGeometry.
2656 if (this->glCaps().drawRangeElementsSupport()) {
2657 // We assume here that the GrMeshDrawOps that generated the mesh used the full
2658 // 0..vertexCount()-1 range.
2659 int start = 0;
2660 int end = nonInstMesh->vertexCount() - 1;
2661 GL_CALL(DrawRangeElements(gPrimitiveType2GLMode[nonInstMesh->primitiveType()],
2662 start, end,
2663 nonInstMesh->indexCount(),
2664 GR_GL_UNSIGNED_SHORT,
2665 indices));
2666 } else {
2667 GL_CALL(DrawElements(gPrimitiveType2GLMode[nonInstMesh->primitiveType()],
2668 nonInstMesh->indexCount(),
2669 GR_GL_UNSIGNED_SHORT,
2670 indices));
2671 }
2672 } else {
2673 // Pass 0 for the 'first' parameter. We have to adjust glVertexAttribPointer() to account
2674 // for startVertex in the DrawElements case. So we always rely on setupGeometry to
2675 // have accounted for startVertex.
2676 GL_CALL(DrawArrays(gPrimitiveType2GLMode[nonInstMesh->primitiveType()], 0,
2677 nonInstMesh->vertexCount()));
2678 }
2679 fStats.incNumDraws();
2680 } while ((nonInstMesh = iter.next()));
2681 }
2682
2683 #if SWAP_PER_DRAW
2684 glFlush();
2685 #if defined(SK_BUILD_FOR_MAC)
2686 aglSwapBuffers(aglGetCurrentContext());
2687 int set_a_break_pt_here = 9;
2688 aglSwapBuffers(aglGetCurrentContext());
2689 #elif defined(SK_BUILD_FOR_WIN32)
2690 SwapBuf();
2691 int set_a_break_pt_here = 9;
2692 SwapBuf();
2693 #endif
2694 #endif
2695 }
2696
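// Resolves a multisampled render target into its texture FBO when needed. Apple's ES extension
// takes the resolve bounds from the scissor; otherwise BlitFramebuffer is used, covering either
// the dirty rect or the full target when the driver requires full-surface resolves.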
2697 void GrGLGpu::onResolveRenderTarget(GrRenderTarget* target) {
2698 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(target);
2699 if (rt->needsResolve()) {
2700 // Some extensions automatically resolve the texture when it is read.
2701 if (this->glCaps().usesMSAARenderBuffers()) {
2702 SkASSERT(rt->textureFBOID() != rt->renderFBOID());
2703 fStats.incRenderTargetBinds();
2704 fStats.incRenderTargetBinds();
2705 GL_CALL(BindFramebuffer(GR_GL_READ_FRAMEBUFFER, rt->renderFBOID()));
2706 GL_CALL(BindFramebuffer(GR_GL_DRAW_FRAMEBUFFER, rt->textureFBOID()));
2707 // make sure we go through flushRenderTarget() since we've modified
2708 // the bound DRAW FBO ID.
2709 fHWBoundRenderTargetUniqueID.makeInvalid();
2710 const GrGLIRect& vp = rt->getViewport();
2711 const SkIRect dirtyRect = rt->getResolveRect();
2712
2713 if (GrGLCaps::kES_Apple_MSFBOType == this->glCaps().msFBOType()) {
2714 // Apple's extension uses the scissor as the blit bounds.
2715 GrScissorState scissorState;
2716 scissorState.set(dirtyRect);
2717 this->flushScissor(scissorState, vp, rt->origin());
2718 this->disableWindowRectangles();
2719 GL_CALL(ResolveMultisampleFramebuffer());
2720 } else {
2721 int l, b, r, t;
2722 if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag &
2723 this->glCaps().blitFramebufferSupportFlags()) {
2724 l = 0;
2725 b = 0;
2726 r = target->width();
2727 t = target->height();
2728 } else {
2729 GrGLIRect rect;
2730 rect.setRelativeTo(vp, dirtyRect.fLeft, dirtyRect.fTop,
2731 dirtyRect.width(), dirtyRect.height(), target->origin());
2732 l = rect.fLeft;
2733 b = rect.fBottom;
2734 r = rect.fLeft + rect.fWidth;
2735 t = rect.fBottom + rect.fHeight;
2736 }
2737
2738 // BlitFrameBuffer respects the scissor, so disable it.
2739 this->disableScissor();
2740 this->disableWindowRectangles();
2741 GL_CALL(BlitFramebuffer(l, b, r, t, l, b, r, t,
2742 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
2743 }
2744 }
2745 rt->flagAsResolved();
2746 }
2747 }
2748
2749 namespace {
2750
2751
2752 GrGLenum gr_to_gl_stencil_op(GrStencilOp op) {
2753 static const GrGLenum gTable[kGrStencilOpCount] = {
2754 GR_GL_KEEP, // kKeep
2755 GR_GL_ZERO, // kZero
2756 GR_GL_REPLACE, // kReplace
2757 GR_GL_INVERT, // kInvert
2758 GR_GL_INCR_WRAP, // kIncWrap
2759 GR_GL_DECR_WRAP, // kDecWrap
2760 GR_GL_INCR, // kIncClamp
2761 GR_GL_DECR, // kDecClamp
2762 };
2763 GR_STATIC_ASSERT(0 == (int)GrStencilOp::kKeep);
2764 GR_STATIC_ASSERT(1 == (int)GrStencilOp::kZero);
2765 GR_STATIC_ASSERT(2 == (int)GrStencilOp::kReplace);
2766 GR_STATIC_ASSERT(3 == (int)GrStencilOp::kInvert);
2767 GR_STATIC_ASSERT(4 == (int)GrStencilOp::kIncWrap);
2768 GR_STATIC_ASSERT(5 == (int)GrStencilOp::kDecWrap);
2769 GR_STATIC_ASSERT(6 == (int)GrStencilOp::kIncClamp);
2770 GR_STATIC_ASSERT(7 == (int)GrStencilOp::kDecClamp);
2771 SkASSERT(op < (GrStencilOp)kGrStencilOpCount);
2772 return gTable[(int)op];
2773 }
2774
2775 void set_gl_stencil(const GrGLInterface* gl,
2776 const GrStencilSettings::Face& face,
2777 GrGLenum glFace) {
2778 GrGLenum glFunc = GrToGLStencilFunc(face.fTest);
2779 GrGLenum glFailOp = gr_to_gl_stencil_op(face.fFailOp);
2780 GrGLenum glPassOp = gr_to_gl_stencil_op(face.fPassOp);
2781
2782 GrGLint ref = face.fRef;
2783 GrGLint mask = face.fTestMask;
2784 GrGLint writeMask = face.fWriteMask;
2785
2786 if (GR_GL_FRONT_AND_BACK == glFace) {
2787 // we call the combined func just in case separate stencil is not
2788 // supported.
2789 GR_GL_CALL(gl, StencilFunc(glFunc, ref, mask));
2790 GR_GL_CALL(gl, StencilMask(writeMask));
2791 GR_GL_CALL(gl, StencilOp(glFailOp, GR_GL_KEEP, glPassOp));
2792 } else {
2793 GR_GL_CALL(gl, StencilFuncSeparate(glFace, glFunc, ref, mask));
2794 GR_GL_CALL(gl, StencilMaskSeparate(glFace, writeMask));
2795 GR_GL_CALL(gl, StencilOpSeparate(glFace, glFailOp, GR_GL_KEEP, glPassOp));
2796 }
2797 }
2798 }
2799
2800 void GrGLGpu::flushStencil(const GrStencilSettings& stencilSettings) {
2801 if (stencilSettings.isDisabled()) {
2802 this->disableStencil();
2803 } else if (fHWStencilSettings != stencilSettings) {
2804 if (kYes_TriState != fHWStencilTestEnabled) {
2805 GL_CALL(Enable(GR_GL_STENCIL_TEST));
2806 fHWStencilTestEnabled = kYes_TriState;
2807 }
2808 if (stencilSettings.isTwoSided()) {
2809 SkASSERT(this->caps()->twoSidedStencilSupport());
2810 set_gl_stencil(this->glInterface(),
2811 stencilSettings.front(),
2812 GR_GL_FRONT);
2813 set_gl_stencil(this->glInterface(),
2814 stencilSettings.back(),
2815 GR_GL_BACK);
2816 } else {
2817 set_gl_stencil(this->glInterface(),
2818 stencilSettings.front(),
2819 GR_GL_FRONT_AND_BACK);
2820 }
2821 fHWStencilSettings = stencilSettings;
2822 }
2823 }
2824
2825 void GrGLGpu::disableStencil() {
2826 if (kNo_TriState != fHWStencilTestEnabled) {
2827 GL_CALL(Disable(GR_GL_STENCIL_TEST));
2828 fHWStencilTestEnabled = kNo_TriState;
2829 fHWStencilSettings.invalidate();
2830 }
2831 }
2832
2833 void GrGLGpu::flushHWAAState(GrRenderTarget* rt, bool useHWAA, bool stencilEnabled) {
2834 // rt is only optional if useHWAA is false.
2835 SkASSERT(rt || !useHWAA);
2836 SkASSERT(!useHWAA || rt->isStencilBufferMultisampled());
2837
2838 if (this->caps()->multisampleDisableSupport()) {
2839 if (useHWAA) {
2840 if (kYes_TriState != fMSAAEnabled) {
2841 GL_CALL(Enable(GR_GL_MULTISAMPLE));
2842 fMSAAEnabled = kYes_TriState;
2843 }
2844 } else {
2845 if (kNo_TriState != fMSAAEnabled) {
2846 GL_CALL(Disable(GR_GL_MULTISAMPLE));
2847 fMSAAEnabled = kNo_TriState;
2848 }
2849 }
2850 }
2851
2852 if (0 != this->caps()->maxRasterSamples()) {
2853 if (useHWAA && rt->isMixedSampled() && !stencilEnabled) {
2854 // Since stencil is disabled and we want more samples than are in the color buffer, we
2855 // need to tell the rasterizer explicitly how many to run.
2856 if (kYes_TriState != fHWRasterMultisampleEnabled) {
2857 GL_CALL(Enable(GR_GL_RASTER_MULTISAMPLE));
2858 fHWRasterMultisampleEnabled = kYes_TriState;
2859 }
2860 if (rt->numStencilSamples() != fHWNumRasterSamples) {
2861 SkASSERT(rt->numStencilSamples() <= this->caps()->maxRasterSamples());
2862 GL_CALL(RasterSamples(rt->numStencilSamples(), GR_GL_TRUE));
2863 fHWNumRasterSamples = rt->numStencilSamples();
2864 }
2865 } else {
2866 if (kNo_TriState != fHWRasterMultisampleEnabled) {
2867 GL_CALL(Disable(GR_GL_RASTER_MULTISAMPLE));
2868 fHWRasterMultisampleEnabled = kNo_TriState;
2869 }
2870 }
2871 } else {
2872 SkASSERT(!useHWAA || !rt->isMixedSampled() || stencilEnabled);
2873 }
2874 }
2875
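// Flushes blend state, caching it in fHWBlendState to avoid redundant GL calls. Blending is
// disabled outright for an add/subtract equation with (1, 0) coeffs; advanced (KHR) equations
// carry no coefficient or constant state; otherwise the coeffs and the (swizzled) blend
// constant are updated as needed.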
2876 void GrGLGpu::flushBlend(const GrXferProcessor::BlendInfo& blendInfo, const GrSwizzle& swizzle) {
2877 // Any optimization to disable blending should have already been applied and
2878 // tweaked the equation to "add" or "subtract", and the coeffs to (1, 0).
2879
2880 GrBlendEquation equation = blendInfo.fEquation;
2881 GrBlendCoeff srcCoeff = blendInfo.fSrcBlend;
2882 GrBlendCoeff dstCoeff = blendInfo.fDstBlend;
2883 bool blendOff = (kAdd_GrBlendEquation == equation || kSubtract_GrBlendEquation == equation) &&
2884 kOne_GrBlendCoeff == srcCoeff && kZero_GrBlendCoeff == dstCoeff;
2885 if (blendOff) {
2886 if (kNo_TriState != fHWBlendState.fEnabled) {
2887 GL_CALL(Disable(GR_GL_BLEND));
2888
2889 // Workaround for the ARM KHR_blend_equation_advanced blacklist issue
2890 // https://code.google.com/p/skia/issues/detail?id=3943
2891 if (kARM_GrGLVendor == this->ctxInfo().vendor() &&
2892 GrBlendEquationIsAdvanced(fHWBlendState.fEquation)) {
2893 SkASSERT(this->caps()->advancedBlendEquationSupport());
2894 // Set to any basic blending equation.
2895 GrBlendEquation blend_equation = kAdd_GrBlendEquation;
2896 GL_CALL(BlendEquation(gXfermodeEquation2Blend[blend_equation]));
2897 fHWBlendState.fEquation = blend_equation;
2898 }
2899
2900 fHWBlendState.fEnabled = kNo_TriState;
2901 }
2902 return;
2903 }
2904
2905 if (kYes_TriState != fHWBlendState.fEnabled) {
2906 GL_CALL(Enable(GR_GL_BLEND));
2907 fHWBlendState.fEnabled = kYes_TriState;
2908 }
2909
2910 if (fHWBlendState.fEquation != equation) {
2911 GL_CALL(BlendEquation(gXfermodeEquation2Blend[equation]));
2912 fHWBlendState.fEquation = equation;
2913 }
2914
2915 if (GrBlendEquationIsAdvanced(equation)) {
2916 SkASSERT(this->caps()->advancedBlendEquationSupport());
2917 // Advanced equations have no other blend state.
2918 return;
2919 }
2920
2921 if (fHWBlendState.fSrcCoeff != srcCoeff || fHWBlendState.fDstCoeff != dstCoeff) {
2922 GL_CALL(BlendFunc(gXfermodeCoeff2Blend[srcCoeff],
2923 gXfermodeCoeff2Blend[dstCoeff]));
2924 fHWBlendState.fSrcCoeff = srcCoeff;
2925 fHWBlendState.fDstCoeff = dstCoeff;
2926 }
2927
2928 if ((BlendCoeffReferencesConstant(srcCoeff) || BlendCoeffReferencesConstant(dstCoeff))) {
2929 GrColor blendConst = blendInfo.fBlendConstant;
2930 blendConst = swizzle.applyTo(blendConst);
2931 if (!fHWBlendState.fConstColorValid || fHWBlendState.fConstColor != blendConst) {
2932 GrGLfloat c[4];
2933 GrColorToRGBAFloat(blendConst, c);
2934 GL_CALL(BlendColor(c[0], c[1], c[2], c[3]));
2935 fHWBlendState.fConstColor = blendConst;
2936 fHWBlendState.fConstColorValid = true;
2937 }
2938 }
2939 }
2940
2941 static inline GrGLenum tile_to_gl_wrap(SkShader::TileMode tm) {
2942 static const GrGLenum gWrapModes[] = {
2943 GR_GL_CLAMP_TO_EDGE,
2944 GR_GL_REPEAT,
2945 GR_GL_MIRRORED_REPEAT
2946 };
2947 GR_STATIC_ASSERT(SkShader::kTileModeCount == SK_ARRAY_COUNT(gWrapModes));
2948 GR_STATIC_ASSERT(0 == SkShader::kClamp_TileMode);
2949 GR_STATIC_ASSERT(1 == SkShader::kRepeat_TileMode);
2950 GR_STATIC_ASSERT(2 == SkShader::kMirror_TileMode);
2951 return gWrapModes[tm];
2952 }
2953
2954 static GrGLenum get_component_enum_from_char(char component) {
2955 switch (component) {
2956 case 'r':
2957 return GR_GL_RED;
2958 case 'g':
2959 return GR_GL_GREEN;
2960 case 'b':
2961 return GR_GL_BLUE;
2962 case 'a':
2963 return GR_GL_ALPHA;
2964 default:
2965 SkFAIL("Unsupported component");
2966 return 0;
2967 }
2968 }
2969
2970 /** If texture swizzling is available using tex parameters then it is preferred over mangling
2971 the generated shader code. This potentially allows greater reuse of cached shaders. */
2972 static void get_tex_param_swizzle(GrPixelConfig config,
2973 const GrGLCaps& caps,
2974 GrGLenum* glSwizzle) {
2975 const GrSwizzle& swizzle = caps.configSwizzle(config);
2976 for (int i = 0; i < 4; ++i) {
2977 glSwizzle[i] = get_component_enum_from_char(swizzle.c_str()[i]);
2978 }
2979 }
2980
2981 void GrGLGpu::bindTexture(int unitIdx, const GrSamplerParams& params, bool allowSRGBInputs,
2982 GrGLTexture* texture) {
2983 SkASSERT(texture);
2984
2985 #ifdef SK_DEBUG
2986 if (!this->caps()->npotTextureTileSupport()) {
2987 const bool tileX = SkShader::kClamp_TileMode != params.getTileModeX();
2988 const bool tileY = SkShader::kClamp_TileMode != params.getTileModeY();
2989 if (tileX || tileY) {
2990 const int w = texture->width();
2991 const int h = texture->height();
2992 SkASSERT(SkIsPow2(w) && SkIsPow2(h));
2993 }
2994 }
2995 #endif
2996
2997 // If we created a rt/tex and rendered to it without using a texture and now we're texturing
2998 // from the rt it will still be the last bound texture, but it needs resolving. So keep this
2999 // out of the "last != next" check.
3000 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
3001 if (texRT) {
3002 this->onResolveRenderTarget(texRT);
3003 }
3004
3005 GrGpuResource::UniqueID textureID = texture->uniqueID();
3006 GrGLenum target = texture->target();
3007 if (fHWBoundTextureUniqueIDs[unitIdx] != textureID) {
3008 this->setTextureUnit(unitIdx);
3009 GL_CALL(BindTexture(target, texture->textureID()));
3010 fHWBoundTextureUniqueIDs[unitIdx] = textureID;
3011 }
3012
3013 ResetTimestamp timestamp;
3014 const GrGLTexture::TexParams& oldTexParams = texture->getCachedTexParams(&timestamp);
3015 bool setAll = timestamp < this->getResetTimestamp();
3016 GrGLTexture::TexParams newTexParams;
3017
3018 static GrGLenum glMinFilterModes[] = {
3019 GR_GL_NEAREST,
3020 GR_GL_LINEAR,
3021 GR_GL_LINEAR_MIPMAP_LINEAR
3022 };
3023 static GrGLenum glMagFilterModes[] = {
3024 GR_GL_NEAREST,
3025 GR_GL_LINEAR,
3026 GR_GL_LINEAR
3027 };
3028 GrSamplerParams::FilterMode filterMode = params.filterMode();
3029
3030 if (GrSamplerParams::kMipMap_FilterMode == filterMode) {
3031 if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
3032 filterMode = GrSamplerParams::kBilerp_FilterMode;
3033 }
3034 }
3035
3036 newTexParams.fMinFilter = glMinFilterModes[filterMode];
3037 newTexParams.fMagFilter = glMagFilterModes[filterMode];
3038
3039 if (this->glCaps().srgbDecodeDisableSupport() && GrPixelConfigIsSRGB(texture->config())) {
3040 newTexParams.fSRGBDecode = allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT;
3041 if (setAll || newTexParams.fSRGBDecode != oldTexParams.fSRGBDecode) {
3042 this->setTextureUnit(unitIdx);
3043 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT, newTexParams.fSRGBDecode));
3044 }
3045 }
3046
3047 #ifdef SK_DEBUG
3048 // We were supposed to ensure MipMaps were up-to-date and built correctly before getting here.
3049 if (GrSamplerParams::kMipMap_FilterMode == filterMode) {
3050 SkASSERT(!texture->texturePriv().mipMapsAreDirty());
3051 if (GrPixelConfigIsSRGB(texture->config())) {
3052 SkDestinationSurfaceColorMode colorMode = allowSRGBInputs
3053 ? SkDestinationSurfaceColorMode::kGammaAndColorSpaceAware
3054 : SkDestinationSurfaceColorMode::kLegacy;
3055 SkASSERT(texture->texturePriv().mipColorMode() == colorMode);
3056 }
3057 }
3058 #endif
3059
3060 newTexParams.fMaxMipMapLevel = texture->texturePriv().maxMipMapLevel();
3061
3062 newTexParams.fWrapS = tile_to_gl_wrap(params.getTileModeX());
3063 newTexParams.fWrapT = tile_to_gl_wrap(params.getTileModeY());
3064 get_tex_param_swizzle(texture->config(), this->glCaps(), newTexParams.fSwizzleRGBA);
3065 if (setAll || newTexParams.fMagFilter != oldTexParams.fMagFilter) {
3066 this->setTextureUnit(unitIdx);
3067 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAG_FILTER, newTexParams.fMagFilter));
3068 }
3069 if (setAll || newTexParams.fMinFilter != oldTexParams.fMinFilter) {
3070 this->setTextureUnit(unitIdx);
3071 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_FILTER, newTexParams.fMinFilter));
3072 }
3073 if (setAll || newTexParams.fMaxMipMapLevel != oldTexParams.fMaxMipMapLevel) {
3074 // These are not supported in ES2 contexts
3075 if (this->glCaps().mipMapLevelAndLodControlSupport()) {
3076 if (newTexParams.fMaxMipMapLevel != 0) {
3077 this->setTextureUnit(unitIdx);
3078 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MIN_LOD, 0));
3079 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_BASE_LEVEL, 0));
3080 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LOD,
3081 newTexParams.fMaxMipMapLevel));
3082 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_MAX_LEVEL,
3083 newTexParams.fMaxMipMapLevel));
3084 }
3085 }
3086 }
3087 if (setAll || newTexParams.fWrapS != oldTexParams.fWrapS) {
3088 this->setTextureUnit(unitIdx);
3089 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_S, newTexParams.fWrapS));
3090 }
3091 if (setAll || newTexParams.fWrapT != oldTexParams.fWrapT) {
3092 this->setTextureUnit(unitIdx);
3093 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_WRAP_T, newTexParams.fWrapT));
3094 }
3095 if (this->glCaps().textureSwizzleSupport() &&
3096 (setAll || memcmp(newTexParams.fSwizzleRGBA,
3097 oldTexParams.fSwizzleRGBA,
3098 sizeof(newTexParams.fSwizzleRGBA)))) {
3099 this->setTextureSwizzle(unitIdx, target, newTexParams.fSwizzleRGBA);
3100 }
3101 texture->setCachedTexParams(newTexParams, this->getResetTimestamp());
3102 }
3103
3104 void GrGLGpu::bindTexelBuffer(int unitIdx, GrPixelConfig texelConfig, GrGLBuffer* buffer) {
3105 SkASSERT(this->glCaps().canUseConfigWithTexelBuffer(texelConfig));
3106 SkASSERT(unitIdx >= 0 && unitIdx < fHWBufferTextures.count());
3107
3108 BufferTexture& buffTex = fHWBufferTextures[unitIdx];
3109
3110 if (!buffTex.fKnownBound) {
3111 if (!buffTex.fTextureID) {
3112 GL_CALL(GenTextures(1, &buffTex.fTextureID));
3113 if (!buffTex.fTextureID) {
3114 return;
3115 }
3116 }
3117
3118 this->setTextureUnit(unitIdx);
3119 GL_CALL(BindTexture(GR_GL_TEXTURE_BUFFER, buffTex.fTextureID));
3120
3121 buffTex.fKnownBound = true;
3122 }
3123
3124 if (buffer->uniqueID() != buffTex.fAttachedBufferUniqueID ||
3125 buffTex.fTexelConfig != texelConfig) {
3126
3127 this->setTextureUnit(unitIdx);
3128 GL_CALL(TexBuffer(GR_GL_TEXTURE_BUFFER,
3129 this->glCaps().configSizedInternalFormat(texelConfig),
3130 buffer->bufferID()));
3131
3132 buffTex.fTexelConfig = texelConfig;
3133 buffTex.fAttachedBufferUniqueID = buffer->uniqueID();
3134
3135 if (this->glCaps().textureSwizzleSupport() &&
3136 this->glCaps().configSwizzle(texelConfig) != buffTex.fSwizzle) {
3137 GrGLenum glSwizzle[4];
3138 get_tex_param_swizzle(texelConfig, this->glCaps(), glSwizzle);
3139 this->setTextureSwizzle(unitIdx, GR_GL_TEXTURE_BUFFER, glSwizzle);
3140 buffTex.fSwizzle = this->glCaps().configSwizzle(texelConfig);
3141 }
3142
3143 buffer->setHasAttachedToTexture();
3144 fHWMaxUsedBufferTextureUnit = SkTMax(unitIdx, fHWMaxUsedBufferTextureUnit);
3145 }
3146 }
3147
3148 void GrGLGpu::bindImageStorage(int unitIdx, GrIOType ioType, GrGLTexture *texture) {
3149 SkASSERT(texture);
3150 if (texture->uniqueID() != fHWBoundImageStorages[unitIdx].fTextureUniqueID ||
3151 ioType != fHWBoundImageStorages[unitIdx].fIOType) {
3152 GrGLenum access = GR_GL_READ_ONLY;
3153 switch (ioType) {
3154 case kRead_GrIOType:
3155 access = GR_GL_READ_ONLY;
3156 break;
3157 case kWrite_GrIOType:
3158 access = GR_GL_WRITE_ONLY;
3159 break;
3160 case kRW_GrIOType:
3161 access = GR_GL_READ_WRITE;
3162 break;
3163 }
3164 GrGLenum format = this->glCaps().getImageFormat(texture->config());
3165 GL_CALL(BindImageTexture(unitIdx, texture->textureID(), 0, GR_GL_FALSE, 0, access, format));
3166 }
3167 }
3168
3169 void GrGLGpu::generateMipmaps(const GrSamplerParams& params, bool allowSRGBInputs,
3170 GrGLTexture* texture) {
3171 SkASSERT(texture);
3172
3173 // First, figure out if we need mips for this texture at all:
3174 GrSamplerParams::FilterMode filterMode = params.filterMode();
3175
3176 if (GrSamplerParams::kMipMap_FilterMode == filterMode) {
3177 if (!this->caps()->mipMapSupport() || GrPixelConfigIsCompressed(texture->config())) {
3178 filterMode = GrSamplerParams::kBilerp_FilterMode;
3179 }
3180 }
3181
3182 if (GrSamplerParams::kMipMap_FilterMode != filterMode) {
3183 return;
3184 }
3185
3186 // If this is an sRGB texture and the mips were previously built the "other" way
3187 // (gamma-correct vs. not), then we need to rebuild them. We don't need to check for
3188 // srgbSupport - we'll *never* get an sRGB pixel config if we don't support it.
3189 SkDestinationSurfaceColorMode colorMode = allowSRGBInputs
3190 ? SkDestinationSurfaceColorMode::kGammaAndColorSpaceAware
3191 : SkDestinationSurfaceColorMode::kLegacy;
3192 if (GrPixelConfigIsSRGB(texture->config()) &&
3193 colorMode != texture->texturePriv().mipColorMode()) {
3194 texture->texturePriv().dirtyMipMaps(true);
3195 }
3196
3197 // If the mips aren't dirty, we're done:
3198 if (!texture->texturePriv().mipMapsAreDirty()) {
3199 return;
3200 }
3201
3202 // If we created a rt/tex and rendered to it without using a texture and now we're texturing
3203 // from the rt it will still be the last bound texture, but it needs resolving.
3204 GrGLRenderTarget* texRT = static_cast<GrGLRenderTarget*>(texture->asRenderTarget());
3205 if (texRT) {
3206 this->onResolveRenderTarget(texRT);
3207 }
3208
3209 GrGLenum target = texture->target();
3210 this->setScratchTextureUnit();
3211 GL_CALL(BindTexture(target, texture->textureID()));
3212
3213 // Configure sRGB decode, if necessary. This state is the only thing needed for the driver
3214 // call (glGenerateMipmap) to work correctly. Our manual method dirties other state, too.
3215 if (this->glCaps().srgbDecodeDisableSupport() && GrPixelConfigIsSRGB(texture->config())) {
3216 GrGLenum srgbDecode = allowSRGBInputs ? GR_GL_DECODE_EXT : GR_GL_SKIP_DECODE_EXT;
3217 // Command buffer's sRGB decode extension doesn't influence mipmap generation correctly.
3218 // If we set this to skip_decode, it appears to suppress sRGB -> Linear for each downsample,
3219 // but not the Linear -> sRGB when writing the next level. The result is that mip-chains
3220 // get progressively brighter as you go down. Forcing this to 'decode' gives predictable
3221 // (and only slightly incorrect) results. See crbug.com/655247 (~comment 28)
3222 if (!this->glCaps().srgbDecodeDisableAffectsMipmaps()) {
3223 srgbDecode = GR_GL_DECODE_EXT;
3224 }
3225 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SRGB_DECODE_EXT, srgbDecode));
3226 }
3227
3228 // Either do manual mipmap generation or (if that fails), just rely on the driver:
3229 if (!this->generateMipmap(texture, allowSRGBInputs)) {
3230 GL_CALL(GenerateMipmap(target));
3231 }
3232
3233 texture->texturePriv().dirtyMipMaps(false);
3234 texture->texturePriv().setMaxMipMapLevel(SkMipMap::ComputeLevelCount(
3235 texture->width(), texture->height()));
3236 texture->texturePriv().setMipColorMode(colorMode);
3237
3238 // We have potentially set lots of state on the texture. Easiest to dirty it all:
3239 texture->textureParamsModified();
3240 }
3241
3242 void GrGLGpu::setTextureSwizzle(int unitIdx, GrGLenum target, const GrGLenum swizzle[]) {
3243 this->setTextureUnit(unitIdx);
3244 if (this->glStandard() == kGLES_GrGLStandard) {
3245 // ES3 added swizzle support but not GL_TEXTURE_SWIZZLE_RGBA.
3246 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_R, swizzle[0]));
3247 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_G, swizzle[1]));
3248 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_B, swizzle[2]));
3249 GL_CALL(TexParameteri(target, GR_GL_TEXTURE_SWIZZLE_A, swizzle[3]));
3250 } else {
3251 GR_STATIC_ASSERT(sizeof(swizzle[0]) == sizeof(GrGLint));
3252 GL_CALL(TexParameteriv(target, GR_GL_TEXTURE_SWIZZLE_RGBA,
3253 reinterpret_cast<const GrGLint*>(swizzle)));
3254 }
3255 }
3256
3257 void GrGLGpu::flushColorWrite(bool writeColor) {
3258 if (!writeColor) {
3259 if (kNo_TriState != fHWWriteToColor) {
3260 GL_CALL(ColorMask(GR_GL_FALSE, GR_GL_FALSE,
3261 GR_GL_FALSE, GR_GL_FALSE));
3262 fHWWriteToColor = kNo_TriState;
3263 }
3264 } else {
3265 if (kYes_TriState != fHWWriteToColor) {
3266 GL_CALL(ColorMask(GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE, GR_GL_TRUE));
3267 fHWWriteToColor = kYes_TriState;
3268 }
3269 }
3270 }
3271
3272 void GrGLGpu::flushDrawFace(GrDrawFace face) {
3273 if (fHWDrawFace != face) {
3274 switch (face) {
3275 case GrDrawFace::kCCW:
3276 GL_CALL(Enable(GR_GL_CULL_FACE));
3277 GL_CALL(CullFace(GR_GL_BACK));
3278 break;
3279 case GrDrawFace::kCW:
3280 GL_CALL(Enable(GR_GL_CULL_FACE));
3281 GL_CALL(CullFace(GR_GL_FRONT));
3282 break;
3283 case GrDrawFace::kBoth:
3284 GL_CALL(Disable(GR_GL_CULL_FACE));
3285 break;
3286 default:
3287 SkFAIL("Unknown draw face.");
3288 }
3289 fHWDrawFace = face;
3290 }
3291 }
3292
3293 void GrGLGpu::setTextureUnit(int unit) {
3294 SkASSERT(unit >= 0 && unit < fHWBoundTextureUniqueIDs.count());
3295 if (unit != fHWActiveTextureUnitIdx) {
3296 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + unit));
3297 fHWActiveTextureUnitIdx = unit;
3298 }
3299 }
3300
3301 void GrGLGpu::setScratchTextureUnit() {
3302 // Bind the last texture unit since it is the least likely to be used by GrGLProgram.
3303 int lastUnitIdx = fHWBoundTextureUniqueIDs.count() - 1;
3304 if (lastUnitIdx != fHWActiveTextureUnitIdx) {
3305 GL_CALL(ActiveTexture(GR_GL_TEXTURE0 + lastUnitIdx));
3306 fHWActiveTextureUnitIdx = lastUnitIdx;
3307 }
3308 // clear out this field so that if a program does use this unit it will rebind the correct
3309 // texture.
3310 fHWBoundTextureUniqueIDs[lastUnitIdx].makeInvalid();
3311 }
3312
3313 // Determines whether glBlitFramebuffer could be used between src and dst by onCopySurface.
3314 static inline bool can_blit_framebuffer_for_copy_surface(const GrSurface* dst,
3315 const GrSurface* src,
3316 const SkIRect& srcRect,
3317 const SkIPoint& dstPoint,
3318 const GrGLGpu* gpu) {
3319 auto blitFramebufferFlags = gpu->glCaps().blitFramebufferSupportFlags();
3320 if (!gpu->glCaps().canConfigBeFBOColorAttachment(dst->config()) ||
3321 !gpu->glCaps().canConfigBeFBOColorAttachment(src->config())) {
3322 return false;
3323 }
3324 // Blits are not allowed between int color buffers and float/fixed color buffers. GrGpu should
3325 // have filtered such cases out.
3326 SkASSERT(GrPixelConfigIsSint(dst->config()) == GrPixelConfigIsSint(src->config()));
3327 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3328 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3329 const GrRenderTarget* dstRT = dst->asRenderTarget();
3330 const GrRenderTarget* srcRT = src->asRenderTarget();
3331 if (dstTex && dstTex->target() != GR_GL_TEXTURE_2D) {
3332 return false;
3333 }
3334 if (srcTex && srcTex->target() != GR_GL_TEXTURE_2D) {
3335 return false;
3336 }
3337 if (GrGLCaps::kNoSupport_BlitFramebufferFlag & blitFramebufferFlags) {
3338 return false;
3339 }
3340 if (GrGLCaps::kNoScalingOrMirroring_BlitFramebufferFlag & blitFramebufferFlags) {
3341 // We would mirror to compensate for origin changes. Note that copySurface is
3342 // specified such that the src and dst rects are the same.
3343 if (dst->origin() != src->origin()) {
3344 return false;
3345 }
3346 }
3347 if (GrGLCaps::kResolveMustBeFull_BlitFrambufferFlag & blitFramebufferFlags) {
3348 if (srcRT && srcRT->numColorSamples() && dstRT && !dstRT->numColorSamples()) {
3349 return false;
3350 }
3351 }
3352 if (GrGLCaps::kNoMSAADst_BlitFramebufferFlag & blitFramebufferFlags) {
3353 if (dstRT && dstRT->numColorSamples() > 0) {
3354 return false;
3355 }
3356 }
3357 if (GrGLCaps::kNoFormatConversion_BlitFramebufferFlag & blitFramebufferFlags) {
3358 if (dst->config() != src->config()) {
3359 return false;
3360 }
3361 } else if (GrGLCaps::kNoFormatConversionForMSAASrc_BlitFramebufferFlag & blitFramebufferFlags) {
3362 const GrRenderTarget* srcRT = src->asRenderTarget();
3363 if (srcRT && srcRT->numColorSamples() && dst->config() != src->config()) {
3364 return false;
3365 }
3366 }
3367 if (GrGLCaps::kRectsMustMatchForMSAASrc_BlitFramebufferFlag & blitFramebufferFlags) {
3368 if (srcRT && srcRT->numColorSamples() &&
3369 (dstPoint.fX != srcRect.fLeft || dstPoint.fY != srcRect.fTop)) {
3370 return false;
3371 }
3372 }
3373 return true;
3374 }
3375
3376 static inline bool can_copy_texsubimage(const GrSurface* dst,
3377 const GrSurface* src,
3378 const GrGLGpu* gpu) {
3379 // Table 3.9 of the ES2 spec lists the formats supported by CopyTexSubImage, and BGRA
3380 // isn't among them. There doesn't appear to be any extension that adds it. Perhaps
3381 // many drivers would allow it to work, but ANGLE does not.
3382 if (kGLES_GrGLStandard == gpu->glStandard() && gpu->glCaps().bgraIsInternalFormat() &&
3383 (kBGRA_8888_GrPixelConfig == dst->config() || kBGRA_8888_GrPixelConfig == src->config())) {
3384 return false;
3385 }
3386 const GrGLRenderTarget* dstRT = static_cast<const GrGLRenderTarget*>(dst->asRenderTarget());
3387 // If dst is multisampled (and uses an extension where there is a separate MSAA renderbuffer)
3388 // then we don't want to copy to the texture but to the MSAA buffer.
3389 if (dstRT && dstRT->renderFBOID() != dstRT->textureFBOID()) {
3390 return false;
3391 }
3392 const GrGLRenderTarget* srcRT = static_cast<const GrGLRenderTarget*>(src->asRenderTarget());
3393 // If the src is multisampled (and uses an extension where there is a separate MSAA
3394 // renderbuffer) then it is an invalid operation to call CopyTexSubImage
3395 if (srcRT && srcRT->renderFBOID() != srcRT->textureFBOID()) {
3396 return false;
3397 }
3398
3399 const GrGLTexture* dstTex = static_cast<const GrGLTexture*>(dst->asTexture());
3400 // CopyTex(Sub)Image writes to a texture and we have no way of dynamically wrapping a RT in a
3401 // texture.
3402 if (!dstTex) {
3403 return false;
3404 }
3405
3406 const GrGLTexture* srcTex = static_cast<const GrGLTexture*>(src->asTexture());
3407
3408 // Check that we could wrap the source in an FBO, that the dst is TEXTURE_2D, and that no
3409 // mirroring is required.
3410 if (gpu->glCaps().canConfigBeFBOColorAttachment(src->config()) &&
3411 !GrPixelConfigIsCompressed(src->config()) &&
3412 (!srcTex || srcTex->target() == GR_GL_TEXTURE_2D) && dstTex->target() == GR_GL_TEXTURE_2D &&
3413 dst->origin() == src->origin()) {
3414 return true;
3415 } else {
3416 return false;
3417 }
3418 }
3419
3420 // Binds the surface to the given FBO target, creating a temporary FBO for it if the surface is
3421 // not a render target. The viewport that the copy rect is relative to is output.
3422 void GrGLGpu::bindSurfaceFBOForPixelOps(GrSurface* surface, GrGLenum fboTarget, GrGLIRect* viewport,
3423 TempFBOTarget tempFBOTarget) {
3424 GrGLRenderTarget* rt = static_cast<GrGLRenderTarget*>(surface->asRenderTarget());
3425 if (!rt) {
3426 SkASSERT(surface->asTexture());
3427 GrGLuint texID = static_cast<GrGLTexture*>(surface->asTexture())->textureID();
3428 GrGLenum target = static_cast<GrGLTexture*>(surface->asTexture())->target();
3429 GrGLuint* tempFBOID;
3430 tempFBOID = kSrc_TempFBOTarget == tempFBOTarget ? &fTempSrcFBOID : &fTempDstFBOID;
3431
3432 if (0 == *tempFBOID) {
3433 GR_GL_CALL(this->glInterface(), GenFramebuffers(1, tempFBOID));
3434 }
3435
3436 fStats.incRenderTargetBinds();
3437 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, *tempFBOID));
3438 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3439 GR_GL_COLOR_ATTACHMENT0,
3440 target,
3441 texID,
3442 0));
3443 viewport->fLeft = 0;
3444 viewport->fBottom = 0;
3445 viewport->fWidth = surface->width();
3446 viewport->fHeight = surface->height();
3447 } else {
3448 fStats.incRenderTargetBinds();
3449 GR_GL_CALL(this->glInterface(), BindFramebuffer(fboTarget, rt->renderFBOID()));
3450 *viewport = rt->getViewport();
3451 }
3452 }
3453
3454 void GrGLGpu::unbindTextureFBOForPixelOps(GrGLenum fboTarget, GrSurface* surface) {
3455 // bindSurfaceFBOForPixelOps temporarily attaches textures that are not render targets to a temporary FBO; detach them here.
3456 if (!surface->asRenderTarget()) {
3457 SkASSERT(surface->asTexture());
3458 GrGLenum textureTarget = static_cast<GrGLTexture*>(surface->asTexture())->target();
3459 GR_GL_CALL(this->glInterface(), FramebufferTexture2D(fboTarget,
3460 GR_GL_COLOR_ATTACHMENT0,
3461 textureTarget,
3462 0,
3463 0));
3464 }
3465 }
3466
3467 bool GrGLGpu::onCopySurface(GrSurface* dst,
3468 GrSurface* src,
3469 const SkIRect& srcRect,
3470 const SkIPoint& dstPoint) {
3471 // None of our copy methods can handle a swizzle. TODO: Make copySurfaceAsDraw handle the
3472 // swizzle.
3473 if (this->caps()->shaderCaps()->configOutputSwizzle(src->config()) !=
3474 this->caps()->shaderCaps()->configOutputSwizzle(dst->config())) {
3475 return false;
3476 }
3477 // Don't prefer copying as a draw if the dst doesn't already have a FBO object.
3478 bool preferCopy = SkToBool(dst->asRenderTarget());
3479 if (preferCopy && src->asTexture()) {
3480 if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
3481 return true;
3482 }
3483 }
3484
3485 if (can_copy_texsubimage(dst, src, this)) {
3486 this->copySurfaceAsCopyTexSubImage(dst, src, srcRect, dstPoint);
3487 return true;
3488 }
3489
3490 if (can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this)) {
3491 return this->copySurfaceAsBlitFramebuffer(dst, src, srcRect, dstPoint);
3492 }
3493
3494 if (!preferCopy && src->asTexture()) {
3495 if (this->copySurfaceAsDraw(dst, src, srcRect, dstPoint)) {
3496 return true;
3497 }
3498 }
3499
3500 return false;
3501 }
3502
3503 bool GrGLGpu::createCopyProgram(GrTexture* srcTex) {
3504 int progIdx = TextureToCopyProgramIdx(srcTex);
3505 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3506 GrSLType samplerType = srcTex->texturePriv().samplerType();
3507
3508 if (!fCopyProgramArrayBuffer) {
3509 static const GrGLfloat vdata[] = {
3510 0, 0,
3511 0, 1,
3512 1, 0,
3513 1, 1
3514 };
3515 fCopyProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType,
3516 kStatic_GrAccessPattern, vdata));
3517 }
3518 if (!fCopyProgramArrayBuffer) {
3519 return false;
3520 }
3521
3522 SkASSERT(!fCopyPrograms[progIdx].fProgram);
3523 GL_CALL_RET(fCopyPrograms[progIdx].fProgram, CreateProgram());
3524 if (!fCopyPrograms[progIdx].fProgram) {
3525 return false;
3526 }
3527
3528 const char* version = shaderCaps->versionDeclString();
3529 GrShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kIn_TypeModifier);
3530 GrShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
3531 GrShaderVar::kUniform_TypeModifier);
3532 GrShaderVar uPosXform("u_posXform", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
3533 GrShaderVar uTexture("u_texture", samplerType, GrShaderVar::kUniform_TypeModifier);
3534 GrShaderVar vTexCoord("v_texCoord", kVec2f_GrSLType, GrShaderVar::kOut_TypeModifier);
3535 GrShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, GrShaderVar::kOut_TypeModifier);
3536
3537 SkString vshaderTxt(version);
3538 if (shaderCaps->noperspectiveInterpolationSupport()) {
3539 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3540 vshaderTxt.appendf("#extension %s : require\n", extension);
3541 }
3542 vTexCoord.addModifier("noperspective");
3543 }
3544
3545 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3546 vshaderTxt.append(";");
3547 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3548 vshaderTxt.append(";");
3549 uPosXform.appendDecl(shaderCaps, &vshaderTxt);
3550 vshaderTxt.append(";");
3551 vTexCoord.appendDecl(shaderCaps, &vshaderTxt);
3552 vshaderTxt.append(";");
3553
3554 vshaderTxt.append(
3555 "// Copy Program VS\n"
3556 "void main() {"
3557 " v_texCoord = a_vertex.xy * u_texCoordXform.xy + u_texCoordXform.zw;"
3558 " gl_Position.xy = a_vertex * u_posXform.xy + u_posXform.zw;"
3559 " gl_Position.zw = vec2(0, 1);"
3560 "}"
3561 );
3562
3563 SkString fshaderTxt(version);
3564 if (shaderCaps->noperspectiveInterpolationSupport()) {
3565 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3566 fshaderTxt.appendf("#extension %s : require\n", extension);
3567 }
3568 }
3569 if (samplerType == kTextureExternalSampler_GrSLType) {
3570 fshaderTxt.appendf("#extension %s : require\n",
3571 shaderCaps->externalTextureExtensionString());
3572 }
3573 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *shaderCaps,
3574 &fshaderTxt);
3575 vTexCoord.setTypeModifier(GrShaderVar::kIn_TypeModifier);
3576 vTexCoord.appendDecl(shaderCaps, &fshaderTxt);
3577 fshaderTxt.append(";");
3578 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3579 fshaderTxt.append(";");
3580 fshaderTxt.appendf(
3581 "// Copy Program FS\n"
3582 "void main() {"
3583 " sk_FragColor = texture(u_texture, v_texCoord);"
3584 "}"
3585 );
3586
3587 const char* str;
3588 GrGLint length;
3589
3590 str = vshaderTxt.c_str();
3591 length = SkToInt(vshaderTxt.size());
3592 SkSL::Program::Settings settings;
3593 settings.fCaps = shaderCaps;
3594 SkSL::Program::Inputs inputs;
3595 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
3596 GR_GL_VERTEX_SHADER, &str, &length, 1,
3597 &fStats, settings, &inputs);
3598 SkASSERT(inputs.isEmpty());
3599
3600 str = fshaderTxt.c_str();
3601 length = SkToInt(fshaderTxt.size());
3602 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fCopyPrograms[progIdx].fProgram,
3603 GR_GL_FRAGMENT_SHADER, &str, &length, 1,
3604 &fStats, settings, &inputs);
3605 SkASSERT(inputs.isEmpty());
3606
3607 GL_CALL(LinkProgram(fCopyPrograms[progIdx].fProgram));
3608
3609 GL_CALL_RET(fCopyPrograms[progIdx].fTextureUniform,
3610 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texture"));
3611 GL_CALL_RET(fCopyPrograms[progIdx].fPosXformUniform,
3612 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_posXform"));
3613 GL_CALL_RET(fCopyPrograms[progIdx].fTexCoordXformUniform,
3614 GetUniformLocation(fCopyPrograms[progIdx].fProgram, "u_texCoordXform"));
3615
3616 GL_CALL(BindAttribLocation(fCopyPrograms[progIdx].fProgram, 0, "a_vertex"));
3617
3618 GL_CALL(DeleteShader(vshader));
3619 GL_CALL(DeleteShader(fshader));
3620
3621 return true;
3622 }
3623
3624 bool GrGLGpu::createMipmapProgram(int progIdx) {
3625 const bool oddWidth = SkToBool(progIdx & 0x2);
3626 const bool oddHeight = SkToBool(progIdx & 0x1);
3627 const int numTaps = (oddWidth ? 2 : 1) * (oddHeight ? 2 : 1);
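// For example, downsampling a 5x3 level is odd in both dimensions, so four taps are averaged
// per output texel; an even 4x4 level needs only a single bilinear tap.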
3628
3629 const GrShaderCaps* shaderCaps = this->caps()->shaderCaps();
3630
3631 SkASSERT(!fMipmapPrograms[progIdx].fProgram);
3632 GL_CALL_RET(fMipmapPrograms[progIdx].fProgram, CreateProgram());
3633 if (!fMipmapPrograms[progIdx].fProgram) {
3634 return false;
3635 }
3636
3637 const char* version = shaderCaps->versionDeclString();
3638 GrShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kIn_TypeModifier);
3639 GrShaderVar uTexCoordXform("u_texCoordXform", kVec4f_GrSLType,
3640 GrShaderVar::kUniform_TypeModifier);
3641 GrShaderVar uTexture("u_texture", kTexture2DSampler_GrSLType,
3642 GrShaderVar::kUniform_TypeModifier);
3643 // We need 1, 2, or 4 texture coordinates (depending on parity of each dimension):
3644 GrShaderVar vTexCoords[] = {
3645 GrShaderVar("v_texCoord0", kVec2f_GrSLType, GrShaderVar::kOut_TypeModifier),
3646 GrShaderVar("v_texCoord1", kVec2f_GrSLType, GrShaderVar::kOut_TypeModifier),
3647 GrShaderVar("v_texCoord2", kVec2f_GrSLType, GrShaderVar::kOut_TypeModifier),
3648 GrShaderVar("v_texCoord3", kVec2f_GrSLType, GrShaderVar::kOut_TypeModifier),
3649 };
3650 GrShaderVar oFragColor("o_FragColor", kVec4f_GrSLType,GrShaderVar::kOut_TypeModifier);
3651
3652 SkString vshaderTxt(version);
3653 if (shaderCaps->noperspectiveInterpolationSupport()) {
3654 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3655 vshaderTxt.appendf("#extension %s : require\n", extension);
3656 }
3657 vTexCoords[0].addModifier("noperspective");
3658 vTexCoords[1].addModifier("noperspective");
3659 vTexCoords[2].addModifier("noperspective");
3660 vTexCoords[3].addModifier("noperspective");
3661 }
3662
3663 aVertex.appendDecl(shaderCaps, &vshaderTxt);
3664 vshaderTxt.append(";");
3665 uTexCoordXform.appendDecl(shaderCaps, &vshaderTxt);
3666 vshaderTxt.append(";");
3667 for (int i = 0; i < numTaps; ++i) {
3668 vTexCoords[i].appendDecl(shaderCaps, &vshaderTxt);
3669 vshaderTxt.append(";");
3670 }
3671
3672 vshaderTxt.append(
3673 "// Mipmap Program VS\n"
3674 "void main() {"
3675 " gl_Position.xy = a_vertex * vec2(2, 2) - vec2(1, 1);"
3676 " gl_Position.zw = vec2(0, 1);"
3677 );
3678
3679 // Insert texture coordinate computation:
3680 if (oddWidth && oddHeight) {
3681 vshaderTxt.append(
3682 " v_texCoord0 = a_vertex.xy * u_texCoordXform.yw;"
3683 " v_texCoord1 = a_vertex.xy * u_texCoordXform.yw + vec2(u_texCoordXform.x, 0);"
3684 " v_texCoord2 = a_vertex.xy * u_texCoordXform.yw + vec2(0, u_texCoordXform.z);"
3685 " v_texCoord3 = a_vertex.xy * u_texCoordXform.yw + u_texCoordXform.xz;"
3686 );
3687 } else if (oddWidth) {
3688 vshaderTxt.append(
3689 " v_texCoord0 = a_vertex.xy * vec2(u_texCoordXform.y, 1);"
3690 " v_texCoord1 = a_vertex.xy * vec2(u_texCoordXform.y, 1) + vec2(u_texCoordXform.x, 0);"
3691 );
3692 } else if (oddHeight) {
3693 vshaderTxt.append(
3694 " v_texCoord0 = a_vertex.xy * vec2(1, u_texCoordXform.w);"
3695 " v_texCoord1 = a_vertex.xy * vec2(1, u_texCoordXform.w) + vec2(0, u_texCoordXform.z);"
3696 );
3697 } else {
3698 vshaderTxt.append(
3699 " v_texCoord0 = a_vertex.xy;"
3700 );
3701 }
3702
3703 vshaderTxt.append("}");
3704
3705 SkString fshaderTxt(version);
3706 if (shaderCaps->noperspectiveInterpolationSupport()) {
3707 if (const char* extension = shaderCaps->noperspectiveInterpolationExtensionString()) {
3708 fshaderTxt.appendf("#extension %s : require\n", extension);
3709 }
3710 }
3711 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision, *shaderCaps,
3712 &fshaderTxt);
3713 for (int i = 0; i < numTaps; ++i) {
3714 vTexCoords[i].setTypeModifier(GrShaderVar::kIn_TypeModifier);
3715 vTexCoords[i].appendDecl(shaderCaps, &fshaderTxt);
3716 fshaderTxt.append(";");
3717 }
3718 uTexture.appendDecl(shaderCaps, &fshaderTxt);
3719 fshaderTxt.append(";");
3720 fshaderTxt.append(
3721 "// Mipmap Program FS\n"
3722 "void main() {"
3723 );
3724
3725 if (oddWidth && oddHeight) {
3726 fshaderTxt.append(
3727 " sk_FragColor = (texture(u_texture, v_texCoord0) + "
3728 " texture(u_texture, v_texCoord1) + "
3729 " texture(u_texture, v_texCoord2) + "
3730 " texture(u_texture, v_texCoord3)) * 0.25;"
3731 );
3732 } else if (oddWidth || oddHeight) {
3733 fshaderTxt.append(
3734 " sk_FragColor = (texture(u_texture, v_texCoord0) + "
3735 " texture(u_texture, v_texCoord1)) * 0.5;"
3736 );
3737 } else {
3738 fshaderTxt.append(
3739 " sk_FragColor = texture(u_texture, v_texCoord0);"
3740 );
3741 }
3742
3743 fshaderTxt.append("}");
3744
3745 const char* str;
3746 GrGLint length;
3747
3748 str = vshaderTxt.c_str();
3749 length = SkToInt(vshaderTxt.size());
3750 SkSL::Program::Settings settings;
3751 settings.fCaps = shaderCaps;
3752 SkSL::Program::Inputs inputs;
3753 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
3754 GR_GL_VERTEX_SHADER, &str, &length, 1,
3755 &fStats, settings, &inputs);
3756 SkASSERT(inputs.isEmpty());
3757
3758 str = fshaderTxt.c_str();
3759 length = SkToInt(fshaderTxt.size());
3760 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fMipmapPrograms[progIdx].fProgram,
3761 GR_GL_FRAGMENT_SHADER, &str, &length, 1,
3762 &fStats, settings, &inputs);
3763 SkASSERT(inputs.isEmpty());
3764
3765 GL_CALL(LinkProgram(fMipmapPrograms[progIdx].fProgram));
3766
3767 GL_CALL_RET(fMipmapPrograms[progIdx].fTextureUniform,
3768 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texture"));
3769 GL_CALL_RET(fMipmapPrograms[progIdx].fTexCoordXformUniform,
3770 GetUniformLocation(fMipmapPrograms[progIdx].fProgram, "u_texCoordXform"));
3771
3772 GL_CALL(BindAttribLocation(fMipmapPrograms[progIdx].fProgram, 0, "a_vertex"));
3773
3774 GL_CALL(DeleteShader(vshader));
3775 GL_CALL(DeleteShader(fshader));
3776
3777 return true;
3778 }
3779
3780 bool GrGLGpu::createWireRectProgram() {
3781 if (!fWireRectArrayBuffer) {
3782 static const GrGLfloat vdata[] = {
3783 0, 0,
3784 0, 1,
3785 1, 1,
3786 1, 0
3787 };
3788 fWireRectArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata), kVertex_GrBufferType,
3789 kStatic_GrAccessPattern, vdata));
3790 if (!fWireRectArrayBuffer) {
3791 return false;
3792 }
3793 }
3794
3795 SkASSERT(!fWireRectProgram.fProgram);
3796 GL_CALL_RET(fWireRectProgram.fProgram, CreateProgram());
3797 if (!fWireRectProgram.fProgram) {
3798 return false;
3799 }
3800
3801 GrShaderVar uColor("u_color", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
3802 GrShaderVar uRect("u_rect", kVec4f_GrSLType, GrShaderVar::kUniform_TypeModifier);
3803 GrShaderVar aVertex("a_vertex", kVec2f_GrSLType, GrShaderVar::kIn_TypeModifier);
3804 const char* version = this->caps()->shaderCaps()->versionDeclString();
3805
3806 // The rect uniform specifies the rectangle in NDC space as a vec4 (left,top,right,bottom). The
3807 // program is used with a vbo containing the unit square. Vertices are computed from the rect
3808 // uniform using the 4 vbo vertices.
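// For example, the (0,0) vbo vertex lands on (u_rect.x, u_rect.y) and the (1,1) vertex lands
// on (u_rect.z, u_rect.w).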
3809 SkString vshaderTxt(version);
3810 aVertex.appendDecl(this->caps()->shaderCaps(), &vshaderTxt);
3811 vshaderTxt.append(";");
3812 uRect.appendDecl(this->caps()->shaderCaps(), &vshaderTxt);
3813 vshaderTxt.append(";");
3814 vshaderTxt.append(
3815 "// Wire Rect Program VS\n"
3816 "void main() {"
3817 " gl_Position.x = u_rect.x + a_vertex.x * (u_rect.z - u_rect.x);"
3818 " gl_Position.y = u_rect.y + a_vertex.y * (u_rect.w - u_rect.y);"
3819 " gl_Position.zw = vec2(0, 1);"
3820 "}"
3821 );
3822
3823 GrShaderVar oFragColor("o_FragColor", kVec4f_GrSLType, GrShaderVar::kOut_TypeModifier);
3824
3825 SkString fshaderTxt(version);
3826 GrGLSLAppendDefaultFloatPrecisionDeclaration(kDefault_GrSLPrecision,
3827 *this->caps()->shaderCaps(),
3828 &fshaderTxt);
3829 uColor.appendDecl(this->caps()->shaderCaps(), &fshaderTxt);
3830 fshaderTxt.append(";");
3831 fshaderTxt.appendf(
3832 "// Write Rect Program FS\n"
3833 "void main() {"
3834 " sk_FragColor = %s;"
3835 "}",
3836 uColor.c_str()
3837 );
3838
3839 const char* str;
3840 GrGLint length;
3841
3842 str = vshaderTxt.c_str();
3843 length = SkToInt(vshaderTxt.size());
3844 SkSL::Program::Settings settings;
3845 settings.fCaps = this->caps()->shaderCaps();
3846 SkSL::Program::Inputs inputs;
3847 GrGLuint vshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram,
3848 GR_GL_VERTEX_SHADER, &str, &length, 1,
3849 &fStats, settings, &inputs);
3850 SkASSERT(inputs.isEmpty());
3851
3852 str = fshaderTxt.c_str();
3853 length = SkToInt(fshaderTxt.size());
3854 GrGLuint fshader = GrGLCompileAndAttachShader(*fGLContext, fWireRectProgram.fProgram,
3855 GR_GL_FRAGMENT_SHADER, &str, &length, 1,
3856 &fStats, settings, &inputs);
3857 SkASSERT(inputs.isEmpty());
3858
3859 GL_CALL(LinkProgram(fWireRectProgram.fProgram));
3860
3861 GL_CALL_RET(fWireRectProgram.fColorUniform,
3862 GetUniformLocation(fWireRectProgram.fProgram, "u_color"));
3863 GL_CALL_RET(fWireRectProgram.fRectUniform,
3864 GetUniformLocation(fWireRectProgram.fProgram, "u_rect"));
3865 GL_CALL(BindAttribLocation(fWireRectProgram.fProgram, 0, "a_vertex"));
3866
3867 GL_CALL(DeleteShader(vshader));
3868 GL_CALL(DeleteShader(fshader));
3869
3870 return true;
3871 }
3872
3873 void GrGLGpu::drawDebugWireRect(GrRenderTarget* rt, const SkIRect& rect, GrColor color) {
3874 // TODO: This should swizzle the output to match dst's config, though it is a debugging
3875 // visualization.
3876
3877 this->handleDirtyContext();
3878 if (!fWireRectProgram.fProgram) {
3879 if (!this->createWireRectProgram()) {
3880 SkDebugf("Failed to create wire rect program.\n");
3881 return;
3882 }
3883 }
3884
3885 int w = rt->width();
3886 int h = rt->height();
3887
3888 // Compute the edges of the rectangle (left,top,right,bottom) in NDC space. Must consider
3889 // whether the render target is flipped or not.
3890 GrGLfloat edges[4];
3891 edges[0] = SkIntToScalar(rect.fLeft) + 0.5f;
3892 edges[2] = SkIntToScalar(rect.fRight) - 0.5f;
3893 if (kBottomLeft_GrSurfaceOrigin == rt->origin()) {
3894 edges[1] = h - (SkIntToScalar(rect.fTop) + 0.5f);
3895 edges[3] = h - (SkIntToScalar(rect.fBottom) - 0.5f);
3896 } else {
3897 edges[1] = SkIntToScalar(rect.fTop) + 0.5f;
3898 edges[3] = SkIntToScalar(rect.fBottom) - 0.5f;
3899 }
3900 edges[0] = 2 * edges[0] / w - 1.0f;
3901 edges[1] = 2 * edges[1] / h - 1.0f;
3902 edges[2] = 2 * edges[2] / w - 1.0f;
3903 edges[3] = 2 * edges[3] / h - 1.0f;
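// For example, with w == 100 a left edge at pixel 10 maps to 2 * 10.5 / 100 - 1 = -0.79 in NDC.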
3904
3905 GrGLfloat channels[4];
3906 static const GrGLfloat scale255 = 1.f / 255.f;
3907 channels[0] = GrColorUnpackR(color) * scale255;
3908 channels[1] = GrColorUnpackG(color) * scale255;
3909 channels[2] = GrColorUnpackB(color) * scale255;
3910 channels[3] = GrColorUnpackA(color) * scale255;
3911
3912 GrGLRenderTarget* glRT = static_cast<GrGLRenderTarget*>(rt->asRenderTarget());
3913 this->flushRenderTarget(glRT, &rect);
3914
3915 GL_CALL(UseProgram(fWireRectProgram.fProgram));
3916 fHWProgramID = fWireRectProgram.fProgram;
3917
3918 fHWVertexArrayState.setVertexArrayID(this, 0);
3919
3920 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3921 attribs->set(this, 0, fWireRectArrayBuffer.get(), kVec2f_GrVertexAttribType,
3922 2 * sizeof(GrGLfloat), 0);
3923 attribs->disableUnusedArrays(this, 0x1);
3924
3925 GL_CALL(Uniform4fv(fWireRectProgram.fRectUniform, 1, edges));
3926 GL_CALL(Uniform4fv(fWireRectProgram.fColorUniform, 1, channels));
3927
3928 GrXferProcessor::BlendInfo blendInfo;
3929 blendInfo.reset();
3930 this->flushBlend(blendInfo, GrSwizzle::RGBA());
3931 this->flushColorWrite(true);
3932 this->flushDrawFace(GrDrawFace::kBoth);
3933 this->flushHWAAState(glRT, false, false);
3934 this->disableScissor();
3935 this->disableWindowRectangles();
3936 this->disableStencil();
3937
3938 GL_CALL(DrawArrays(GR_GL_LINE_LOOP, 0, 4));
3939 }
3940
3941
3942 bool GrGLGpu::copySurfaceAsDraw(GrSurface* dst,
3943 GrSurface* src,
3944 const SkIRect& srcRect,
3945 const SkIPoint& dstPoint) {
3946 GrGLTexture* srcTex = static_cast<GrGLTexture*>(src->asTexture());
3947 int progIdx = TextureToCopyProgramIdx(srcTex);
3948
3949 if (!fCopyPrograms[progIdx].fProgram) {
3950 if (!this->createCopyProgram(srcTex)) {
3951 SkDebugf("Failed to create copy program.\n");
3952 return false;
3953 }
3954 }
3955
3956 int w = srcRect.width();
3957 int h = srcRect.height();
3958
3959 GrSamplerParams params(SkShader::kClamp_TileMode, GrSamplerParams::kNone_FilterMode);
3960 this->bindTexture(0, params, true, srcTex);
3961
3962 GrGLIRect dstVP;
3963 this->bindSurfaceFBOForPixelOps(dst, GR_GL_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
3964 this->flushViewport(dstVP);
3965 fHWBoundRenderTargetUniqueID.makeInvalid();
3966
3967 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY, w, h);
3968
3969 GL_CALL(UseProgram(fCopyPrograms[progIdx].fProgram));
3970 fHWProgramID = fCopyPrograms[progIdx].fProgram;
3971
3972 fHWVertexArrayState.setVertexArrayID(this, 0);
3973
3974 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
3975 attribs->set(this, 0, fCopyProgramArrayBuffer.get(), kVec2f_GrVertexAttribType,
3976 2 * sizeof(GrGLfloat), 0);
3977 attribs->disableUnusedArrays(this, 0x1);
3978
3979 // dst rect edges in NDC (-1 to 1)
3980 int dw = dst->width();
3981 int dh = dst->height();
3982 GrGLfloat dx0 = 2.f * dstPoint.fX / dw - 1.f;
3983 GrGLfloat dx1 = 2.f * (dstPoint.fX + w) / dw - 1.f;
3984 GrGLfloat dy0 = 2.f * dstPoint.fY / dh - 1.f;
3985 GrGLfloat dy1 = 2.f * (dstPoint.fY + h) / dh - 1.f;
3986 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
3987 dy0 = -dy0;
3988 dy1 = -dy1;
3989 }
3990
3991 GrGLfloat sx0 = (GrGLfloat)srcRect.fLeft;
3992 GrGLfloat sx1 = (GrGLfloat)(srcRect.fLeft + w);
3993 GrGLfloat sy0 = (GrGLfloat)srcRect.fTop;
3994 GrGLfloat sy1 = (GrGLfloat)(srcRect.fTop + h);
3995 int sh = src->height();
3996 if (kBottomLeft_GrSurfaceOrigin == src->origin()) {
3997 sy0 = sh - sy0;
3998 sy1 = sh - sy1;
3999 }
4000 // src rect edges in normalized texture space (0 to 1) unless we're using a RECTANGLE texture.
4001 GrGLenum srcTarget = srcTex->target();
4002 if (GR_GL_TEXTURE_RECTANGLE != srcTarget) {
4003 int sw = src->width();
4004 sx0 /= sw;
4005 sx1 /= sw;
4006 sy0 /= sh;
4007 sy1 /= sh;
4008 }
4009
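// u_posXform and u_texCoordXform each hold a (scale.xy, offset.xy) pair, so the unit-square
// vertices map onto the dst rect in NDC and onto the src rect in texture space respectively.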
4010 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fPosXformUniform, dx1 - dx0, dy1 - dy0, dx0, dy0));
4011 GL_CALL(Uniform4f(fCopyPrograms[progIdx].fTexCoordXformUniform,
4012 sx1 - sx0, sy1 - sy0, sx0, sy0));
4013 GL_CALL(Uniform1i(fCopyPrograms[progIdx].fTextureUniform, 0));
4014
4015 GrXferProcessor::BlendInfo blendInfo;
4016 blendInfo.reset();
4017 this->flushBlend(blendInfo, GrSwizzle::RGBA());
4018 this->flushColorWrite(true);
4019 this->flushDrawFace(GrDrawFace::kBoth);
4020 this->flushHWAAState(nullptr, false, false);
4021 this->disableScissor();
4022 this->disableWindowRectangles();
4023 this->disableStencil();
4024
4025 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
4026 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, dst);
4027 this->didWriteToSurface(dst, &dstRect);
4028
4029 return true;
4030 }
4031
4032 void GrGLGpu::copySurfaceAsCopyTexSubImage(GrSurface* dst,
4033 GrSurface* src,
4034 const SkIRect& srcRect,
4035 const SkIPoint& dstPoint) {
4036 SkASSERT(can_copy_texsubimage(dst, src, this));
4037 GrGLIRect srcVP;
4038 this->bindSurfaceFBOForPixelOps(src, GR_GL_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
4039 GrGLTexture* dstTex = static_cast<GrGLTexture *>(dst->asTexture());
4040 SkASSERT(dstTex);
4041 // We modified the bound FBO
4042 fHWBoundRenderTargetUniqueID.makeInvalid();
4043 GrGLIRect srcGLRect;
4044 srcGLRect.setRelativeTo(srcVP,
4045 srcRect.fLeft,
4046 srcRect.fTop,
4047 srcRect.width(),
4048 srcRect.height(),
4049 src->origin());
4050
4051 this->setScratchTextureUnit();
4052 GL_CALL(BindTexture(dstTex->target(), dstTex->textureID()));
4053 GrGLint dstY;
4054 if (kBottomLeft_GrSurfaceOrigin == dst->origin()) {
4055 dstY = dst->height() - (dstPoint.fY + srcGLRect.fHeight);
4056 } else {
4057 dstY = dstPoint.fY;
4058 }
4059 GL_CALL(CopyTexSubImage2D(dstTex->target(), 0,
4060 dstPoint.fX, dstY,
4061 srcGLRect.fLeft, srcGLRect.fBottom,
4062 srcGLRect.fWidth, srcGLRect.fHeight));
4063 this->unbindTextureFBOForPixelOps(GR_GL_FRAMEBUFFER, src);
4064 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
4065 srcRect.width(), srcRect.height());
4066 this->didWriteToSurface(dst, &dstRect);
4067 }
4068
4069 bool GrGLGpu::copySurfaceAsBlitFramebuffer(GrSurface* dst,
4070 GrSurface* src,
4071 const SkIRect& srcRect,
4072 const SkIPoint& dstPoint) {
4073 SkASSERT(can_blit_framebuffer_for_copy_surface(dst, src, srcRect, dstPoint, this));
4074 SkIRect dstRect = SkIRect::MakeXYWH(dstPoint.fX, dstPoint.fY,
4075 srcRect.width(), srcRect.height());
4076 if (dst == src) {
4077 if (SkIRect::IntersectsNoEmptyCheck(dstRect, srcRect)) {
4078 return false;
4079 }
4080 }
4081
4082 GrGLIRect dstVP;
4083 GrGLIRect srcVP;
4084 this->bindSurfaceFBOForPixelOps(dst, GR_GL_DRAW_FRAMEBUFFER, &dstVP, kDst_TempFBOTarget);
4085 this->bindSurfaceFBOForPixelOps(src, GR_GL_READ_FRAMEBUFFER, &srcVP, kSrc_TempFBOTarget);
4086 // We modified the bound FBO
4087 fHWBoundRenderTargetUniqueID.makeInvalid();
4088 GrGLIRect srcGLRect;
4089 GrGLIRect dstGLRect;
4090 srcGLRect.setRelativeTo(srcVP,
4091 srcRect.fLeft,
4092 srcRect.fTop,
4093 srcRect.width(),
4094 srcRect.height(),
4095 src->origin());
4096 dstGLRect.setRelativeTo(dstVP,
4097 dstRect.fLeft,
4098 dstRect.fTop,
4099 dstRect.width(),
4100 dstRect.height(),
4101 dst->origin());
4102
4103 // BlitFramebuffer respects the scissor, so disable it.
4104 this->disableScissor();
4105 this->disableWindowRectangles();
4106
4107 GrGLint srcY0;
4108 GrGLint srcY1;
4109 // Does the blit need to y-mirror or not?
4110 if (src->origin() == dst->origin()) {
4111 srcY0 = srcGLRect.fBottom;
4112 srcY1 = srcGLRect.fBottom + srcGLRect.fHeight;
4113 } else {
4114 srcY0 = srcGLRect.fBottom + srcGLRect.fHeight;
4115 srcY1 = srcGLRect.fBottom;
4116 }
4117 GL_CALL(BlitFramebuffer(srcGLRect.fLeft,
4118 srcY0,
4119 srcGLRect.fLeft + srcGLRect.fWidth,
4120 srcY1,
4121 dstGLRect.fLeft,
4122 dstGLRect.fBottom,
4123 dstGLRect.fLeft + dstGLRect.fWidth,
4124 dstGLRect.fBottom + dstGLRect.fHeight,
4125 GR_GL_COLOR_BUFFER_BIT, GR_GL_NEAREST));
4126 this->unbindTextureFBOForPixelOps(GR_GL_DRAW_FRAMEBUFFER, dst);
4127 this->unbindTextureFBOForPixelOps(GR_GL_READ_FRAMEBUFFER, src);
4128 this->didWriteToSurface(dst, &dstRect);
4129 return true;
4130 }
4131
4132 // Manual implementation of mipmap generation, to work around driver bugs w/sRGB.
4133 // Uses draw calls to do a series of downsample operations to successive mips.
4134 // If this returns false, then the calling code falls back to using glGenerateMipmap.
4135 bool GrGLGpu::generateMipmap(GrGLTexture* texture, bool gammaCorrect) {
4136 SkASSERT(!GrPixelConfigIsSint(texture->config()));
4137 // Our iterative downsample requires the ability to limit which level we're sampling:
4138 if (!this->glCaps().doManualMipmapping()) {
4139 return false;
4140 }
4141
4142 // Mipmaps are only supported on 2D textures:
4143 if (GR_GL_TEXTURE_2D != texture->target()) {
4144 return false;
4145 }
4146
4147 // We need to be able to render to the texture for this to work:
4148 if (!this->glCaps().canConfigBeFBOColorAttachment(texture->config())) {
4149 return false;
4150 }
4151
4152 // If we're mipping an sRGB texture, we need to ensure FB sRGB is correct:
4153 if (GrPixelConfigIsSRGB(texture->config())) {
4154 // If we have write-control, just set the state that we want:
4155 if (this->glCaps().srgbWriteControl()) {
4156 this->flushFramebufferSRGB(gammaCorrect);
4157 } else if (!gammaCorrect) {
4158 // If we don't have write-control we can't do non-gamma-correct mipmapping:
4159 return false;
4160 }
4161 }
4162
4163 int width = texture->width();
4164 int height = texture->height();
4165 int levelCount = SkMipMap::ComputeLevelCount(width, height) + 1;
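// For example, a 16x8 texture yields levels 8x4, 4x2, 2x1, and 1x1, so levelCount is 5
// including the base level.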
4166
4167 // Define all mips, if we haven't previously done so:
4168 if (0 == texture->texturePriv().maxMipMapLevel()) {
4169 GrGLenum internalFormat;
4170 GrGLenum externalFormat;
4171 GrGLenum externalType;
4172 if (!this->glCaps().getTexImageFormats(texture->config(), texture->config(),
4173 &internalFormat, &externalFormat, &externalType)) {
4174 return false;
4175 }
4176
4177 for (GrGLint level = 1; level < levelCount; ++level) {
4178 // Define the next mip:
4179 width = SkTMax(1, width / 2);
4180 height = SkTMax(1, height / 2);
4181 GL_ALLOC_CALL(this->glInterface(), TexImage2D(GR_GL_TEXTURE_2D, level, internalFormat,
4182 width, height, 0,
4183 externalFormat, externalType, nullptr));
4184 }
4185 }
4186
4187 // Create (if necessary), then bind temporary FBO:
4188 if (0 == fTempDstFBOID) {
4189 GL_CALL(GenFramebuffers(1, &fTempDstFBOID));
4190 }
4191 GL_CALL(BindFramebuffer(GR_GL_FRAMEBUFFER, fTempDstFBOID));
4192 fHWBoundRenderTargetUniqueID.makeInvalid();
4193
4194 // Bind the texture, to get things configured for filtering.
4195 // We'll be changing our base level further below:
4196 this->setTextureUnit(0);
4197 GrSamplerParams params(SkShader::kClamp_TileMode, GrSamplerParams::kBilerp_FilterMode);
4198 this->bindTexture(0, params, gammaCorrect, texture);
4199
4200 // Vertex data:
4201 if (!fMipmapProgramArrayBuffer) {
4202 static const GrGLfloat vdata[] = {
4203 0, 0,
4204 0, 1,
4205 1, 0,
4206 1, 1
4207 };
4208 fMipmapProgramArrayBuffer.reset(GrGLBuffer::Create(this, sizeof(vdata),
4209 kVertex_GrBufferType,
4210 kStatic_GrAccessPattern, vdata));
4211 }
4212 if (!fMipmapProgramArrayBuffer) {
4213 return false;
4214 }
4215
4216 fHWVertexArrayState.setVertexArrayID(this, 0);
4217
4218 GrGLAttribArrayState* attribs = fHWVertexArrayState.bindInternalVertexArray(this);
4219 attribs->set(this, 0, fMipmapProgramArrayBuffer.get(), kVec2f_GrVertexAttribType,
4220 2 * sizeof(GrGLfloat), 0);
4221 attribs->disableUnusedArrays(this, 0x1);
4222
4223 // Set "simple" state once:
4224 GrXferProcessor::BlendInfo blendInfo;
4225 blendInfo.reset();
4226 this->flushBlend(blendInfo, GrSwizzle::RGBA());
4227 this->flushColorWrite(true);
4228 this->flushDrawFace(GrDrawFace::kBoth);
4229 this->flushHWAAState(nullptr, false, false);
4230 this->disableScissor();
4231 this->disableWindowRectangles();
4232 this->disableStencil();
4233
4234 // Do all the blits:
4235 width = texture->width();
4236 height = texture->height();
4237 GrGLIRect viewport;
4238 viewport.fLeft = 0;
4239 viewport.fBottom = 0;
4240 for (GrGLint level = 1; level < levelCount; ++level) {
4241 // Get and bind the program for this particular downsample (filter shape can vary):
4242 int progIdx = TextureSizeToMipmapProgramIdx(width, height);
4243 if (!fMipmapPrograms[progIdx].fProgram) {
4244 if (!this->createMipmapProgram(progIdx)) {
4245 SkDebugf("Failed to create mipmap program.\n");
4246 return false;
4247 }
4248 }
4249 GL_CALL(UseProgram(fMipmapPrograms[progIdx].fProgram));
4250 fHWProgramID = fMipmapPrograms[progIdx].fProgram;
4251
4252 // Texcoord uniform is expected to contain (1/w, (w-1)/w, 1/h, (h-1)/h)
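// e.g. for a 5x3 source level the uniform is (1/5, 4/5, 1/3, 2/3); the extra taps in the
// shader are offset by 1/w and/or 1/h so neighboring texel pairs get averaged.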
4253 const float invWidth = 1.0f / width;
4254 const float invHeight = 1.0f / height;
4255 GL_CALL(Uniform4f(fMipmapPrograms[progIdx].fTexCoordXformUniform,
4256 invWidth, (width - 1) * invWidth, invHeight, (height - 1) * invHeight));
4257 GL_CALL(Uniform1i(fMipmapPrograms[progIdx].fTextureUniform, 0));
4258
4259 // Only sample from previous mip
4260 GL_CALL(TexParameteri(GR_GL_TEXTURE_2D, GR_GL_TEXTURE_BASE_LEVEL, level - 1));
4261
4262 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4263 GR_GL_TEXTURE_2D, texture->textureID(), level));
4264
4265 width = SkTMax(1, width / 2);
4266 height = SkTMax(1, height / 2);
4267 viewport.fWidth = width;
4268 viewport.fHeight = height;
4269 this->flushViewport(viewport);
4270
4271 GL_CALL(DrawArrays(GR_GL_TRIANGLE_STRIP, 0, 4));
4272 }
4273
4274 // Unbind:
4275 GL_CALL(FramebufferTexture2D(GR_GL_FRAMEBUFFER, GR_GL_COLOR_ATTACHMENT0,
4276 GR_GL_TEXTURE_2D, 0, 0));
4277
4278 return true;
4279 }
4280
4281 void GrGLGpu::onQueryMultisampleSpecs(GrRenderTarget* rt, const GrStencilSettings& stencil,
4282 int* effectiveSampleCnt, SamplePattern* samplePattern) {
4283 SkASSERT(!rt->isMixedSampled() || rt->renderTargetPriv().getStencilAttachment() ||
4284 stencil.isDisabled());
4285
4286 this->flushStencil(stencil);
4287 this->flushHWAAState(rt, true, !stencil.isDisabled());
4288 this->flushRenderTarget(static_cast<GrGLRenderTarget*>(rt), &SkIRect::EmptyIRect());
4289
4290 if (0 != this->caps()->maxRasterSamples()) {
4291 GR_GL_GetIntegerv(this->glInterface(), GR_GL_EFFECTIVE_RASTER_SAMPLES, effectiveSampleCnt);
4292 } else {
4293 GR_GL_GetIntegerv(this->glInterface(), GR_GL_SAMPLES, effectiveSampleCnt);
4294 }
4295
4296 SkASSERT(*effectiveSampleCnt >= rt->desc().fSampleCnt);
4297
4298 if (this->caps()->sampleLocationsSupport()) {
4299 samplePattern->reset(*effectiveSampleCnt);
4300 for (int i = 0; i < *effectiveSampleCnt; ++i) {
4301 GrGLfloat pos[2];
4302 GL_CALL(GetMultisamplefv(GR_GL_SAMPLE_POSITION, i, pos));
4303 if (kTopLeft_GrSurfaceOrigin == rt->origin()) {
4304 (*samplePattern)[i].set(pos[0], pos[1]);
4305 } else {
4306 (*samplePattern)[i].set(pos[0], 1 - pos[1]);
4307 }
4308 }
4309 }
4310 }
4311
4312 void GrGLGpu::xferBarrier(GrRenderTarget* rt, GrXferBarrierType type) {
4313 SkASSERT(type);
4314 switch (type) {
4315 case kTexture_GrXferBarrierType: {
4316 GrGLRenderTarget* glrt = static_cast<GrGLRenderTarget*>(rt);
4317 if (glrt->textureFBOID() != glrt->renderFBOID()) {
4318 // The render target uses separate storage so no need for glTextureBarrier.
4319 // FIXME: The render target will resolve automatically when its texture is bound,
4320 // but we could resolve only the bounds that will be read if we do it here instead.
4321 return;
4322 }
4323 SkASSERT(this->caps()->textureBarrierSupport());
4324 GL_CALL(TextureBarrier());
4325 return;
4326 }
4327 case kBlend_GrXferBarrierType:
4328 SkASSERT(GrCaps::kAdvanced_BlendEquationSupport ==
4329 this->caps()->blendEquationSupport());
4330 GL_CALL(BlendBarrier());
4331 return;
4332 default: break; // placate compiler warnings that kNone not handled
4333 }
4334 }
4335
4336 GrBackendObject GrGLGpu::createTestingOnlyBackendTexture(void* pixels, int w, int h,
4337 GrPixelConfig config, bool /*isRT*/) {
4338 if (!this->caps()->isConfigTexturable(config)) {
4339 return false;
4340 }
4341 std::unique_ptr<GrGLTextureInfo> info = skstd::make_unique<GrGLTextureInfo>();
4342 info->fTarget = GR_GL_TEXTURE_2D;
4343 info->fID = 0;
4344 GL_CALL(GenTextures(1, &info->fID));
4345 GL_CALL(ActiveTexture(GR_GL_TEXTURE0));
4346 GL_CALL(PixelStorei(GR_GL_UNPACK_ALIGNMENT, 1));
4347 GL_CALL(BindTexture(info->fTarget, info->fID));
4348 fHWBoundTextureUniqueIDs[0].makeInvalid();
4349 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MAG_FILTER, GR_GL_NEAREST));
4350 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_MIN_FILTER, GR_GL_NEAREST));
4351 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_S, GR_GL_CLAMP_TO_EDGE));
4352 GL_CALL(TexParameteri(info->fTarget, GR_GL_TEXTURE_WRAP_T, GR_GL_CLAMP_TO_EDGE));
4353
4354 GrGLenum internalFormat;
4355 GrGLenum externalFormat;
4356 GrGLenum externalType;
4357
    if (!this->glCaps().getTexImageFormats(config, config, &internalFormat, &externalFormat,
                                           &externalType)) {
        // Unsupported format combination: delete the texture we just created so it isn't leaked.
        GL_CALL(DeleteTextures(1, &info->fID));
        return reinterpret_cast<GrBackendObject>(nullptr);
    }

    GL_CALL(TexImage2D(info->fTarget, 0, internalFormat, w, h, 0, externalFormat,
                       externalType, pixels));

    return reinterpret_cast<GrBackendObject>(info.release());
}

bool GrGLGpu::isTestingOnlyBackendTexture(GrBackendObject id) const {
    GrGLuint texID = reinterpret_cast<const GrGLTextureInfo*>(id)->fID;

    GrGLboolean result;
    GL_CALL_RET(result, IsTexture(texID));

    return (GR_GL_TRUE == result);
}

void GrGLGpu::deleteTestingOnlyBackendTexture(GrBackendObject id, bool abandonTexture) {
    std::unique_ptr<const GrGLTextureInfo> info(reinterpret_cast<const GrGLTextureInfo*>(id));
    GrGLuint texID = info->fID;

    if (!abandonTexture) {
        GL_CALL(DeleteTextures(1, &texID));
    }
}
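
// Illustrative sketch (not part of the original source): a unit test with access to a
// GrGLGpu* could exercise the testing-only entry points above roughly as follows. The
// dimensions, pixel data, and config are placeholders.
//
//     SkAutoTMalloc<uint32_t> pixels(16 * 16);
//     memset(pixels.get(), 0xFF, 16 * 16 * sizeof(uint32_t));
//     GrBackendObject handle = gpu->createTestingOnlyBackendTexture(pixels.get(), 16, 16,
//                                                                   kRGBA_8888_GrPixelConfig,
//                                                                   false);
//     if (handle) {
//         SkASSERT(gpu->isTestingOnlyBackendTexture(handle));
//         gpu->deleteTestingOnlyBackendTexture(handle, /*abandonTexture=*/false);
//     }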

void GrGLGpu::resetShaderCacheForTesting() const {
    fProgramCache->abandon();
}

///////////////////////////////////////////////////////////////////////////////

GrGLAttribArrayState* GrGLGpu::HWVertexArrayState::bindInternalVertexArray(GrGLGpu* gpu,
                                                                           const GrBuffer* ibuf) {
    GrGLAttribArrayState* attribState;

    if (gpu->glCaps().isCoreProfile()) {
        if (!fCoreProfileVertexArray) {
            GrGLuint arrayID;
            GR_GL_CALL(gpu->glInterface(), GenVertexArrays(1, &arrayID));
            int attrCount = gpu->glCaps().maxVertexAttributes();
            fCoreProfileVertexArray = new GrGLVertexArray(arrayID, attrCount);
        }
        if (ibuf) {
            attribState = fCoreProfileVertexArray->bindWithIndexBuffer(gpu, ibuf);
        } else {
            attribState = fCoreProfileVertexArray->bind(gpu);
        }
    } else {
        if (ibuf) {
            // bindBuffer implicitly binds VAO 0 when binding an index buffer.
            gpu->bindBuffer(kIndex_GrBufferType, ibuf);
        } else {
            this->setVertexArrayID(gpu, 0);
        }
        int attrCount = gpu->glCaps().maxVertexAttributes();
        if (fDefaultVertexArrayAttribState.count() != attrCount) {
            fDefaultVertexArrayAttribState.resize(attrCount);
        }
        attribState = &fDefaultVertexArrayAttribState;
    }
    return attribState;
}

bool GrGLGpu::onIsACopyNeededForTextureParams(GrTextureProxy* proxy,
                                              const GrSamplerParams& textureParams,
                                              GrTextureProducer::CopyParams* copyParams,
                                              SkScalar scaleAdjust[2]) const {
    const GrTexture* texture = proxy->priv().peekTexture();
    if (!texture) {
        // The only way to get an EXTERNAL or RECTANGLE texture in Ganesh is to wrap one.
        // In that case the proxy should already be instantiated.
        return false;
    }

    if (textureParams.isTiled() ||
        GrSamplerParams::kMipMap_FilterMode == textureParams.filterMode()) {
        const GrGLTexture* glTexture = static_cast<const GrGLTexture*>(texture);
        if (GR_GL_TEXTURE_EXTERNAL == glTexture->target() ||
            GR_GL_TEXTURE_RECTANGLE == glTexture->target()) {
            copyParams->fFilter = GrSamplerParams::kNone_FilterMode;
            copyParams->fWidth = texture->width();
            copyParams->fHeight = texture->height();
            return true;
        }
    }
    return false;
}

GrFence SK_WARN_UNUSED_RESULT GrGLGpu::insertFence() {
    GrGLsync sync;
    GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
    GR_STATIC_ASSERT(sizeof(GrFence) >= sizeof(GrGLsync));
    return (GrFence)sync;
}

bool GrGLGpu::waitFence(GrFence fence, uint64_t timeout) {
    GrGLenum result;
    GL_CALL_RET(result, ClientWaitSync((GrGLsync)fence, GR_GL_SYNC_FLUSH_COMMANDS_BIT, timeout));
    // The sync may already have signaled before the wait was issued; treat that as success too.
    return (GR_GL_CONDITION_SATISFIED == result || GR_GL_ALREADY_SIGNALED == result);
}

void GrGLGpu::deleteFence(GrFence fence) const {
    this->deleteSync((GrGLsync)fence);
}
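
// Illustrative sketch (not part of the original source): a typical fence round trip with
// the helpers above, assuming a GrGLGpu* gpu and a timeout in nanoseconds. Note that
// waitFence() passes GR_GL_SYNC_FLUSH_COMMANDS_BIT, so pending commands are flushed as
// part of the wait.
//
//     GrFence fence = gpu->insertFence();
//     if (gpu->waitFence(fence, /*timeout=*/1000000000)) {
//         // All GL commands issued before insertFence() have completed.
//     }
//     gpu->deleteFence(fence);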

sk_sp<GrSemaphore> SK_WARN_UNUSED_RESULT GrGLGpu::makeSemaphore() {
    return GrGLSemaphore::Make(this);
}

void GrGLGpu::insertSemaphore(sk_sp<GrSemaphore> semaphore) {
    GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());

    GrGLsync sync;
    GL_CALL_RET(sync, FenceSync(GR_GL_SYNC_GPU_COMMANDS_COMPLETE, 0));
    glSem->setSync(sync);
}

void GrGLGpu::waitSemaphore(sk_sp<GrSemaphore> semaphore) {
    GrGLSemaphore* glSem = static_cast<GrGLSemaphore*>(semaphore.get());

    GL_CALL(WaitSync(glSem->sync(), 0, GR_GL_TIMEOUT_IGNORED));
}
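
// Illustrative sketch (not part of the original source): semaphores built on GL sync
// objects order work between contexts. Assuming gpuA and gpuB are GrGLGpu instances whose
// GL contexts share objects, a handoff looks roughly like:
//
//     sk_sp<GrSemaphore> semaphore = gpuA->makeSemaphore();
//     gpuA->insertSemaphore(semaphore);   // signal on gpuA's context
//     gpuA->flush();                      // submit the signal to the driver
//     gpuB->waitSemaphore(semaphore);     // gpuB's subsequent commands wait on it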

void GrGLGpu::deleteSync(GrGLsync sync) const {
    GL_CALL(DeleteSync(sync));
}

void GrGLGpu::flush() {
    GL_CALL(Flush());
}