/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrMtlGpuCommandBuffer.h"

#include "GrColor.h"
#include "GrFixedClip.h"
#include "GrMtlPipelineState.h"
#include "GrMtlPipelineStateBuilder.h"
#include "GrMtlRenderTarget.h"
#include "GrRenderTargetPriv.h"

GrMtlGpuRTCommandBuffer::GrMtlGpuRTCommandBuffer(
        GrMtlGpu* gpu, GrRenderTarget* rt, GrSurfaceOrigin origin, const SkRect& bounds,
        const GrGpuRTCommandBuffer::LoadAndStoreInfo& colorInfo,
        const GrGpuRTCommandBuffer::StencilLoadAndStoreInfo& stencilInfo)
        : INHERITED(rt, origin)
        , fGpu(gpu)
#ifdef SK_DEBUG
        , fBounds(bounds)
#endif
        , fColorLoadAndStoreInfo(colorInfo)
        , fStencilLoadAndStoreInfo(stencilInfo)
        , fRenderPassDesc(this->createRenderPassDesc()) {
    (void)fStencilLoadAndStoreInfo; // Silence unused var warning
    const GrMtlStencilAttachment* stencil = static_cast<GrMtlStencilAttachment*>(
            rt->renderTargetPriv().getStencilAttachment());
    if (stencil) {
        fRenderPassDesc.stencilAttachment.texture = stencil->stencilView();
    }
    // A requested color clear is executed immediately by encoding an empty render pass
    // (see addNullCommand), then the load action is flipped to Load so later encoders
    // created from this descriptor don't re-clear the attachment.
    if (fColorLoadAndStoreInfo.fLoadOp == GrLoadOp::kClear) {
        fCommandBufferInfo.fBounds = SkRect::MakeWH(fRenderTarget->width(),
                                                    fRenderTarget->height());
        this->addNullCommand();
        fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
    } else {
        fCommandBufferInfo.fBounds.setEmpty();
    }
    switch (stencilInfo.fLoadOp) {
        case GrLoadOp::kLoad:
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
            break;
        case GrLoadOp::kClear:
            // Same pattern as color: perform the stencil clear now, then load from here on.
            fCommandBufferInfo.fBounds = SkRect::MakeWH(fRenderTarget->width(),
                                                        fRenderTarget->height());
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
            this->addNullCommand();
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
            break;
        case GrLoadOp::kDiscard:
            fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionDontCare;
            break;
    }
    switch (stencilInfo.fStoreOp) {
        case GrStoreOp::kStore:
            fRenderPassDesc.stencilAttachment.storeAction = MTLStoreActionStore;
            break;
        case GrStoreOp::kDiscard:
            fRenderPassDesc.stencilAttachment.storeAction = MTLStoreActionDontCare;
            break;
    }
}

GrMtlGpuRTCommandBuffer::~GrMtlGpuRTCommandBuffer() {
    // Invariant: no render command encoder may outlive the draw that created it.
    SkASSERT(nil == fActiveRenderCmdEncoder);
}

// Encodes an empty render pass with the current fRenderPassDesc. This forces the
// descriptor's load/store actions (e.g. a pending clear) to execute without drawing.
void GrMtlGpuRTCommandBuffer::addNullCommand() {
    SkASSERT(nil == fActiveRenderCmdEncoder);
    SK_BEGIN_AUTORELEASE_BLOCK
    id<MTLRenderCommandEncoder> cmdEncoder =
            [fGpu->commandBuffer() renderCommandEncoderWithDescriptor:fRenderPassDesc];
    SkASSERT(nil != cmdEncoder);
    [cmdEncoder endEncoding];
    SK_END_AUTORELEASE_BLOCK
}

void GrMtlGpuRTCommandBuffer::submit() {
    if (!fRenderTarget) {
        return;
    }
    SkIRect iBounds;
    fCommandBufferInfo.fBounds.roundOut(&iBounds);
    fGpu->submitIndirectCommandBuffer(fRenderTarget, fOrigin, &iBounds);
}

void GrMtlGpuRTCommandBuffer::copy(GrSurface* src, GrSurfaceOrigin srcOrigin,
                                   const SkIRect& srcRect, const SkIPoint& dstPoint) {
    // We cannot have an active encoder when we call copy since it requires its own
    // command encoder.
    SkASSERT(nil == fActiveRenderCmdEncoder);
    fGpu->copySurface(fRenderTarget, fOrigin, src, srcOrigin, srcRect, dstPoint);
}

// Finds or builds a pipeline state compatible with the given draw configuration and
// uploads its uniform data. Returns nullptr on failure (caller must bail out of the draw).
GrMtlPipelineState* GrMtlGpuRTCommandBuffer::prepareDrawState(
        const GrPrimitiveProcessor& primProc,
        const GrPipeline& pipeline,
        const GrPipeline::FixedDynamicState* fixedDynamicState,
        GrPrimitiveType primType) {
    // TODO: resolve textures and regenerate mipmaps as needed

    const GrTextureProxy* const* primProcProxies = nullptr;
    if (fixedDynamicState) {
        primProcProxies = fixedDynamicState->fPrimitiveProcessorTextures;
    }
    SkASSERT(SkToBool(primProcProxies) == SkToBool(primProc.numTextureSamplers()));

    GrMtlPipelineState* pipelineState =
        fGpu->resourceProvider().findOrCreateCompatiblePipelineState(fRenderTarget, fOrigin,
                                                                     pipeline,
                                                                     primProc,
                                                                     primProcProxies,
                                                                     primType);
    if (!pipelineState) {
        return nullptr;
    }
    pipelineState->setData(fRenderTarget, fOrigin, primProc, pipeline, primProcProxies);

    return pipelineState;
}

void GrMtlGpuRTCommandBuffer::onDraw(const GrPrimitiveProcessor& primProc,
                                     const GrPipeline& pipeline,
                                     const GrPipeline::FixedDynamicState* fixedDynamicState,
                                     const GrPipeline::DynamicStateArrays* dynamicStateArrays,
                                     const GrMesh meshes[],
                                     int meshCount,
                                     const SkRect& bounds) {
    SK_BEGIN_AUTORELEASE_BLOCK
    if (!meshCount) {
        return;
    }
    if (pipeline.isScissorEnabled()) {
        return; // TODO: ScissorRects are not supported.
    }

    GrPrimitiveType primitiveType = meshes[0].primitiveType();
    GrMtlPipelineState* pipelineState = this->prepareDrawState(primProc, pipeline,
                                                               fixedDynamicState, primitiveType);
    if (!pipelineState) {
        return;
    }

    SkASSERT(nil == fActiveRenderCmdEncoder);
    fActiveRenderCmdEncoder = [fGpu->commandBuffer()
                               renderCommandEncoderWithDescriptor:fRenderPassDesc];
    SkASSERT(fActiveRenderCmdEncoder);
    // TODO: can we set this once somewhere at the beginning of the draw?
    [fActiveRenderCmdEncoder setFrontFacingWinding:MTLWindingCounterClockwise];

    [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
    pipelineState->bind(fActiveRenderCmdEncoder);
    pipelineState->setBlendConstants(fActiveRenderCmdEncoder, fRenderTarget->config(),
                                     pipeline.getXferProcessor());
    pipelineState->setDepthStencilState(fActiveRenderCmdEncoder);

    for (int i = 0; i < meshCount; ++i) {
        const GrMesh& mesh = meshes[i];
        SkASSERT(nil != fActiveRenderCmdEncoder);
        if (mesh.primitiveType() != primitiveType) {
            // Metal bakes the primitive topology class into the pipeline state, so a
            // topology change requires a fresh pipeline state object.
            SkDEBUGCODE(pipelineState = nullptr);
            primitiveType = mesh.primitiveType();
            pipelineState = this->prepareDrawState(primProc, pipeline, fixedDynamicState,
                                                   primitiveType);
            if (!pipelineState) {
                // Fix: previously this returned with the encoder still open, leaving the
                // command buffer in an un-encodable state and violating the nil-encoder
                // invariant asserted in addNullCommand() and the destructor.
                [fActiveRenderCmdEncoder endEncoding];
                fActiveRenderCmdEncoder = nil;
                return;
            }

            [fActiveRenderCmdEncoder setRenderPipelineState:pipelineState->mtlPipelineState()];
            pipelineState->bind(fActiveRenderCmdEncoder);
            pipelineState->setBlendConstants(fActiveRenderCmdEncoder, fRenderTarget->config(),
                                             pipeline.getXferProcessor());
            pipelineState->setDepthStencilState(fActiveRenderCmdEncoder);
        }

        mesh.sendToGpu(this);
    }

    [fActiveRenderCmdEncoder endEncoding];
    fActiveRenderCmdEncoder = nil;
    fCommandBufferInfo.fBounds.join(bounds);
    SK_END_AUTORELEASE_BLOCK
}

void GrMtlGpuRTCommandBuffer::onClear(const GrFixedClip& clip, const SkPMColor4f& color) {
    // if we end up here from absClear, the clear bounds may be bigger than the RT proxy bounds -
    // but in that case, scissor should be enabled, so this check should still succeed
    SkASSERT(!clip.scissorEnabled() || clip.scissorRect().contains(fBounds));
    fRenderPassDesc.colorAttachments[0].clearColor = MTLClearColorMake(color.fR, color.fG, color.fB,
                                                                       color.fA);
    // Execute the clear via an empty pass, then restore Load so later passes keep contents.
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionClear;
    this->addNullCommand();
    fRenderPassDesc.colorAttachments[0].loadAction = MTLLoadActionLoad;
}

void GrMtlGpuRTCommandBuffer::onClearStencilClip(const GrFixedClip& clip, bool insideStencilMask) {
    SkASSERT(!clip.hasWindowRectangles());

    GrStencilAttachment* sb = fRenderTarget->renderTargetPriv().getStencilAttachment();
    // this should only be called internally when we know we have a
    // stencil buffer.
    SkASSERT(sb);
    int stencilBitCount = sb->bits();

    // The contract with the callers does not guarantee that we preserve all bits in the stencil
    // during this clear. Thus we will clear the entire stencil to the desired value.
    if (insideStencilMask) {
        // Set only the top stencil bit (the clip bit).
        fRenderPassDesc.stencilAttachment.clearStencil = (1 << (stencilBitCount - 1));
    } else {
        fRenderPassDesc.stencilAttachment.clearStencil = 0;
    }

    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionClear;
    this->addNullCommand();
    fRenderPassDesc.stencilAttachment.loadAction = MTLLoadActionLoad;
}

// Builds the render pass descriptor for this command buffer's color attachment from the
// stored color load/store info. (The stencil attachment is configured by the constructor.)
MTLRenderPassDescriptor* GrMtlGpuRTCommandBuffer::createRenderPassDesc() const {
    const static MTLLoadAction mtlLoadAction[] {
        MTLLoadActionLoad,
        MTLLoadActionClear,
        MTLLoadActionDontCare
    };
    GR_STATIC_ASSERT((int)GrLoadOp::kLoad == 0);
    GR_STATIC_ASSERT((int)GrLoadOp::kClear == 1);
    GR_STATIC_ASSERT((int)GrLoadOp::kDiscard == 2);
    SkASSERT(fColorLoadAndStoreInfo.fLoadOp <= GrLoadOp::kDiscard);

    const static MTLStoreAction mtlStoreAction[] {
        MTLStoreActionStore,
        MTLStoreActionDontCare
    };
    GR_STATIC_ASSERT((int)GrStoreOp::kStore == 0);
    GR_STATIC_ASSERT((int)GrStoreOp::kDiscard == 1);
    SkASSERT(fColorLoadAndStoreInfo.fStoreOp <= GrStoreOp::kDiscard);

    auto renderPassDesc = [[MTLRenderPassDescriptor alloc] init];
    renderPassDesc.colorAttachments[0].texture =
            static_cast<GrMtlRenderTarget*>(fRenderTarget)->mtlRenderTexture();
    renderPassDesc.colorAttachments[0].slice = 0;
    renderPassDesc.colorAttachments[0].level = 0;
    const SkPMColor4f& clearColor = fColorLoadAndStoreInfo.fClearColor;
    renderPassDesc.colorAttachments[0].clearColor =
            MTLClearColorMake(clearColor[0], clearColor[1], clearColor[2], clearColor[3]);
    renderPassDesc.colorAttachments[0].loadAction =
            mtlLoadAction[static_cast<int>(fColorLoadAndStoreInfo.fLoadOp)];
    renderPassDesc.colorAttachments[0].storeAction =
            mtlStoreAction[static_cast<int>(fColorLoadAndStoreInfo.fStoreOp)];
    return renderPassDesc;
}

// Maps a GrPrimitiveType to the corresponding Metal primitive type.
static MTLPrimitiveType gr_to_mtl_primitive(GrPrimitiveType primitiveType) {
    const static MTLPrimitiveType mtlPrimitiveType[] {
        MTLPrimitiveTypeTriangle,
        MTLPrimitiveTypeTriangleStrip,
        MTLPrimitiveTypePoint,
        MTLPrimitiveTypeLine,
        MTLPrimitiveTypeLineStrip
    };
    GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangles == 0);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kTriangleStrip == 1);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kPoints == 2);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kLines == 3);
    GR_STATIC_ASSERT((int)GrPrimitiveType::kLineStrip == 4);

    SkASSERT(primitiveType <= GrPrimitiveType::kLineStrip);
    return mtlPrimitiveType[static_cast<int>(primitiveType)];
}

// Binds the vertex and (optional) instance buffers to the active encoder, starting at
// the first buffer index past the uniform bindings.
void GrMtlGpuRTCommandBuffer::bindGeometry(const GrBuffer* vertexBuffer,
                                           const GrBuffer* instanceBuffer) {
    size_t bufferIndex = GrMtlUniformHandler::kLastUniformBinding + 1;
    if (vertexBuffer) {
        SkASSERT(!vertexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(vertexBuffer)->isMapped());

        auto mtlVertexBuffer = static_cast<const GrMtlBuffer*>(vertexBuffer)->mtlBuffer();
        SkASSERT(mtlVertexBuffer);
        [fActiveRenderCmdEncoder setVertexBuffer:mtlVertexBuffer
                                          offset:0
                                         atIndex:bufferIndex++];
    }
    if (instanceBuffer) {
        SkASSERT(!instanceBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(instanceBuffer)->isMapped());

        auto mtlInstanceBuffer = static_cast<const GrMtlBuffer*>(instanceBuffer)->mtlBuffer();
        SkASSERT(mtlInstanceBuffer);
        [fActiveRenderCmdEncoder setVertexBuffer:mtlInstanceBuffer
                                          offset:0
                                         atIndex:bufferIndex++];
    }
}

void GrMtlGpuRTCommandBuffer::sendInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                     const GrBuffer* vertexBuffer,
                                                     int vertexCount,
                                                     int baseVertex,
                                                     const GrBuffer* instanceBuffer,
                                                     int instanceCount,
                                                     int baseInstance) {
    this->bindGeometry(vertexBuffer, instanceBuffer);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    [fActiveRenderCmdEncoder drawPrimitives:gr_to_mtl_primitive(primitiveType)
                                vertexStart:baseVertex
                                vertexCount:vertexCount
                              instanceCount:instanceCount
                               baseInstance:baseInstance];
}

void GrMtlGpuRTCommandBuffer::sendIndexedInstancedMeshToGpu(GrPrimitiveType primitiveType,
                                                            const GrBuffer* indexBuffer,
                                                            int indexCount,
                                                            int baseIndex,
                                                            const GrBuffer* vertexBuffer,
                                                            int baseVertex,
                                                            const GrBuffer* instanceBuffer,
                                                            int instanceCount,
                                                            int baseInstance,
                                                            GrPrimitiveRestart restart) {
    this->bindGeometry(vertexBuffer, instanceBuffer);

    SkASSERT(primitiveType != GrPrimitiveType::kLinesAdjacency); // Geometry shaders not supported.
    id<MTLBuffer> mtlIndexBuffer = nil;
    if (indexBuffer) {
        SkASSERT(!indexBuffer->isCpuBuffer());
        SkASSERT(!static_cast<const GrGpuBuffer*>(indexBuffer)->isMapped());

        mtlIndexBuffer = static_cast<const GrMtlBuffer*>(indexBuffer)->mtlBuffer();
        SkASSERT(mtlIndexBuffer);
    }

    // Skia meshes use 16-bit indices; baseIndex is converted to a byte offset below.
    SkASSERT(restart == GrPrimitiveRestart::kNo);
    [fActiveRenderCmdEncoder drawIndexedPrimitives:gr_to_mtl_primitive(primitiveType)
                                        indexCount:indexCount
                                         indexType:MTLIndexTypeUInt16
                                       indexBuffer:mtlIndexBuffer
                                 indexBufferOffset:sizeof(uint16_t) * baseIndex
                                     instanceCount:instanceCount
                                        baseVertex:baseVertex
                                      baseInstance:baseInstance];
}