/*
 * Copyright 2018 Google Inc.
 *
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

#include "GrMtlBuffer.h"
#include "GrMtlGpu.h"
#include "GrGpuResourcePriv.h"
#include "GrTypesPriv.h"

#ifdef SK_DEBUG
#define VALIDATE() this->validate()
#else
#define VALIDATE() do {} while(false)
#endif

sk_sp<GrMtlBuffer> GrMtlBuffer::Make(GrMtlGpu* gpu, size_t size, GrBufferType intendedType,
                                     GrAccessPattern accessPattern, const void* data) {
    // TODO: DrawIndirect buffers aren't actually supported yet because we don't have a way of
    // uploading data to them.
    SkASSERT(intendedType != kDrawIndirect_GrBufferType);
    sk_sp<GrMtlBuffer> buffer(new GrMtlBuffer(gpu, size, intendedType, accessPattern));
    if (data && !buffer->onUpdateData(data, size)) {
        return nullptr;
    }
    return buffer;
}

GrMtlBuffer::GrMtlBuffer(GrMtlGpu* gpu, size_t size, GrBufferType intendedType,
                         GrAccessPattern accessPattern)
        : INHERITED(gpu, size, intendedType, accessPattern)
        , fIntendedType(intendedType)
        , fIsDynamic(accessPattern == kDynamic_GrAccessPattern) {
    // TODO: We are treating all buffers as static access since we don't have an implementation to
    // synchronize gpu and cpu access of a resource yet. See comments in GrMtlBuffer::internalMap()
    // and internalUnmap() for more details.
    fIsDynamic = false;

    // The managed resource mode is only available for macOS. iOS should use shared.
    fMtlBuffer =
            [gpu->device() newBufferWithLength: size
                                       options: !fIsDynamic ? MTLResourceStorageModePrivate
#ifdef SK_BUILD_FOR_MAC
                                                            : MTLResourceStorageModeManaged];
#else
                                                            : MTLResourceStorageModeShared];
#endif
    this->registerWithCache(SkBudgeted::kYes);
    VALIDATE();
}

GrMtlBuffer::~GrMtlBuffer() {
    SkASSERT(fMtlBuffer == nil);
    SkASSERT(fMappedBuffer == nil);
    SkASSERT(fMapPtr == nullptr);
}

bool GrMtlBuffer::onUpdateData(const void* src, size_t srcInBytes) {
    if (fMtlBuffer == nil) {
        return false;
    }
    if (srcInBytes > fMtlBuffer.length) {
        return false;
    }
    if (fIntendedType == kDrawIndirect_GrBufferType) {
        // TODO: implement encoding data into argument (DrawIndirect) buffers.
        return false;
    }
    VALIDATE();

    this->internalMap(srcInBytes);
    if (fMapPtr == nullptr) {
        return false;
    }
    SkASSERT(fMappedBuffer);
    SkASSERT(srcInBytes == fMappedBuffer.length);
    memcpy(fMapPtr, src, srcInBytes);
    this->internalUnmap(srcInBytes);

    VALIDATE();
    return true;
}

inline GrMtlGpu* GrMtlBuffer::mtlGpu() const {
    SkASSERT(!this->wasDestroyed());
    return static_cast<GrMtlGpu*>(this->getGpu());
}

void GrMtlBuffer::onAbandon() {
    fMtlBuffer = nil;
    fMappedBuffer = nil;
    fMapPtr = nullptr;
    VALIDATE();
    INHERITED::onAbandon();
}

void GrMtlBuffer::onRelease() {
    if (!this->wasDestroyed()) {
        VALIDATE();
        fMtlBuffer = nil;
        fMappedBuffer = nil;
        fMapPtr = nullptr;
        VALIDATE();
    }
    INHERITED::onRelease();
}

void GrMtlBuffer::internalMap(size_t sizeInBytes) {
    SkASSERT(fMtlBuffer);
    if (this->wasDestroyed()) {
        return;
    }
    VALIDATE();
    SkASSERT(!this->isMapped());
    if (fIsDynamic) {
        // TODO: We will want to decide if we need to create a new buffer here in order to avoid
        // possibly invalidating a buffer which is being used by the gpu.
        fMappedBuffer = fMtlBuffer;
        fMapPtr = fMappedBuffer.contents;
    } else {
        // TODO: We can't ensure that map will only be called once on static access buffers until
        // we actually enable dynamic access.
        // SkASSERT(fMappedBuffer == nil);
        // Allocate a CPU-accessible staging buffer to hold the data until internalUnmap() copies
        // it into the private GPU buffer.
        fMappedBuffer =
                [this->mtlGpu()->device() newBufferWithLength: sizeInBytes
#ifdef SK_BUILD_FOR_MAC
                                                      options: MTLResourceStorageModeManaged];
#else
                                                      options: MTLResourceStorageModeShared];
#endif
        fMapPtr = fMappedBuffer.contents;
    }
    VALIDATE();
}

void GrMtlBuffer::internalUnmap(size_t sizeInBytes) {
    SkASSERT(fMtlBuffer);
    if (this->wasDestroyed()) {
        return;
    }
    VALIDATE();
    SkASSERT(this->isMapped());
    if (fMtlBuffer == nil) {
        fMappedBuffer = nil;
        fMapPtr = nullptr;
        return;
    }
#ifdef SK_BUILD_FOR_MAC
    // TODO: by calling didModifyRange here we invalidate the buffer. This will cause problems for
    // dynamic access buffers if they are being used by the gpu.
    [fMappedBuffer didModifyRange: NSMakeRange(0, sizeInBytes)];
#endif
    if (!fIsDynamic) {
        // Static buffers are backed by private storage, so blit the staging buffer's contents
        // into the GPU-only buffer.
        id<MTLBlitCommandEncoder> blitCmdEncoder =
                [this->mtlGpu()->commandBuffer() blitCommandEncoder];
        [blitCmdEncoder copyFromBuffer: fMappedBuffer
                          sourceOffset: 0
                              toBuffer: fMtlBuffer
                     destinationOffset: 0
                                  size: sizeInBytes];
        [blitCmdEncoder endEncoding];
    }
    fMappedBuffer = nil;
    fMapPtr = nullptr;
}

void GrMtlBuffer::onMap() {
    this->internalMap(fMtlBuffer.length);
}

void GrMtlBuffer::onUnmap() {
    this->internalUnmap(fMappedBuffer.length);
}

#ifdef SK_DEBUG
void GrMtlBuffer::validate() const {
    SkASSERT(fMtlBuffer == nil ||
             fIntendedType == kVertex_GrBufferType ||
             fIntendedType == kIndex_GrBufferType ||
             fIntendedType == kXferCpuToGpu_GrBufferType ||
             fIntendedType == kXferGpuToCpu_GrBufferType);
    // fIntendedType == kDrawIndirect_GrBufferType not yet supported
    SkASSERT(fMappedBuffer == nil || fMtlBuffer == nil ||
             fMappedBuffer.length <= fMtlBuffer.length);
    SkASSERT(fIsDynamic == false);  // TODO: implement synchronization to allow dynamic access.
}
#endif