// Copyright 2019 The SwiftShader Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//    http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include "VkPipelineCache.hpp"
#include <cstring>

namespace vk {

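// SpirvShaderKey captures everything that affects the compilation of a SPIR-V
// shader, so it can serve as the lookup key for cached shaders.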
PipelineCache::SpirvShaderKey::SpirvShaderKey(const VkShaderStageFlagBits pipelineStage,
                                              const std::string &entryPointName,
                                              const std::vector<uint32_t> &insns,
                                              const vk::RenderPass *renderPass,
                                              const uint32_t subpassIndex,
                                              const vk::SpecializationInfo &specializationInfo)
    : pipelineStage(pipelineStage)
    , entryPointName(entryPointName)
    , insns(insns)
    , renderPass(renderPass)
    , subpassIndex(subpassIndex)
    , specializationInfo(specializationInfo)
{
}

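// Provides the strict weak ordering required to use SpirvShaderKey as a map
// key. Cheap scalar comparisons come first; the entry point name and the
// instruction stream are only memcmp'd once their sizes already match.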
bool PipelineCache::SpirvShaderKey::operator<(const SpirvShaderKey &other) const
{
	if(pipelineStage != other.pipelineStage)
	{
		return pipelineStage < other.pipelineStage;
	}

	if(renderPass != other.renderPass)
	{
		return renderPass < other.renderPass;
	}

	if(subpassIndex != other.subpassIndex)
	{
		return subpassIndex < other.subpassIndex;
	}

	if(insns.size() != other.insns.size())
	{
		return insns.size() < other.insns.size();
	}

	if(entryPointName.size() != other.entryPointName.size())
	{
		return entryPointName.size() < other.entryPointName.size();
	}

	int cmp = memcmp(entryPointName.c_str(), other.entryPointName.c_str(), entryPointName.size());
	if(cmp != 0)
	{
		return cmp < 0;
	}

	cmp = memcmp(insns.data(), other.insns.data(), insns.size() * sizeof(uint32_t));
	if(cmp != 0)
	{
		return cmp < 0;
	}

	return (specializationInfo < other.specializationInfo);
}

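// The backing memory holds a serializable blob: the pipeline cache header
// followed by any initial data the application provided at creation time.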
PipelineCache::PipelineCache(const VkPipelineCacheCreateInfo *pCreateInfo, void *mem)
    : dataSize(ComputeRequiredAllocationSize(pCreateInfo))
    , data(reinterpret_cast<uint8_t *>(mem))
{
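	// Fill in the header that Vulkan requires at the start of pipeline cache
	// data, so the blob returned by getData() identifies this implementation.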
	CacheHeader *header = reinterpret_cast<CacheHeader *>(mem);
	header->headerLength = sizeof(CacheHeader);
	header->headerVersion = VK_PIPELINE_CACHE_HEADER_VERSION_ONE;
	header->vendorID = VENDOR_ID;
	header->deviceID = DEVICE_ID;
	memcpy(header->pipelineCacheUUID, SWIFTSHADER_UUID, VK_UUID_SIZE);

	if(pCreateInfo->pInitialData && (pCreateInfo->initialDataSize > 0))
	{
		memcpy(data + sizeof(CacheHeader), pCreateInfo->pInitialData, pCreateInfo->initialDataSize);
	}
}

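// Explicitly release the cached shaders and compute programs before the
// members themselves are destroyed.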
PipelineCache::~PipelineCache()
{
	spirvShaders.clear();
	computePrograms.clear();
}

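// Frees the allocation backing the cache data blob.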
void PipelineCache::destroy(const VkAllocationCallbacks *pAllocator)
{
	vk::deallocate(data, pAllocator);
}

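// The backing allocation must hold the header plus any initial data supplied
// through VkPipelineCacheCreateInfo.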
size_t PipelineCache::ComputeRequiredAllocationSize(const VkPipelineCacheCreateInfo *pCreateInfo)
{
	return pCreateInfo->initialDataSize + sizeof(CacheHeader);
}

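// Implements vkGetPipelineCacheData's two-call idiom: a null pData queries the
// required size, and a second call with that exact size copies the data out.
// Any size mismatch yields VK_INCOMPLETE with zero bytes written.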
VkResult PipelineCache::getData(size_t *pDataSize, void *pData)
{
	if(!pData)
	{
		*pDataSize = dataSize;
		return VK_SUCCESS;
	}

	if(*pDataSize != dataSize)
	{
		*pDataSize = 0;
		return VK_INCOMPLETE;
	}

	if(*pDataSize > 0)
	{
		memcpy(pData, data, *pDataSize);
	}

	return VK_SUCCESS;
}

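// Implements vkMergePipelineCaches: folds the shaders and compute programs of
// each source cache into this cache.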
VkResult PipelineCache::merge(uint32_t srcCacheCount, const VkPipelineCache *pSrcCaches)
{
	for(uint32_t i = 0; i < srcCacheCount; i++)
	{
		PipelineCache *srcCache = Cast(pSrcCaches[i]);

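		// Copy the source cache's entries while both caches are locked.
		// Map insertion keeps existing entries, so on a key collision this
		// cache's entry wins.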
		{
			marl::lock thisLock(spirvShadersMutex);
			marl::lock srcLock(srcCache->spirvShadersMutex);
			spirvShaders.insert(srcCache->spirvShaders.begin(), srcCache->spirvShaders.end());
		}

		{
			marl::lock thisLock(computeProgramsMutex);
			marl::lock srcLock(srcCache->computeProgramsMutex);
			computePrograms.insert(srcCache->computePrograms.begin(), srcCache->computePrograms.end());
		}
	}

	return VK_SUCCESS;
}

} // namespace vk