/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "VulkanManager.h"

#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <GrBackendSemaphore.h>
#include <GrBackendSurface.h>
#include <GrDirectContext.h>
#include <GrTypes.h>
#include <android/sync.h>
#include <gui/TraceUtils.h>
#include <include/gpu/ganesh/SkSurfaceGanesh.h>
#include <include/gpu/ganesh/vk/GrVkBackendSemaphore.h>
#include <include/gpu/ganesh/vk/GrVkBackendSurface.h>
#include <include/gpu/ganesh/vk/GrVkDirectContext.h>
#include <ui/FatVector.h>
#include <vk/GrVkExtensions.h>
#include <vk/GrVkTypes.h>

#include <sstream>

#include "Properties.h"
#include "RenderThread.h"
#include "pipeline/skia/ShaderCache.h"
#include "renderstate/RenderState.h"

namespace android {
namespace uirenderer {
namespace renderthread {

// Not all of these are strictly required, but all are enabled if present.
static std::array<std::string_view, 21> sEnableExtensions{
        VK_KHR_BIND_MEMORY_2_EXTENSION_NAME,
        VK_KHR_DEDICATED_ALLOCATION_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_CAPABILITIES_EXTENSION_NAME,
        VK_KHR_EXTERNAL_MEMORY_EXTENSION_NAME,
        VK_KHR_GET_MEMORY_REQUIREMENTS_2_EXTENSION_NAME,
        VK_KHR_GET_PHYSICAL_DEVICE_PROPERTIES_2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE1_EXTENSION_NAME,
        VK_KHR_MAINTENANCE2_EXTENSION_NAME,
        VK_KHR_MAINTENANCE3_EXTENSION_NAME,
        VK_KHR_SAMPLER_YCBCR_CONVERSION_EXTENSION_NAME,
        VK_KHR_SURFACE_EXTENSION_NAME,
        VK_KHR_SWAPCHAIN_EXTENSION_NAME,
        VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME,
        VK_KHR_IMAGE_FORMAT_LIST_EXTENSION_NAME,
        VK_EXT_IMAGE_DRM_FORMAT_MODIFIER_EXTENSION_NAME,
        VK_ANDROID_EXTERNAL_MEMORY_ANDROID_HARDWARE_BUFFER_EXTENSION_NAME,
        VK_EXT_QUEUE_FAMILY_FOREIGN_EXTENSION_NAME,
        VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME,
        VK_KHR_ANDROID_SURFACE_EXTENSION_NAME,
        VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME,
        VK_EXT_DEVICE_FAULT_EXTENSION_NAME,
};

static bool shouldEnableExtension(const std::string_view& extension) {
    for (const auto& it : sEnableExtensions) {
        if (it == extension) {
            return true;
        }
    }
    return false;
}

static void free_features_extensions_structs(const VkPhysicalDeviceFeatures2& features) {
    // All Vulkan structs that could be part of the features chain will start with the
    // structure type followed by the pNext pointer. We cast to the CommonVulkanHeader
    // so we can get access to the pNext for the next struct.
    struct CommonVulkanHeader {
        VkStructureType sType;
        void* pNext;
    };

    void* pNext = features.pNext;
    while (pNext) {
        void* current = pNext;
        pNext = static_cast<CommonVulkanHeader*>(current)->pNext;
        free(current);
    }
}

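// The three GET_*_PROC tiers below mirror Vulkan's loader hierarchy: global commands are
// resolved through vkGetInstanceProcAddr with a VK_NULL_HANDLE instance, instance-level
// commands through the created VkInstance, and device-level commands through
// vkGetDeviceProcAddr so calls dispatch directly to the driver.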
#define GET_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(VK_NULL_HANDLE, "vk" #F)
#define GET_INST_PROC(F) m##F = (PFN_vk##F)vkGetInstanceProcAddr(mInstance, "vk" #F)
#define GET_DEV_PROC(F) m##F = (PFN_vk##F)vkGetDeviceProcAddr(mDevice, "vk" #F)

// Cache a weak pointer to the VulkanManager so a second thread can share the same
// Vulkan state.
static wp<VulkanManager> sWeakInstance = nullptr;
static std::mutex sLock;

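// Callers hold a strong sp<VulkanManager>. Once the last strong reference is released,
// the destructor tears down the device and instance, and a later getInstance() call
// builds a fresh manager.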
sp<VulkanManager> VulkanManager::getInstance() {
    std::lock_guard _lock{sLock};
    sp<VulkanManager> vulkanManager = sWeakInstance.promote();
    if (!vulkanManager.get()) {
        vulkanManager = new VulkanManager();
        sWeakInstance = vulkanManager;
    }

    return vulkanManager;
}

sp<VulkanManager> VulkanManager::peekInstance() {
    std::lock_guard _lock{sLock};
    return sWeakInstance.promote();
}

VulkanManager::~VulkanManager() {
    if (mDevice != VK_NULL_HANDLE) {
        mDeviceWaitIdle(mDevice);
        mDestroyDevice(mDevice, nullptr);
    }

    if (mInstance != VK_NULL_HANDLE) {
        mDestroyInstance(mInstance, nullptr);
    }

    mGraphicsQueue = VK_NULL_HANDLE;
    mAHBUploadQueue = VK_NULL_HANDLE;
    mDevice = VK_NULL_HANDLE;
    mPhysicalDevice = VK_NULL_HANDLE;
    mInstance = VK_NULL_HANDLE;
    mInstanceExtensionsOwner.clear();
    mInstanceExtensions.clear();
    mDeviceExtensionsOwner.clear();
    mDeviceExtensions.clear();
    free_features_extensions_structs(mPhysicalDeviceFeatures2);
    mPhysicalDeviceFeatures2 = {};
}

void VulkanManager::setupDevice(GrVkExtensions& grExtensions, VkPhysicalDeviceFeatures2& features) {
    VkResult err;

    constexpr VkApplicationInfo app_info = {
            VK_STRUCTURE_TYPE_APPLICATION_INFO,  // sType
            nullptr,                             // pNext
            "android framework",                 // pApplicationName
            0,                                   // applicationVersion
            "android framework",                 // pEngineName
            0,                                   // engineVersion
            mAPIVersion,                         // apiVersion
    };

    {
        GET_PROC(EnumerateInstanceExtensionProperties);

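        // Standard Vulkan two-call enumeration: first query only the count, then call
        // again with a buffer of that size to receive the extension properties.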
        uint32_t extensionCount = 0;
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount, nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mInstanceExtensionsOwner.resize(extensionCount);
        err = mEnumerateInstanceExtensionProperties(nullptr, &extensionCount,
                                                    mInstanceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSurfaceExtension = false;
        bool hasKHRAndroidSurfaceExtension = false;
        for (const VkExtensionProperties& extension : mInstanceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling instance extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling instance extension %s", extension.extensionName);
            mInstanceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SURFACE_EXTENSION_NAME)) {
                hasKHRSurfaceExtension = true;
            }
            if (!strcmp(extension.extensionName, VK_KHR_ANDROID_SURFACE_EXTENSION_NAME)) {
                hasKHRAndroidSurfaceExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSurfaceExtension || !hasKHRAndroidSurfaceExtension);
    }

    const VkInstanceCreateInfo instance_create = {
            VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO,  // sType
            nullptr,                                 // pNext
            0,                                       // flags
            &app_info,                               // pApplicationInfo
            0,                                       // enabledLayerCount
            nullptr,                                 // ppEnabledLayerNames
            (uint32_t)mInstanceExtensions.size(),    // enabledExtensionCount
            mInstanceExtensions.data(),              // ppEnabledExtensionNames
    };

    GET_PROC(CreateInstance);
    err = mCreateInstance(&instance_create, nullptr, &mInstance);
    LOG_ALWAYS_FATAL_IF(err < 0);

    GET_INST_PROC(CreateDevice);
    GET_INST_PROC(DestroyInstance);
    GET_INST_PROC(EnumerateDeviceExtensionProperties);
    GET_INST_PROC(EnumeratePhysicalDevices);
    GET_INST_PROC(GetPhysicalDeviceFeatures2);
    GET_INST_PROC(GetPhysicalDeviceImageFormatProperties2);
    GET_INST_PROC(GetPhysicalDeviceProperties);
    GET_INST_PROC(GetPhysicalDeviceQueueFamilyProperties);

    uint32_t gpuCount;
    LOG_ALWAYS_FATAL_IF(mEnumeratePhysicalDevices(mInstance, &gpuCount, nullptr));
    LOG_ALWAYS_FATAL_IF(!gpuCount);
    // Just return the first physical device instead of getting the whole array, since
    // there should only be one device on Android.
    gpuCount = 1;
    err = mEnumeratePhysicalDevices(mInstance, &gpuCount, &mPhysicalDevice);
    // VK_INCOMPLETE is returned when the count we provide is less than the total device count.
    LOG_ALWAYS_FATAL_IF(err && VK_INCOMPLETE != err);

    VkPhysicalDeviceProperties physDeviceProperties;
    mGetPhysicalDeviceProperties(mPhysicalDevice, &physDeviceProperties);
    LOG_ALWAYS_FATAL_IF(physDeviceProperties.apiVersion < VK_MAKE_VERSION(1, 1, 0));
    mDriverVersion = physDeviceProperties.driverVersion;

    // query to get the initial queue props size
    uint32_t queueCount = 0;
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, nullptr);
    LOG_ALWAYS_FATAL_IF(!queueCount);

    // now get the actual queue props
    std::unique_ptr<VkQueueFamilyProperties[]> queueProps(new VkQueueFamilyProperties[queueCount]);
    mGetPhysicalDeviceQueueFamilyProperties(mPhysicalDevice, &queueCount, queueProps.get());

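    // Two queues are requested from the graphics family: queue 0 drives rendering on the
    // render thread, and queue 1 is dedicated to AHardwareBuffer uploads (see the
    // mAHBUploadQueue lookup in initialize()).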
    constexpr auto kRequestedQueueCount = 2;

    // iterate to find the graphics queue
    mGraphicsQueueIndex = queueCount;
    for (uint32_t i = 0; i < queueCount; i++) {
        if (queueProps[i].queueFlags & VK_QUEUE_GRAPHICS_BIT) {
            mGraphicsQueueIndex = i;
            LOG_ALWAYS_FATAL_IF(queueProps[i].queueCount < kRequestedQueueCount);
            break;
        }
    }
    LOG_ALWAYS_FATAL_IF(mGraphicsQueueIndex == queueCount);

    {
        uint32_t extensionCount = 0;
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  nullptr);
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        mDeviceExtensionsOwner.resize(extensionCount);
        err = mEnumerateDeviceExtensionProperties(mPhysicalDevice, nullptr, &extensionCount,
                                                  mDeviceExtensionsOwner.data());
        LOG_ALWAYS_FATAL_IF(VK_SUCCESS != err);
        bool hasKHRSwapchainExtension = false;
        for (const VkExtensionProperties& extension : mDeviceExtensionsOwner) {
            if (!shouldEnableExtension(extension.extensionName)) {
                ALOGV("Not enabling device extension %s", extension.extensionName);
                continue;
            }
            ALOGV("Enabling device extension %s", extension.extensionName);
            mDeviceExtensions.push_back(extension.extensionName);
            if (!strcmp(extension.extensionName, VK_KHR_SWAPCHAIN_EXTENSION_NAME)) {
                hasKHRSwapchainExtension = true;
            }
        }
        LOG_ALWAYS_FATAL_IF(!hasKHRSwapchainExtension);
    }

    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    grExtensions.init(getProc, mInstance, mPhysicalDevice, mInstanceExtensions.size(),
                      mInstanceExtensions.data(), mDeviceExtensions.size(),
                      mDeviceExtensions.data());

    LOG_ALWAYS_FATAL_IF(!grExtensions.hasExtension(VK_KHR_EXTERNAL_SEMAPHORE_FD_EXTENSION_NAME, 1));

    memset(&features, 0, sizeof(VkPhysicalDeviceFeatures2));
    features.sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FEATURES_2;
    features.pNext = nullptr;

    // Setup all extension feature structs we may want to use.
    void** tailPNext = &features.pNext;
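    // tailPNext always points at the pNext field of the last struct in the chain, so each
    // feature struct below is appended in constant time; the whole heap-allocated chain is
    // later released by free_features_extensions_structs().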

    if (grExtensions.hasExtension(VK_EXT_BLEND_OPERATION_ADVANCED_EXTENSION_NAME, 2)) {
        VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT* blend;
        blend = (VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT*)malloc(
                sizeof(VkPhysicalDeviceBlendOperationAdvancedFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!blend);
        blend->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_BLEND_OPERATION_ADVANCED_FEATURES_EXT;
        blend->pNext = nullptr;
        *tailPNext = blend;
        tailPNext = &blend->pNext;
    }

    VkPhysicalDeviceSamplerYcbcrConversionFeatures* ycbcrFeature;
    ycbcrFeature = (VkPhysicalDeviceSamplerYcbcrConversionFeatures*)malloc(
            sizeof(VkPhysicalDeviceSamplerYcbcrConversionFeatures));
    LOG_ALWAYS_FATAL_IF(!ycbcrFeature);
    ycbcrFeature->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_SAMPLER_YCBCR_CONVERSION_FEATURES;
    ycbcrFeature->pNext = nullptr;
    *tailPNext = ycbcrFeature;
    tailPNext = &ycbcrFeature->pNext;

    if (grExtensions.hasExtension(VK_EXT_DEVICE_FAULT_EXTENSION_NAME, 1)) {
        // Allocate with malloc to match free_features_extensions_structs(), which releases
        // every struct in this chain with free().
        VkPhysicalDeviceFaultFeaturesEXT* deviceFaultFeatures =
                (VkPhysicalDeviceFaultFeaturesEXT*)malloc(sizeof(VkPhysicalDeviceFaultFeaturesEXT));
        LOG_ALWAYS_FATAL_IF(!deviceFaultFeatures);
        deviceFaultFeatures->sType = VK_STRUCTURE_TYPE_PHYSICAL_DEVICE_FAULT_FEATURES_EXT;
        deviceFaultFeatures->pNext = nullptr;
        *tailPNext = deviceFaultFeatures;
        tailPNext = &deviceFaultFeatures->pNext;
    }

    // query to get the physical device features
    mGetPhysicalDeviceFeatures2(mPhysicalDevice, &features);
    // Disable robustBufferAccess: it looks like it would slow things down, and we can't
    // depend on it being supported on all platforms.
    features.features.robustBufferAccess = VK_FALSE;

    float queuePriorities[kRequestedQueueCount] = {0.0};

    void* queueNextPtr = nullptr;

    VkDeviceQueueGlobalPriorityCreateInfoEXT queuePriorityCreateInfo;

    if (Properties::contextPriority != 0 &&
        grExtensions.hasExtension(VK_EXT_GLOBAL_PRIORITY_EXTENSION_NAME, 2)) {
        memset(&queuePriorityCreateInfo, 0, sizeof(VkDeviceQueueGlobalPriorityCreateInfoEXT));
        queuePriorityCreateInfo.sType =
                VK_STRUCTURE_TYPE_DEVICE_QUEUE_GLOBAL_PRIORITY_CREATE_INFO_EXT;
        queuePriorityCreateInfo.pNext = nullptr;
        switch (Properties::contextPriority) {
            case EGL_CONTEXT_PRIORITY_LOW_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_LOW_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_MEDIUM_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_MEDIUM_EXT;
                break;
            case EGL_CONTEXT_PRIORITY_HIGH_IMG:
                queuePriorityCreateInfo.globalPriority = VK_QUEUE_GLOBAL_PRIORITY_HIGH_EXT;
                break;
            default:
                LOG_ALWAYS_FATAL("Unsupported context priority");
        }
        queueNextPtr = &queuePriorityCreateInfo;
    }

    const VkDeviceQueueCreateInfo queueInfo = {
            VK_STRUCTURE_TYPE_DEVICE_QUEUE_CREATE_INFO,  // sType
            queueNextPtr,                                // pNext
            0,                                           // VkDeviceQueueCreateFlags
            mGraphicsQueueIndex,                         // queueFamilyIndex
            kRequestedQueueCount,                        // queueCount
            queuePriorities,                             // pQueuePriorities
    };

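    // The Vulkan spec requires pEnabledFeatures below to be null when a
    // VkPhysicalDeviceFeatures2 struct is chained through pNext; the enabled features
    // are taken from the chained struct instead.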
    const VkDeviceCreateInfo deviceInfo = {
            VK_STRUCTURE_TYPE_DEVICE_CREATE_INFO,  // sType
            &features,                             // pNext
            0,                                     // VkDeviceCreateFlags
            1,                                     // queueCreateInfoCount
            &queueInfo,                            // pQueueCreateInfos
            0,                                     // enabledLayerCount
            nullptr,                               // ppEnabledLayerNames
            (uint32_t)mDeviceExtensions.size(),    // enabledExtensionCount
            mDeviceExtensions.data(),              // ppEnabledExtensionNames
            nullptr,                               // ppEnabledFeatures
    };

    LOG_ALWAYS_FATAL_IF(mCreateDevice(mPhysicalDevice, &deviceInfo, nullptr, &mDevice));

    GET_DEV_PROC(AllocateCommandBuffers);
    GET_DEV_PROC(BeginCommandBuffer);
    GET_DEV_PROC(CmdPipelineBarrier);
    GET_DEV_PROC(CreateCommandPool);
    GET_DEV_PROC(CreateFence);
    GET_DEV_PROC(CreateSemaphore);
    GET_DEV_PROC(DestroyCommandPool);
    GET_DEV_PROC(DestroyDevice);
    GET_DEV_PROC(DestroyFence);
    GET_DEV_PROC(DestroySemaphore);
    GET_DEV_PROC(DeviceWaitIdle);
    GET_DEV_PROC(EndCommandBuffer);
    GET_DEV_PROC(FreeCommandBuffers);
    GET_DEV_PROC(GetDeviceQueue);
    GET_DEV_PROC(GetSemaphoreFdKHR);
    GET_DEV_PROC(ImportSemaphoreFdKHR);
    GET_DEV_PROC(QueueSubmit);
    GET_DEV_PROC(QueueWaitIdle);
    GET_DEV_PROC(ResetCommandBuffer);
    GET_DEV_PROC(ResetFences);
    GET_DEV_PROC(WaitForFences);
    GET_DEV_PROC(FrameBoundaryANDROID);
}

void VulkanManager::initialize() {
    std::call_once(mInitFlag, [&] {
        GET_PROC(EnumerateInstanceVersion);
        uint32_t instanceVersion;
        LOG_ALWAYS_FATAL_IF(mEnumerateInstanceVersion(&instanceVersion));
        LOG_ALWAYS_FATAL_IF(instanceVersion < VK_MAKE_VERSION(1, 1, 0));

        this->setupDevice(mExtensions, mPhysicalDeviceFeatures2);

        mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 0, &mGraphicsQueue);
        mGetDeviceQueue(mDevice, mGraphicsQueueIndex, 1, &mAHBUploadQueue);

        if (Properties::enablePartialUpdates && Properties::useBufferAge) {
            mSwapBehavior = SwapBehavior::BufferAge;
        }

        mInitialized = true;
    });
}

namespace {
void onVkDeviceFault(const std::string& contextLabel, const std::string& description,
                     const std::vector<VkDeviceFaultAddressInfoEXT>& addressInfos,
                     const std::vector<VkDeviceFaultVendorInfoEXT>& vendorInfos,
                     const std::vector<std::byte>& vendorBinaryData) {
    // The final crash string should contain as much differentiating info as possible, up to 1024
    // bytes. As this final message is constructed, the same information is also dumped to the logs
    // but in a more verbose format. Building the crash string is unsightly, so the clearer logging
    // statement is always placed first to give context.
    ALOGE("VK_ERROR_DEVICE_LOST (%s context): %s", contextLabel.c_str(), description.c_str());
    std::stringstream crashMsg;
    crashMsg << "VK_ERROR_DEVICE_LOST (" << contextLabel;

    if (!addressInfos.empty()) {
        ALOGE("%zu VkDeviceFaultAddressInfoEXT:", addressInfos.size());
        crashMsg << ", " << addressInfos.size() << " address info (";
        for (VkDeviceFaultAddressInfoEXT addressInfo : addressInfos) {
            ALOGE(" addressType: %d", (int)addressInfo.addressType);
            ALOGE(" reportedAddress: %" PRIu64, addressInfo.reportedAddress);
            ALOGE(" addressPrecision: %" PRIu64, addressInfo.addressPrecision);
            crashMsg << addressInfo.addressType << ":"
                     << addressInfo.reportedAddress << ":"
                     << addressInfo.addressPrecision << ", ";
        }
        crashMsg.seekp(-2, crashMsg.cur);  // Move back to overwrite trailing ", "
        crashMsg << ")";
    }

    if (!vendorInfos.empty()) {
        ALOGE("%zu VkDeviceFaultVendorInfoEXT:", vendorInfos.size());
        crashMsg << ", " << vendorInfos.size() << " vendor info (";
        for (VkDeviceFaultVendorInfoEXT vendorInfo : vendorInfos) {
            ALOGE(" description: %s", vendorInfo.description);
            ALOGE(" vendorFaultCode: %" PRIu64, vendorInfo.vendorFaultCode);
            ALOGE(" vendorFaultData: %" PRIu64, vendorInfo.vendorFaultData);
            // Omit descriptions for individual vendor info structs in the crash string, as the
            // fault code and fault data fields should be enough for clustering, and the verbosity
            // isn't worth it. Additionally, vendors may just set the general description field of
            // the overall fault to the description of the first element in this list, and that
            // overall description will be placed at the end of the crash string.
            crashMsg << vendorInfo.vendorFaultCode << ":"
                     << vendorInfo.vendorFaultData << ", ";
        }
        crashMsg.seekp(-2, crashMsg.cur);  // Move back to overwrite trailing ", "
        crashMsg << ")";
    }

    if (!vendorBinaryData.empty()) {
        // TODO: b/322830575 - Log in base64, or dump directly to a file that gets put in bugreports
        ALOGE("%zu bytes of vendor-specific binary data (please notify Android's Core Graphics"
              " Stack team if you observe this message).",
              vendorBinaryData.size());
        crashMsg << ", " << vendorBinaryData.size() << " bytes binary";
    }

    crashMsg << "): " << description;
    LOG_ALWAYS_FATAL("%s", crashMsg.str().c_str());
}

void deviceLostProcRenderThread(void* callbackContext, const std::string& description,
                                const std::vector<VkDeviceFaultAddressInfoEXT>& addressInfos,
                                const std::vector<VkDeviceFaultVendorInfoEXT>& vendorInfos,
                                const std::vector<std::byte>& vendorBinaryData) {
    onVkDeviceFault("RenderThread", description, addressInfos, vendorInfos, vendorBinaryData);
}

void deviceLostProcUploadThread(void* callbackContext, const std::string& description,
                                const std::vector<VkDeviceFaultAddressInfoEXT>& addressInfos,
                                const std::vector<VkDeviceFaultVendorInfoEXT>& vendorInfos,
                                const std::vector<std::byte>& vendorBinaryData) {
    onVkDeviceFault("UploadThread", description, addressInfos, vendorInfos, vendorBinaryData);
}
}  // anonymous namespace

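// Skia invokes this callback when a GrDirectContext created by createContext() is
// destroyed. It balances the incStrong() taken in createContext(), so the VulkanManager
// outlives every context that still references its VkDevice.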
static void onGrContextReleased(void* context) {
    VulkanManager* manager = (VulkanManager*)context;
    manager->decStrong((void*)onGrContextReleased);
}

sk_sp<GrDirectContext> VulkanManager::createContext(GrContextOptions& options,
                                                    ContextType contextType) {
    auto getProc = [](const char* proc_name, VkInstance instance, VkDevice device) {
        if (device != VK_NULL_HANDLE) {
            return vkGetDeviceProcAddr(device, proc_name);
        }
        return vkGetInstanceProcAddr(instance, proc_name);
    };

    GrVkBackendContext backendContext;
    backendContext.fInstance = mInstance;
    backendContext.fPhysicalDevice = mPhysicalDevice;
    backendContext.fDevice = mDevice;
    backendContext.fQueue =
            (contextType == ContextType::kRenderThread) ? mGraphicsQueue : mAHBUploadQueue;
    backendContext.fGraphicsQueueIndex = mGraphicsQueueIndex;
    backendContext.fMaxAPIVersion = mAPIVersion;
    backendContext.fVkExtensions = &mExtensions;
    backendContext.fDeviceFeatures2 = &mPhysicalDeviceFeatures2;
    backendContext.fGetProc = std::move(getProc);
    backendContext.fDeviceLostContext = nullptr;
    backendContext.fDeviceLostProc = (contextType == ContextType::kRenderThread)
                                             ? deviceLostProcRenderThread
                                             : deviceLostProcUploadThread;

    LOG_ALWAYS_FATAL_IF(options.fContextDeleteProc != nullptr, "Conflicting fContextDeleteProcs!");
    this->incStrong((void*)onGrContextReleased);
    options.fContextDeleteContext = this;
    options.fContextDeleteProc = onGrContextReleased;

    return GrDirectContexts::MakeVulkan(backendContext, options);
}

VkFunctorInitParams VulkanManager::getVkFunctorInitParams() const {
    return VkFunctorInitParams{
            .instance = mInstance,
            .physical_device = mPhysicalDevice,
            .device = mDevice,
            .queue = mGraphicsQueue,
            .graphics_queue_index = mGraphicsQueueIndex,
            .api_version = mAPIVersion,
            .enabled_instance_extension_names = mInstanceExtensions.data(),
            .enabled_instance_extension_names_length =
                    static_cast<uint32_t>(mInstanceExtensions.size()),
            .enabled_device_extension_names = mDeviceExtensions.data(),
            .enabled_device_extension_names_length =
                    static_cast<uint32_t>(mDeviceExtensions.size()),
            .device_features_2 = &mPhysicalDeviceFeatures2,
    };
}

Frame VulkanManager::dequeueNextBuffer(VulkanSurface* surface) {
    VulkanSurface::NativeBufferInfo* bufferInfo = surface->dequeueNativeBuffer();

    if (bufferInfo == nullptr) {
        ALOGE("VulkanSurface::dequeueNativeBuffer called with an invalid surface!");
        return Frame(-1, -1, 0);
    }

    LOG_ALWAYS_FATAL_IF(!bufferInfo->dequeued);

    if (bufferInfo->dequeue_fence != -1) {
        struct sync_file_info* finfo = sync_file_info(bufferInfo->dequeue_fence);
        bool isSignalPending = false;
        if (finfo != NULL) {
            isSignalPending = finfo->status != 1;
            sync_file_info_free(finfo);
        }
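        // If the dequeue fence has not signalled yet, import its fd into a temporary
        // Vulkan semaphore and have the GPU wait on it before rendering to this buffer.
        // If any step of the import fails, fall back to a CPU stall via sync_wait().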
        if (isSignalPending) {
            int fence_clone = dup(bufferInfo->dequeue_fence);
            if (fence_clone == -1) {
                ALOGE("dup(fence) failed, stalling until signalled: %s (%d)", strerror(errno),
                      errno);
                sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
            } else {
                VkSemaphoreCreateInfo semaphoreInfo;
                semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
                semaphoreInfo.pNext = nullptr;
                semaphoreInfo.flags = 0;
                VkSemaphore semaphore;
                VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
                if (err != VK_SUCCESS) {
                    ALOGE("Failed to create import semaphore, err: %d", err);
                    close(fence_clone);
                    sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                } else {
                    VkImportSemaphoreFdInfoKHR importInfo;
                    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
                    importInfo.pNext = nullptr;
                    importInfo.semaphore = semaphore;
                    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
                    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
                    importInfo.fd = fence_clone;

                    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
                    if (err != VK_SUCCESS) {
                        ALOGE("Failed to import semaphore, err: %d", err);
                        mDestroySemaphore(mDevice, semaphore, nullptr);
                        close(fence_clone);
                        sync_wait(bufferInfo->dequeue_fence, -1 /* forever */);
                    } else {
                        GrBackendSemaphore beSemaphore = GrBackendSemaphores::MakeVk(semaphore);
                        // Skia will take ownership of the VkSemaphore and delete it once the wait
                        // has finished. The VkSemaphore also owns the imported fd, so it will
                        // close the fd when it is deleted.
                        bufferInfo->skSurface->wait(1, &beSemaphore);
                        // The following flush blocks the GPU immediately instead of waiting for
                        // other drawing ops. It seems dequeue_fence is not respected otherwise.
                        // TODO: remove the flush after finding why beSemaphore is not working.
                        skgpu::ganesh::FlushAndSubmit(bufferInfo->skSurface.get());
                    }
                }
            }
        }
    }

    int bufferAge = (mSwapBehavior == SwapBehavior::Discard) ? 0 : surface->getCurrentBuffersAge();
    return Frame(surface->logicalWidth(), surface->logicalHeight(), bufferAge);
}

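// Ref-counted wrapper that keeps a VkSemaphore alive until both the caller and Skia's
// finished callback are done with it, then destroys the semaphore with the device-level
// destroy function captured at construction time.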
class SharedSemaphoreInfo : public LightRefBase<SharedSemaphoreInfo> {
    PFN_vkDestroySemaphore mDestroyFunction;
    VkDevice mDevice;
    VkSemaphore mSemaphore;
    GrBackendSemaphore mGrBackendSemaphore;

    SharedSemaphoreInfo(PFN_vkDestroySemaphore destroyFunction, VkDevice device,
                        VkSemaphore semaphore)
            : mDestroyFunction(destroyFunction), mDevice(device), mSemaphore(semaphore) {
        mGrBackendSemaphore = GrBackendSemaphores::MakeVk(mSemaphore);
    }

    ~SharedSemaphoreInfo() { mDestroyFunction(mDevice, mSemaphore, nullptr); }

    friend class LightRefBase<SharedSemaphoreInfo>;
    friend class sp<SharedSemaphoreInfo>;

public:
    VkSemaphore semaphore() const { return mSemaphore; }

    GrBackendSemaphore* grBackendSemaphore() { return &mGrBackendSemaphore; }
};

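// Runs as GrFlushInfo::fFinishedProc once the GPU has completed the flushed work. It
// drops the extra strong reference taken before the flush, destroying the semaphore
// when no other holder remains.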
static void destroy_semaphore(void* context) {
    SharedSemaphoreInfo* info = reinterpret_cast<SharedSemaphoreInfo*>(context);
    info->decStrong(0);
}

VulkanManager::VkDrawResult VulkanManager::finishFrame(SkSurface* surface) {
    ATRACE_NAME("Vulkan finish frame");

    sp<SharedSemaphoreInfo> sharedSemaphore;
    GrFlushInfo flushInfo;

    {
        VkExportSemaphoreCreateInfo exportInfo;
        exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
        exportInfo.pNext = nullptr;
        exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        VkSemaphoreCreateInfo semaphoreInfo;
        semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
        semaphoreInfo.pNext = &exportInfo;
        semaphoreInfo.flags = 0;
        VkSemaphore semaphore;
        VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
        ALOGE_IF(VK_SUCCESS != err,
                 "VulkanManager::finishFrame(): Failed to create semaphore");

        if (err == VK_SUCCESS) {
            sharedSemaphore = sp<SharedSemaphoreInfo>::make(mDestroySemaphore, mDevice, semaphore);
            flushInfo.fNumSemaphores = 1;
            flushInfo.fSignalSemaphores = sharedSemaphore->grBackendSemaphore();
            flushInfo.fFinishedProc = destroy_semaphore;
            sharedSemaphore->incStrong(0);
            flushInfo.fFinishedContext = sharedSemaphore.get();
        }
    }

    GrDirectContext* context = GrAsDirectContext(surface->recordingContext());
    ALOGE_IF(!context, "Surface is not backed by GPU");
    GrSemaphoresSubmitted submitted = context->flush(
            surface, SkSurfaces::BackendSurfaceAccess::kPresent, flushInfo);
    context->submit();
    VkDrawResult drawResult{
            .submissionTime = systemTime(),
    };
    if (sharedSemaphore) {
        if (submitted == GrSemaphoresSubmitted::kYes && mFrameBoundaryANDROID) {
            // retrieve VkImage used as render target
            VkImage image = VK_NULL_HANDLE;
            GrBackendRenderTarget backendRenderTarget = SkSurfaces::GetBackendRenderTarget(
                    surface, SkSurfaces::BackendHandleAccess::kFlushRead);
            if (backendRenderTarget.isValid()) {
                GrVkImageInfo info;
                if (GrBackendRenderTargets::GetVkImageInfo(backendRenderTarget, &info)) {
                    image = info.fImage;
                } else {
                    ALOGE("Frame boundary: backend is not vulkan");
                }
            } else {
                ALOGE("Frame boundary: invalid backend render target");
            }
            // frameBoundaryANDROID needs to know about the swap semaphore, but
            // it won't wait on it.
            mFrameBoundaryANDROID(mDevice, sharedSemaphore->semaphore(), image);
        }
        VkSemaphoreGetFdInfoKHR getFdInfo;
        getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
        getFdInfo.pNext = nullptr;
        getFdInfo.semaphore = sharedSemaphore->semaphore();
        getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

        int fenceFd = -1;
        VkResult err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
        ALOGE_IF(VK_SUCCESS != err, "VulkanManager::finishFrame(): Failed to get semaphore Fd");
        drawResult.presentFence.reset(fenceFd);
    } else {
        ALOGE("VulkanManager::finishFrame(): Semaphore submission failed");
        mQueueWaitIdle(mGraphicsQueue);
    }

    skiapipeline::ShaderCache::get().onVkFrameFlushed(context);

    return drawResult;
}

void VulkanManager::swapBuffers(VulkanSurface* surface, const SkRect& dirtyRect,
                                android::base::unique_fd&& presentFence) {
    if (CC_UNLIKELY(Properties::waitForGpuCompletion)) {
        ATRACE_NAME("Finishing GPU work");
        mDeviceWaitIdle(mDevice);
    }

    surface->presentCurrentBuffer(dirtyRect, presentFence.release());
}

void VulkanManager::destroySurface(VulkanSurface* surface) {
    // Make sure all submit commands have finished before starting to destroy objects.
    if (VK_NULL_HANDLE != mGraphicsQueue) {
        mQueueWaitIdle(mGraphicsQueue);
    }

    delete surface;
}

VulkanSurface* VulkanManager::createSurface(ANativeWindow* window,
                                            ColorMode colorMode,
                                            sk_sp<SkColorSpace> surfaceColorSpace,
                                            SkColorType surfaceColorType,
                                            GrDirectContext* grContext,
                                            uint32_t extraBuffers) {
    LOG_ALWAYS_FATAL_IF(!hasVkContext(), "Not initialized");
    if (!window) {
        return nullptr;
    }

    return VulkanSurface::Create(window, colorMode, surfaceColorType, surfaceColorSpace, grContext,
                                 *this, extraBuffers);
}

status_t VulkanManager::fenceWait(int fence, GrDirectContext* grContext) {
    if (!hasVkContext()) {
        ALOGE("VulkanManager::fenceWait: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    // Block GPU on the fence.
    int fenceFd = ::dup(fence);
    if (fenceFd == -1) {
        ALOGE("VulkanManager::fenceWait: error dup'ing fence fd: %d", errno);
        return -errno;
    }

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = nullptr;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        close(fenceFd);
        ALOGE("Failed to create import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }
    VkImportSemaphoreFdInfoKHR importInfo;
    importInfo.sType = VK_STRUCTURE_TYPE_IMPORT_SEMAPHORE_FD_INFO_KHR;
    importInfo.pNext = nullptr;
    importInfo.semaphore = semaphore;
    importInfo.flags = VK_SEMAPHORE_IMPORT_TEMPORARY_BIT;
    importInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;
    importInfo.fd = fenceFd;

    err = mImportSemaphoreFdKHR(mDevice, &importInfo);
    if (VK_SUCCESS != err) {
        mDestroySemaphore(mDevice, semaphore, nullptr);
        close(fenceFd);
        ALOGE("Failed to import semaphore, err: %d", err);
        return UNKNOWN_ERROR;
    }

    GrBackendSemaphore beSemaphore = GrBackendSemaphores::MakeVk(semaphore);

    // Skia will take ownership of the VkSemaphore and delete it once the wait has finished. The
    // VkSemaphore also owns the imported fd, so it will close the fd when it is deleted.
    grContext->wait(1, &beSemaphore);
    grContext->flushAndSubmit();

    return OK;
}

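// Exports a sync_fd backed by a semaphore that signals once all work flushed to the
// context so far completes. The caller typically passes the fd back to the buffer
// producer as a release fence.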
status_t VulkanManager::createReleaseFence(int* nativeFence, GrDirectContext* grContext) {
    *nativeFence = -1;
    if (!hasVkContext()) {
        ALOGE("VulkanManager::createReleaseFence: VkDevice not initialized");
        return INVALID_OPERATION;
    }

    VkExportSemaphoreCreateInfo exportInfo;
    exportInfo.sType = VK_STRUCTURE_TYPE_EXPORT_SEMAPHORE_CREATE_INFO;
    exportInfo.pNext = nullptr;
    exportInfo.handleTypes = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    VkSemaphoreCreateInfo semaphoreInfo;
    semaphoreInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_CREATE_INFO;
    semaphoreInfo.pNext = &exportInfo;
    semaphoreInfo.flags = 0;
    VkSemaphore semaphore;
    VkResult err = mCreateSemaphore(mDevice, &semaphoreInfo, nullptr, &semaphore);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to create semaphore");
        return INVALID_OPERATION;
    }

    auto sharedSemaphore = sp<SharedSemaphoreInfo>::make(mDestroySemaphore, mDevice, semaphore);

    // Even if Skia fails to submit the semaphore, it will still call the destroy_semaphore
    // callback.
    GrFlushInfo flushInfo;
    flushInfo.fNumSemaphores = 1;
    flushInfo.fSignalSemaphores = sharedSemaphore->grBackendSemaphore();
    flushInfo.fFinishedProc = destroy_semaphore;
    sharedSemaphore->incStrong(0);
    flushInfo.fFinishedContext = sharedSemaphore.get();
    GrSemaphoresSubmitted submitted = grContext->flush(flushInfo);
    grContext->submit();

    if (submitted == GrSemaphoresSubmitted::kNo) {
        ALOGE("VulkanManager::createReleaseFence: Failed to submit semaphore");
        return INVALID_OPERATION;
    }

    VkSemaphoreGetFdInfoKHR getFdInfo;
    getFdInfo.sType = VK_STRUCTURE_TYPE_SEMAPHORE_GET_FD_INFO_KHR;
    getFdInfo.pNext = nullptr;
    getFdInfo.semaphore = semaphore;
    getFdInfo.handleType = VK_EXTERNAL_SEMAPHORE_HANDLE_TYPE_SYNC_FD_BIT;

    int fenceFd = 0;

    err = mGetSemaphoreFdKHR(mDevice, &getFdInfo, &fenceFd);
    if (VK_SUCCESS != err) {
        ALOGE("VulkanManager::createReleaseFence: Failed to get semaphore Fd");
        return INVALID_OPERATION;
    }
    *nativeFence = fenceFd;

    return OK;
}

} /* namespace renderthread */
} /* namespace uirenderer */
} /* namespace android */