/*-------------------------------------------------------------------------
 * Vulkan CTS Framework
 * --------------------
 *
 * Copyright (c) 2015 Google Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 *
 *//*!
 * \file
 * \brief Memory allocation callback utilities.
 *//*--------------------------------------------------------------------*/

#include "vkAllocationCallbackUtil.hpp"
#include "tcuFormatUtil.hpp"
#include "tcuTestLog.hpp"
#include "deSTLUtil.hpp"
#include "deMemory.h"

#include <map>

namespace vk
{

// System default allocator

static VKAPI_ATTR void* VKAPI_CALL systemAllocate (void*, size_t size, size_t alignment, VkSystemAllocationScope)
{
	if (size > 0)
		return deAlignedMalloc(size, (deUint32)alignment);
	else
		return DE_NULL;
}

static VKAPI_ATTR void VKAPI_CALL systemFree (void*, void* pMem)
{
	deAlignedFree(pMem);
}

static VKAPI_ATTR void* VKAPI_CALL systemReallocate (void*, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope)
{
	return deAlignedRealloc(pOriginal, size, alignment);
}

static VKAPI_ATTR void VKAPI_CALL systemInternalAllocationNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}

static VKAPI_ATTR void VKAPI_CALL systemInternalFreeNotification (void*, size_t, VkInternalAllocationType, VkSystemAllocationScope)
{
}

static const VkAllocationCallbacks s_systemAllocator =
{
	DE_NULL,	// pUserData
	systemAllocate,
	systemReallocate,
	systemFree,
	systemInternalAllocationNotification,
	systemInternalFreeNotification,
};

const VkAllocationCallbacks* getSystemAllocator (void)
{
	return &s_systemAllocator;
}
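
// Illustrative sketch (not compiled): getSystemAllocator() returns a VkAllocationCallbacks
// structure backed by deAlignedMalloc/deAlignedRealloc/deAlignedFree and can be passed
// wherever the API expects a pAllocator argument. The surrounding names (vkd, device,
// createInfo) are placeholders for whatever driver interface and create info the calling
// test already has:
//
//	VkSampler						sampler		= (VkSampler)0;
//	const VkAllocationCallbacks*	pAllocator	= getSystemAllocator();
//
//	VK_CHECK(vkd.createSampler(device, &createInfo, pAllocator, &sampler));
//	// ... use the sampler ...
//	vkd.destroySampler(device, sampler, pAllocator);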

// AllocationCallbacks

static VKAPI_ATTR void* VKAPI_CALL allocationCallback (void* pUserData, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return reinterpret_cast<AllocationCallbacks*>(pUserData)->allocate(size, alignment, allocationScope);
}

static VKAPI_ATTR void* VKAPI_CALL reallocationCallback (void* pUserData, void* pOriginal, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return reinterpret_cast<AllocationCallbacks*>(pUserData)->reallocate(pOriginal, size, alignment, allocationScope);
}

static VKAPI_ATTR void VKAPI_CALL freeCallback (void* pUserData, void* pMem)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->free(pMem);
}

static VKAPI_ATTR void VKAPI_CALL internalAllocationNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalAllocation(size, allocationType, allocationScope);
}

static VKAPI_ATTR void VKAPI_CALL internalFreeNotificationCallback (void* pUserData, size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	reinterpret_cast<AllocationCallbacks*>(pUserData)->notifyInternalFree(size, allocationType, allocationScope);
}

// Build a VkAllocationCallbacks structure whose pUserData is the AllocationCallbacks object
// itself; the static trampolines above cast pUserData back and dispatch to the corresponding
// virtual member function.
static VkAllocationCallbacks makeCallbacks (AllocationCallbacks* object)
{
	const VkAllocationCallbacks callbacks =
	{
		reinterpret_cast<void*>(object),
		allocationCallback,
		reallocationCallback,
		freeCallback,
		internalAllocationNotificationCallback,
		internalFreeNotificationCallback
	};
	return callbacks;
}

AllocationCallbacks::AllocationCallbacks (void)
	: m_callbacks(makeCallbacks(this))
{
}

AllocationCallbacks::~AllocationCallbacks (void)
{
}

// AllocationCallbackRecord

AllocationCallbackRecord AllocationCallbackRecord::allocation (size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
{
	AllocationCallbackRecord record;

	record.type = TYPE_ALLOCATION;
	record.data.allocation.size = size;
	record.data.allocation.alignment = alignment;
	record.data.allocation.scope = scope;
	record.data.allocation.returnedPtr = returnedPtr;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::reallocation (void* original, size_t size, size_t alignment, VkSystemAllocationScope scope, void* returnedPtr)
{
	AllocationCallbackRecord record;

	record.type = TYPE_REALLOCATION;
	record.data.reallocation.original = original;
	record.data.reallocation.size = size;
	record.data.reallocation.alignment = alignment;
	record.data.reallocation.scope = scope;
	record.data.reallocation.returnedPtr = returnedPtr;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::free (void* mem)
{
	AllocationCallbackRecord record;

	record.type = TYPE_FREE;
	record.data.free.mem = mem;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::internalAllocation (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
{
	AllocationCallbackRecord record;

	record.type = TYPE_INTERNAL_ALLOCATION;
	record.data.internalAllocation.size = size;
	record.data.internalAllocation.type = type;
	record.data.internalAllocation.scope = scope;

	return record;
}

AllocationCallbackRecord AllocationCallbackRecord::internalFree (size_t size, VkInternalAllocationType type, VkSystemAllocationScope scope)
{
	AllocationCallbackRecord record;

	record.type = TYPE_INTERNAL_FREE;
	record.data.internalAllocation.size = size;
	record.data.internalAllocation.type = type;
	record.data.internalAllocation.scope = scope;

	return record;
}

// ChainedAllocator

ChainedAllocator::ChainedAllocator (const VkAllocationCallbacks* nextAllocator)
	: m_nextAllocator(nextAllocator)
{
}

ChainedAllocator::~ChainedAllocator (void)
{
}

void* ChainedAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnAllocation(m_nextAllocator->pUserData, size, alignment, allocationScope);
}

void* ChainedAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	return m_nextAllocator->pfnReallocation(m_nextAllocator->pUserData, original, size, alignment, allocationScope);
}

void ChainedAllocator::free (void* mem)
{
	m_nextAllocator->pfnFree(m_nextAllocator->pUserData, mem);
}

void ChainedAllocator::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalAllocation(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}

void ChainedAllocator::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	m_nextAllocator->pfnInternalFree(m_nextAllocator->pUserData, size, allocationType, allocationScope);
}
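
// Illustrative sketch (not compiled): a ChainedAllocator subclass intercepts callbacks and
// then forwards them to the next VkAllocationCallbacks in the chain, which is the pattern
// AllocationCallbackRecorder and DeterministicFailAllocator below follow. The class name
// and counter member here are hypothetical:
//
//	class CountingAllocator : public ChainedAllocator
//	{
//	public:
//		CountingAllocator (const VkAllocationCallbacks* next)
//			: ChainedAllocator	(next)
//			, m_numAllocs		(0)
//		{}
//
//		void* allocate (size_t size, size_t alignment, VkSystemAllocationScope scope)
//		{
//			m_numAllocs += 1;											// observe the call...
//			return ChainedAllocator::allocate(size, alignment, scope);	// ...then delegate
//		}
//
//	private:
//		deUint32 m_numAllocs;
//	};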

// AllocationCallbackRecorder

AllocationCallbackRecorder::AllocationCallbackRecorder (const VkAllocationCallbacks* allocator, deUint32 callCountHint)
	: ChainedAllocator	(allocator)
	, m_records			(callCountHint)
{
}

AllocationCallbackRecorder::~AllocationCallbackRecorder (void)
{
}

void* AllocationCallbackRecorder::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	void* const ptr = ChainedAllocator::allocate(size, alignment, allocationScope);

	m_records.append(AllocationCallbackRecord::allocation(size, alignment, allocationScope, ptr));

	return ptr;
}

void* AllocationCallbackRecorder::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	void* const ptr = ChainedAllocator::reallocate(original, size, alignment, allocationScope);

	m_records.append(AllocationCallbackRecord::reallocation(original, size, alignment, allocationScope, ptr));

	return ptr;
}

void AllocationCallbackRecorder::free (void* mem)
{
	ChainedAllocator::free(mem);

	m_records.append(AllocationCallbackRecord::free(mem));
}

void AllocationCallbackRecorder::notifyInternalAllocation (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	ChainedAllocator::notifyInternalAllocation(size, allocationType, allocationScope);

	m_records.append(AllocationCallbackRecord::internalAllocation(size, allocationType, allocationScope));
}

void AllocationCallbackRecorder::notifyInternalFree (size_t size, VkInternalAllocationType allocationType, VkSystemAllocationScope allocationScope)
{
	ChainedAllocator::notifyInternalFree(size, allocationType, allocationScope);

	m_records.append(AllocationCallbackRecord::internalFree(size, allocationType, allocationScope));
}
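
// Illustrative sketch (not compiled): a typical test wraps the system allocator in a
// recorder, hands the recorder's callbacks to the API calls under test and finally checks
// the recorded stream for leaks and invalid callbacks. getCallbacks() is assumed to be the
// accessor declared in vkAllocationCallbackUtil.hpp; the log object comes from the
// surrounding test context:
//
//	AllocationCallbackRecorder	recorder	(getSystemAllocator(), 1024u /* call count hint */);
//
//	// ... create and destroy Vulkan objects, passing recorder.getCallbacks() as pAllocator ...
//
//	if (!validateAndLog(log, recorder, 0u /* no live allocations allowed */))
//		return tcu::TestStatus::fail("Invalid allocation callback behavior");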

// DeterministicFailAllocator

DeterministicFailAllocator::DeterministicFailAllocator (const VkAllocationCallbacks* allocator, deUint32 numPassingAllocs)
	: ChainedAllocator		(allocator)
	, m_numPassingAllocs	(numPassingAllocs)
	, m_allocationNdx		(0)
{
}

DeterministicFailAllocator::~DeterministicFailAllocator (void)
{
}

void* DeterministicFailAllocator::allocate (size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	// Atomically count (re)allocation calls: the first m_numPassingAllocs calls are passed
	// through to the chained allocator, every call after that fails by returning DE_NULL.
	if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
		return ChainedAllocator::allocate(size, alignment, allocationScope);
	else
		return DE_NULL;
}

void* DeterministicFailAllocator::reallocate (void* original, size_t size, size_t alignment, VkSystemAllocationScope allocationScope)
{
	if (deAtomicIncrementUint32(&m_allocationNdx) <= m_numPassingAllocs)
		return ChainedAllocator::reallocate(original, size, alignment, allocationScope);
	else
		return DE_NULL;
}
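
// Illustrative sketch (not compiled): simulating out-of-host-memory by retrying object
// creation with a growing number of passing allocations; each iteration fails one more
// allocation than the last until creation eventually succeeds. createObjectUnderTest is a
// hypothetical helper standing in for the actual create call being exercised:
//
//	for (deUint32 numPassing = 0; ; ++numPassing)
//	{
//		DeterministicFailAllocator	failingAllocator	(getSystemAllocator(), numPassing);
//		const VkResult				res					= createObjectUnderTest(failingAllocator.getCallbacks());
//
//		if (res == VK_SUCCESS)
//			break;				// Creation survived with numPassing allocations
//		else if (res != VK_ERROR_OUT_OF_HOST_MEMORY)
//			TCU_FAIL("Unexpected error code");
//	}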

// Utils

AllocationCallbackValidationResults::AllocationCallbackValidationResults (void)
{
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}

void AllocationCallbackValidationResults::clear (void)
{
	liveAllocations.clear();
	violations.clear();
	deMemset(internalAllocationTotal, 0, sizeof(internalAllocationTotal));
}

namespace
{

struct AllocationSlot
{
	AllocationCallbackRecord	record;
	bool						isLive;

	AllocationSlot (void)
		: isLive	(false)
	{}

	AllocationSlot (const AllocationCallbackRecord& record_, bool isLive_)
		: record	(record_)
		, isLive	(isLive_)
	{}
};

size_t getAlignment (const AllocationCallbackRecord& record)
{
	if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION)
		return record.data.allocation.alignment;
	else if (record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		return record.data.reallocation.alignment;
	else
	{
		DE_ASSERT(false);
		return 0;
	}
}

} // anonymous

// Replays the recorded callback stream and checks it against the allocation callback
// contract, collecting contract violations and still-live allocations into *results.
void validateAllocationCallbacks (const AllocationCallbackRecorder& recorder, AllocationCallbackValidationResults* results)
{
	std::vector<AllocationSlot>	allocations;
	std::map<void*, size_t>		ptrToSlotIndex;

	DE_ASSERT(results->liveAllocations.empty() && results->violations.empty());

	for (AllocationCallbackRecorder::RecordIterator callbackIter = recorder.getRecordsBegin();
		 callbackIter != recorder.getRecordsEnd();
		 ++callbackIter)
	{
		const AllocationCallbackRecord& record = *callbackIter;

		// Validate scope
		{
			const VkSystemAllocationScope* const scopePtr = record.type == AllocationCallbackRecord::TYPE_ALLOCATION			? &record.data.allocation.scope
														  : record.type == AllocationCallbackRecord::TYPE_REALLOCATION			? &record.data.reallocation.scope
														  : record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION	? &record.data.internalAllocation.scope
														  : record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE		? &record.data.internalAllocation.scope
														  : DE_NULL;

			if (scopePtr && !de::inBounds(*scopePtr, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE));
		}

		// Validate alignment
		if (record.type == AllocationCallbackRecord::TYPE_ALLOCATION ||
			record.type == AllocationCallbackRecord::TYPE_REALLOCATION)
		{
			if (!deIsPowerOfTwoSize(getAlignment(record)))
				results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_ALIGNMENT));
		}

		// Validate actual allocation behavior
		switch (record.type)
		{
			case AllocationCallbackRecord::TYPE_ALLOCATION:
			{
				if (record.data.allocation.returnedPtr)
				{
					if (!de::contains(ptrToSlotIndex, record.data.allocation.returnedPtr))
					{
						ptrToSlotIndex[record.data.allocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
					else
					{
						const size_t slotNdx = ptrToSlotIndex[record.data.allocation.returnedPtr];
						if (!allocations[slotNdx].isLive)
						{
							allocations[slotNdx].isLive = true;
							allocations[slotNdx].record = record;
						}
						else
						{
							// we should not have multiple live allocations with the same pointer
							DE_ASSERT(false);
						}
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_REALLOCATION:
			{
				if (de::contains(ptrToSlotIndex, record.data.reallocation.original))
				{
					const size_t		origSlotNdx	= ptrToSlotIndex[record.data.reallocation.original];
					AllocationSlot&		origSlot	= allocations[origSlotNdx];

					DE_ASSERT(record.data.reallocation.original != DE_NULL);

					if (record.data.reallocation.size > 0)
					{
						if (getAlignment(origSlot.record) != record.data.reallocation.alignment)
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT));

						if (record.data.reallocation.original == record.data.reallocation.returnedPtr)
						{
							if (!origSlot.isLive)
							{
								results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_FREED_PTR));
								origSlot.isLive = true; // Mark live to suppress further errors
							}

							// Just update slot record
							allocations[origSlotNdx].record = record;
						}
						else
						{
							if (record.data.reallocation.returnedPtr)
							{
								allocations[origSlotNdx].isLive = false;
								if (!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr))
								{
									ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
									allocations.push_back(AllocationSlot(record, true));
								}
								else
								{
									const size_t slotNdx = ptrToSlotIndex[record.data.reallocation.returnedPtr];
									if (!allocations[slotNdx].isLive)
									{
										allocations[slotNdx].isLive = true;
										allocations[slotNdx].record = record;
									}
									else
									{
										// we should not have multiple live allocations with the same pointer
										DE_ASSERT(false);
									}
								}
							}
							// else original ptr remains valid and live
						}
					}
					else
					{
						// Reallocation to zero size acts as a free
						DE_ASSERT(!record.data.reallocation.returnedPtr);

						origSlot.isLive = false;
					}
				}
				else
				{
					if (record.data.reallocation.original)
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR));

					if (record.data.reallocation.returnedPtr)
					{
						DE_ASSERT(!de::contains(ptrToSlotIndex, record.data.reallocation.returnedPtr));
						ptrToSlotIndex[record.data.reallocation.returnedPtr] = allocations.size();
						allocations.push_back(AllocationSlot(record, true));
					}
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_FREE:
			{
				if (record.data.free.mem != DE_NULL) // Freeing null pointer is valid and ignored
				{
					if (de::contains(ptrToSlotIndex, record.data.free.mem))
					{
						const size_t slotNdx = ptrToSlotIndex[record.data.free.mem];

						if (allocations[slotNdx].isLive)
							allocations[slotNdx].isLive = false;
						else
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_DOUBLE_FREE));
					}
					else
						results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR));
				}

				break;
			}

			case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
			case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			{
				if (de::inBounds(record.data.internalAllocation.type, (VkInternalAllocationType)0, VK_INTERNAL_ALLOCATION_TYPE_LAST))
				{
					size_t* const	totalAllocSizePtr	= &results->internalAllocationTotal[record.data.internalAllocation.type][record.data.internalAllocation.scope];
					const size_t	size				= record.data.internalAllocation.size;

					if (record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE)
					{
						if (*totalAllocSizePtr < size)
						{
							results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL));
							*totalAllocSizePtr = 0; // Reset to 0 to suppress compound errors
						}
						else
							*totalAllocSizePtr -= size;
					}
					else
						*totalAllocSizePtr += size;
				}
				else
					results->violations.push_back(AllocationCallbackViolation(record, AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE));

				break;
			}

			default:
				DE_ASSERT(false);
		}
	}

	DE_ASSERT(!de::contains(ptrToSlotIndex, DE_NULL));

	// Collect live allocations
	for (std::vector<AllocationSlot>::const_iterator slotIter = allocations.begin();
		 slotIter != allocations.end();
		 ++slotIter)
	{
		if (slotIter->isLive)
			results->liveAllocations.push_back(slotIter->record);
	}
}

bool checkAndLog (tcu::TestLog& log, const AllocationCallbackValidationResults& results, deUint32 allowedLiveAllocScopeBits)
{
	using tcu::TestLog;

	size_t numLeaks = 0;

	if (!results.violations.empty())
	{
		for (size_t violationNdx = 0; violationNdx < results.violations.size(); ++violationNdx)
		{
			log << TestLog::Message << "VIOLATION " << (violationNdx+1)
				<< ": " << results.violations[violationNdx]
				<< " (" << results.violations[violationNdx].record << ")"
				<< TestLog::EndMessage;
		}

		log << TestLog::Message << "ERROR: Found " << results.violations.size() << " invalid allocation callbacks!" << TestLog::EndMessage;
	}

	// Verify live allocations
	for (size_t liveNdx = 0; liveNdx < results.liveAllocations.size(); ++liveNdx)
	{
		const AllocationCallbackRecord&	record	= results.liveAllocations[liveNdx];
		const VkSystemAllocationScope	scope	= record.type == AllocationCallbackRecord::TYPE_ALLOCATION		? record.data.allocation.scope
												: record.type == AllocationCallbackRecord::TYPE_REALLOCATION	? record.data.reallocation.scope
												: VK_SYSTEM_ALLOCATION_SCOPE_LAST;

		DE_ASSERT(de::inBounds(scope, (VkSystemAllocationScope)0, VK_SYSTEM_ALLOCATION_SCOPE_LAST));

		if ((allowedLiveAllocScopeBits & (1u << scope)) == 0)
		{
			log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << record << TestLog::EndMessage;
			numLeaks += 1;
		}
	}

	// Verify internal allocations
	for (int internalAllocTypeNdx = 0; internalAllocTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocTypeNdx)
	{
		for (int scopeNdx = 0; scopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++scopeNdx)
		{
			const VkInternalAllocationType	type			= (VkInternalAllocationType)internalAllocTypeNdx;
			const VkSystemAllocationScope	scope			= (VkSystemAllocationScope)scopeNdx;
			const size_t					totalAllocated	= results.internalAllocationTotal[type][scope];

			if ((allowedLiveAllocScopeBits & (1u << scopeNdx)) == 0 &&
				totalAllocated > 0)
			{
				log << TestLog::Message << "LEAK " << (numLeaks+1) << ": " << totalAllocated
					<< " bytes of (" << type << ", " << scope << ") internal memory is still allocated"
					<< TestLog::EndMessage;
				numLeaks += 1;
			}
		}
	}

	if (numLeaks > 0)
		log << TestLog::Message << "ERROR: Found " << numLeaks << " memory leaks!" << TestLog::EndMessage;

	return results.violations.empty() && numLeaks == 0;
}

bool validateAndLog (tcu::TestLog& log, const AllocationCallbackRecorder& recorder, deUint32 allowedLiveAllocScopeBits)
{
	AllocationCallbackValidationResults validationResults;

	validateAllocationCallbacks(recorder, &validationResults);

	return checkAndLog(log, validationResults, allowedLiveAllocScopeBits);
}

size_t getLiveSystemAllocationTotal (const AllocationCallbackValidationResults& validationResults)
{
	size_t allocationTotal = 0;

	DE_ASSERT(validationResults.violations.empty());

	for (std::vector<AllocationCallbackRecord>::const_iterator alloc = validationResults.liveAllocations.begin();
		 alloc != validationResults.liveAllocations.end();
		 ++alloc)
	{
		DE_ASSERT(alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ||
				  alloc->type == AllocationCallbackRecord::TYPE_REALLOCATION);

		const size_t size		= (alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ? alloc->data.allocation.size : alloc->data.reallocation.size);
		const size_t alignment	= (alloc->type == AllocationCallbackRecord::TYPE_ALLOCATION ? alloc->data.allocation.alignment : alloc->data.reallocation.alignment);

		// Worst-case estimate: up to alignment-1 bytes of padding may be needed on top of the requested size.
		allocationTotal += size + alignment - (alignment > 0 ? 1 : 0);
	}

	for (int internalAllocationTypeNdx = 0; internalAllocationTypeNdx < VK_INTERNAL_ALLOCATION_TYPE_LAST; ++internalAllocationTypeNdx)
	{
		for (int internalAllocationScopeNdx = 0; internalAllocationScopeNdx < VK_SYSTEM_ALLOCATION_SCOPE_LAST; ++internalAllocationScopeNdx)
			allocationTotal += validationResults.internalAllocationTotal[internalAllocationTypeNdx][internalAllocationScopeNdx];
	}

	return allocationTotal;
}
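
// Illustrative sketch (not compiled): computing an upper bound for live host memory after
// recording. The results must be violation-free before getLiveSystemAllocationTotal() is
// called; the recorder and log objects are assumed to come from the surrounding test:
//
//	AllocationCallbackValidationResults results;
//
//	validateAllocationCallbacks(recorder, &results);
//
//	if (results.violations.empty())
//	{
//		const size_t liveBytes = getLiveSystemAllocationTotal(results);
//		log << tcu::TestLog::Message << "Live allocation upper bound: " << liveBytes << " bytes" << tcu::TestLog::EndMessage;
//	}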

std::ostream& operator<< (std::ostream& str, const AllocationCallbackRecord& record)
{
	switch (record.type)
	{
		case AllocationCallbackRecord::TYPE_ALLOCATION:
			str << "ALLOCATION: size=" << record.data.allocation.size
				<< ", alignment=" << record.data.allocation.alignment
				<< ", scope=" << record.data.allocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.allocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_REALLOCATION:
			str << "REALLOCATION: original=" << tcu::toHex(record.data.reallocation.original)
				<< ", size=" << record.data.reallocation.size
				<< ", alignment=" << record.data.reallocation.alignment
				<< ", scope=" << record.data.reallocation.scope
				<< ", returnedPtr=" << tcu::toHex(record.data.reallocation.returnedPtr);
			break;

		case AllocationCallbackRecord::TYPE_FREE:
			str << "FREE: mem=" << tcu::toHex(record.data.free.mem);
			break;

		case AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION:
		case AllocationCallbackRecord::TYPE_INTERNAL_FREE:
			str << "INTERNAL_" << (record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ? "ALLOCATION" : "FREE")
				<< ": size=" << record.data.internalAllocation.size
				<< ", type=" << record.data.internalAllocation.type
				<< ", scope=" << record.data.internalAllocation.scope;
			break;

		default:
			DE_ASSERT(false);
	}

	return str;
}

std::ostream& operator<< (std::ostream& str, const AllocationCallbackViolation& violation)
{
	switch (violation.reason)
	{
		case AllocationCallbackViolation::REASON_DOUBLE_FREE:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
			str << "Double free of " << tcu::toHex(violation.record.data.free.mem);
			break;
		}

		case AllocationCallbackViolation::REASON_FREE_NOT_ALLOCATED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_FREE);
			str << "Attempt to free " << tcu::toHex(violation.record.data.free.mem) << " which has not been allocated";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_NOT_ALLOCATED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
			str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has not been allocated";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_FREED_PTR:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_REALLOCATION);
			str << "Attempt to reallocate " << tcu::toHex(violation.record.data.reallocation.original) << " which has been freed";
			break;
		}

		case AllocationCallbackViolation::REASON_NEGATIVE_INTERNAL_ALLOCATION_TOTAL:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
			str << "Internal allocation total for (" << violation.record.data.internalAllocation.type << ", " << violation.record.data.internalAllocation.scope << ") is negative";
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_INTERNAL_ALLOCATION_TYPE:
		{
			DE_ASSERT(violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_ALLOCATION ||
					  violation.record.type == AllocationCallbackRecord::TYPE_INTERNAL_FREE);
			str << "Invalid internal allocation type " << tcu::toHex(violation.record.data.internalAllocation.type);
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_ALLOCATION_SCOPE:
		{
			str << "Invalid allocation scope";
			break;
		}

		case AllocationCallbackViolation::REASON_INVALID_ALIGNMENT:
		{
			str << "Invalid alignment";
			break;
		}

		case AllocationCallbackViolation::REASON_REALLOC_DIFFERENT_ALIGNMENT:
		{
			str << "Reallocation with different alignment";
			break;
		}

		default:
			DE_ASSERT(false);
	}

	return str;
}

} // vk