/* Copyright (c) 2015-2016 The Khronos Group Inc.
* Copyright (c) 2015-2016 Valve Corporation
* Copyright (c) 2015-2016 LunarG, Inc.
* Copyright (C) 2015-2016 Google Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Cody Northrop <[email protected]>
* Author: Michael Lentine <[email protected]>
* Author: Tobin Ehlis <[email protected]>
* Author: Chia-I Wu <[email protected]>
* Author: Chris Forbes <[email protected]>
* Author: Mark Lobodzinski <[email protected]>
* Author: Ian Elliott <[email protected]>
*/
// Allow use of STL min and max functions in Windows
#define NOMINMAX
// Turn on mem_tracker merged code
#define MTMERGESOURCE 1
#include <SPIRV/spirv.hpp>
#include <algorithm>
#include <assert.h>
#include <iostream>
#include <list>
#include <map>
#include <mutex>
#include <set>
//#include <memory>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <string>
#include <tuple>
#include "vk_loader_platform.h"
#include "vk_dispatch_table_helper.h"
#include "vk_struct_string_helper_cpp.h"
#if defined(__GNUC__)
#pragma GCC diagnostic ignored "-Wwrite-strings"
#endif
#if defined(__GNUC__)
#pragma GCC diagnostic warning "-Wwrite-strings"
#endif
#include "vk_struct_size_helper.h"
#include "core_validation.h"
#include "vk_layer_table.h"
#include "vk_layer_data.h"
#include "vk_layer_extension_utils.h"
#include "vk_layer_utils.h"
#include "spirv-tools/libspirv.h"
#if defined __ANDROID__
#include <android/log.h>
#define LOGCONSOLE(...) ((void)__android_log_print(ANDROID_LOG_INFO, "DS", __VA_ARGS__))
#else
#define LOGCONSOLE(...) \
{ \
printf(__VA_ARGS__); \
printf("\n"); \
}
#endif
using namespace std;
namespace core_validation {
using std::unordered_map;
using std::unordered_set;
// WSI Image Objects bypass usual Image Object creation methods. A special Memory
// Object value will be used to identify them internally.
static const VkDeviceMemory MEMTRACKER_SWAP_CHAIN_IMAGE_KEY = (VkDeviceMemory)(-1);
struct devExts {
bool wsi_enabled;
unordered_map<VkSwapchainKHR, unique_ptr<SWAPCHAIN_NODE>> swapchainMap;
unordered_map<VkImage, VkSwapchainKHR> imageToSwapchainMap;
};
// fwd decls
struct shader_module;
// TODO : Split this into separate structs for instance and device level data?
struct layer_data {
VkInstance instance;
unique_ptr<INSTANCE_STATE> instance_state;
debug_report_data *report_data;
std::vector<VkDebugReportCallbackEXT> logging_callback;
VkLayerDispatchTable *device_dispatch_table;
VkLayerInstanceDispatchTable *instance_dispatch_table;
devExts device_extensions;
unordered_set<VkQueue> queues; // All queues under given device
// Vector indices correspond to queueFamilyIndex
vector<unique_ptr<VkQueueFamilyProperties>> queue_family_properties;
// Global set of all cmdBuffers that are inFlight on this device
unordered_set<VkCommandBuffer> globalInFlightCmdBuffers;
// Layer specific data
unordered_map<VkSampler, unique_ptr<SAMPLER_NODE>> samplerMap;
unordered_map<VkImageView, unique_ptr<VkImageViewCreateInfo>> imageViewMap;
unordered_map<VkImage, unique_ptr<IMAGE_NODE>> imageMap;
unordered_map<VkBufferView, unique_ptr<VkBufferViewCreateInfo>> bufferViewMap;
unordered_map<VkBuffer, unique_ptr<BUFFER_NODE>> bufferMap;
unordered_map<VkPipeline, PIPELINE_NODE *> pipelineMap;
unordered_map<VkCommandPool, COMMAND_POOL_NODE> commandPoolMap;
unordered_map<VkDescriptorPool, DESCRIPTOR_POOL_NODE *> descriptorPoolMap;
unordered_map<VkDescriptorSet, cvdescriptorset::DescriptorSet *> setMap;
unordered_map<VkDescriptorSetLayout, cvdescriptorset::DescriptorSetLayout *> descriptorSetLayoutMap;
unordered_map<VkPipelineLayout, PIPELINE_LAYOUT_NODE> pipelineLayoutMap;
unordered_map<VkDeviceMemory, unique_ptr<DEVICE_MEM_INFO>> memObjMap;
unordered_map<VkFence, FENCE_NODE> fenceMap;
unordered_map<VkQueue, QUEUE_NODE> queueMap;
unordered_map<VkEvent, EVENT_NODE> eventMap;
unordered_map<QueryObject, bool> queryToStateMap;
unordered_map<VkQueryPool, QUERY_POOL_NODE> queryPoolMap;
unordered_map<VkSemaphore, SEMAPHORE_NODE> semaphoreMap;
unordered_map<VkCommandBuffer, GLOBAL_CB_NODE *> commandBufferMap;
unordered_map<VkFramebuffer, unique_ptr<FRAMEBUFFER_NODE>> frameBufferMap;
unordered_map<VkImage, vector<ImageSubresourcePair>> imageSubresourceMap;
unordered_map<ImageSubresourcePair, IMAGE_LAYOUT_NODE> imageLayoutMap;
unordered_map<VkRenderPass, RENDER_PASS_NODE *> renderPassMap;
unordered_map<VkShaderModule, unique_ptr<shader_module>> shaderModuleMap;
VkDevice device;
// Device specific data
PHYS_DEV_PROPERTIES_NODE phys_dev_properties;
VkPhysicalDeviceMemoryProperties phys_dev_mem_props;
VkPhysicalDeviceFeatures physical_device_features;
unique_ptr<PHYSICAL_DEVICE_STATE> physical_device_state;
layer_data()
: instance_state(nullptr), report_data(nullptr), device_dispatch_table(nullptr), instance_dispatch_table(nullptr),
device_extensions(), device(VK_NULL_HANDLE), phys_dev_properties{}, phys_dev_mem_props{}, physical_device_features{},
physical_device_state(nullptr){};
};
// TODO : Do we need to guard access to layer_data_map w/ lock?
static unordered_map<void *, layer_data *> layer_data_map;
static const VkLayerProperties global_layer = {
"VK_LAYER_LUNARG_core_validation", VK_LAYER_API_VERSION, 1, "LunarG Validation Layer",
};
template <class TCreateInfo> void ValidateLayerOrdering(const TCreateInfo &createInfo) {
bool foundLayer = false;
for (uint32_t i = 0; i < createInfo.enabledLayerCount; ++i) {
if (!strcmp(createInfo.ppEnabledLayerNames[i], global_layer.layerName)) {
foundLayer = true;
}
// This has to be logged to console as we don't have a callback at this point.
if (!foundLayer && !strcmp(createInfo.ppEnabledLayerNames[0], "VK_LAYER_GOOGLE_unique_objects")) {
LOGCONSOLE("Cannot activate layer VK_LAYER_GOOGLE_unique_objects prior to activating %s.",
global_layer.layerName);
}
}
}
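// Illustrative sketch (not part of the layer itself): an application enabling both layers must
// list VK_LAYER_LUNARG_core_validation before VK_LAYER_GOOGLE_unique_objects so the check above
// passes. The instance-creation snippet below is an assumed, application-side usage example.
#if 0
static VkResult ExampleCreateInstanceWithOrderedLayers(VkInstance *out_instance) {
    const char *layers[] = {"VK_LAYER_LUNARG_core_validation", "VK_LAYER_GOOGLE_unique_objects"};
    VkApplicationInfo app_info = {};
    app_info.sType = VK_STRUCTURE_TYPE_APPLICATION_INFO;
    app_info.apiVersion = VK_MAKE_VERSION(1, 0, 0);
    VkInstanceCreateInfo ci = {};
    ci.sType = VK_STRUCTURE_TYPE_INSTANCE_CREATE_INFO;
    ci.pApplicationInfo = &app_info;
    ci.enabledLayerCount = 2;          // core_validation first, unique_objects second
    ci.ppEnabledLayerNames = layers;
    return vkCreateInstance(&ci, nullptr, out_instance);
}
#endif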
// Code imported from shader_checker
static void build_def_index(shader_module *);
// A forward iterator over spirv instructions. Provides easy access to len, opcode, and content words
// without the caller needing to care too much about the physical SPIRV module layout.
struct spirv_inst_iter {
std::vector<uint32_t>::const_iterator zero;
std::vector<uint32_t>::const_iterator it;
uint32_t len() {
auto result = *it >> 16;
assert(result > 0);
return result;
}
uint32_t opcode() { return *it & 0x0ffffu; }
uint32_t const &word(unsigned n) {
assert(n < len());
return it[n];
}
uint32_t offset() { return (uint32_t)(it - zero); }
spirv_inst_iter() {}
spirv_inst_iter(std::vector<uint32_t>::const_iterator zero, std::vector<uint32_t>::const_iterator it) : zero(zero), it(it) {}
bool operator==(spirv_inst_iter const &other) { return it == other.it; }
bool operator!=(spirv_inst_iter const &other) { return it != other.it; }
spirv_inst_iter operator++(int) { /* x++ */
spirv_inst_iter ii = *this;
it += len();
return ii;
}
spirv_inst_iter operator++() { /* ++x; */
it += len();
return *this;
}
/* The iterator and the value are the same thing. */
spirv_inst_iter &operator*() { return *this; }
spirv_inst_iter const &operator*() const { return *this; }
};
struct shader_module {
/* the spirv image itself */
vector<uint32_t> words;
/* a mapping of <id> to the first word of its def. this is useful because walking type
* trees, constant expressions, etc requires jumping all over the instruction stream.
*/
unordered_map<unsigned, unsigned> def_index;
shader_module(VkShaderModuleCreateInfo const *pCreateInfo)
: words((uint32_t *)pCreateInfo->pCode, (uint32_t *)pCreateInfo->pCode + pCreateInfo->codeSize / sizeof(uint32_t)),
def_index() {
build_def_index(this);
}
/* expose begin() / end() to enable range-based for */
spirv_inst_iter begin() const { return spirv_inst_iter(words.begin(), words.begin() + 5); } /* first insn */
spirv_inst_iter end() const { return spirv_inst_iter(words.begin(), words.end()); } /* just past last insn */
/* given an offset into the module, produce an iterator there. */
spirv_inst_iter at(unsigned offset) const { return spirv_inst_iter(words.begin(), words.begin() + offset); }
/* gets an iterator to the definition of an id */
spirv_inst_iter get_def(unsigned id) const {
auto it = def_index.find(id);
if (it == def_index.end()) {
return end();
}
return at(it->second);
}
};
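// Illustrative sketch (assumed helper, not used elsewhere in this file): walking a shader_module
// with spirv_inst_iter. The range-based for visits each instruction once; opcode() inspects it,
// and get_def() can then be used to jump from a result <id> to its defining instruction.
#if 0
static unsigned count_entry_points(shader_module const &src) {
    unsigned count = 0;
    for (auto insn : src) {                      // begin()/end() skip the 5-word SPIR-V header
        if (insn.opcode() == spv::OpEntryPoint) ++count;
    }
    return count;
}
#endif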
// TODO : This can be much smarter, using separate locks for separate global data
static std::mutex global_lock;
// Return ImageViewCreateInfo ptr for specified imageView or else NULL
VkImageViewCreateInfo *getImageViewData(const layer_data *dev_data, VkImageView image_view) {
auto iv_it = dev_data->imageViewMap.find(image_view);
if (iv_it == dev_data->imageViewMap.end()) {
return nullptr;
}
return iv_it->second.get();
}
// Return sampler node ptr for specified sampler or else NULL
SAMPLER_NODE *getSamplerNode(const layer_data *dev_data, VkSampler sampler) {
auto sampler_it = dev_data->samplerMap.find(sampler);
if (sampler_it == dev_data->samplerMap.end()) {
return nullptr;
}
return sampler_it->second.get();
}
// Return image node ptr for specified image or else NULL
IMAGE_NODE *getImageNode(const layer_data *dev_data, VkImage image) {
auto img_it = dev_data->imageMap.find(image);
if (img_it == dev_data->imageMap.end()) {
return nullptr;
}
return img_it->second.get();
}
// Return buffer node ptr for specified buffer or else NULL
BUFFER_NODE *getBufferNode(const layer_data *dev_data, VkBuffer buffer) {
auto buff_it = dev_data->bufferMap.find(buffer);
if (buff_it == dev_data->bufferMap.end()) {
return nullptr;
}
return buff_it->second.get();
}
// Return swapchain node for specified swapchain or else NULL
SWAPCHAIN_NODE *getSwapchainNode(const layer_data *dev_data, VkSwapchainKHR swapchain) {
auto swp_it = dev_data->device_extensions.swapchainMap.find(swapchain);
if (swp_it == dev_data->device_extensions.swapchainMap.end()) {
return nullptr;
}
return swp_it->second.get();
}
// Return swapchain for specified image or else NULL
VkSwapchainKHR getSwapchainFromImage(const layer_data *dev_data, VkImage image) {
auto img_it = dev_data->device_extensions.imageToSwapchainMap.find(image);
if (img_it == dev_data->device_extensions.imageToSwapchainMap.end()) {
return VK_NULL_HANDLE;
}
return img_it->second;
}
// Return buffer view create info ptr for specified bufferView or else NULL
VkBufferViewCreateInfo *getBufferViewInfo(const layer_data *my_data, VkBufferView buffer_view) {
auto bv_it = my_data->bufferViewMap.find(buffer_view);
if (bv_it == my_data->bufferViewMap.end()) {
return nullptr;
}
return bv_it->second.get();
}
FENCE_NODE *getFenceNode(layer_data *dev_data, VkFence fence) {
auto it = dev_data->fenceMap.find(fence);
if (it == dev_data->fenceMap.end()) {
return nullptr;
}
return &it->second;
}
EVENT_NODE *getEventNode(layer_data *dev_data, VkEvent event) {
auto it = dev_data->eventMap.find(event);
if (it == dev_data->eventMap.end()) {
return nullptr;
}
return &it->second;
}
QUERY_POOL_NODE *getQueryPoolNode(layer_data *dev_data, VkQueryPool query_pool) {
auto it = dev_data->queryPoolMap.find(query_pool);
if (it == dev_data->queryPoolMap.end()) {
return nullptr;
}
return &it->second;
}
QUEUE_NODE *getQueueNode(layer_data *dev_data, VkQueue queue) {
auto it = dev_data->queueMap.find(queue);
if (it == dev_data->queueMap.end()) {
return nullptr;
}
return &it->second;
}
SEMAPHORE_NODE *getSemaphoreNode(layer_data *dev_data, VkSemaphore semaphore) {
auto it = dev_data->semaphoreMap.find(semaphore);
if (it == dev_data->semaphoreMap.end()) {
return nullptr;
}
return &it->second;
}
COMMAND_POOL_NODE *getCommandPoolNode(layer_data *dev_data, VkCommandPool pool) {
auto it = dev_data->commandPoolMap.find(pool);
if (it == dev_data->commandPoolMap.end()) {
return nullptr;
}
return &it->second;
}
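// The getters above all follow the same find-or-return-null pattern. A generic helper along these
// lines (illustrative sketch only, not referenced by the code below) could collapse the ones that
// store nodes by value; the unique_ptr-backed maps would additionally need a .get() on the result.
#if 0
template <typename MapType>
static typename MapType::mapped_type *lookup_or_null(MapType &map, typename MapType::key_type const &key) {
    auto it = map.find(key);
    return (it == map.end()) ? nullptr : &it->second;
}
// e.g. FENCE_NODE *fence_node = lookup_or_null(dev_data->fenceMap, fence);
#endif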
static VkDeviceMemory *get_object_mem_binding(layer_data *my_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT: {
auto img_node = getImageNode(my_data, VkImage(handle));
if (img_node)
return &img_node->mem;
break;
}
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT: {
auto buff_node = getBufferNode(my_data, VkBuffer(handle));
if (buff_node)
return &buff_node->mem;
break;
}
default:
break;
}
return nullptr;
}
// prototype
static GLOBAL_CB_NODE *getCBNode(layer_data const *, const VkCommandBuffer);
// Helper function to validate correct usage bits set for buffers or images
// Verify that (actual & desired) flags != 0 or,
// if strict is true, verify that (actual & desired) flags == desired
// In case of error, report it via dbg callbacks
static bool validate_usage_flags(layer_data *my_data, VkFlags actual, VkFlags desired, VkBool32 strict,
uint64_t obj_handle, VkDebugReportObjectTypeEXT obj_type, char const *ty_str,
char const *func_name, char const *usage_str) {
bool correct_usage = false;
bool skip_call = false;
if (strict)
correct_usage = ((actual & desired) == desired);
else
correct_usage = ((actual & desired) != 0);
if (!correct_usage) {
skip_call = log_msg(my_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, obj_type, obj_handle, __LINE__,
MEMTRACK_INVALID_USAGE_FLAG, "MEM", "Invalid usage flag for %s 0x%" PRIxLEAST64
" used by %s. In this case, %s should have %s set during creation.",
ty_str, obj_handle, func_name, ty_str, usage_str);
}
return skip_call;
}
// Helper function to validate usage flags for images
// For given image_node send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateImageUsageFlags(layer_data *dev_data, IMAGE_NODE const *image_node, VkFlags desired, VkBool32 strict,
char const *func_name, char const *usage_string) {
return validate_usage_flags(dev_data, image_node->createInfo.usage, desired, strict,
reinterpret_cast<const uint64_t &>(image_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
"image", func_name, usage_string);
}
// Helper function to validate usage flags for buffers
// For given buffer_node send actual vs. desired usage off to helper above where
// an error will be flagged if usage is not correct
static bool ValidateBufferUsageFlags(layer_data *dev_data, BUFFER_NODE const *buffer_node, VkFlags desired, VkBool32 strict,
char const *func_name, char const *usage_string) {
return validate_usage_flags(dev_data, buffer_node->createInfo.usage, desired, strict,
reinterpret_cast<const uint64_t &>(buffer_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
"buffer", func_name, usage_string);
}
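// Illustrative sketch of how a command-recording check might use these helpers (assumed call site,
// not taken from this file): before recording a buffer-to-buffer copy, require that the source and
// destination buffers were created with the matching TRANSFER usage bits.
#if 0
static bool ExampleValidateCopyBufferUsage(layer_data *dev_data, BUFFER_NODE const *src, BUFFER_NODE const *dst) {
    bool skip_call = false;
    // strict == true: the specific bit must be present, not merely overlap with other usage flags
    skip_call |= ValidateBufferUsageFlags(dev_data, src, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
                                          "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
    skip_call |= ValidateBufferUsageFlags(dev_data, dst, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
                                          "vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
    return skip_call;
}
#endif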
// Return ptr to info in map container containing mem, or NULL if not found
// Calls to this function should be wrapped in mutex
DEVICE_MEM_INFO *getMemObjInfo(const layer_data *dev_data, const VkDeviceMemory mem) {
auto mem_it = dev_data->memObjMap.find(mem);
if (mem_it == dev_data->memObjMap.end()) {
return NULL;
}
return mem_it->second.get();
}
static void add_mem_obj_info(layer_data *my_data, void *object, const VkDeviceMemory mem,
const VkMemoryAllocateInfo *pAllocateInfo) {
assert(object != NULL);
my_data->memObjMap[mem] = unique_ptr<DEVICE_MEM_INFO>(new DEVICE_MEM_INFO(object, mem, pAllocateInfo));
}
// For given bound_object_handle, bound to given mem allocation, verify that the range for the bound object is valid
static bool ValidateMemoryIsValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t bound_object_handle,
const char *functionName) {
DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
if (mem_info) {
if (!mem_info->bound_ranges[bound_object_handle].valid) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
"%s: Cannot read invalid memory 0x%" PRIx64 ", please fill the memory before using.", functionName,
reinterpret_cast<uint64_t &>(mem));
}
}
return false;
}
// For given image_node
// If mem is special swapchain key, then verify that image_node valid member is true
// Else verify that the image's bound memory range is valid
static bool ValidateImageMemoryIsValid(layer_data *dev_data, IMAGE_NODE *image_node, const char *functionName) {
if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
if (!image_node->valid) {
return log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(image_node->mem), __LINE__, MEMTRACK_INVALID_USAGE_FLAG, "MEM",
"%s: Cannot read invalid swapchain image 0x%" PRIx64 ", please fill the memory before using.",
functionName, reinterpret_cast<uint64_t &>(image_node->image));
}
} else {
return ValidateMemoryIsValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), functionName);
}
return false;
}
// For given buffer_node, verify that the range it's bound to is valid
static bool ValidateBufferMemoryIsValid(layer_data *dev_data, BUFFER_NODE *buffer_node, const char *functionName) {
return ValidateMemoryIsValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), functionName);
}
// For the given memory allocation, set the range bound by the given handle object to the valid param value
static void SetMemoryValid(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle, bool valid) {
DEVICE_MEM_INFO *mem_info = getMemObjInfo(dev_data, mem);
if (mem_info) {
mem_info->bound_ranges[handle].valid = valid;
}
}
// For given image node
// If mem is special swapchain key, then set entire image_node to valid param value
// Else set the image's bound memory range to valid param value
static void SetImageMemoryValid(layer_data *dev_data, IMAGE_NODE *image_node, bool valid) {
if (image_node->mem == MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
image_node->valid = valid;
} else {
SetMemoryValid(dev_data, image_node->mem, reinterpret_cast<uint64_t &>(image_node->image), valid);
}
}
// For given buffer node set the buffer's bound memory range to valid param value
static void SetBufferMemoryValid(layer_data *dev_data, BUFFER_NODE *buffer_node, bool valid) {
SetMemoryValid(dev_data, buffer_node->mem, reinterpret_cast<uint64_t &>(buffer_node->buffer), valid);
}
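// Illustrative sketch (assumed call sites, not taken from this file) of how the valid-memory
// tracking above is typically paired: a command that reads an image checks that its bound memory
// has been filled, and a command that writes an image marks its bound memory valid for later reads.
#if 0
static bool ExampleRecordImageReadThenWrite(layer_data *dev_data, IMAGE_NODE *src_image, IMAGE_NODE *dst_image,
                                            const char *api_name) {
    bool skip_call = false;
    // Reading from src_image requires that its bound memory range is valid.
    skip_call |= ValidateImageMemoryIsValid(dev_data, src_image, api_name);
    // Writing to dst_image makes its bound memory range valid.
    SetImageMemoryValid(dev_data, dst_image, true);
    return skip_call;
}
#endif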
// Find CB Info and add mem reference to list container
// Find Mem Obj Info and add CB reference to list container
static bool update_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb, const VkDeviceMemory mem,
const char *apiName) {
bool skip_call = false;
// Skip validation if this image was created through WSI
if (mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
if (pMemInfo) {
pMemInfo->command_buffer_bindings.insert(cb);
// Now update CBInfo's Mem reference list
GLOBAL_CB_NODE *pCBNode = getCBNode(dev_data, cb);
// TODO: keep track of all destroyed CBs so we know if this is a stale or simply invalid object
if (pCBNode) {
pCBNode->memObjs.insert(mem);
}
}
}
return skip_call;
}
// Create binding link between given sampler and command buffer node
void AddCommandBufferBindingSampler(GLOBAL_CB_NODE *cb_node, SAMPLER_NODE *sampler_node) {
sampler_node->cb_bindings.insert(cb_node);
cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(sampler_node->sampler), VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT});
}
// Create binding link between given image node and command buffer node
void AddCommandBufferBindingImage(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, IMAGE_NODE *img_node) {
// Skip validation if this image was created through WSI
if (img_node->mem != MEMTRACKER_SWAP_CHAIN_IMAGE_KEY) {
// First update CB binding in MemObj mini CB list
DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, img_node->mem);
if (pMemInfo) {
pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
// Now update CBInfo's Mem reference list
cb_node->memObjs.insert(img_node->mem);
}
cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(img_node->image), VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT});
}
// Now update cb binding for image
img_node->cb_bindings.insert(cb_node);
}
// Create binding link between given buffer node and command buffer node
void AddCommandBufferBindingBuffer(const layer_data *dev_data, GLOBAL_CB_NODE *cb_node, BUFFER_NODE *buff_node) {
// First update CB binding in MemObj mini CB list
DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, buff_node->mem);
if (pMemInfo) {
pMemInfo->command_buffer_bindings.insert(cb_node->commandBuffer);
// Now update CBInfo's Mem reference list
cb_node->memObjs.insert(buff_node->mem);
cb_node->object_bindings.insert({reinterpret_cast<uint64_t &>(buff_node->buffer), VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT});
}
// Now update cb binding for buffer
buff_node->cb_bindings.insert(cb_node);
}
// For every mem obj bound to particular CB, free bindings related to that CB
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, GLOBAL_CB_NODE *pCBNode) {
if (pCBNode) {
if (pCBNode->memObjs.size() > 0) {
for (auto mem : pCBNode->memObjs) {
DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
if (pInfo) {
pInfo->command_buffer_bindings.erase(pCBNode->commandBuffer);
}
}
pCBNode->memObjs.clear();
}
pCBNode->validate_functions.clear();
}
}
// Overloaded call to above function when GLOBAL_CB_NODE has not already been looked-up
static void clear_cmd_buf_and_mem_references(layer_data *dev_data, const VkCommandBuffer cb) {
clear_cmd_buf_and_mem_references(dev_data, getCBNode(dev_data, cb));
}
// For given MemObjInfo, report Obj & CB bindings
static bool reportMemReferencesAndCleanUp(layer_data *dev_data, DEVICE_MEM_INFO *pMemObjInfo) {
bool skip_call = false;
size_t cmdBufRefCount = pMemObjInfo->command_buffer_bindings.size();
size_t objRefCount = pMemObjInfo->obj_bindings.size();
if ((pMemObjInfo->command_buffer_bindings.size()) != 0) {
skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
(uint64_t)pMemObjInfo->mem, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
"Attempting to free memory object 0x%" PRIxLEAST64 " which still contains " PRINTF_SIZE_T_SPECIFIER
" references",
(uint64_t)pMemObjInfo->mem, (cmdBufRefCount + objRefCount));
}
if (cmdBufRefCount > 0 && pMemObjInfo->command_buffer_bindings.size() > 0) {
for (auto cb : pMemObjInfo->command_buffer_bindings) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_COMMAND_BUFFER_EXT,
(uint64_t)cb, __LINE__, MEMTRACK_FREED_MEM_REF, "MEM",
"Command Buffer 0x%p still has a reference to mem obj 0x%" PRIxLEAST64, cb, (uint64_t)pMemObjInfo->mem);
}
// Clear the list of hanging references
pMemObjInfo->command_buffer_bindings.clear();
}
if (objRefCount > 0 && pMemObjInfo->obj_bindings.size() > 0) {
for (auto obj : pMemObjInfo->obj_bindings) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, obj.type, obj.handle, __LINE__,
MEMTRACK_FREED_MEM_REF, "MEM", "VK Object 0x%" PRIxLEAST64 " still has a reference to mem obj 0x%" PRIxLEAST64,
obj.handle, (uint64_t)pMemObjInfo->mem);
}
// Clear the list of hanging references
pMemObjInfo->obj_bindings.clear();
}
return skip_call;
}
static bool freeMemObjInfo(layer_data *dev_data, void *object, VkDeviceMemory mem, bool internal) {
bool skip_call = false;
// Parse global list to find info w/ mem
DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
if (pInfo) {
// TODO: Verify against Valid Use section
// Clear any CB bindings for completed CBs
// TODO : Is there a better place to do this?
assert(pInfo->object != VK_NULL_HANDLE);
// clear_cmd_buf_and_mem_references removes elements from
// pInfo->command_buffer_bindings -- this copy not needed in c++14,
// and probably not needed in practice in c++11
auto bindings = pInfo->command_buffer_bindings;
for (auto cb : bindings) {
if (!dev_data->globalInFlightCmdBuffers.count(cb)) {
clear_cmd_buf_and_mem_references(dev_data, cb);
}
}
// Now verify that no references to this mem obj remain and remove bindings
if (pInfo->command_buffer_bindings.size() || pInfo->obj_bindings.size()) {
skip_call |= reportMemReferencesAndCleanUp(dev_data, pInfo);
}
// Delete mem obj info
dev_data->memObjMap.erase(dev_data->memObjMap.find(mem));
} else if (VK_NULL_HANDLE != mem) {
// The request is to free an invalid, non-zero handle
skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
reinterpret_cast<uint64_t &>(mem), __LINE__, MEMTRACK_INVALID_MEM_OBJ, "MEM",
"Request to delete memory object 0x%" PRIxLEAST64 " not present in memory Object Map",
reinterpret_cast<uint64_t &>(mem));
}
return skip_call;
}
static const char *object_type_to_string(VkDebugReportObjectTypeEXT type) {
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
return "image";
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
return "buffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_SWAPCHAIN_KHR_EXT:
return "swapchain";
case VK_DEBUG_REPORT_OBJECT_TYPE_DESCRIPTOR_SET_EXT:
return "descriptor set";
case VK_DEBUG_REPORT_OBJECT_TYPE_FRAMEBUFFER_EXT:
return "framebuffer";
case VK_DEBUG_REPORT_OBJECT_TYPE_EVENT_EXT:
return "event";
case VK_DEBUG_REPORT_OBJECT_TYPE_QUERY_POOL_EXT:
return "query pool";
case VK_DEBUG_REPORT_OBJECT_TYPE_PIPELINE_EXT:
return "pipeline";
case VK_DEBUG_REPORT_OBJECT_TYPE_SAMPLER_EXT:
return "sampler";
default:
return "unknown";
}
}
// Remove object binding performs two tasks:
// 1. Remove ObjectInfo from MemObjInfo list container of obj bindings & free it
// 2. Clear mem binding for image/buffer by setting its handle to 0
// TODO : This only applies to Buffer, Image, and Swapchain objects now, how should it be updated/customized?
static bool clear_object_binding(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type) {
// TODO : Need to customize images/buffers/swapchains to track mem binding and clear it here appropriately
bool skip_call = false;
VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
if (pMemBinding) {
DEVICE_MEM_INFO *pMemObjInfo = getMemObjInfo(dev_data, *pMemBinding);
// TODO : Make sure this is a reasonable way to reset mem binding
*pMemBinding = VK_NULL_HANDLE;
if (pMemObjInfo) {
// This obj is bound to a memory object. Remove the reference to this object in that memory object's list,
// and set the objects memory binding pointer to NULL.
if (!pMemObjInfo->obj_bindings.erase({handle, type})) {
skip_call |=
log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
"MEM", "While trying to clear mem binding for %s obj 0x%" PRIxLEAST64
", unable to find that object referenced by mem obj 0x%" PRIxLEAST64,
object_type_to_string(type), handle, (uint64_t)pMemObjInfo->mem);
}
}
}
return skip_call;
}
// Check to see if memory was ever bound to this image
bool ValidateMemoryIsBoundToImage(const layer_data *dev_data, const IMAGE_NODE *image_node, const char *api_name) {
bool result = false;
if (0 == (static_cast<uint32_t>(image_node->createInfo.flags) & VK_IMAGE_CREATE_SPARSE_BINDING_BIT)) {
if (0 == image_node->mem) {
result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT,
reinterpret_cast<const uint64_t &>(image_node->image), __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
"%s: VkImage object 0x%" PRIxLEAST64 " used without first calling vkBindImageMemory.", api_name,
reinterpret_cast<const uint64_t &>(image_node->image));
}
}
return result;
}
// Check to see if memory was bound to this buffer
bool ValidateMemoryIsBoundToBuffer(const layer_data *dev_data, const BUFFER_NODE *buffer_node, const char *api_name) {
bool result = false;
if (0 == (static_cast<uint32_t>(buffer_node->createInfo.flags) & VK_BUFFER_CREATE_SPARSE_BINDING_BIT)) {
if (0 == buffer_node->mem) {
result = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT,
reinterpret_cast<const uint64_t &>(buffer_node->buffer), __LINE__, MEMTRACK_OBJECT_NOT_BOUND, "MEM",
"%s: VkBuffer object 0x%" PRIxLEAST64 " used without first calling vkBindBufferMemory.", api_name,
reinterpret_cast<const uint64_t &>(buffer_node->buffer));
}
}
return result;
}
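// Illustrative sketch (assumed call site): commands that consume a buffer or image should first
// confirm that memory was actually bound, e.g. before recording a buffer-to-image copy.
#if 0
static bool ExampleCheckCopyBufferToImageBound(const layer_data *dev_data, const BUFFER_NODE *src_buffer,
                                               const IMAGE_NODE *dst_image) {
    bool skip_call = false;
    skip_call |= ValidateMemoryIsBoundToBuffer(dev_data, src_buffer, "vkCmdCopyBufferToImage()");
    skip_call |= ValidateMemoryIsBoundToImage(dev_data, dst_image, "vkCmdCopyBufferToImage()");
    return skip_call;
}
#endif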
// For NULL mem case, output an error
// Make sure given object is in global object map
// If a previous binding existed, output validation error
// Otherwise, add reference from objectInfo to memoryInfo
// Add reference off of objInfo
static bool set_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
VkDebugReportObjectTypeEXT type, const char *apiName) {
bool skip_call = false;
// Handle NULL case separately, just clear previous binding & decrement reference
if (mem == VK_NULL_HANDLE) {
// TODO: Verify against Valid Use section of spec.
skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_MEM_OBJ,
"MEM", "In %s, attempting to Bind Obj(0x%" PRIxLEAST64 ") to NULL", apiName, handle);
} else {
VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
assert(pMemBinding);
DEVICE_MEM_INFO *pMemInfo = getMemObjInfo(dev_data, mem);
if (pMemInfo) {
DEVICE_MEM_INFO *pPrevBinding = getMemObjInfo(dev_data, *pMemBinding);
if (pPrevBinding != NULL) {
skip_call |= log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT,
VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, (uint64_t)mem, __LINE__, MEMTRACK_REBIND_OBJECT,
"MEM", "In %s, attempting to bind memory (0x%" PRIxLEAST64 ") to object (0x%" PRIxLEAST64
") which has already been bound to mem object 0x%" PRIxLEAST64,
apiName, (uint64_t)mem, handle, (uint64_t)pPrevBinding->mem);
} else {
pMemInfo->obj_bindings.insert({handle, type});
// For image objects, make sure default memory state is correctly set
// TODO : What's the best/correct way to handle this?
if (VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT == type) {
auto const image_node = getImageNode(dev_data, VkImage(handle));
if (image_node) {
VkImageCreateInfo ici = image_node->createInfo;
if (ici.usage & (VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
// TODO:: More memory state transition stuff.
}
}
}
*pMemBinding = mem;
}
}
}
return skip_call;
}
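// Illustrative sketch (assumed, simplified intercept; the real entry point does more bookkeeping):
// a vkBindBufferMemory hook would record the new binding via set_mem_binding under the global lock.
#if 0
static bool ExampleRecordBindBufferMemory(layer_data *dev_data, VkBuffer buffer, VkDeviceMemory mem) {
    std::lock_guard<std::mutex> lock(global_lock);
    return set_mem_binding(dev_data, mem, reinterpret_cast<uint64_t &>(buffer),
                           VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT, "vkBindBufferMemory");
}
#endif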
// For NULL mem case, clear any previous binding
// Else make sure given object is in its object map,
// add references between objectInfo and memoryInfo,
// and update the object's mem binding
// Return true if a validation error was logged, false otherwise
static bool set_sparse_mem_binding(layer_data *dev_data, VkDeviceMemory mem, uint64_t handle,
VkDebugReportObjectTypeEXT type, const char *apiName) {
bool skip_call = VK_FALSE;
// Handle NULL case separately, just clear previous binding & decrement reference
if (mem == VK_NULL_HANDLE) {
skip_call = clear_object_binding(dev_data, handle, type);
} else {
VkDeviceMemory *pMemBinding = get_object_mem_binding(dev_data, handle, type);
assert(pMemBinding);
DEVICE_MEM_INFO *pInfo = getMemObjInfo(dev_data, mem);
if (pInfo) {
pInfo->obj_bindings.insert({handle, type});
// Need to set mem binding for this object
*pMemBinding = mem;
}
}
return skip_call;
}
// For handle of given object type, return memory binding
static bool get_mem_for_type(layer_data *dev_data, uint64_t handle, VkDebugReportObjectTypeEXT type, VkDeviceMemory *mem) {
bool skip_call = false;
*mem = VK_NULL_HANDLE;
switch (type) {
case VK_DEBUG_REPORT_OBJECT_TYPE_IMAGE_EXT:
*mem = getImageNode(dev_data, VkImage(handle))->mem;
break;
case VK_DEBUG_REPORT_OBJECT_TYPE_BUFFER_EXT:
*mem = getBufferNode(dev_data, VkBuffer(handle))->mem;
break;
default:
assert(0);
}
if (!*mem) {
skip_call = log_msg(dev_data->report_data, VK_DEBUG_REPORT_ERROR_BIT_EXT, type, handle, __LINE__, MEMTRACK_INVALID_OBJECT,
"MEM", "Trying to get mem binding for %s object 0x%" PRIxLEAST64
" but binding is NULL. Has memory been bound to this object?",
object_type_to_string(type), handle);
}
return skip_call;
}
// Print details of MemObjInfo list
static void print_mem_list(layer_data *dev_data) {
// Early out if info is not requested
if (!(dev_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
return;
}
// Just printing each msg individually for now, may want to package these into single large print
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
MEMTRACK_NONE, "MEM", "Details of Memory Object list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
dev_data->memObjMap.size());
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
MEMTRACK_NONE, "MEM", "=============================");
if (dev_data->memObjMap.size() <= 0)
return;
for (auto ii = dev_data->memObjMap.begin(); ii != dev_data->memObjMap.end(); ++ii) {
auto mem_info = (*ii).second.get();
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " ===MemObjInfo at 0x%p===", (void *)mem_info);
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " Mem object: 0x%" PRIxLEAST64, (uint64_t)(mem_info->mem));
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " Ref Count: " PRINTF_SIZE_T_SPECIFIER,
mem_info->command_buffer_bindings.size() + mem_info->obj_bindings.size());
if (0 != mem_info->alloc_info.allocationSize) {
string pAllocInfoMsg = vk_print_vkmemoryallocateinfo(&mem_info->alloc_info, "MEM(INFO): ");
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " Mem Alloc info:\n%s", pAllocInfoMsg.c_str());
} else {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " Mem Alloc info is NULL (alloc done by vkCreateSwapchainKHR())");
}
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " VK OBJECT Binding list of size " PRINTF_SIZE_T_SPECIFIER " elements:",
mem_info->obj_bindings.size());
if (mem_info->obj_bindings.size() > 0) {
for (auto obj : mem_info->obj_bindings) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
0, __LINE__, MEMTRACK_NONE, "MEM", " VK OBJECT 0x%" PRIx64, obj.handle);
}
}
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM",
" VK Command Buffer (CB) binding list of size " PRINTF_SIZE_T_SPECIFIER " elements",
mem_info->command_buffer_bindings.size());
if (mem_info->command_buffer_bindings.size() > 0) {
for (auto cb : mem_info->command_buffer_bindings) {
log_msg(dev_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT,
0, __LINE__, MEMTRACK_NONE, "MEM", " VK CB 0x%p", cb);
}
}
}
}
static void printCBList(layer_data *my_data) {
GLOBAL_CB_NODE *pCBInfo = NULL;
// Early out if info is not requested
if (!(my_data->report_data->active_flags & VK_DEBUG_REPORT_INFORMATION_BIT_EXT)) {
return;
}
log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
MEMTRACK_NONE, "MEM", "Details of CB list (of size " PRINTF_SIZE_T_SPECIFIER " elements)",
my_data->commandBufferMap.size());
log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0, __LINE__,
MEMTRACK_NONE, "MEM", "==================");
if (my_data->commandBufferMap.size() <= 0)
return;
for (auto &cb_node : my_data->commandBufferMap) {
pCBInfo = cb_node.second;
log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " CB Info (0x%p) has CB 0x%p", (void *)pCBInfo, (void *)pCBInfo->commandBuffer);
if (pCBInfo->memObjs.size() <= 0)
continue;
for (auto obj : pCBInfo->memObjs) {
log_msg(my_data->report_data, VK_DEBUG_REPORT_INFORMATION_BIT_EXT, VK_DEBUG_REPORT_OBJECT_TYPE_DEVICE_MEMORY_EXT, 0,
__LINE__, MEMTRACK_NONE, "MEM", " Mem obj 0x%" PRIx64, (uint64_t)obj);
}
}
}
// Return a string representation of CMD_TYPE enum
static string cmdTypeToString(CMD_TYPE cmd) {
switch (cmd) {
case CMD_BINDPIPELINE:
return "CMD_BINDPIPELINE";
case CMD_BINDPIPELINEDELTA:
return "CMD_BINDPIPELINEDELTA";
case CMD_SETVIEWPORTSTATE:
return "CMD_SETVIEWPORTSTATE";
case CMD_SETLINEWIDTHSTATE:
return "CMD_SETLINEWIDTHSTATE";
case CMD_SETDEPTHBIASSTATE:
return "CMD_SETDEPTHBIASSTATE";
case CMD_SETBLENDSTATE:
return "CMD_SETBLENDSTATE";
case CMD_SETDEPTHBOUNDSSTATE:
return "CMD_SETDEPTHBOUNDSSTATE";
case CMD_SETSTENCILREADMASKSTATE:
return "CMD_SETSTENCILREADMASKSTATE";
case CMD_SETSTENCILWRITEMASKSTATE:
return "CMD_SETSTENCILWRITEMASKSTATE";
case CMD_SETSTENCILREFERENCESTATE:
return "CMD_SETSTENCILREFERENCESTATE";
case CMD_BINDDESCRIPTORSETS:
return "CMD_BINDDESCRIPTORSETS";
case CMD_BINDINDEXBUFFER:
return "CMD_BINDINDEXBUFFER";
case CMD_BINDVERTEXBUFFER:
return "CMD_BINDVERTEXBUFFER";
case CMD_DRAW:
return "CMD_DRAW";
case CMD_DRAWINDEXED:
return "CMD_DRAWINDEXED";
case CMD_DRAWINDIRECT:
return "CMD_DRAWINDIRECT";
case CMD_DRAWINDEXEDINDIRECT:
return "CMD_DRAWINDEXEDINDIRECT";
case CMD_DISPATCH:
return "CMD_DISPATCH";
case CMD_DISPATCHINDIRECT:
return "CMD_DISPATCHINDIRECT";
case CMD_COPYBUFFER:
return "CMD_COPYBUFFER";
case CMD_COPYIMAGE:
return "CMD_COPYIMAGE";
case CMD_BLITIMAGE:
return "CMD_BLITIMAGE";
case CMD_COPYBUFFERTOIMAGE:
return "CMD_COPYBUFFERTOIMAGE";
case CMD_COPYIMAGETOBUFFER:
return "CMD_COPYIMAGETOBUFFER";
case CMD_CLONEIMAGEDATA:
return "CMD_CLONEIMAGEDATA";
case CMD_UPDATEBUFFER:
return "CMD_UPDATEBUFFER";
case CMD_FILLBUFFER:
return "CMD_FILLBUFFER";
case CMD_CLEARCOLORIMAGE:
return "CMD_CLEARCOLORIMAGE";
case CMD_CLEARATTACHMENTS:
return "CMD_CLEARATTACHMENTS";
case CMD_CLEARDEPTHSTENCILIMAGE:
return "CMD_CLEARDEPTHSTENCILIMAGE";
case CMD_RESOLVEIMAGE:
return "CMD_RESOLVEIMAGE";
case CMD_SETEVENT:
return "CMD_SETEVENT";
case CMD_RESETEVENT:
return "CMD_RESETEVENT";
case CMD_WAITEVENTS:
return "CMD_WAITEVENTS";
case CMD_PIPELINEBARRIER:
return "CMD_PIPELINEBARRIER";
case CMD_BEGINQUERY:
return "CMD_BEGINQUERY";
case CMD_ENDQUERY:
return "CMD_ENDQUERY";
case CMD_RESETQUERYPOOL:
return "CMD_RESETQUERYPOOL";
case CMD_COPYQUERYPOOLRESULTS:
return "CMD_COPYQUERYPOOLRESULTS";
case CMD_WRITETIMESTAMP:
return "CMD_WRITETIMESTAMP";
case CMD_INITATOMICCOUNTERS:
return "CMD_INITATOMICCOUNTERS";