diff --git a/08_HelloSwapchain/main.cpp b/08_HelloSwapchain/main.cpp index 9137fe77a..6c5e3ea82 100644 --- a/08_HelloSwapchain/main.cpp +++ b/08_HelloSwapchain/main.cpp @@ -160,6 +160,7 @@ class HelloSwapchainApp final : public examples::SimpleWindowedApplication auto window = m_winMgr->createWindow(std::move(params)); // uncomment for some nasty testing of swapchain creation! //m_winMgr->minimize(window.get()); + const_cast&>(m_surface) = CSmoothResizeSurface::create(CSurfaceVulkanWin32::create(smart_refctd_ptr(m_api),move_and_static_cast(window))); } return {{m_surface->getSurface()/*,EQF_NONE*/}}; diff --git a/61_UI/CMakeLists.txt b/61_UI/CMakeLists.txt index a34e46ce6..42425b0a8 100644 --- a/61_UI/CMakeLists.txt +++ b/61_UI/CMakeLists.txt @@ -15,4 +15,25 @@ if(NBL_BUILD_IMGUI) nbl_create_executable_project("${NBL_EXTRA_SOURCES}" "" "${NBL_INCLUDE_SERACH_DIRECTORIES}" "${NBL_LIBRARIES}" "${NBL_EXECUTABLE_PROJECT_CREATION_PCH_TARGET}") LINK_BUILTIN_RESOURCES_TO_TARGET(${EXECUTABLE_NAME} geometryCreatorSpirvBRD) -endif() \ No newline at end of file +endif() + +if(NBL_EMBED_BUILTIN_RESOURCES) + set(_BR_TARGET_ ${EXECUTABLE_NAME}_builtinResourceData) + set(RESOURCE_DIR "app_resources") + + get_filename_component(_SEARCH_DIRECTORIES_ "${CMAKE_CURRENT_SOURCE_DIR}" ABSOLUTE) + get_filename_component(_OUTPUT_DIRECTORY_SOURCE_ "${CMAKE_CURRENT_BINARY_DIR}/src" ABSOLUTE) + get_filename_component(_OUTPUT_DIRECTORY_HEADER_ "${CMAKE_CURRENT_BINARY_DIR}/include" ABSOLUTE) + + file(GLOB_RECURSE BUILTIN_RESOURCE_FILES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}/${RESOURCE_DIR}" CONFIGURE_DEPENDS "${CMAKE_CURRENT_SOURCE_DIR}/${RESOURCE_DIR}/*") + foreach(RES_FILE ${BUILTIN_RESOURCE_FILES}) + LIST_BUILTIN_RESOURCE(RESOURCES_TO_EMBED "${RES_FILE}") + endforeach() + + ADD_CUSTOM_BUILTIN_RESOURCES(${_BR_TARGET_} RESOURCES_TO_EMBED "${_SEARCH_DIRECTORIES_}" "${RESOURCE_DIR}" "nbl::this_example::builtin" "${_OUTPUT_DIRECTORY_HEADER_}" "${_OUTPUT_DIRECTORY_SOURCE_}") + + LINK_BUILTIN_RESOURCES_TO_TARGET(${EXECUTABLE_NAME} ${_BR_TARGET_}) +endif() + +add_dependencies(${EXECUTABLE_NAME} argparse) +target_include_directories(${EXECUTABLE_NAME} PUBLIC $) \ No newline at end of file diff --git a/61_UI/app_resources/cameras.json b/61_UI/app_resources/cameras.json new file mode 100644 index 000000000..7e76aef83 --- /dev/null +++ b/61_UI/app_resources/cameras.json @@ -0,0 +1,162 @@ +{ + "cameras": [ + { + "type": "FPS", + "position": [-2.438, 1.995, -3.130], + "orientation": [0.195, 0.311, -0.065, 0.928] + }, + { + "type": "Orbit", + "position": [-2.017, 0.386, 0.684], + "target": [0, 0, 0] + }, + { + "type": "Free", + "position": [2.116, 0.826, 1.152], + "orientation": [0.095, -0.835, 0.152, 0.521] + } + ], + "projections": [ + { + "type": "perspective", + "fov": 40.0, + "zNear": 0.1, + "zFar": 110.0 + }, + { + "type": "orthographic", + "orthoWidth": 16.0, + "zNear": 0.1, + "zFar": 110.0 + } + ], + "viewports": [ + { + "projection": 0, + "controllers": { + "keyboard": 0, + "mouse": 0 + } + }, + { + "projection": 1, + "controllers": { + "keyboard": 1, + "mouse": 0 + } + }, + { + "projection": 0, + "controllers": { + "keyboard": 2 + } + }, + { + "projection": 1, + "controllers": { + "keyboard": 1 + } + }, + { + "projection": 0, + "controllers": { + "keyboard": 3, + "mouse": 1 + } + }, + { + "projection": 1, + "controllers": { + "keyboard": 3, + "mouse": 1 + } + } + ], + "planars": [ + { + "camera": 0, + "viewports": [0, 1] + }, + { + "camera": 1, + "viewports": [4, 5] + }, + { + "camera": 2, + "viewports": [2, 3] + } + ], + 
"controllers": { + "keyboard": [ + { + "mappings": { + "W": "MoveForward", + "S": "MoveBackward", + "A": "MoveLeft", + "D": "MoveRight", + "I": "TiltDown", + "K": "TiltUp", + "J": "PanLeft", + "L": "PanRight" + } + }, + { + "mappings": { + "W": "MoveUp", + "S": "MoveDown", + "A": "MoveLeft", + "D": "MoveRight" + } + }, + { + "mappings": { + "W": "MoveForward", + "S": "MoveBackward", + "A": "MoveLeft", + "D": "MoveRight", + "E": "MoveUp", + "Q": "MoveDown", + "I": "TiltDown", + "K": "TiltUp", + "J": "PanLeft", + "L": "PanRight", + "U": "RollRight", + "O": "RollLeft" + } + }, + { + "mappings": { + "W": "MoveRight", + "S": "MoveLeft", + "A": "MoveDown", + "D": "MoveUp", + "E": "MoveForward", + "Q": "MoveBackward" + } + } + ], + "mouse": [ + { + "mappings": { + "RELATIVE_POSITIVE_MOVEMENT_X": "PanRight", + "RELATIVE_NEGATIVE_MOVEMENT_X": "PanLeft", + "RELATIVE_POSITIVE_MOVEMENT_Y": "TiltUp", + "RELATIVE_NEGATIVE_MOVEMENT_Y": "TiltDown" + } + }, + { + "mappings": { + "RELATIVE_POSITIVE_MOVEMENT_X": "MoveUp", + "RELATIVE_NEGATIVE_MOVEMENT_X": "MoveDown", + "RELATIVE_POSITIVE_MOVEMENT_Y": "MoveRight", + "RELATIVE_NEGATIVE_MOVEMENT_Y": "MoveLeft", + "VERTICAL_POSITIVE_SCROLL": "MoveForward", + "HORIZONTAL_POSITIVE_SCROLL": "MoveForward", + "VERTICAL_NEGATIVE_SCROLL": "MoveBackward", + "HORIZONTAL_NEGATIVE_SCROLL": "MoveBackward" + } + } + ] + } + } + \ No newline at end of file diff --git a/61_UI/config.json.template b/61_UI/config.json.template deleted file mode 100644 index f961745c1..000000000 --- a/61_UI/config.json.template +++ /dev/null @@ -1,28 +0,0 @@ -{ - "enableParallelBuild": true, - "threadsPerBuildProcess" : 2, - "isExecuted": false, - "scriptPath": "", - "cmake": { - "configurations": [ "Release", "Debug", "RelWithDebInfo" ], - "buildModes": [], - "requiredOptions": [] - }, - "profiles": [ - { - "backend": "vulkan", - "platform": "windows", - "buildModes": [], - "runConfiguration": "Release", - "gpuArchitectures": [] - } - ], - "dependencies": [], - "data": [ - { - "dependencies": [], - "command": [""], - "outputs": [] - } - ] -} \ No newline at end of file diff --git a/61_UI/include/common.hpp b/61_UI/include/common.hpp index a5def7551..0a709d5b4 100644 --- a/61_UI/include/common.hpp +++ b/61_UI/include/common.hpp @@ -3,13 +3,24 @@ #include +#include + // common api -#include "CCamera.hpp" +#include "camera/CFPSCamera.hpp" +#include "camera/CFreeLockCamera.hpp" +#include "camera/COrbitCamera.hpp" #include "SimpleWindowedApplication.hpp" -#include "CEventCallback.hpp" +#include "InputSystem.hpp" + +#include "camera/CCubeProjection.hpp" +#include "camera/CLinearProjection.hpp" +#include "camera/CPlanarProjection.hpp" // the example's headers -#include "transform.hpp" +#include "nbl/ui/ICursorControl.h" +#include "nbl/ext/ImGui/ImGui.h" +#include "imgui/imgui_internal.h" +#include "imguizmo/ImGuizmo.h" #include "CGeomtryCreatorScene.hpp" using namespace nbl; diff --git a/61_UI/include/keysmapping.hpp b/61_UI/include/keysmapping.hpp new file mode 100644 index 000000000..46153660c --- /dev/null +++ b/61_UI/include/keysmapping.hpp @@ -0,0 +1,234 @@ +#ifndef __NBL_KEYSMAPPING_H_INCLUDED__ +#define __NBL_KEYSMAPPING_H_INCLUDED__ + +#include "common.hpp" + +bool handleAddMapping(const char* tableID, IGimbalManipulateEncoder* encoder, IGimbalManipulateEncoder::EncoderType activeController, CVirtualGimbalEvent::VirtualEventType& selectedEventType, ui::E_KEY_CODE& newKey, ui::E_MOUSE_CODE& newMouseCode, bool& addMode) +{ + bool anyMapUpdated = false; + ImGui::BeginTable(tableID, 3, 
ImGuiTableFlags_Borders | ImGuiTableFlags_Resizable | ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchSame); + ImGui::TableSetupColumn("Virtual Event", ImGuiTableColumnFlags_WidthStretch, 0.33f); + ImGui::TableSetupColumn("Key", ImGuiTableColumnFlags_WidthStretch, 0.33f); + ImGui::TableSetupColumn("Actions", ImGuiTableColumnFlags_WidthStretch, 0.33f); + ImGui::TableHeadersRow(); + + ImGui::TableNextRow(); + ImGui::TableSetColumnIndex(0); + ImGui::AlignTextToFramePadding(); + if (ImGui::BeginCombo("##selectEvent", CVirtualGimbalEvent::virtualEventToString(selectedEventType).data())) + { + for (const auto& eventType : CVirtualGimbalEvent::VirtualEventsTypeTable) + { + bool isSelected = (selectedEventType == eventType); + if (ImGui::Selectable(CVirtualGimbalEvent::virtualEventToString(eventType).data(), isSelected)) + selectedEventType = eventType; + if (isSelected) + ImGui::SetItemDefaultFocus(); + } + ImGui::EndCombo(); + } + + ImGui::TableSetColumnIndex(1); + if (activeController == IGimbalManipulateEncoder::Keyboard) + { + char newKeyDisplay[2] = { ui::keyCodeToChar(newKey, true), '\0' }; + if (ImGui::BeginCombo("##selectKey", newKeyDisplay)) + { + for (int i = ui::E_KEY_CODE::EKC_A; i <= ui::E_KEY_CODE::EKC_Z; ++i) + { + bool isSelected = (newKey == static_cast(i)); + char label[2] = { ui::keyCodeToChar(static_cast(i), true), '\0' }; + if (ImGui::Selectable(label, isSelected)) + newKey = static_cast(i); + if (isSelected) + ImGui::SetItemDefaultFocus(); + } + ImGui::EndCombo(); + } + } + else + { + if (ImGui::BeginCombo("##selectMouseKey", ui::mouseCodeToString(newMouseCode).data())) + { + for (int i = ui::EMC_LEFT_BUTTON; i < ui::EMC_COUNT; ++i) + { + bool isSelected = (newMouseCode == static_cast(i)); + if (ImGui::Selectable(ui::mouseCodeToString(static_cast(i)).data(), isSelected)) + newMouseCode = static_cast(i); + if (isSelected) + ImGui::SetItemDefaultFocus(); + } + ImGui::EndCombo(); + } + } + + ImGui::TableSetColumnIndex(2); + if (ImGui::Button("Confirm Add", ImVec2(100, 30))) + { + anyMapUpdated |= true; + if (activeController == IGimbalManipulateEncoder::Keyboard) + encoder->updateKeyboardMapping([&](auto& keys) { keys[newKey] = selectedEventType; }); + else + encoder->updateMouseMapping([&](auto& mouse) { mouse[newMouseCode] = selectedEventType; }); + addMode = false; + } + + ImGui::EndTable(); + + return anyMapUpdated; +} + +bool displayKeyMappingsAndVirtualStatesInline(IGimbalManipulateEncoder* encoder, bool spawnWindow = false) +{ + bool anyMapUpdated = false; + + if (!encoder) return anyMapUpdated; + + struct MappingState + { + bool addMode = false; + CVirtualGimbalEvent::VirtualEventType selectedEventType = CVirtualGimbalEvent::VirtualEventType::MoveForward; + ui::E_KEY_CODE newKey = ui::E_KEY_CODE::EKC_A; + ui::E_MOUSE_CODE newMouseCode = ui::EMC_LEFT_BUTTON; + IGimbalManipulateEncoder::EncoderType activeController = IGimbalManipulateEncoder::Keyboard; + }; + + static std::unordered_map cameraStates; + auto& state = cameraStates[encoder]; + + const auto& keyboardMappings = encoder->getKeyboardVirtualEventMap(); + const auto& mouseMappings = encoder->getMouseVirtualEventMap(); + + if (spawnWindow) + { + ImGui::SetNextWindowSize(ImVec2(600, 400), ImGuiCond_FirstUseEver); + ImGui::Begin("Controller Mappings & Virtual States", nullptr, ImGuiWindowFlags_NoResize | ImGuiWindowFlags_AlwaysVerticalScrollbar); + } + + if (ImGui::BeginTabBar("ControllersTabBar")) + { + if (ImGui::BeginTabItem("Keyboard")) + { + state.activeController = IGimbalManipulateEncoder::Keyboard; + 
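// Each tab simply retargets the shared per-encoder state (kept above in a function-static map,
// effectively std::unordered_map<IGimbalManipulateEncoder*, MappingState> keyed by the encoder pointer),
// so the "Confirm Add" handler in handleAddMapping() knows whether the new binding goes into the
// keyboard or the mouse virtual-event map.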
ImGui::Separator(); + + if (ImGui::Button("Add Key", ImVec2(100, 30))) + state.addMode = !state.addMode; + + ImGui::Separator(); + + ImGui::BeginTable("KeyboardMappingsTable", 5, ImGuiTableFlags_Borders | ImGuiTableFlags_Resizable | ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchSame); + ImGui::TableSetupColumn("Virtual Event", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Key(s)", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Active Status", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Magnitude", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Actions", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableHeadersRow(); + + for (const auto& [keyboardCode, hash] : keyboardMappings) + { + ImGui::TableNextRow(); + const char* eventName = CVirtualGimbalEvent::virtualEventToString(hash.event.type).data(); + ImGui::TableSetColumnIndex(0); + ImGui::AlignTextToFramePadding(); + ImGui::TextWrapped("%s", eventName); + + ImGui::TableSetColumnIndex(1); + std::string keyString(1, ui::keyCodeToChar(keyboardCode, true)); + ImGui::AlignTextToFramePadding(); + ImGui::TextWrapped("%s", keyString.c_str()); + + ImGui::TableSetColumnIndex(2); + bool isActive = (hash.event.magnitude > 0); + ImVec4 statusColor = isActive ? ImVec4(0.0f, 1.0f, 0.0f, 1.0f) : ImVec4(1.0f, 0.0f, 0.0f, 1.0f); + ImGui::TextColored(statusColor, "%s", isActive ? "Active" : "Inactive"); + + ImGui::TableSetColumnIndex(3); + ImGui::Text("%.2f", hash.event.magnitude); + + ImGui::TableSetColumnIndex(4); + if (ImGui::Button(("Delete##deleteKey" + std::to_string(static_cast(keyboardCode))).c_str())) + { + anyMapUpdated |= true; + encoder->updateKeyboardMapping([keyboardCode](auto& keys) { keys.erase(keyboardCode); }); + break; + } + } + ImGui::EndTable(); + + if (state.addMode) + { + ImGui::Separator(); + anyMapUpdated |= handleAddMapping("AddKeyboardMappingTable", encoder, state.activeController, state.selectedEventType, state.newKey, state.newMouseCode, state.addMode); + } + + ImGui::EndTabItem(); + } + + if (ImGui::BeginTabItem("Mouse")) + { + state.activeController = IGimbalManipulateEncoder::Mouse; + ImGui::Separator(); + + if (ImGui::Button("Add Key", ImVec2(100, 30))) + state.addMode = !state.addMode; + + ImGui::Separator(); + + ImGui::BeginTable("MouseMappingsTable", 5, ImGuiTableFlags_Borders | ImGuiTableFlags_Resizable | ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchSame); + ImGui::TableSetupColumn("Virtual Event", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Mouse Button(s)", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Active Status", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Magnitude", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableSetupColumn("Actions", ImGuiTableColumnFlags_WidthStretch, 0.2f); + ImGui::TableHeadersRow(); + + for (const auto& [mouseCode, hash] : mouseMappings) + { + ImGui::TableNextRow(); + const char* eventName = CVirtualGimbalEvent::virtualEventToString(hash.event.type).data(); + ImGui::TableSetColumnIndex(0); + ImGui::AlignTextToFramePadding(); + ImGui::TextWrapped("%s", eventName); + + ImGui::TableSetColumnIndex(1); + const char* mouseButtonName = ui::mouseCodeToString(mouseCode).data(); + ImGui::AlignTextToFramePadding(); + ImGui::TextWrapped("%s", mouseButtonName); + + ImGui::TableSetColumnIndex(2); + bool isActive = (hash.event.magnitude > 0); + ImVec4 statusColor = isActive ? 
ImVec4(0.0f, 1.0f, 0.0f, 1.0f) : ImVec4(1.0f, 0.0f, 0.0f, 1.0f); + ImGui::TextColored(statusColor, "%s", isActive ? "Active" : "Inactive"); + + ImGui::TableSetColumnIndex(3); + ImGui::Text("%.2f", hash.event.magnitude); + + ImGui::TableSetColumnIndex(4); + if (ImGui::Button(("Delete##deleteMouse" + std::to_string(static_cast(mouseCode))).c_str())) + { + anyMapUpdated |= true; + encoder->updateMouseMapping([mouseCode](auto& mouse) { mouse.erase(mouseCode); }); + break; + } + } + ImGui::EndTable(); + + if (state.addMode) + { + ImGui::Separator(); + handleAddMapping("AddMouseMappingTable", encoder, state.activeController, state.selectedEventType, state.newKey, state.newMouseCode, state.addMode); + } + ImGui::EndTabItem(); + } + + ImGui::EndTabBar(); + } + + if (spawnWindow) + ImGui::End(); + + return anyMapUpdated; +} + +#endif // __NBL_KEYSMAPPING_H_INCLUDED__ \ No newline at end of file diff --git a/61_UI/include/transform.hpp b/61_UI/include/transform.hpp deleted file mode 100644 index 88a78f751..000000000 --- a/61_UI/include/transform.hpp +++ /dev/null @@ -1,154 +0,0 @@ -#ifndef __NBL_THIS_EXAMPLE_TRANSFORM_H_INCLUDED__ -#define __NBL_THIS_EXAMPLE_TRANSFORM_H_INCLUDED__ - -#include "nbl/ui/ICursorControl.h" -#include "nbl/ext/ImGui/ImGui.h" -#include "imgui/imgui_internal.h" -#include "imguizmo/ImGuizmo.h" - -static constexpr inline auto OfflineSceneTextureIx = 1u; - -struct TransformRequestParams -{ - bool useWindow = true, editTransformDecomposition = false, enableViewManipulate = false; - float camDistance = 8.f; -}; - -void EditTransform(float* cameraView, const float* cameraProjection, float* matrix, const TransformRequestParams& params) -{ - static ImGuizmo::OPERATION mCurrentGizmoOperation(ImGuizmo::TRANSLATE); - static ImGuizmo::MODE mCurrentGizmoMode(ImGuizmo::LOCAL); - static bool useSnap = false; - static float snap[3] = { 1.f, 1.f, 1.f }; - static float bounds[] = { -0.5f, -0.5f, -0.5f, 0.5f, 0.5f, 0.5f }; - static float boundsSnap[] = { 0.1f, 0.1f, 0.1f }; - static bool boundSizing = false; - static bool boundSizingSnap = false; - - if (params.editTransformDecomposition) - { - if (ImGui::IsKeyPressed(ImGuiKey_T)) - mCurrentGizmoOperation = ImGuizmo::TRANSLATE; - if (ImGui::IsKeyPressed(ImGuiKey_R)) - mCurrentGizmoOperation = ImGuizmo::ROTATE; - if (ImGui::IsKeyPressed(ImGuiKey_S)) - mCurrentGizmoOperation = ImGuizmo::SCALE; - if (ImGui::RadioButton("Translate", mCurrentGizmoOperation == ImGuizmo::TRANSLATE)) - mCurrentGizmoOperation = ImGuizmo::TRANSLATE; - ImGui::SameLine(); - if (ImGui::RadioButton("Rotate", mCurrentGizmoOperation == ImGuizmo::ROTATE)) - mCurrentGizmoOperation = ImGuizmo::ROTATE; - ImGui::SameLine(); - if (ImGui::RadioButton("Scale", mCurrentGizmoOperation == ImGuizmo::SCALE)) - mCurrentGizmoOperation = ImGuizmo::SCALE; - if (ImGui::RadioButton("Universal", mCurrentGizmoOperation == ImGuizmo::UNIVERSAL)) - mCurrentGizmoOperation = ImGuizmo::UNIVERSAL; - float matrixTranslation[3], matrixRotation[3], matrixScale[3]; - ImGuizmo::DecomposeMatrixToComponents(matrix, matrixTranslation, matrixRotation, matrixScale); - ImGui::InputFloat3("Tr", matrixTranslation); - ImGui::InputFloat3("Rt", matrixRotation); - ImGui::InputFloat3("Sc", matrixScale); - ImGuizmo::RecomposeMatrixFromComponents(matrixTranslation, matrixRotation, matrixScale, matrix); - - if (mCurrentGizmoOperation != ImGuizmo::SCALE) - { - if (ImGui::RadioButton("Local", mCurrentGizmoMode == ImGuizmo::LOCAL)) - mCurrentGizmoMode = ImGuizmo::LOCAL; - ImGui::SameLine(); - if (ImGui::RadioButton("World", 
mCurrentGizmoMode == ImGuizmo::WORLD)) - mCurrentGizmoMode = ImGuizmo::WORLD; - } - if (ImGui::IsKeyPressed(ImGuiKey_S) && ImGui::IsKeyPressed(ImGuiKey_LeftShift)) - useSnap = !useSnap; - ImGui::Checkbox("##UseSnap", &useSnap); - ImGui::SameLine(); - - switch (mCurrentGizmoOperation) - { - case ImGuizmo::TRANSLATE: - ImGui::InputFloat3("Snap", &snap[0]); - break; - case ImGuizmo::ROTATE: - ImGui::InputFloat("Angle Snap", &snap[0]); - break; - case ImGuizmo::SCALE: - ImGui::InputFloat("Scale Snap", &snap[0]); - break; - } - ImGui::Checkbox("Bound Sizing", &boundSizing); - if (boundSizing) - { - ImGui::PushID(3); - ImGui::Checkbox("##BoundSizing", &boundSizingSnap); - ImGui::SameLine(); - ImGui::InputFloat3("Snap", boundsSnap); - ImGui::PopID(); - } - } - - ImGuiIO& io = ImGui::GetIO(); - float viewManipulateRight = io.DisplaySize.x; - float viewManipulateTop = 0; - static ImGuiWindowFlags gizmoWindowFlags = 0; - - /* - for the "useWindow" case we just render to a gui area, - otherwise to fake full screen transparent window - - note that for both cases we make sure gizmo being - rendered is aligned to our texture scene using - imgui "cursor" screen positions - */ - - SImResourceInfo info; - info.textureID = OfflineSceneTextureIx; - info.samplerIx = (uint16_t)nbl::ext::imgui::UI::DefaultSamplerIx::USER; - - if (params.useWindow) - { - ImGui::SetNextWindowSize(ImVec2(800, 400), ImGuiCond_Appearing); - ImGui::SetNextWindowPos(ImVec2(400, 20), ImGuiCond_Appearing); - ImGui::PushStyleColor(ImGuiCol_WindowBg, (ImVec4)ImColor(0.35f, 0.3f, 0.3f)); - ImGui::Begin("Gizmo", 0, gizmoWindowFlags); - ImGuizmo::SetDrawlist(); - - ImVec2 contentRegionSize = ImGui::GetContentRegionAvail(); - ImVec2 windowPos = ImGui::GetWindowPos(); - ImVec2 cursorPos = ImGui::GetCursorScreenPos(); - - ImGui::Image(info, contentRegionSize); - ImGuizmo::SetRect(cursorPos.x, cursorPos.y, contentRegionSize.x, contentRegionSize.y); - - viewManipulateRight = cursorPos.x + contentRegionSize.x; - viewManipulateTop = cursorPos.y; - - ImGuiWindow* window = ImGui::GetCurrentWindow(); - gizmoWindowFlags = (ImGui::IsWindowHovered() && ImGui::IsMouseHoveringRect(window->InnerRect.Min, window->InnerRect.Max) ? ImGuiWindowFlags_NoMove : 0); - } - else - { - ImGui::SetNextWindowPos(ImVec2(0, 0)); - ImGui::SetNextWindowSize(io.DisplaySize); - ImGui::PushStyleColor(ImGuiCol_WindowBg, ImVec4(0, 0, 0, 0)); // fully transparent fake window - ImGui::Begin("FullScreenWindow", nullptr, ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoScrollbar | ImGuiWindowFlags_NoScrollWithMouse | ImGuiWindowFlags_NoCollapse | ImGuiWindowFlags_NoBringToFrontOnFocus | ImGuiWindowFlags_NoBackground | ImGuiWindowFlags_NoInputs); - - ImVec2 contentRegionSize = ImGui::GetContentRegionAvail(); - ImVec2 cursorPos = ImGui::GetCursorScreenPos(); - - ImGui::Image(info, contentRegionSize); - ImGuizmo::SetRect(cursorPos.x, cursorPos.y, contentRegionSize.x, contentRegionSize.y); - - viewManipulateRight = cursorPos.x + contentRegionSize.x; - viewManipulateTop = cursorPos.y; - } - - ImGuizmo::Manipulate(cameraView, cameraProjection, mCurrentGizmoOperation, mCurrentGizmoMode, matrix, NULL, useSnap ? &snap[0] : NULL, boundSizing ? bounds : NULL, boundSizingSnap ? 
boundsSnap : NULL); - - if(params.enableViewManipulate) - ImGuizmo::ViewManipulate(cameraView, params.camDistance, ImVec2(viewManipulateRight - 128, viewManipulateTop), ImVec2(128, 128), 0x10101010); - - ImGui::End(); - ImGui::PopStyleColor(); -} - -#endif // __NBL_THIS_EXAMPLE_TRANSFORM_H_INCLUDED__ \ No newline at end of file diff --git a/61_UI/main.cpp b/61_UI/main.cpp index 470d5e723..3d52fb14c 100644 --- a/61_UI/main.cpp +++ b/61_UI/main.cpp @@ -2,7 +2,192 @@ // This file is part of the "Nabla Engine". // For conditions of distribution and use, see copyright notice in nabla.h +#include "nlohmann/json.hpp" +#include "argparse/argparse.hpp" +using json = nlohmann::json; + #include "common.hpp" +#include "keysmapping.hpp" +#include "camera/CCubeProjection.hpp" +#include "glm/glm/ext/matrix_clip_space.hpp" // TODO: TESTING +#include "nbl/this_example/builtin/CArchive.h" + +using planar_projections_range_t = std::vector; +using planar_projection_t = CPlanarProjection; + +// the only reason for those is to remind we must go with transpose & 4x4 matrices +struct ImGuizmoPlanarM16InOut +{ + float32_t4x4 view, projection; +}; + +struct ImGuizmoModelM16InOut +{ + float32_t4x4 inTRS, outTRS, outDeltaTRS; +}; + +constexpr IGPUImage::SSubresourceRange TripleBufferUsedSubresourceRange = +{ + .aspectMask = IGPUImage::EAF_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 +}; + +class CUIEventCallback : public nbl::video::ISmoothResizeSurface::ICallback // I cannot use common CEventCallback because I MUST inherit this callback in order to use smooth resize surface with window callback (for my input events) +{ +public: + CUIEventCallback(nbl::core::smart_refctd_ptr&& m_inputSystem, nbl::system::logger_opt_smart_ptr&& logger) : m_inputSystem(std::move(m_inputSystem)), m_logger(std::move(logger)) {} + CUIEventCallback() {} + + void setLogger(nbl::system::logger_opt_smart_ptr& logger) + { + m_logger = logger; + } + void setInputSystem(nbl::core::smart_refctd_ptr&& m_inputSystem) + { + m_inputSystem = std::move(m_inputSystem); + } +private: + + void onMouseConnected_impl(nbl::core::smart_refctd_ptr&& mch) override + { + m_logger.log("A mouse %p has been connected", nbl::system::ILogger::ELL_INFO, mch.get()); + m_inputSystem.get()->add(m_inputSystem.get()->m_mouse, std::move(mch)); + } + void onMouseDisconnected_impl(nbl::ui::IMouseEventChannel* mch) override + { + m_logger.log("A mouse %p has been disconnected", nbl::system::ILogger::ELL_INFO, mch); + m_inputSystem.get()->remove(m_inputSystem.get()->m_mouse, mch); + } + void onKeyboardConnected_impl(nbl::core::smart_refctd_ptr&& kbch) override + { + m_logger.log("A keyboard %p has been connected", nbl::system::ILogger::ELL_INFO, kbch.get()); + m_inputSystem.get()->add(m_inputSystem.get()->m_keyboard, std::move(kbch)); + } + void onKeyboardDisconnected_impl(nbl::ui::IKeyboardEventChannel* kbch) override + { + m_logger.log("A keyboard %p has been disconnected", nbl::system::ILogger::ELL_INFO, kbch); + m_inputSystem.get()->remove(m_inputSystem.get()->m_keyboard, kbch); + } + +private: + nbl::core::smart_refctd_ptr m_inputSystem = nullptr; + nbl::system::logger_opt_smart_ptr m_logger = nullptr; +}; + +class CSwapchainResources final : public ISmoothResizeSurface::ISwapchainResources +{ +public: + // Because we blit to the swapchain image asynchronously, we need a queue which can not only present but also perform graphics commands. 
+ // If we for example used a compute shader to tonemap and MSAA resolve, we'd request the COMPUTE_BIT here. + constexpr static inline IQueue::FAMILY_FLAGS RequiredQueueFlags = IQueue::FAMILY_FLAGS::GRAPHICS_BIT; + +protected: + // We can return `BLIT_BIT` here, because the Source Image will be already in the correct layout to be used for the present + inline core::bitflag getTripleBufferPresentStages() const override { return asset::PIPELINE_STAGE_FLAGS::BLIT_BIT; } + + inline bool tripleBufferPresent(IGPUCommandBuffer* cmdbuf, const ISmoothResizeSurface::SPresentSource& source, const uint8_t imageIndex, const uint32_t qFamToAcquireSrcFrom) override + { + bool success = true; + auto acquiredImage = getImage(imageIndex); + + // Ownership of the Source Blit Image, not the Swapchain Image + const bool needToAcquireSrcOwnership = qFamToAcquireSrcFrom != IQueue::FamilyIgnored; + // Should never get asked to transfer ownership if the source is concurrent sharing + assert(!source.image->getCachedCreationParams().isConcurrentSharing() || !needToAcquireSrcOwnership); + + const auto blitDstLayout = IGPUImage::LAYOUT::TRANSFER_DST_OPTIMAL; + IGPUCommandBuffer::SPipelineBarrierDependencyInfo depInfo = {}; + + // barrier before to transition the swapchain image layout + using image_barrier_t = decltype(depInfo.imgBarriers)::element_type; + const image_barrier_t preBarriers[2] = { + { + .barrier = { + .dep = { + .srcStageMask = asset::PIPELINE_STAGE_FLAGS::NONE, // acquire isn't a stage + .srcAccessMask = asset::ACCESS_FLAGS::NONE, // performs no accesses + .dstStageMask = asset::PIPELINE_STAGE_FLAGS::BLIT_BIT, + .dstAccessMask = asset::ACCESS_FLAGS::TRANSFER_WRITE_BIT + } + }, + .image = acquiredImage, + .subresourceRange = { + .aspectMask = IGPUImage::EAF_COLOR_BIT, + .baseMipLevel = 0, + .levelCount = 1, + .baseArrayLayer = 0, + .layerCount = 1 + }, + .oldLayout = IGPUImage::LAYOUT::UNDEFINED, // I do not care about previous contents of the swapchain + .newLayout = blitDstLayout + }, + { + .barrier = { + .dep = { + // when acquiring ownership the source access masks don't matter + .srcStageMask = asset::PIPELINE_STAGE_FLAGS::NONE, + // Acquire must Happen-Before Semaphore wait, but neither has a true stage so NONE here + // https://github.com/KhronosGroup/Vulkan-Docs/issues/2319 + // If no ownership acquire needed then this dep info won't be used at all + .srcAccessMask = asset::ACCESS_FLAGS::NONE, + .dstStageMask = asset::PIPELINE_STAGE_FLAGS::BLIT_BIT, + .dstAccessMask = asset::ACCESS_FLAGS::TRANSFER_READ_BIT + }, + .ownershipOp = IGPUCommandBuffer::SOwnershipTransferBarrier::OWNERSHIP_OP::ACQUIRE, + .otherQueueFamilyIndex = qFamToAcquireSrcFrom + }, + .image = source.image, + .subresourceRange = TripleBufferUsedSubresourceRange + // no layout transition, already in the correct layout for the blit + } + }; + // We only barrier the source image if we need to acquire ownership, otherwise thanks to Timeline Semaphores all sync is good + depInfo.imgBarriers = { preBarriers,needToAcquireSrcOwnership ? 
2ull : 1ull }; + success &= cmdbuf->pipelineBarrier(asset::EDF_NONE, depInfo); + + // TODO: Implement scaling modes other than a plain STRETCH, and allow for using subrectangles of the initial contents + { + const auto srcOffset = source.rect.offset; + const auto srcExtent = source.rect.extent; + const auto dstExtent = acquiredImage->getCreationParameters().extent; + const IGPUCommandBuffer::SImageBlit regions[1] = { { + .srcMinCoord = {static_cast(srcOffset.x),static_cast(srcOffset.y),0}, + .srcMaxCoord = {srcExtent.width,srcExtent.height,1}, + .dstMinCoord = {0,0,0}, + .dstMaxCoord = {dstExtent.width,dstExtent.height,1}, + .layerCount = acquiredImage->getCreationParameters().arrayLayers, + .srcBaseLayer = 0, + .dstBaseLayer = 0, + .srcMipLevel = 0 + } }; + success &= cmdbuf->blitImage(source.image, IGPUImage::LAYOUT::TRANSFER_SRC_OPTIMAL, acquiredImage, blitDstLayout, regions, IGPUSampler::ETF_LINEAR); + } + + // Barrier after, note that I don't care about preserving the contents of the Triple Buffer when the Render queue starts writing to it again. + // Therefore no ownership release, and no layout transition. + const image_barrier_t postBarrier[1] = { + { + .barrier = { + // When transitioning the image to VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR or VK_IMAGE_LAYOUT_PRESENT_SRC_KHR, there is no need to delay subsequent processing, + // or perform any visibility operations (as vkQueuePresentKHR performs automatic visibility operations). + // To achieve this, the dstAccessMask member of the VkImageMemoryBarrier should be set to 0, and the dstStageMask parameter should be set to VK_PIPELINE_STAGE_2_NONE + .dep = preBarriers[0].barrier.dep.nextBarrier(asset::PIPELINE_STAGE_FLAGS::NONE,asset::ACCESS_FLAGS::NONE) + }, + .image = preBarriers[0].image, + .subresourceRange = preBarriers[0].subresourceRange, + .oldLayout = blitDstLayout, + .newLayout = IGPUImage::LAYOUT::PRESENT_SRC + } + }; + depInfo.imgBarriers = postBarrier; + success &= cmdbuf->pipelineBarrier(asset::EDF_NONE, depInfo); + + return success; + } +}; /* Renders scene texture to an offline @@ -16,753 +201,1971 @@ class UISampleApp final : public examples::SimpleWindowedApplication { - using device_base_t = examples::SimpleWindowedApplication; + using base_t = examples::SimpleWindowedApplication; using clock_t = std::chrono::steady_clock; - _NBL_STATIC_INLINE_CONSTEXPR uint32_t WIN_W = 1280, WIN_H = 720; - constexpr static inline clock_t::duration DisplayImageDuration = std::chrono::milliseconds(900); public: + using base_t::base_t; + inline UISampleApp(const path& _localInputCWD, const path& _localOutputCWD, const path& _sharedInputCWD, const path& _sharedOutputCWD) : IApplicationFramework(_localInputCWD, _localOutputCWD, _sharedInputCWD, _sharedOutputCWD) {} - inline core::vector getSurfaces() const override + // Will get called mid-initialization, via `filterDevices` between when the API Connection is created and Physical Device is chosen + core::vector getSurfaces() const override { + // So let's create our Window and Surface then! 
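// The m_surface guard below keeps this idempotent in case the hook is invoked more than once during device selection.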
if (!m_surface) { { - auto windowCallback = core::make_smart_refctd_ptr(smart_refctd_ptr(m_inputSystem), smart_refctd_ptr(m_logger)); + const auto dpyInfo = m_winMgr->getPrimaryDisplayInfo(); + auto windowCallback = core::make_smart_refctd_ptr(smart_refctd_ptr(m_inputSystem), smart_refctd_ptr(m_logger)); + IWindow::SCreationParams params = {}; - params.callback = core::make_smart_refctd_ptr(); - params.width = WIN_W; - params.height = WIN_H; + params.callback = core::make_smart_refctd_ptr(); + params.width = dpyInfo.resX; + params.height = dpyInfo.resY; params.x = 32; params.y = 32; - params.flags = ui::IWindow::ECF_HIDDEN | IWindow::ECF_BORDERLESS | IWindow::ECF_RESIZABLE; - params.windowCaption = "UISampleApp"; + params.flags = IWindow::ECF_INPUT_FOCUS | IWindow::ECF_CAN_RESIZE | IWindow::ECF_CAN_MAXIMIZE | IWindow::ECF_CAN_MINIMIZE; + params.windowCaption = "[Nabla Engine] UI App"; params.callback = windowCallback; + const_cast&>(m_window) = m_winMgr->createWindow(std::move(params)); } - auto surface = CSurfaceVulkanWin32::create(smart_refctd_ptr(m_api), smart_refctd_ptr_static_cast(m_window)); - const_cast&>(m_surface) = nbl::video::CSimpleResizeSurface::create(std::move(surface)); + const_cast&>(m_surface) = CSmoothResizeSurface::create(std::move(surface)); } if (m_surface) - return { {m_surface->getSurface()/*,EQF_NONE*/} }; + { + m_window->getManager()->maximize(m_window.get()); + auto* cc = m_window->getCursorControl(); + cc->setVisible(false); + return { {m_surface->getSurface()/*,EQF_NONE*/} }; + } + return {}; } inline bool onAppInitialized(smart_refctd_ptr&& system) override { + argparse::ArgumentParser program("Virtual camera event system demo"); + + program.add_argument("--file") + .help("Path to json file with camera inputs"); + + try + { + program.parse_args({ argv.data(), argv.data() + argv.size() }); + } + catch (const std::exception& err) + { + std::cerr << err.what() << std::endl << program; + return false; + } + + // Create imput system m_inputSystem = make_smart_refctd_ptr(logger_opt_smart_ptr(smart_refctd_ptr(m_logger))); - if (!device_base_t::onAppInitialized(smart_refctd_ptr(system))) + // Remember to call the base class initialization! + if (!base_t::onAppInitialized(std::move(system))) return false; + { + const std::optional cameraJsonFile = program.is_used("--file") ? program.get("--file") : std::optional(std::nullopt); + + json j; + auto file = cameraJsonFile.has_value() ? std::ifstream(cameraJsonFile.value()) : std::ifstream(); + if (!file.is_open()) + { + if (cameraJsonFile.has_value()) + m_logger->log("Cannot open input \"%s\" json file. Switching to default config.", ILogger::ELL_WARNING, cameraJsonFile.value().c_str()); + else + m_logger->log("No input json file provided. 
Switching to default config.", ILogger::ELL_WARNING); + + auto assets = make_smart_refctd_ptr(smart_refctd_ptr(m_logger)); + auto pFile = assets->getFile("cameras.json", IFile::ECF_READ, ""); + + string config; + IFile::success_t result; + config.resize(pFile->getSize()); + pFile->read(result, config.data(), 0, pFile->getSize()); + + j = json::parse(config); + } + else + { + file >> j; + } + + std::vector> cameras; + for (const auto& jCamera : j["cameras"]) + { + if (jCamera.contains("type")) + { + if (!jCamera.contains("position")) + { + logFail("Expected \"position\" keyword for camera definition!"); + return false; + } + + const bool withOrientation = jCamera.contains("orientation"); + + auto position = [&]() + { + auto jret = jCamera["position"].get>(); + return float32_t3(jret[0], jret[1], jret[2]); + }(); + + auto getOrientation = [&]() + { + auto jret = jCamera["orientation"].get>(); + + // order important for glm::quat, + // the ctor is GLM_FUNC_QUALIFIER GLM_CONSTEXPR qua::qua(T _w, T _x, T _y, T _z) + // but memory layout (and json) is x,y,z,w + return glm::quat(jret[3], jret[0], jret[1], jret[2]); + }; + + auto getTarget = [&]() + { + auto jret = jCamera["target"].get>(); + return float32_t3(jret[0], jret[1], jret[2]); + }; + + if (jCamera["type"] == "FPS") + { + if (!withOrientation) + { + logFail("Expected \"orientation\" keyword for FPS camera definition!"); + return false; + } + + cameras.emplace_back() = make_smart_refctd_ptr(position, getOrientation()); + } + else if (jCamera["type"] == "Free") + { + if (!withOrientation) + { + logFail("Expected \"orientation\" keyword for Free camera definition!"); + return false; + } + + cameras.emplace_back() = make_smart_refctd_ptr(position, getOrientation()); + } + else if (jCamera["type"] == "Orbit") + { + auto& camera = cameras.emplace_back() = make_smart_refctd_ptr(position, getTarget()); + camera->setMoveSpeedScale(0.2); + } + else + { + logFail("Unsupported camera type!"); + return false; + } + } + else + { + logFail("Expected \"type\" keyword for camera definition!"); + return false; + } + } + + std::vector projections; + for (const auto& jProjection : j["projections"]) + { + if (jProjection.contains("type")) + { + float zNear, zFar; + + if (!jProjection.contains("zNear")) + { + logFail("Expected \"zNear\" keyword for planar projection definition!"); + return false; + } + + if (!jProjection.contains("zFar")) + { + logFail("Expected \"zFar\" keyword for planar projection definition!"); + return false; + } + + zNear = jProjection["zNear"].get(); + zFar = jProjection["zFar"].get(); + + if (jProjection["type"] == "perspective") + { + if (!jProjection.contains("fov")) + { + logFail("Expected \"fov\" keyword for planar perspective projection definition!"); + return false; + } + + float fov = jProjection["fov"].get(); + projections.emplace_back(IPlanarProjection::CProjection::create(zNear, zFar, fov)); + } + else if (jProjection["type"] == "orthographic") + { + if (!jProjection.contains("orthoWidth")) + { + logFail("Expected \"orthoWidth\" keyword for planar orthographic projection definition!"); + return false; + } + + float orthoWidth = jProjection["orthoWidth"].get(); + projections.emplace_back(IPlanarProjection::CProjection::create(zNear, zFar, orthoWidth)); + } + else + { + logFail("Unsupported projection!"); + return false; + } + } + } + + struct + { + std::vector keyboard; + std::vector mouse; + } controllers; + + if (j.contains("controllers")) + { + const auto& jControllers = j["controllers"]; + + if 
(jControllers.contains("keyboard")) + { + for (const auto& jKeyboard : jControllers["keyboard"]) + { + if (jKeyboard.contains("mappings")) + { + auto& controller = controllers.keyboard.emplace_back(); + for (const auto& [key, value] : jKeyboard["mappings"].items()) + { + const auto nativeCode = stringToKeyCode(key.c_str()); + + if (nativeCode == EKC_NONE) + { + logFail("Invalid native key \"%s\" code mapping for keyboard controller", key.c_str()); + return false; + } + + controller[nativeCode] = CVirtualGimbalEvent::stringToVirtualEvent(value.get()); + } + } + else + { + logFail("Expected \"mappings\" keyword for keyboard controller definition!"); + return false; + } + } + } + else + { + logFail("Expected \"keyboard\" keyword in controllers definition!"); + return false; + } + + if (jControllers.contains("mouse")) + { + for (const auto& jMouse : jControllers["mouse"]) + { + if (jMouse.contains("mappings")) + { + auto& controller = controllers.mouse.emplace_back(); + for (const auto& [key, value] : jMouse["mappings"].items()) + { + const auto nativeCode = stringToMouseCode(key.c_str()); + + if (nativeCode == EMC_NONE) + { + logFail("Invalid native key \"%s\" code mapping for mouse controller", key.c_str()); + return false; + } + + controller[nativeCode] = CVirtualGimbalEvent::stringToVirtualEvent(value.get()); + } + } + else + { + logFail("Expected \"mappings\" keyword for mouse controller definition!"); + return false; + } + } + } + else + { + logFail("Expected \"mouse\" keyword in controllers definition"); + return false; + } + } + else + { + logFail("Expected \"controllers\" keyword in controllers JSON"); + return false; + } + + if (j.contains("viewports") && j.contains("planars")) + { + for (const auto& jPlanar : j["planars"]) + { + if (!jPlanar.contains("camera")) + { + logFail("Expected \"camera\" value in planar object"); + return false; + } + + if (!jPlanar.contains("viewports")) + { + logFail("Expected \"viewports\" list in planar object"); + return false; + } + + const auto cameraIx = jPlanar["camera"].get(); + auto boundViewports = jPlanar["viewports"].get>(); + + auto& planar = m_planarProjections.emplace_back() = planar_projection_t::create(smart_refctd_ptr(cameras[cameraIx])); + for (const auto viewportIx : boundViewports) + { + auto& viewport = j["viewports"][viewportIx]; + if (!viewport.contains("projection") || !viewport.contains("controllers")) + { + logFail("\"projection\" or \"controllers\" missing in viewport object index %d", viewportIx); + return false; + } + + const auto projectionIx = viewport["projection"].get(); + auto& projection = planar->getPlanarProjections().emplace_back(projections[projectionIx]); + + const bool hasKeyboardBound = viewport["controllers"].contains("keyboard"); + const bool hasMouseBound = viewport["controllers"].contains("mouse"); + + if (hasKeyboardBound) + { + auto keyboardControllerIx = viewport["controllers"]["keyboard"].get(); + projection.updateKeyboardMapping([&](auto& map) { map = controllers.keyboard[keyboardControllerIx]; }); + } + else + projection.updateKeyboardMapping([&](auto& map) { map = {}; }); // clean the map if not bound + + if (hasMouseBound) + { + auto mouseControllerIx = viewport["controllers"]["mouse"].get(); + projection.updateMouseMapping([&](auto& map) { map = controllers.mouse[mouseControllerIx]; }); + } + else + projection.updateMouseMapping([&](auto& map) { map = {}; }); // clean the map if not bound + } + + { + auto* camera = planar->getCamera(); + { + camera->updateKeyboardMapping([&](auto& map) { map = 
camera->getKeyboardMappingPreset(); }); + camera->updateMouseMapping([&](auto& map) { map = camera->getMouseMappingPreset(); }); + camera->updateImguizmoMapping([&](auto& map) { map = camera->getImguizmoMappingPreset(); }); + } + } + } + } + else + { + logFail("Expected \"viewports\" and \"planars\" lists in JSON"); + return false; + } + + if (m_planarProjections.size() < windowControlBinding.size()) + { + // TODO, temporary assuming it, I'm not going to implement each possible case now + logFail("Expected at least %d planars", windowControlBinding.size()); + return false; + } + + // init render window planar references - we make all render windows start with focus on first + // planar but in a way that first window has the planar's perspective preset bound & second orthographic + for (uint32_t i = 0u; i < windowControlBinding.size(); ++i) + { + auto& binding = windowControlBinding[i]; + + auto& planar = m_planarProjections[binding.activePlanarIx = 0]; + binding.pickDefaultProjections(planar->getPlanarProjections()); + + if (i) + binding.boundProjectionIx = binding.lastBoundOrthoPresetProjectionIx.value(); + else + binding.boundProjectionIx = binding.lastBoundPerspectivePresetProjectionIx.value(); + } + } + + // Create asset manager m_assetManager = make_smart_refctd_ptr(smart_refctd_ptr(m_system)); - auto* geometry = m_assetManager->getGeometryCreator(); + // First create the resources that don't depend on a swapchain m_semaphore = m_device->createSemaphore(m_realFrameIx); if (!m_semaphore) return logFail("Failed to Create a Semaphore!"); - ISwapchain::SCreationParams swapchainParams = { .surface = m_surface->getSurface() }; - if (!swapchainParams.deduceFormat(m_physicalDevice)) - return logFail("Could not choose a Surface Format for the Swapchain!"); + // The nice thing about having a triple buffer is that you don't need to do acrobatics to account for the formats available to the surface. + // You can transcode to the swapchain's format while copying, and I actually recommend to do surface rotation, tonemapping and OETF application there. 
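// That transcode is exactly what the blit in CSwapchainResources::tripleBufferPresent() gives us,
// which is why the render target format below can stay fixed regardless of the format the surface ends up picking.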
+ const auto format = asset::EF_R8G8B8A8_SRGB; + // Could be more clever and use the copy Triple Buffer to Swapchain as an opportunity to do a MSAA resolve or something + const auto samples = IGPUImage::ESCF_1_BIT; - const static IGPURenderpass::SCreationParams::SSubpassDependency dependencies[] = + // Create the renderpass { - { - .srcSubpass = IGPURenderpass::SCreationParams::SSubpassDependency::External, - .dstSubpass = 0, - .memoryBarrier = + const IGPURenderpass::SCreationParams::SColorAttachmentDescription colorAttachments[] = { + {{ + { + .format = format, + .samples = samples, + .mayAlias = false + }, + /*.loadOp = */IGPURenderpass::LOAD_OP::CLEAR, + /*.storeOp = */IGPURenderpass::STORE_OP::STORE, + /*.initialLayout = */IGPUImage::LAYOUT::UNDEFINED, // because we clear we don't care about contents when we grab the triple buffer img again + /*.finalLayout = */IGPUImage::LAYOUT::TRANSFER_SRC_OPTIMAL // put it already in the correct layout for the blit operation + }}, + IGPURenderpass::SCreationParams::ColorAttachmentsEnd + }; + IGPURenderpass::SCreationParams::SSubpassDescription subpasses[] = { + {}, + IGPURenderpass::SCreationParams::SubpassesEnd + }; + subpasses[0].colorAttachments[0] = { .render = {.attachmentIndex = 0,.layout = IGPUImage::LAYOUT::ATTACHMENT_OPTIMAL} }; + // We actually need external dependencies to ensure ordering of the Implicit Layout Transitions relative to the semaphore signals + IGPURenderpass::SCreationParams::SSubpassDependency dependencies[] = { + // wipe-transition to ATTACHMENT_OPTIMAL { - .srcStageMask = asset::PIPELINE_STAGE_FLAGS::COPY_BIT, - .srcAccessMask = asset::ACCESS_FLAGS::TRANSFER_WRITE_BIT, + .srcSubpass = IGPURenderpass::SCreationParams::SSubpassDependency::External, + .dstSubpass = 0, + .memoryBarrier = { + // we can have NONE as Sources because the semaphore wait is ALL_COMMANDS + // https://github.com/KhronosGroup/Vulkan-Docs/issues/2319 .dstStageMask = asset::PIPELINE_STAGE_FLAGS::COLOR_ATTACHMENT_OUTPUT_BIT, .dstAccessMask = asset::ACCESS_FLAGS::COLOR_ATTACHMENT_WRITE_BIT } + // leave view offsets and flags default }, - { - .srcSubpass = 0, - .dstSubpass = IGPURenderpass::SCreationParams::SSubpassDependency::External, - .memoryBarrier = + // ATTACHMENT_OPTIMAL to PRESENT_SRC { - .srcStageMask = asset::PIPELINE_STAGE_FLAGS::COLOR_ATTACHMENT_OUTPUT_BIT, - .srcAccessMask = asset::ACCESS_FLAGS::COLOR_ATTACHMENT_WRITE_BIT - } + .srcSubpass = 0, + .dstSubpass = IGPURenderpass::SCreationParams::SSubpassDependency::External, + .memoryBarrier = { + .srcStageMask = asset::PIPELINE_STAGE_FLAGS::COLOR_ATTACHMENT_OUTPUT_BIT, + .srcAccessMask = asset::ACCESS_FLAGS::COLOR_ATTACHMENT_WRITE_BIT + // we can have NONE as the Destinations because the semaphore signal is ALL_COMMANDS + // https://github.com/KhronosGroup/Vulkan-Docs/issues/2319 + } + // leave view offsets and flags default }, IGPURenderpass::SCreationParams::DependenciesEnd - }; + }; - auto scResources = std::make_unique(m_device.get(), swapchainParams.surfaceFormat.format, dependencies); - auto* renderpass = scResources->getRenderpass(); - - if (!renderpass) - return logFail("Failed to create Renderpass!"); + IGPURenderpass::SCreationParams params = {}; + params.colorAttachments = colorAttachments; + params.subpasses = subpasses; + params.dependencies = dependencies; + m_renderpass = m_device->createRenderpass(params); + if (!m_renderpass) + return logFail("Failed to Create a Renderpass!"); + } - auto gQueue = getGraphicsQueue(); - if (!m_surface || !m_surface->init(gQueue, 
std::move(scResources), swapchainParams.sharedParams)) - return logFail("Could not create Window & Surface or initialize the Surface!"); + // We just live life in easy mode and have the Swapchain Creation Parameters get deduced from the surface. + // We don't need any control over the format of the swapchain because we'll be only using Renderpasses this time! + // TODO: improve the queue allocation/choice and allocate a dedicated presentation queue to improve responsiveness and race to present. + if (!m_surface || !m_surface->init(m_surface->pickQueue(m_device.get()), std::make_unique(), {})) + return logFail("Failed to Create a Swapchain!"); - m_cmdPool = m_device->createCommandPool(gQueue->getFamilyIndex(), IGPUCommandPool::CREATE_FLAGS::RESET_COMMAND_BUFFER_BIT); - - for (auto i = 0u; i < MaxFramesInFlight; i++) + // Normally you'd want to recreate these images whenever the swapchain is resized in some increment, like 64 pixels or something. + // But I'm super lazy here and will just create "worst case sized images" and waste all the VRAM I can get. + const auto dpyInfo = m_winMgr->getPrimaryDisplayInfo(); + for (auto i = 0; i < MaxFramesInFlight; i++) { - if (!m_cmdPool) - return logFail("Couldn't create Command Pool!"); - if (!m_cmdPool->createCommandBuffers(IGPUCommandPool::BUFFER_LEVEL::PRIMARY, { m_cmdBufs.data() + i, 1 })) - return logFail("Couldn't create Command Buffer!"); + auto& image = m_tripleBuffers[i]; + { + IGPUImage::SCreationParams params = {}; + params = asset::IImage::SCreationParams{ + .type = IGPUImage::ET_2D, + .samples = samples, + .format = format, + .extent = {dpyInfo.resX,dpyInfo.resY,1}, + .mipLevels = 1, + .arrayLayers = 1, + .flags = IGPUImage::ECF_NONE, + // in this example I'll be using a renderpass to clear the image, and then a blit to copy it to the swapchain + .usage = IGPUImage::EUF_RENDER_ATTACHMENT_BIT | IGPUImage::EUF_TRANSFER_SRC_BIT + }; + image = m_device->createImage(std::move(params)); + if (!image) + return logFail("Failed to Create Triple Buffer Image!"); + + // use dedicated allocations, we have plenty of allocations left, even on Win32 + if (!m_device->allocate(image->getMemoryReqs(), image.get()).isValid()) + return logFail("Failed to allocate Device Memory for Image %d", i); + } + image->setObjectDebugName(("Triple Buffer Image " + std::to_string(i)).c_str()); + + // create framebuffers for the images + { + auto imageView = m_device->createImageView({ + .flags = IGPUImageView::ECF_NONE, + // give it a Transfer SRC usage flag so we can transition to the Tranfer SRC layout with End Renderpass + .subUsages = IGPUImage::EUF_RENDER_ATTACHMENT_BIT | IGPUImage::EUF_TRANSFER_SRC_BIT, + .image = core::smart_refctd_ptr(image), + .viewType = IGPUImageView::ET_2D, + .format = format + }); + const auto& imageParams = image->getCreationParameters(); + IGPUFramebuffer::SCreationParams params = { { + .renderpass = core::smart_refctd_ptr(m_renderpass), + .depthStencilAttachments = nullptr, + .colorAttachments = &imageView.get(), + .width = imageParams.extent.width, + .height = imageParams.extent.height, + .layers = imageParams.arrayLayers + } }; + m_framebuffers[i] = m_device->createFramebuffer(std::move(params)); + if (!m_framebuffers[i]) + return logFail("Failed to Create a Framebuffer for Image %d", i); + } } - - //pass.scene = CScene::create(smart_refctd_ptr(m_utils), smart_refctd_ptr(m_logger), gQueue, geometry); - pass.scene = CScene::create(smart_refctd_ptr(m_utils), smart_refctd_ptr(m_logger), gQueue, geometry); - 
nbl::ext::imgui::UI::SCreationParameters params; + // This time we'll create all CommandBuffers from one CommandPool, to keep life simple. However the Pool must support individually resettable CommandBuffers + // because they cannot be pre-recorded because the fraembuffers/swapchain images they use will change when a swapchain recreates. + auto pool = m_device->createCommandPool(getGraphicsQueue()->getFamilyIndex(), IGPUCommandPool::CREATE_FLAGS::RESET_COMMAND_BUFFER_BIT); + if (!pool || !pool->createCommandBuffers(IGPUCommandPool::BUFFER_LEVEL::PRIMARY, { m_cmdBufs.data(),MaxFramesInFlight }, core::smart_refctd_ptr(m_logger))) + return logFail("Failed to Create CommandBuffers!"); - params.resources.texturesInfo = { .setIx = 0u, .bindingIx = 0u }; - params.resources.samplersInfo = { .setIx = 0u, .bindingIx = 1u }; - params.assetManager = m_assetManager; - params.pipelineCache = nullptr; - params.pipelineLayout = nbl::ext::imgui::UI::createDefaultPipelineLayout(m_utils->getLogicalDevice(), params.resources.texturesInfo, params.resources.samplersInfo, TexturesAmount); - params.renderpass = smart_refctd_ptr(renderpass); - params.streamingBuffer = nullptr; - params.subpassIx = 0u; - params.transfer = getTransferUpQueue(); - params.utilities = m_utils; + // UI { - pass.ui.manager = nbl::ext::imgui::UI::create(std::move(params)); + { + nbl::ext::imgui::UI::SCreationParameters params; + params.resources.texturesInfo = { .setIx = 0u, .bindingIx = 0u }; + params.resources.samplersInfo = { .setIx = 0u, .bindingIx = 1u }; + params.assetManager = m_assetManager; + params.pipelineCache = nullptr; + params.pipelineLayout = nbl::ext::imgui::UI::createDefaultPipelineLayout(m_utils->getLogicalDevice(), params.resources.texturesInfo, params.resources.samplersInfo, TotalUISampleTexturesAmount); + params.renderpass = smart_refctd_ptr(m_renderpass); + params.streamingBuffer = nullptr; + params.subpassIx = 0u; + params.transfer = getTransferUpQueue(); + params.utilities = m_utils; + + m_ui.manager = nbl::ext::imgui::UI::create(std::move(params)); + } - if (!pass.ui.manager) + if (!m_ui.manager) return false; // note that we use default layout provided by our extension, but you are free to create your own by filling nbl::ext::imgui::UI::S_CREATION_PARAMETERS::resources - const auto* descriptorSetLayout = pass.ui.manager->getPipeline()->getLayout()->getDescriptorSetLayout(0u); - const auto& params = pass.ui.manager->getCreationParameters(); + const auto* descriptorSetLayout = m_ui.manager->getPipeline()->getLayout()->getDescriptorSetLayout(0u); IDescriptorPool::SCreateInfo descriptorPoolInfo = {}; descriptorPoolInfo.maxDescriptorCount[static_cast(asset::IDescriptor::E_TYPE::ET_SAMPLER)] = (uint32_t)nbl::ext::imgui::UI::DefaultSamplerIx::COUNT; - descriptorPoolInfo.maxDescriptorCount[static_cast(asset::IDescriptor::E_TYPE::ET_SAMPLED_IMAGE)] = TexturesAmount; + descriptorPoolInfo.maxDescriptorCount[static_cast(asset::IDescriptor::E_TYPE::ET_SAMPLED_IMAGE)] = TotalUISampleTexturesAmount; descriptorPoolInfo.maxSets = 1u; descriptorPoolInfo.flags = IDescriptorPool::E_CREATE_FLAGS::ECF_UPDATE_AFTER_BIND_BIT; m_descriptorSetPool = m_device->createDescriptorPool(std::move(descriptorPoolInfo)); assert(m_descriptorSetPool); - m_descriptorSetPool->createDescriptorSets(1u, &descriptorSetLayout, &pass.ui.descriptorSet); - assert(pass.ui.descriptorSet); - } - pass.ui.manager->registerListener([this]() -> void + m_descriptorSetPool->createDescriptorSets(1u, &descriptorSetLayout, &m_ui.descriptorSet); + 
assert(m_ui.descriptorSet); + + m_ui.manager->registerListener([this]() -> void { imguiListen(); }); { - ImGuiIO& io = ImGui::GetIO(); + const auto ds = float32_t2{ m_window->getWidth(), m_window->getHeight() }; - camera.setProjectionMatrix([&]() - { - static matrix4SIMD projection; + wInit.trsEditor.iPos = iPaddingOffset; + wInit.trsEditor.iSize = { ds.x * 0.1, ds.y - wInit.trsEditor.iPos.y * 2 }; - if (isPerspective) - if(isLH) - projection = matrix4SIMD::buildProjectionMatrixPerspectiveFovLH(core::radians(fov), io.DisplaySize.x / io.DisplaySize.y, zNear, zFar); - else - projection = matrix4SIMD::buildProjectionMatrixPerspectiveFovRH(core::radians(fov), io.DisplaySize.x / io.DisplaySize.y, zNear, zFar); - else - { - float viewHeight = viewWidth * io.DisplaySize.y / io.DisplaySize.x; + wInit.planars.iSize = { ds.x * 0.2, ds.y - iPaddingOffset.y * 2 }; + wInit.planars.iPos = { ds.x - wInit.planars.iSize.x - iPaddingOffset.x, 0 + iPaddingOffset.y }; - if(isLH) - projection = matrix4SIMD::buildProjectionMatrixOrthoLH(viewWidth, viewHeight, zNear, zFar); - else - projection = matrix4SIMD::buildProjectionMatrixOrthoRH(viewWidth, viewHeight, zNear, zFar); + { + float leftX = wInit.trsEditor.iPos.x + wInit.trsEditor.iSize.x + iPaddingOffset.x; + float eachXSize = wInit.planars.iPos.x - (wInit.trsEditor.iPos.x + wInit.trsEditor.iSize.x) - 2*iPaddingOffset.x; + float eachYSize = (ds.y - 2 * iPaddingOffset.y - (wInit.renderWindows.size() - 1) * iPaddingOffset.y) / wInit.renderWindows.size(); + + for (size_t i = 0; i < wInit.renderWindows.size(); ++i) + { + auto& rw = wInit.renderWindows[i]; + rw.iPos = { leftX, (1+i) * iPaddingOffset.y + i * eachYSize }; + rw.iSize = { eachXSize, eachYSize }; } + } + } + } - return projection; - }()); + // Geometry Creator Render Scene FBOs + { + resources = ResourcesBundle::create(m_device.get(), m_logger.get(), getGraphicsQueue(), m_assetManager->getGeometryCreator()); - ImGuizmo::SetOrthographic(false); - ImGuizmo::BeginFrame(); + if (!resources) + { + logFail("Could not create geometry creator gpu resources!"); + return false; + } - ImGui::SetNextWindowPos(ImVec2(1024, 100), ImGuiCond_Appearing); - ImGui::SetNextWindowSize(ImVec2(256, 256), ImGuiCond_Appearing); + const auto dpyInfo = m_winMgr->getPrimaryDisplayInfo(); - // create a window and insert the inspector - ImGui::SetNextWindowPos(ImVec2(10, 10), ImGuiCond_Appearing); - ImGui::SetNextWindowSize(ImVec2(320, 340), ImGuiCond_Appearing); - ImGui::Begin("Editor"); + for (uint32_t i = 0u; i < windowControlBinding.size(); ++i) + { + auto& scene = windowControlBinding[i].scene; + scene = CScene::create(smart_refctd_ptr(m_device), smart_refctd_ptr(m_logger), getGraphicsQueue(), smart_refctd_ptr(resources), dpyInfo.resX, dpyInfo.resY); - if (ImGui::RadioButton("Full view", !transformParams.useWindow)) - transformParams.useWindow = false; + if (!scene) + { + logFail("Could not create geometry creator scene[%d]!", i); + return false; + } + } + } - ImGui::SameLine(); + oracle.reportBeginFrameRecord(); + + if (base_t::argv.size() >= 3 && argv[1] == "-timeout_seconds") + timeout = std::chrono::seconds(std::atoi(argv[2].c_str())); + start = clock_t::now(); + return true; + } + + bool updateGUIDescriptorSet() + { + // UI texture atlas + our camera scene textures, note we don't create info & write pair for the font sampler because UI extension's is immutable and baked into DS layout + static std::array descriptorInfo; + static IGPUDescriptorSet::SWriteDescriptorSet writes[TotalUISampleTexturesAmount]; + + 
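// Binding layout: slot FontAtlasTexId holds the ImGui font atlas view, and slots 1..windowControlBinding.size()
// hold each render window's scene color attachment so the GUI can sample them as textures (e.g. via ImGui::Image).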
descriptorInfo[nbl::ext::imgui::UI::FontAtlasTexId].info.image.imageLayout = IImage::LAYOUT::READ_ONLY_OPTIMAL; + descriptorInfo[nbl::ext::imgui::UI::FontAtlasTexId].desc = core::smart_refctd_ptr(m_ui.manager->getFontAtlasView()); + writes[nbl::ext::imgui::UI::FontAtlasTexId].info = descriptorInfo.data() + nbl::ext::imgui::UI::FontAtlasTexId; + + for (uint32_t i = 0; i < windowControlBinding.size(); ++i) + { + const auto textureIx = i + 1u; + + descriptorInfo[textureIx].info.image.imageLayout = IImage::LAYOUT::READ_ONLY_OPTIMAL; + descriptorInfo[textureIx].desc = windowControlBinding[i].scene->getColorAttachment(); + + writes[textureIx].info = descriptorInfo.data() + textureIx; + writes[textureIx].info = descriptorInfo.data() + textureIx; + } + + for (uint32_t i = 0; i < descriptorInfo.size(); ++i) + { + writes[i].dstSet = m_ui.descriptorSet.get(); + writes[i].binding = 0u; + writes[i].arrayElement = i; + writes[i].count = 1u; + } + + return m_device->updateDescriptorSets(writes, {}); + } + + inline void workLoopBody() override + { + // framesInFlight: ensuring safe execution of command buffers and acquires, `framesInFlight` only affect semaphore waits, don't use this to index your resources because it can change with swapchain recreation. + const uint32_t framesInFlight = core::min(MaxFramesInFlight, m_surface->getMaxAcquiresInFlight()); + // We block for semaphores for 2 reasons here: + // A) Resource: Can't use resource like a command buffer BEFORE previous use is finished! [MaxFramesInFlight] + // B) Acquire: Can't have more acquires in flight than a certain threshold returned by swapchain or your surface helper class. [MaxAcquiresInFlight] + if (m_realFrameIx >= framesInFlight) + { + const ISemaphore::SWaitInfo cmdbufDonePending[] = { + { + .semaphore = m_semaphore.get(), + .value = m_realFrameIx + 1 - framesInFlight + } + }; + if (m_device->blockForSemaphores(cmdbufDonePending) != ISemaphore::WAIT_RESULT::SUCCESS) + return; + } + + // Predict size of next render, and bail if nothing to do + const auto currentSwapchainExtent = m_surface->getCurrentExtent(); + if (currentSwapchainExtent.width * currentSwapchainExtent.height <= 0) + return; + // The extent of the swapchain might change between now and `present` but the blit should adapt nicely + const VkRect2D currentRenderArea = { .offset = {0,0},.extent = currentSwapchainExtent }; + + // You explicitly should not use `getAcquireCount()` see the comment on `m_realFrameIx` + const auto resourceIx = m_realFrameIx % MaxFramesInFlight; + + // We will be using this command buffer to produce the frame + auto frame = m_tripleBuffers[resourceIx].get(); + auto cmdbuf = m_cmdBufs[resourceIx].get(); + + // update CPU stuff - controllers, events, UI state + update(); + + bool willSubmit = true; + { + willSubmit &= cmdbuf->reset(IGPUCommandBuffer::RESET_FLAGS::RELEASE_RESOURCES_BIT); + willSubmit &= cmdbuf->begin(IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT); + willSubmit &= cmdbuf->beginDebugMarker("UIApp Frame"); + + // render geometry creator scene to offline frame buffer & submit + // TODO: OK with TRI buffer this thing is retarded now + // (**) <- a note why bellow before submit + + auto renderOfflineScene = [&](auto& scene) + { + scene->begin(); + { + scene->update(); + scene->record(); + scene->end(); + } + scene->submit(getGraphicsQueue()); + }; + + if (useWindow) + for (auto binding : windowControlBinding) + renderOfflineScene(binding.scene); + else + renderOfflineScene(windowControlBinding[activeRenderWindowIx].scene.get()); + + const 
IGPUCommandBuffer::SClearColorValue clearValue = { .float32 = {0.f,0.f,0.f,1.f} }; + const IGPUCommandBuffer::SRenderpassBeginInfo info = { + .framebuffer = m_framebuffers[resourceIx].get(), + .colorClearValues = &clearValue, + .depthStencilClearValues = nullptr, + .renderArea = currentRenderArea + }; + + // UI renderpass + willSubmit &= cmdbuf->beginRenderPass(info, IGPUCommandBuffer::SUBPASS_CONTENTS::INLINE); + { + asset::SViewport viewport; + { + viewport.minDepth = 1.f; + viewport.maxDepth = 0.f; + viewport.x = 0u; + viewport.y = 0u; + viewport.width = m_window->getWidth(); + viewport.height = m_window->getHeight(); + } + + willSubmit &= cmdbuf->setViewport(0u, 1u, &viewport); + + const VkRect2D currentRenderArea = + { + .offset = {0,0}, + .extent = {m_window->getWidth(),m_window->getHeight()} + }; + + IQueue::SSubmitInfo::SCommandBufferInfo commandBuffersInfo[] = { {.cmdbuf = cmdbuf } }; + + const IGPUCommandBuffer::SRenderpassBeginInfo info = + { + .framebuffer = m_framebuffers[resourceIx].get(), + .colorClearValues = &Traits::clearColor, + .depthStencilClearValues = nullptr, + .renderArea = currentRenderArea + }; + + nbl::video::ISemaphore::SWaitInfo waitInfo = { .semaphore = m_semaphore.get(), .value = m_realFrameIx + 1u }; + const auto uiParams = m_ui.manager->getCreationParameters(); + auto* pipeline = m_ui.manager->getPipeline(); + + cmdbuf->bindGraphicsPipeline(pipeline); + cmdbuf->bindDescriptorSets(EPBP_GRAPHICS, pipeline->getLayout(), uiParams.resources.texturesInfo.setIx, 1u, &m_ui.descriptorSet.get()); // note that we use default UI pipeline layout where uiParams.resources.textures.setIx == uiParams.resources.samplers.setIx + + if (!keepRunning()) + return; + + willSubmit &= m_ui.manager->render(cmdbuf, waitInfo); + } + willSubmit &= cmdbuf->endRenderPass(); + + // If the Rendering and Blit/Present Queues don't come from the same family we need to transfer ownership, because we need to preserve contents between them. + auto blitQueueFamily = m_surface->getAssignedQueue()->getFamilyIndex(); + // Also should crash/error if concurrent sharing enabled but would-be-user-queue is not in the share set, but oh well. 
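+ // Context for the ownership RELEASE recorded just below: before the blit, the presenting/blit queue has
+ // to record the matching ACQUIRE on the same subresource; here that is assumed to be done by the
+ // smooth-resize surface helper. A minimal sketch of what it would look like (hypothetical `blitCmdbuf`,
+ // otherwise only types and flags already used in this file):
+ //
+ //	const IGPUCommandBuffer::SPipelineBarrierDependencyInfo::image_barrier_t acquire[] = { {
+ //		.barrier = {
+ //			.dep = {
+ //				// for a Queue Family Ownership Acquire the source masks are irrelevant,
+ //				// the semaphore wait on ALL_COMMANDS already covers visibility
+ //				.srcStageMask = asset::PIPELINE_STAGE_FLAGS::NONE,
+ //				.srcAccessMask = asset::ACCESS_FLAGS::NONE,
+ //				.dstStageMask = asset::PIPELINE_STAGE_FLAGS::ALL_COMMANDS_BITS,
+ //				.dstAccessMask = asset::ACCESS_FLAGS::MEMORY_READ_BITS | asset::ACCESS_FLAGS::MEMORY_WRITE_BITS
+ //			},
+ //			.ownershipOp = IGPUCommandBuffer::SOwnershipTransferBarrier::OWNERSHIP_OP::ACQUIRE,
+ //			.otherQueueFamilyIndex = cmdbuf->getQueueFamilyIndex() // the releasing (graphics) family
+ //		},
+ //		.image = frame,
+ //		.subresourceRange = TripleBufferUsedSubresourceRange
+ //	} };
+ //	const IGPUCommandBuffer::SPipelineBarrierDependencyInfo acquireDep = { .imgBarriers = acquire };
+ //	blitCmdbuf->pipelineBarrier(asset::EDF_NONE, acquireDep);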
+ const bool needOwnershipRelease = cmdbuf->getQueueFamilyIndex() != blitQueueFamily && !frame->getCachedCreationParams().isConcurrentSharing(); + if (needOwnershipRelease) + { + const IGPUCommandBuffer::SPipelineBarrierDependencyInfo::image_barrier_t barrier[] = { { + .barrier = { + .dep = { + // Normally I'd put `COLOR_ATTACHMENT` on the masks, but we want this to happen after Layout Transition :( + // https://github.com/KhronosGroup/Vulkan-Docs/issues/2319 + .srcStageMask = asset::PIPELINE_STAGE_FLAGS::ALL_COMMANDS_BITS, + .srcAccessMask = asset::ACCESS_FLAGS::MEMORY_READ_BITS | asset::ACCESS_FLAGS::MEMORY_WRITE_BITS, + // For a Queue Family Ownership Release the destination access masks are irrelevant + // and source stage mask can be NONE as long as the semaphore signals ALL_COMMANDS_BIT + .dstStageMask = asset::PIPELINE_STAGE_FLAGS::NONE, + .dstAccessMask = asset::ACCESS_FLAGS::NONE + }, + .ownershipOp = IGPUCommandBuffer::SOwnershipTransferBarrier::OWNERSHIP_OP::RELEASE, + .otherQueueFamilyIndex = blitQueueFamily + }, + .image = frame, + .subresourceRange = TripleBufferUsedSubresourceRange + // there will be no layout transition, already done by the Renderpass End + } }; + const IGPUCommandBuffer::SPipelineBarrierDependencyInfo depInfo = { .imgBarriers = barrier }; + willSubmit &= cmdbuf->pipelineBarrier(asset::EDF_NONE, depInfo); + } + } + willSubmit &= cmdbuf->end(); + + // submit and present under a mutex ASAP + if (willSubmit) + { + // We will signal a semaphore in the rendering queue, and await it with the presentation/blit queue + const IQueue::SSubmitInfo::SSemaphoreInfo rendered = + { + .semaphore = m_semaphore.get(), + .value = m_realFrameIx + 1, + // Normally I'd put `COLOR_ATTACHMENT` on the masks, but we want to signal after Layout Transitions and optional Ownership Release + // https://github.com/KhronosGroup/Vulkan-Docs/issues/2319 + .stageMask = asset::PIPELINE_STAGE_FLAGS::ALL_COMMANDS_BITS + }; + const IQueue::SSubmitInfo::SCommandBufferInfo cmdbufs[1] = + { { + .cmdbuf = cmdbuf + } }; + // We need to wait on previous triple buffer blits/presents from our source image to complete + auto* pBlitWaitValue = m_blitWaitValues.data() + resourceIx; + auto swapchainLock = m_surface->pseudoAcquire(pBlitWaitValue); + const IQueue::SSubmitInfo::SSemaphoreInfo blitted = + { + .semaphore = m_surface->getPresentSemaphore(), + .value = pBlitWaitValue->load(), + // Normally I'd put `BLIT` on the masks, but we want to wait before Implicit Layout Transitions and optional Implicit Ownership Acquire + // https://github.com/KhronosGroup/Vulkan-Docs/issues/2319 + .stageMask = asset::PIPELINE_STAGE_FLAGS::ALL_COMMANDS_BITS + }; + const IQueue::SSubmitInfo submitInfos[1] = + { + { + .waitSemaphores = {&blitted,1}, + .commandBuffers = cmdbufs, + .signalSemaphores = {&rendered,1} + } + }; + + // (**) -> wait on offline framebuffers in window mode + { + if (useWindow) + { + m_device->blockForSemaphores(std::to_array + ( + { + // wait for first planar scene view fb + { + .semaphore = windowControlBinding[0].scene->semaphore.progress.get(), + .value = windowControlBinding[0].scene->semaphore.finishedValue + }, + // and second one too + { + .semaphore = windowControlBinding[1].scene->semaphore.progress.get(), + .value = windowControlBinding[1].scene->semaphore.finishedValue + } + } + )); + } + else + { + m_device->blockForSemaphores(std::to_array + ( + { + // wait for picked planar only + { + .semaphore = windowControlBinding[activeRenderWindowIx].scene->semaphore.progress.get(), + .value = 
windowControlBinding[activeRenderWindowIx].scene->semaphore.finishedValue + } + } + )); + } + + updateGUIDescriptorSet(); + } + + if (getGraphicsQueue()->submit(submitInfos) != IQueue::RESULT::SUCCESS) + return; + + m_realFrameIx++; + + // only present if there's successful content to show + const ISmoothResizeSurface::SPresentInfo presentInfo = { + { + .source = {.image = frame,.rect = currentRenderArea}, + .waitSemaphore = rendered.semaphore, + .waitValue = rendered.value, + .pPresentSemaphoreWaitValue = pBlitWaitValue, + }, + // The Graphics Queue will be the the most recent owner just before it releases ownership + cmdbuf->getQueueFamilyIndex() + }; + m_surface->present(std::move(swapchainLock), presentInfo); + } + firstFrame = false; + } + + inline bool keepRunning() override + { + if (m_surface->irrecoverable()) + return false; + + return true; + } + + inline bool onAppTerminated() override + { + return base_t::onAppTerminated(); + } + + inline void update() + { + m_inputSystem->getDefaultMouse(&mouse); + m_inputSystem->getDefaultKeyboard(&keyboard); + + auto updatePresentationTimestamp = [&]() + { + oracle.reportEndFrameRecord(); + const auto timestamp = oracle.getNextPresentationTimeStamp(); + oracle.reportBeginFrameRecord(); + + return timestamp; + }; + + m_nextPresentationTimestamp = updatePresentationTimestamp(); + + struct + { + std::vector mouse {}; + std::vector keyboard {}; + } capturedEvents; + { + mouse.consumeEvents([&](const IMouseEventChannel::range_t& events) -> void + { + if (m_window->hasInputFocus()) + for (const auto& e : events) + capturedEvents.mouse.emplace_back(e); + }, m_logger.get()); + + keyboard.consumeEvents([&](const IKeyboardEventChannel::range_t& events) -> void + { + if (m_window->hasInputFocus()) + for (const auto& e : events) + capturedEvents.keyboard.emplace_back(e); + }, m_logger.get()); + } + + const auto cursorPosition = m_window->getCursorControl()->getPosition(); + + nbl::ext::imgui::UI::SUpdateParameters params = + { + .mousePosition = nbl::hlsl::float32_t2(cursorPosition.x, cursorPosition.y) - nbl::hlsl::float32_t2(m_window->getX(), m_window->getY()), + .displaySize = { m_window->getWidth(), m_window->getHeight() }, + .mouseEvents = { capturedEvents.mouse.data(), capturedEvents.mouse.size() }, + .keyboardEvents = { capturedEvents.keyboard.data(), capturedEvents.keyboard.size() } + }; + + if (enableActiveCameraMovement) + { + auto& binding = windowControlBinding[activeRenderWindowIx]; + auto& planar = m_planarProjections[binding.activePlanarIx]; + auto* camera = planar->getCamera(); + + assert(binding.boundProjectionIx.has_value()); + auto& projection = planar->getPlanarProjections()[binding.boundProjectionIx.value()]; + + static std::vector virtualEvents(0x45); + uint32_t vCount = {}; + + projection.beginInputProcessing(m_nextPresentationTimestamp); + { + projection.process(nullptr, vCount); + + if (virtualEvents.size() < vCount) + virtualEvents.resize(vCount); + + auto* orbit = dynamic_cast(camera); + + if (orbit) + { + uint32_t vKeyboardEventsCount = {}, vMouseEventsCount = {}; + + projection.processKeyboard(nullptr, vKeyboardEventsCount, {}); + projection.processMouse(nullptr, vMouseEventsCount, {}); + + auto* output = virtualEvents.data(); + + projection.processKeyboard(output, vKeyboardEventsCount, params.keyboardEvents); + output += vKeyboardEventsCount; + + if (ImGui::IsMouseDown(ImGuiMouseButton_Left)) + projection.processMouse(output, vMouseEventsCount, params.mouseEvents); + else + vMouseEventsCount = 0; + + vCount = 
vKeyboardEventsCount + vMouseEventsCount; + } + else + projection.process(virtualEvents.data(), vCount, { params.keyboardEvents, params.mouseEvents }); + } + projection.endInputProcessing(); + + if (vCount) + camera->manipulate({ virtualEvents.data(), vCount }, ICamera::Local); + } + + m_ui.manager->update(params); + } + + private: + inline void imguiListen() + { + ImGuiIO& io = ImGui::GetIO(); + + ImGuizmo::BeginFrame(); + { + SImResourceInfo info; + info.samplerIx = (uint16_t)nbl::ext::imgui::UI::DefaultSamplerIx::USER; + + + // ORBIT CAMERA TEST + { + for (auto& planar : m_planarProjections) + { + auto* camera = planar->getCamera(); + + auto* orbit = dynamic_cast(camera); + + if (orbit) + { + auto targetPostion = transpose(getMatrix3x4As4x4(m_model))[3]; + orbit->target(targetPostion); + orbit->manipulate({}, {}); + } + } + } + + // render bound planar camera views onto GUI windows + if (useWindow) + { + // ABS TRS editor to manipulate bound object + TransformEditor(); + + if(enableActiveCameraMovement) + ImGuizmo::Enable(false); + else + ImGuizmo::Enable(true); + + size_t gizmoIx = {}; + size_t manipulationCounter = {}; + const std::optional modelInUseIx = ImGuizmo::IsUsingAny() ? std::optional(boundPlanarCameraIxToManipulate.has_value() ? 1u + boundPlanarCameraIxToManipulate.value() : 0u) : std::optional(std::nullopt); + + for (uint32_t windowIx = 0; windowIx < windowControlBinding.size(); ++windowIx) + { + // setup + { + const auto& rw = wInit.renderWindows[windowIx]; + ImGui::SetNextWindowPos({ rw.iPos.x, rw.iPos.y }, ImGuiCond_Appearing); + ImGui::SetNextWindowSize({ rw.iSize.x, rw.iSize.y }, ImGuiCond_Appearing); + } + ImGui::SetNextWindowSizeConstraints(ImVec2(0x45, 0x45), ImVec2(7680, 4320)); + + ImGui::PushStyleColor(ImGuiCol_WindowBg, (ImVec4)ImColor(0.35f, 0.3f, 0.3f)); + const std::string ident = "Render Window \"" + std::to_string(windowIx) + "\""; + + ImGui::Begin(ident.data(), 0); + const ImVec2 contentRegionSize = ImGui::GetContentRegionAvail(), windowPos = ImGui::GetWindowPos(), cursorPos = ImGui::GetCursorScreenPos(); + + ImGuiWindow* window = ImGui::GetCurrentWindow(); + { + const auto mPos = ImGui::GetMousePos(); + + if (mPos.x < cursorPos.x || mPos.y < cursorPos.y || mPos.x > cursorPos.x + contentRegionSize.x || mPos.y > cursorPos.y + contentRegionSize.y) + window->Flags = ImGuiWindowFlags_None; + else + window->Flags = ImGuiWindowFlags_NoMove; + } + + // setup bound entities for the window like camera & projections + auto& binding = windowControlBinding[windowIx]; + auto& planarBound = m_planarProjections[binding.activePlanarIx]; + assert(planarBound); + + binding.aspectRatio = contentRegionSize.x / contentRegionSize.y; + auto* planarViewCameraBound = planarBound->getCamera(); + + assert(planarViewCameraBound); + assert(binding.boundProjectionIx.has_value()); + + auto& projection = planarBound->getPlanarProjections()[binding.boundProjectionIx.value()]; + projection.update(binding.leftHandedProjection, binding.aspectRatio); + + // TODO: + // would be nice to normalize imguizmo visual vectors (possible with styles) + + // first 0th texture is for UI texture atlas, then there are our window textures + auto fboImguiTextureID = windowIx + 1u; + info.textureID = fboImguiTextureID; + + if(binding.allowGizmoAxesToFlip) + ImGuizmo::AllowAxisFlip(true); + else + ImGuizmo::AllowAxisFlip(false); + + if(projection.getParameters().m_type == IPlanarProjection::CProjection::Orthographic) + ImGuizmo::SetOrthographic(true); + else + ImGuizmo::SetOrthographic(false); + + 
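+ // Convention note for the ImGuizmo calls just below: ImGuizmo consumes column-major 4x4 matrices while
+ // Nabla's hlsl matrices (and the gimbal's 3x4 view) are row-major, so the view and projection are
+ // promoted to 4x4 and transposed before being handed over, and with flipGizmoY the projection
+ // additionally gets its [1][1] negated to account for Vulkan's downward clip-space Y. A minimal sketch
+ // of the same conversion:
+ //
+ //	float32_t4x4 nblToImguizmo(const float32_t3x4& rowMajorView) // e.g. gimbal.getViewMatrix()
+ //	{
+ //		return transpose(getMatrix3x4As4x4(rowMajorView)); // promote to 4x4, then swap rows/columns
+ //	}
+ //	// projectionForImguizmo[1][1] *= -1.f; // only when flipGizmoY is enabled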
ImGuizmo::SetDrawlist(); + ImGui::Image(info, contentRegionSize); + ImGuizmo::SetRect(cursorPos.x, cursorPos.y, contentRegionSize.x, contentRegionSize.y); + + // I will assume we need to focus a window to start manipulating objects from it + if (ImGui::IsWindowFocused(ImGuiFocusedFlags_ChildWindows)) + activeRenderWindowIx = windowIx; + + // we render a scene from view of a camera bound to planar window + ImGuizmoPlanarM16InOut imguizmoPlanar; + imguizmoPlanar.view = getCastedMatrix(transpose(getMatrix3x4As4x4(planarViewCameraBound->getGimbal().getViewMatrix()))); + imguizmoPlanar.projection = getCastedMatrix(transpose(projection.getProjectionMatrix())); + + if (flipGizmoY) // note we allow to flip gizmo just to match our coordinates + imguizmoPlanar.projection[1][1] *= -1.f; // https://johannesugb.github.io/gpu-programming/why-do-opengl-proj-matrices-fail-in-vulkan/ + + static constexpr float identityMatrix[] = + { + 1.f, 0.f, 0.f, 0.f, + 0.f, 1.f, 0.f, 0.f, + 0.f, 0.f, 1.f, 0.f, + 0.f, 0.f, 0.f, 1.f + }; + + if(binding.enableDebugGridDraw) + ImGuizmo::DrawGrid(&imguizmoPlanar.view[0][0], &imguizmoPlanar.projection[0][0], identityMatrix, 100.f); + + for (uint32_t modelIx = 0; modelIx < 1u + m_planarProjections.size(); modelIx++) + { + ImGuizmo::PushID(gizmoIx); ++gizmoIx; + + const bool isCameraGimbalTarget = modelIx; // I assume scene demo model is 0th ix, left are planar cameras + ICamera* const targetGimbalManipulationCamera = isCameraGimbalTarget ? m_planarProjections[modelIx - 1u]->getCamera() : nullptr; + bool discard = isCameraGimbalTarget && mCurrentGizmoOperation != ImGuizmo::TRANSLATE; // discard WiP stuff + + // if we try to manipulate a camera which appears to be the same camera we see scene from then obvsly it doesn't make sense to manipulate its gizmo so we skip it + // EDIT: it actually makes some sense if you assume render planar view is rendered with ortho projection, but we would need to add imguizmo controller virtual map + // to ban forward/backward in this mode if this condition is true + if (targetGimbalManipulationCamera == planarViewCameraBound) + { + ImGuizmo::PopID(); + continue; + } + + ImGuizmoModelM16InOut imguizmoModel; - if (ImGui::RadioButton("Window", transformParams.useWindow)) - transformParams.useWindow = true; + if (isCameraGimbalTarget) + { + assert(targetGimbalManipulationCamera); + imguizmoModel.inTRS = gimbalToImguizmoTRS(getCastedMatrix(targetGimbalManipulationCamera->getGimbal()())); + } + else + imguizmoModel.inTRS = transpose(getMatrix3x4As4x4(m_model)); + + imguizmoModel.outTRS = imguizmoModel.inTRS; + { + const bool success = ImGuizmo::Manipulate(&imguizmoPlanar.view[0][0], &imguizmoPlanar.projection[0][0], mCurrentGizmoOperation, mCurrentGizmoMode, &imguizmoModel.outTRS[0][0], &imguizmoModel.outDeltaTRS[0][0], useSnap ? 
&snap[0] : nullptr); + + if (success) + { + ++manipulationCounter; + discard |= manipulationCounter > 1; + + /* + + NOTE to self & TODO: I must be killing ImGuizmo last invocation cache or something with my delta events (or at least I dont + see now where I'm *maybe* using its API incorrectly, the problem is I get translation parts in delta matrix when doing + rotations hence it glitches my cameras -> on the other hand I can see it kinda correctly outputs the delta matrix when + gc model is bound + + auto postprocessDeltaManipulation = [](const float32_t4x4& inDeltaMatrix, float32_t4x4& outDeltaMatrix, ImGuizmo::OPERATION operation) -> void + { + struct + { + float32_t3 dTranslation, dRotation, dScale; + } world; - ImGui::Text("Camera"); - bool viewDirty = false; + ImGuizmo::DecomposeMatrixToComponents(&inDeltaMatrix[0][0], &world.dTranslation[0], &world.dRotation[0], &world.dScale[0]); + + if (operation == ImGuizmo::TRANSLATE) + { + world.dRotation = float32_t3(0, 0, 0); + } + else if (operation == ImGuizmo::ROTATE) + { + world.dTranslation = float32_t3(0, 0, 0); + } + + ImGuizmo::RecomposeMatrixFromComponents(&world.dTranslation[0], &world.dRotation[0], &world.dScale[0], &outDeltaMatrix[0][0]); + }; + + postprocessDeltaManipulation(imguizmoModel.outDeltaTRS, imguizmoModel.outDeltaTRS, mCurrentGizmoOperation); + */ + + if (!discard) + { + if (targetGimbalManipulationCamera) + { + boundCameraToManipulate = smart_refctd_ptr(targetGimbalManipulationCamera); + boundPlanarCameraIxToManipulate = modelIx - 1u; + + /* + testing imguizmo controller for target camera, we use delta world imguizmo TRS matrix to generate virtual events + */ + + { + static std::vector virtualEvents(0x45); + uint32_t vCount = {}; + + targetGimbalManipulationCamera->beginInputProcessing(m_nextPresentationTimestamp); + { + targetGimbalManipulationCamera->process(nullptr, vCount); + + if (virtualEvents.size() < vCount) + virtualEvents.resize(vCount); + + IGimbalController::SUpdateParameters params; + params.imguizmoEvents = { { imguizmoModel.outDeltaTRS } }; + targetGimbalManipulationCamera->process(virtualEvents.data(), vCount, params); + } + targetGimbalManipulationCamera->endInputProcessing(); + + if (vCount) + { + const float pMoveSpeed = targetGimbalManipulationCamera->getMoveSpeedScale(); + const float pRotationSpeed = targetGimbalManipulationCamera->getRotationSpeedScale(); + + // I start to think controller should be able to set sensitivity to scale magnitudes of generated events + // in order for camera to not keep any magnitude scalars like move or rotation speed scales + + targetGimbalManipulationCamera->setMoveSpeedScale(1); + targetGimbalManipulationCamera->setRotationSpeedScale(1); + + // NOTE: generated events from ImGuizmo controller are always in world space! 
+ targetGimbalManipulationCamera->manipulate({ virtualEvents.data(), vCount }, ICamera::World); + + targetGimbalManipulationCamera->setMoveSpeedScale(pMoveSpeed); + targetGimbalManipulationCamera->setRotationSpeedScale(pRotationSpeed); + } + } + } + else + { + // again, for scene demo model full affine transformation without limits is assumed + m_model = float32_t3x4(transpose(imguizmoModel.outTRS)); + boundCameraToManipulate = nullptr; + boundPlanarCameraIxToManipulate = std::nullopt; + } + } + } + + if (ImGuizmo::IsOver() and not ImGuizmo::IsUsingAny() && not enableActiveCameraMovement) + { + ImGui::PushStyleColor(ImGuiCol_WindowBg, ImVec4(0.2f, 0.2f, 0.2f, 0.8f)); + ImGui::PushStyleColor(ImGuiCol_Border, ImVec4(1.0f, 1.0f, 1.0f, 1.0f)); + ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 1.5f); - if (ImGui::RadioButton("LH", isLH)) - isLH = true; + ImGuiIO& io = ImGui::GetIO(); + ImVec2 mousePos = io.MousePos; + ImGui::SetNextWindowPos(ImVec2(mousePos.x + 10, mousePos.y + 10), ImGuiCond_Always); - ImGui::SameLine(); + ImGui::Begin("InfoOverlay", nullptr, + ImGuiWindowFlags_NoDecoration | + ImGuiWindowFlags_AlwaysAutoResize | + ImGuiWindowFlags_NoSavedSettings); - if (ImGui::RadioButton("RH", !isLH)) - isLH = false; + std::string ident; - if (ImGui::RadioButton("Perspective", isPerspective)) - isPerspective = true; + if (targetGimbalManipulationCamera) + ident = targetGimbalManipulationCamera->getIdentifier(); + else + ident = "Geometry Creator Object"; - ImGui::SameLine(); + ImGui::Text("Identifier: %s", ident.c_str()); + ImGui::Text("Object Ix: %u", modelIx); - if (ImGui::RadioButton("Orthographic", !isPerspective)) - isPerspective = false; + ImGui::End(); + + ImGui::PopStyleVar(); + ImGui::PopStyleColor(2); + } + } + ImGuizmo::PopID(); + } + + ImGui::End(); + ImGui::PopStyleColor(1); + } + assert(manipulationCounter <= 1u); + } + // render selected camera view onto full screen + else + { + info.textureID = 1u + activeRenderWindowIx; + + ImGui::SetNextWindowPos(ImVec2(0, 0)); + ImGui::SetNextWindowSize(io.DisplaySize); + ImGui::PushStyleColor(ImGuiCol_WindowBg, ImVec4(0, 0, 0, 0)); // fully transparent fake window + ImGui::Begin("FullScreenWindow", nullptr, ImGuiWindowFlags_NoTitleBar | ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoMove | ImGuiWindowFlags_NoScrollbar | ImGuiWindowFlags_NoScrollWithMouse | ImGuiWindowFlags_NoCollapse | ImGuiWindowFlags_NoBringToFrontOnFocus | ImGuiWindowFlags_NoBackground | ImGuiWindowFlags_NoInputs); + const ImVec2 contentRegionSize = ImGui::GetContentRegionAvail(), windowPos = ImGui::GetWindowPos(), cursorPos = ImGui::GetCursorScreenPos(); + { + auto& binding = windowControlBinding[activeRenderWindowIx]; + auto& planarBound = m_planarProjections[binding.activePlanarIx]; + assert(planarBound); + + binding.aspectRatio = contentRegionSize.x / contentRegionSize.y; + auto* planarViewCameraBound = planarBound->getCamera(); + + assert(planarViewCameraBound); + assert(binding.boundProjectionIx.has_value()); + + auto& projection = planarBound->getPlanarProjections()[binding.boundProjectionIx.value()]; + projection.update(binding.leftHandedProjection, binding.aspectRatio); + } + + ImGui::Image(info, contentRegionSize); + ImGuizmo::SetRect(cursorPos.x, cursorPos.y, contentRegionSize.x, contentRegionSize.y); + + ImGui::End(); + ImGui::PopStyleColor(1); + } + } + + // to Nabla + update camera & model matrices + { + const auto& references = resources->objects; + const auto type = static_cast(gcIndex); + const auto& [gpu, meta] = references[type]; + + for 
(uint32_t i = 0u; i < windowControlBinding.size(); ++i) + { + auto& binding = windowControlBinding[i]; + auto& hook = binding.scene->object; + + auto& planarBound = m_planarProjections[binding.activePlanarIx]; + assert(planarBound); + auto* boundPlanarCamera = planarBound->getCamera(); + + hook.meta.type = type; + hook.meta.name = meta.name; + { + float32_t3x4 viewMatrix, modelViewMatrix, normalMatrix; + float32_t4x4 modelViewProjectionMatrix; + + viewMatrix = getCastedMatrix(boundPlanarCamera->getGimbal().getViewMatrix()); + modelViewMatrix = concatenateBFollowedByA(viewMatrix, m_model); + + assert(binding.boundProjectionIx.has_value()); + auto& projection = planarBound->getPlanarProjections()[binding.boundProjectionIx.value()]; + projection.update(binding.leftHandedProjection, binding.aspectRatio); + + auto concatMatrix = mul(getCastedMatrix(projection.getProjectionMatrix()), getMatrix3x4As4x4(viewMatrix)); + modelViewProjectionMatrix = mul(concatMatrix, getMatrix3x4As4x4(m_model)); + + memcpy(hook.viewParameters.MVP, &modelViewProjectionMatrix[0][0], sizeof(hook.viewParameters.MVP)); + memcpy(hook.viewParameters.MV, &modelViewMatrix[0][0], sizeof(hook.viewParameters.MV)); + memcpy(hook.viewParameters.NormalMat, &normalMatrix[0][0], sizeof(hook.viewParameters.NormalMat)); + } + } + } + + // Planars + { + // setup + { + ImGui::SetNextWindowPos({ wInit.planars.iPos.x, wInit.planars.iPos.y }, ImGuiCond_Appearing); + ImGui::SetNextWindowSize({ wInit.planars.iSize.x, wInit.planars.iSize.y }, ImGuiCond_Appearing); + } - ImGui::Checkbox("Enable \"view manipulate\"", &transformParams.enableViewManipulate); - ImGui::Checkbox("Enable camera movement", &move); - ImGui::SliderFloat("Move speed", &moveSpeed, 0.1f, 10.f); - ImGui::SliderFloat("Rotate speed", &rotateSpeed, 0.1f, 10.f); + ImGui::Begin("Planar projection"); + ImGui::Checkbox("Window mode##useWindow", &useWindow); + ImGui::Separator(); - // ImGui::Checkbox("Flip Gizmo's Y axis", &flipGizmoY); // let's not expose it to be changed in UI but keep the logic in case + auto& active = windowControlBinding[activeRenderWindowIx]; + const auto activeRenderWindowIxString = std::to_string(activeRenderWindowIx); - if (isPerspective) - ImGui::SliderFloat("Fov", &fov, 20.f, 150.f); - else - ImGui::SliderFloat("Ortho width", &viewWidth, 1, 20); + ImGui::Text("Active Render Window: %s", activeRenderWindowIxString.c_str()); + { + const size_t planarsCount = m_planarProjections.size(); + assert(planarsCount); - ImGui::SliderFloat("zNear", &zNear, 0.1f, 100.f); - ImGui::SliderFloat("zFar", &zFar, 110.f, 10000.f); + std::vector sbels(planarsCount); + for (size_t i = 0; i < planarsCount; ++i) + sbels[i] = "Planar " + std::to_string(i); - viewDirty |= ImGui::SliderFloat("Distance", &transformParams.camDistance, 1.f, 69.f); + std::vector labels(planarsCount); + for (size_t i = 0; i < planarsCount; ++i) + labels[i] = sbels[i].c_str(); - if (viewDirty || firstFrame) + int currentPlanarIx = static_cast(active.activePlanarIx); + if (ImGui::Combo("Active Planar", ¤tPlanarIx, labels.data(), static_cast(labels.size()))) { - core::vectorSIMDf cameraPosition(cosf(camYAngle)* cosf(camXAngle)* transformParams.camDistance, sinf(camXAngle)* transformParams.camDistance, sinf(camYAngle)* cosf(camXAngle)* transformParams.camDistance); - core::vectorSIMDf cameraTarget(0.f, 0.f, 0.f); - const static core::vectorSIMDf up(0.f, 1.f, 0.f); + active.activePlanarIx = static_cast(currentPlanarIx); + 
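+ // Aside on the per-window view-parameter loop further up: `normalMatrix` is memcpy'd into
+ // hook.viewParameters.NormalMat without ever being written, whereas the removed path filled it via
+ // matrix3x4SIMD::getSub3x3InverseTranspose(). A sketch of the equivalent computation from the 3x3
+ // linear part of the model-view (hypothetical helper, only hlsl-style cross/dot assumed):
+ //
+ //	float32_t3x3 inverseTranspose3x3(const float32_t3x4& mv)
+ //	{
+ //		const float32_t3 r0 = { mv[0][0], mv[0][1], mv[0][2] };
+ //		const float32_t3 r1 = { mv[1][0], mv[1][1], mv[1][2] };
+ //		const float32_t3 r2 = { mv[2][0], mv[2][1], mv[2][2] };
+ //		const float32_t3 c0 = cross(r1, r2), c1 = cross(r2, r0), c2 = cross(r0, r1); // cofactor rows
+ //		const float rcpDet = 1.f / dot(r0, c0); // det(M) = r0 . (r1 x r2)
+ //		float32_t3x3 normalMat;
+ //		normalMat[0] = c0 * rcpDet; // cofactor / det == inverse-transpose
+ //		normalMat[1] = c1 * rcpDet;
+ //		normalMat[2] = c2 * rcpDet;
+ //		return normalMat;
+ //	}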
active.pickDefaultProjections(m_planarProjections[active.activePlanarIx]->getPlanarProjections()); + } + } - camera.setPosition(cameraPosition); - camera.setTarget(cameraTarget); - camera.setBackupUpVector(up); + assert(active.boundProjectionIx.has_value()); + assert(active.lastBoundPerspectivePresetProjectionIx.has_value()); + assert(active.lastBoundOrthoPresetProjectionIx.has_value()); - camera.recomputeViewMatrix(); + const auto activePlanarIxString = std::to_string(active.activePlanarIx); + auto& planarBound = m_planarProjections[active.activePlanarIx]; + assert(planarBound); - firstFrame = false; - } + auto selectedProjectionType = planarBound->getPlanarProjections()[active.boundProjectionIx.value()].getParameters().m_type; + { + const char* labels[] = { "Perspective", "Orthographic" }; + int type = static_cast(selectedProjectionType); - ImGui::Text("X: %f Y: %f", io.MousePos.x, io.MousePos.y); - if (ImGuizmo::IsUsing()) + if (ImGui::Combo("Projection Type", &type, labels, IM_ARRAYSIZE(labels))) { - ImGui::Text("Using gizmo"); + selectedProjectionType = static_cast(type); + + switch (selectedProjectionType) + { + case IPlanarProjection::CProjection::Perspective: active.boundProjectionIx = active.lastBoundPerspectivePresetProjectionIx.value(); break; + case IPlanarProjection::CProjection::Orthographic: active.boundProjectionIx = active.lastBoundOrthoPresetProjectionIx.value(); break; + default: active.boundProjectionIx = std::nullopt; assert(false); break; + } } - else + } + + auto getPresetName = [&](auto ix) -> std::string + { + switch (selectedProjectionType) { - ImGui::Text(ImGuizmo::IsOver() ? "Over gizmo" : ""); - ImGui::SameLine(); - ImGui::Text(ImGuizmo::IsOver(ImGuizmo::TRANSLATE) ? "Over translate gizmo" : ""); - ImGui::SameLine(); - ImGui::Text(ImGuizmo::IsOver(ImGuizmo::ROTATE) ? "Over rotate gizmo" : ""); - ImGui::SameLine(); - ImGui::Text(ImGuizmo::IsOver(ImGuizmo::SCALE) ? 
"Over scale gizmo" : ""); + case IPlanarProjection::CProjection::Perspective: return "Perspective Projection Preset " + std::to_string(ix); + case IPlanarProjection::CProjection::Orthographic: return "Orthographic Projection Preset " + std::to_string(ix); + default: return "Unknown Projection Preset " + std::to_string(ix); } - ImGui::Separator(); + }; - /* - * ImGuizmo expects view & perspective matrix to be column major both with 4x4 layout - * and Nabla uses row major matricies - 3x4 matrix for view & 4x4 for projection + bool updateBoundVirtualMaps = false; + if (ImGui::BeginCombo("Projection Preset", getPresetName(active.boundProjectionIx.value()).c_str())) + { + auto& projections = planarBound->getPlanarProjections(); - - VIEW: + for (uint32_t i = 0; i < projections.size(); ++i) + { + const auto& projection = projections[i]; + const auto& params = projection.getParameters(); - ImGuizmo + if (params.m_type != selectedProjectionType) + continue; - | X[0] Y[0] Z[0] 0.0f | - | X[1] Y[1] Z[1] 0.0f | - | X[2] Y[2] Z[2] 0.0f | - | -Dot(X, eye) -Dot(Y, eye) -Dot(Z, eye) 1.0f | + bool isSelected = (i == active.boundProjectionIx.value()); - Nabla + if (ImGui::Selectable(getPresetName(i).c_str(), isSelected)) + { + active.boundProjectionIx = i; + updateBoundVirtualMaps |= true; - | X[0] X[1] X[2] -Dot(X, eye) | - | Y[0] Y[1] Y[2] -Dot(Y, eye) | - | Z[0] Z[1] Z[2] -Dot(Z, eye) | + switch (selectedProjectionType) + { + case IPlanarProjection::CProjection::Perspective: active.lastBoundPerspectivePresetProjectionIx = active.boundProjectionIx.value(); break; + case IPlanarProjection::CProjection::Orthographic: active.lastBoundOrthoPresetProjectionIx = active.boundProjectionIx.value(); break; + default: assert(false); break; + } + } - = transpose(nbl::core::matrix4SIMD()) + if (isSelected) + ImGui::SetItemDefaultFocus(); + } + ImGui::EndCombo(); + } - - PERSPECTIVE [PROJECTION CASE]: + auto* const boundCamera = planarBound->getCamera(); + auto& boundProjection = planarBound->getPlanarProjections()[active.boundProjectionIx.value()]; + assert(not boundProjection.isProjectionSingular()); - ImGuizmo + auto updateParameters = boundProjection.getParameters(); - | (temp / temp2) (0.0) (0.0) (0.0) | - | (0.0) (temp / temp3) (0.0) (0.0) | - | ((right + left) / temp2) ((top + bottom) / temp3) ((-zfar - znear) / temp4) (-1.0f) | - | (0.0) (0.0) ((-temp * zfar) / temp4) (0.0) | + if (useWindow) + ImGui::Checkbox("Allow axes to flip##allowAxesToFlip", &active.allowGizmoAxesToFlip); - Nabla + if(useWindow) + ImGui::Checkbox("Draw debug grid##drawDebugGrid", &active.enableDebugGridDraw); - | w (0.0) (0.0) (0.0) | - | (0.0) -h (0.0) (0.0) | - | (0.0) (0.0) (-zFar/(zFar-zNear)) (-zNear*zFar/(zFar-zNear)) | - | (0.0) (0.0) (-1.0) (0.0) | + if (ImGui::RadioButton("LH", active.leftHandedProjection)) + active.leftHandedProjection = true; - = transpose() + ImGui::SameLine(); - * - * the ViewManipulate final call (inside EditTransform) returns world space column major matrix for an object, - * note it also modifies input view matrix but projection matrix is immutable - */ + if (ImGui::RadioButton("RH", not active.leftHandedProjection)) + active.leftHandedProjection = false; - static struct - { - core::matrix4SIMD view, projection, model; - } imguizmoM16InOut; + updateParameters.m_zNear = std::clamp(updateParameters.m_zNear, 0.1f, 100.f); + updateParameters.m_zFar = std::clamp(updateParameters.m_zFar, 110.f, 10000.f); - ImGuizmo::SetID(0u); + ImGui::SliderFloat("zNear", &updateParameters.m_zNear, 0.1f, 100.f, "%.2f", 
ImGuiSliderFlags_Logarithmic); + ImGui::SliderFloat("zFar", &updateParameters.m_zFar, 110.f, 10000.f, "%.1f", ImGuiSliderFlags_Logarithmic); - imguizmoM16InOut.view = core::transpose(matrix4SIMD(camera.getViewMatrix())); - imguizmoM16InOut.projection = core::transpose(camera.getProjectionMatrix()); - imguizmoM16InOut.model = core::transpose(core::matrix4SIMD(pass.scene->object.model)); + switch (selectedProjectionType) + { + case IPlanarProjection::CProjection::Perspective: { - if (flipGizmoY) // note we allow to flip gizmo just to match our coordinates - imguizmoM16InOut.projection[1][1] *= -1.f; // https://johannesugb.github.io/gpu-programming/why-do-opengl-proj-matrices-fail-in-vulkan/ - - transformParams.editTransformDecomposition = true; - EditTransform(imguizmoM16InOut.view.pointer(), imguizmoM16InOut.projection.pointer(), imguizmoM16InOut.model.pointer(), transformParams); - } - - // to Nabla + update camera & model matrices - const auto& view = camera.getViewMatrix(); - const auto& projection = camera.getProjectionMatrix(); + ImGui::SliderFloat("Fov", &updateParameters.m_planar.perspective.fov, 20.f, 150.f, "%.1f", ImGuiSliderFlags_Logarithmic); + boundProjection.setPerspective(updateParameters.m_zNear, updateParameters.m_zFar, updateParameters.m_planar.perspective.fov); + } break; - // TODO: make it more nicely - const_cast(view) = core::transpose(imguizmoM16InOut.view).extractSub3x4(); // a hack, correct way would be to use inverse matrix and get position + target because now it will bring you back to last position & target when switching from gizmo move to manual move (but from manual to gizmo is ok) - camera.setProjectionMatrix(projection); // update concatanated matrix + case IPlanarProjection::CProjection::Orthographic: { - static nbl::core::matrix3x4SIMD modelView, normal; - static nbl::core::matrix4SIMD modelViewProjection; - - auto& hook = pass.scene->object; - hook.model = core::transpose(imguizmoM16InOut.model).extractSub3x4(); - { - const auto& references = pass.scene->getResources().objects; - const auto type = static_cast(gcIndex); + ImGui::SliderFloat("Ortho width", &updateParameters.m_planar.orthographic.orthoWidth, 1.f, 30.f, "%.1f", ImGuiSliderFlags_Logarithmic); + boundProjection.setOrthographic(updateParameters.m_zNear, updateParameters.m_zFar, updateParameters.m_planar.orthographic.orthoWidth); + } break; - const auto& [gpu, meta] = references[type]; - hook.meta.type = type; - hook.meta.name = meta.name; - } - - auto& ubo = hook.viewParameters; + default: break; + } - modelView = nbl::core::concatenateBFollowedByA(view, hook.model); - modelView.getSub3x3InverseTranspose(normal); - modelViewProjection = nbl::core::concatenateBFollowedByA(camera.getConcatenatedMatrix(), hook.model); + { + ImGuiIO& io = ImGui::GetIO(); - memcpy(ubo.MVP, modelViewProjection.pointer(), sizeof(ubo.MVP)); - memcpy(ubo.MV, modelView.pointer(), sizeof(ubo.MV)); - memcpy(ubo.NormalMat, normal.pointer(), sizeof(ubo.NormalMat)); + if (ImGui::IsKeyPressed(ImGuiKey_Space)) + enableActiveCameraMovement = !enableActiveCameraMovement; - // object meta display - { - ImGui::Begin("Object"); - ImGui::Text("type: \"%s\"", hook.meta.name.data()); - ImGui::End(); - } + if (enableActiveCameraMovement) + { + io.ConfigFlags |= ImGuiConfigFlags_NoMouse; + io.MouseDrawCursor = false; + io.WantCaptureMouse = false; } - - // view matrices editor + else { - ImGui::Begin("Matrices"); - - auto addMatrixTable = [&](const char* topText, const char* tableName, const int rows, const int columns, const float* 
pointer, const bool withSeparator = true) - { - ImGui::Text(topText); - if (ImGui::BeginTable(tableName, columns)) - { - for (int y = 0; y < rows; ++y) - { - ImGui::TableNextRow(); - for (int x = 0; x < columns; ++x) - { - ImGui::TableSetColumnIndex(x); - ImGui::Text("%.3f", *(pointer + (y * columns) + x)); - } - } - ImGui::EndTable(); - } - - if (withSeparator) - ImGui::Separator(); - }; - - addMatrixTable("Model Matrix", "ModelMatrixTable", 3, 4, pass.scene->object.model.pointer()); - addMatrixTable("Camera View Matrix", "ViewMatrixTable", 3, 4, view.pointer()); - addMatrixTable("Camera View Projection Matrix", "ViewProjectionMatrixTable", 4, 4, projection.pointer(), false); - - ImGui::End(); + io.ConfigFlags &= ~ImGuiConfigFlags_NoMouse; + io.MouseDrawCursor = true; + io.WantCaptureMouse = true; } - // Nabla Imgui backend MDI buffer info - // To be 100% accurate and not overly conservative we'd have to explicitly `cull_frees` and defragment each time, - // so unless you do that, don't use this basic info to optimize the size of your IMGUI buffer. + if (enableActiveCameraMovement) + ImGui::TextColored(ImVec4(0.0f, 1.0f, 0.0f, 1.0f), "Bound Camera Movement: Enabled"); + else + ImGui::TextColored(ImVec4(1.0f, 0.0f, 0.0f, 1.0f), "Bound Camera Movement: Disabled"); + + if (ImGui::IsItemHovered()) { - auto* streaminingBuffer = pass.ui.manager->getStreamingBuffer(); + ImGui::PushStyleColor(ImGuiCol_WindowBg, ImVec4(0.2f, 0.2f, 0.2f, 0.8f)); + ImGui::PushStyleColor(ImGuiCol_Border, ImVec4(1.0f, 1.0f, 1.0f, 1.0f)); + ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 1.5f); - const size_t total = streaminingBuffer->get_total_size(); // total memory range size for which allocation can be requested - const size_t freeSize = streaminingBuffer->getAddressAllocator().get_free_size(); // max total free bloock memory size we can still allocate from total memory available - const size_t consumedMemory = total - freeSize; // memory currently consumed by streaming buffer + ImVec2 mousePos = ImGui::GetMousePos(); + ImGui::SetNextWindowPos(ImVec2(mousePos.x + 10, mousePos.y + 10), ImGuiCond_Always); - float freePercentage = 100.0f * (float)(freeSize) / (float)total; - float allocatedPercentage = (float)(consumedMemory) / (float)total; + ImGui::Begin("HoverOverlay", nullptr, + ImGuiWindowFlags_NoDecoration | + ImGuiWindowFlags_AlwaysAutoResize | + ImGuiWindowFlags_NoSavedSettings); - ImVec2 barSize = ImVec2(400, 30); - float windowPadding = 10.0f; - float verticalPadding = ImGui::GetStyle().FramePadding.y; + ImGui::Text("Press 'Space' to Enable/Disable bound planar camera movement"); - ImGui::SetNextWindowSize(ImVec2(barSize.x + 2 * windowPadding, 110 + verticalPadding), ImGuiCond_Always); - ImGui::Begin("Nabla Imgui MDI Buffer Info", nullptr, ImGuiWindowFlags_NoResize | ImGuiWindowFlags_NoScrollbar); + ImGui::End(); - ImGui::Text("Total Allocated Size: %zu bytes", total); - ImGui::Text("In use: %zu bytes", consumedMemory); - ImGui::Text("Buffer Usage:"); + ImGui::PopStyleVar(); + ImGui::PopStyleColor(2); + } - ImGui::SetCursorPosX(windowPadding); + ImGui::Separator(); - if (freePercentage > 70.0f) - ImGui::PushStyleColor(ImGuiCol_PlotHistogram, ImVec4(0.0f, 1.0f, 0.0f, 0.4f)); // Green - else if (freePercentage > 30.0f) - ImGui::PushStyleColor(ImGuiCol_PlotHistogram, ImVec4(1.0f, 1.0f, 0.0f, 0.4f)); // Yellow - else - ImGui::PushStyleColor(ImGuiCol_PlotHistogram, ImVec4(1.0f, 0.0f, 0.0f, 0.4f)); // Red + const auto flags = ImGuiTreeNodeFlags_DefaultOpen; + if (ImGui::TreeNodeEx("Bound Camera", flags)) + { 
+ ImGui::Text("Type: %s", boundCamera->getIdentifier().data()); + ImGui::Text("Object Ix: %s", std::to_string(active.activePlanarIx + 1u).c_str()); + ImGui::Separator(); + { + auto* orbit = dynamic_cast(boundCamera); - ImGui::ProgressBar(allocatedPercentage, barSize, ""); + float moveSpeed = boundCamera->getMoveSpeedScale(); + float rotationSpeed = boundCamera->getRotationSpeedScale(); - ImGui::PopStyleColor(); + ImGui::SliderFloat("Move speed factor", &moveSpeed, 0.0001f, 10.f, "%.4f", ImGuiSliderFlags_Logarithmic); - ImDrawList* drawList = ImGui::GetWindowDrawList(); + if(not orbit) + ImGui::SliderFloat("Rotate speed factor", &rotationSpeed, 0.0001f, 10.f, "%.4f", ImGuiSliderFlags_Logarithmic); - ImVec2 progressBarPos = ImGui::GetItemRectMin(); - ImVec2 progressBarSize = ImGui::GetItemRectSize(); + boundCamera->setMoveSpeedScale(moveSpeed); + boundCamera->setRotationSpeedScale(rotationSpeed); - const char* text = "%.2f%% free"; - char textBuffer[64]; - snprintf(textBuffer, sizeof(textBuffer), text, freePercentage); + { + if (orbit) + { + float distance = orbit->getDistance(); + ImGui::SliderFloat("Distance", &distance, COrbitCamera::MinDistance, COrbitCamera::MaxDistance, "%.4f", ImGuiSliderFlags_Logarithmic); + orbit->setDistance(distance); + } + } + } - ImVec2 textSize = ImGui::CalcTextSize(textBuffer); - ImVec2 textPos = ImVec2 - ( - progressBarPos.x + (progressBarSize.x - textSize.x) * 0.5f, - progressBarPos.y + (progressBarSize.y - textSize.y) * 0.5f - ); + if (ImGui::TreeNodeEx("World Data", flags)) + { + auto& gimbal = boundCamera->getGimbal(); + const auto position = getCastedVector(gimbal.getPosition()); + const auto& orientation = gimbal.getOrientation(); + const auto viewMatrix = getCastedMatrix(gimbal.getViewMatrix()); + + addMatrixTable("Position", ("PositionTable_" + activePlanarIxString).c_str(), 1, 3, &position[0], false); + addMatrixTable("Orientation (Quaternion)", ("OrientationTable_" + activePlanarIxString).c_str(), 1, 4, &orientation[0], false); + addMatrixTable("View Matrix", ("ViewMatrixTable_" + activePlanarIxString).c_str(), 3, 4, &viewMatrix[0][0], false); + ImGui::TreePop(); + } - ImVec4 bgColor = ImGui::GetStyleColorVec4(ImGuiCol_WindowBg); - drawList->AddRectFilled - ( - ImVec2(textPos.x - 5, textPos.y - 2), - ImVec2(textPos.x + textSize.x + 5, textPos.y + textSize.y + 2), - ImGui::GetColorU32(bgColor) - ); + if (ImGui::TreeNodeEx("Virtual Event Mappings", flags)) + { + displayKeyMappingsAndVirtualStatesInline(&boundProjection); + ImGui::TreePop(); + } - ImGui::SetCursorScreenPos(textPos); - ImGui::Text("%s", textBuffer); + ImGui::TreePop(); + } + } - ImGui::Dummy(ImVec2(0.0f, verticalPadding)); + { + ImGuiIO& io = ImGui::GetIO(); + ImVec2 mousePos = ImGui::GetMousePos(); + ImVec2 viewportSize = io.DisplaySize; + auto* cc = m_window->getCursorControl(); - ImGui::End(); + if (mousePos.x < 0.0f || mousePos.y < 0.0f || mousePos.x > viewportSize.x || mousePos.y > viewportSize.y) + { + if (not enableActiveCameraMovement) + cc->setVisible(true); + } + else + { + cc->setVisible(false); } - - ImGui::End(); } - ); - m_winMgr->setWindowSize(m_window.get(), WIN_W, WIN_H); - m_surface->recreateSwapchain(); - m_winMgr->show(m_window.get()); - oracle.reportBeginFrameRecord(); - camera.mapKeysToArrows(); - - return true; + ImGui::End(); + } } - bool updateGUIDescriptorSet() + inline void TransformEditor() { - // texture atlas + our scene texture, note we don't create info & write pair for the font sampler because UI extension's is immutable and baked into DS layout - static 
std::array descriptorInfo; - static IGPUDescriptorSet::SWriteDescriptorSet writes[TexturesAmount]; - - descriptorInfo[nbl::ext::imgui::UI::FontAtlasTexId].info.image.imageLayout = IImage::LAYOUT::READ_ONLY_OPTIMAL; - descriptorInfo[nbl::ext::imgui::UI::FontAtlasTexId].desc = core::smart_refctd_ptr(pass.ui.manager->getFontAtlasView()); + static float bounds[] = { -0.5f, -0.5f, -0.5f, 0.5f, 0.5f, 0.5f }; + static float boundsSnap[] = { 0.1f, 0.1f, 0.1f }; + static bool boundSizing = false; + static bool boundSizingSnap = false; - descriptorInfo[OfflineSceneTextureIx].info.image.imageLayout = IImage::LAYOUT::READ_ONLY_OPTIMAL; - descriptorInfo[OfflineSceneTextureIx].desc = pass.scene->getResources().attachments.color; + ImGuiIO& io = ImGui::GetIO(); - for (uint32_t i = 0; i < descriptorInfo.size(); ++i) + // setup { - writes[i].dstSet = pass.ui.descriptorSet.get(); - writes[i].binding = 0u; - writes[i].arrayElement = i; - writes[i].count = 1u; + ImGui::SetNextWindowPos({ wInit.trsEditor.iPos.x, wInit.trsEditor.iPos.y }, ImGuiCond_Appearing); + ImGui::SetNextWindowSize({ wInit.trsEditor.iSize.x, wInit.trsEditor.iSize.y }, ImGuiCond_Appearing); } - writes[nbl::ext::imgui::UI::FontAtlasTexId].info = descriptorInfo.data() + nbl::ext::imgui::UI::FontAtlasTexId; - writes[OfflineSceneTextureIx].info = descriptorInfo.data() + OfflineSceneTextureIx; - return m_device->updateDescriptorSets(writes, {}); - } - - inline void workLoopBody() override - { - // framesInFlight: ensuring safe execution of command buffers and acquires, `framesInFlight` only affect semaphore waits, don't use this to index your resources because it can change with swapchain recreation. - const uint32_t framesInFlight = core::min(MaxFramesInFlight, m_surface->getMaxAcquiresInFlight()); - // We block for semaphores for 2 reasons here: - // A) Resource: Can't use resource like a command buffer BEFORE previous use is finished! [MaxFramesInFlight] - // B) Acquire: Can't have more acquires in flight than a certain threshold returned by swapchain or your surface helper class. [MaxAcquiresInFlight] - if (m_realFrameIx >= framesInFlight) + ImGui::Begin("TRS Editor"); { - const ISemaphore::SWaitInfo cbDonePending[] = + const size_t objectsCount = m_planarProjections.size() + 1u; + assert(objectsCount); + + std::vector sbels(objectsCount); + for (size_t i = 0; i < objectsCount; ++i) + sbels[i] = "Object " + std::to_string(i); + + std::vector labels(objectsCount); + for (size_t i = 0; i < objectsCount; ++i) + labels[i] = sbels[i].c_str(); + + int activeObject = boundCameraToManipulate ? 
static_cast(boundPlanarCameraIxToManipulate.value() + 1u) : 0; + if (ImGui::Combo("Active Object", &activeObject, labels.data(), static_cast(labels.size()))) { + const auto newActiveObject = static_cast(activeObject); + + if (newActiveObject) // camera { - .semaphore = m_semaphore.get(), - .value = m_realFrameIx + 1 - framesInFlight + boundPlanarCameraIxToManipulate = newActiveObject - 1u; + ICamera* const targetGimbalManipulationCamera = m_planarProjections[boundPlanarCameraIxToManipulate.value()]->getCamera(); + boundCameraToManipulate = smart_refctd_ptr(targetGimbalManipulationCamera); } - }; - if (m_device->blockForSemaphores(cbDonePending) != ISemaphore::WAIT_RESULT::SUCCESS) - return; + else // gc model + { + boundPlanarCameraIxToManipulate = std::nullopt; + boundCameraToManipulate = nullptr; + } + } } - const auto resourceIx = m_realFrameIx % MaxFramesInFlight; - - // CPU events - update(); + ImGuizmoModelM16InOut imguizmoModel; - // render whole scene to offline frame buffer & submit - pass.scene->begin(); - { - pass.scene->update(); - pass.scene->record(); - pass.scene->end(); - } - pass.scene->submit(); + if (boundCameraToManipulate) + imguizmoModel.inTRS = gimbalToImguizmoTRS(getCastedMatrix(boundCameraToManipulate->getGimbal()())); + else + imguizmoModel.inTRS = transpose(getMatrix3x4As4x4(m_model)); - auto* const cb = m_cmdBufs.data()[resourceIx].get(); - cb->reset(IGPUCommandBuffer::RESET_FLAGS::RELEASE_RESOURCES_BIT); - cb->begin(IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT); - cb->beginDebugMarker("UISampleApp IMGUI Frame"); + imguizmoModel.outTRS = imguizmoModel.inTRS; + float* m16TRSmatrix = &imguizmoModel.outTRS[0][0]; - auto* queue = getGraphicsQueue(); + std::string indent; + if (boundCameraToManipulate) + indent = boundCameraToManipulate->getIdentifier(); + else + indent = "Geometry Creator Object"; - asset::SViewport viewport; + ImGui::Text("Identifier: \"%s\"", indent.c_str()); { - viewport.minDepth = 1.f; - viewport.maxDepth = 0.f; - viewport.x = 0u; - viewport.y = 0u; - viewport.width = WIN_W; - viewport.height = WIN_H; - } - cb->setViewport(0u, 1u, &viewport); + if (ImGuizmo::IsUsingAny()) + ImGui::TextColored(ImVec4(0.0f, 1.0f, 0.0f, 1.0f), "Gizmo: In Use"); + else + ImGui::TextColored(ImVec4(1.0f, 1.0f, 0.0f, 1.0f), "Gizmo: Idle"); - const VkRect2D currentRenderArea = - { - .offset = {0,0}, - .extent = {m_window->getWidth(),m_window->getHeight()} - }; + if (ImGui::IsItemHovered()) + { + ImGui::PushStyleColor(ImGuiCol_WindowBg, ImVec4(0.2f, 0.2f, 0.2f, 0.8f)); + ImGui::PushStyleColor(ImGuiCol_Border, ImVec4(1.0f, 1.0f, 1.0f, 1.0f)); + ImGui::PushStyleVar(ImGuiStyleVar_WindowBorderSize, 1.5f); - IQueue::SSubmitInfo::SCommandBufferInfo commandBuffersInfo[] = {{.cmdbuf = cb }}; + ImVec2 mousePos = ImGui::GetMousePos(); + ImGui::SetNextWindowPos(ImVec2(mousePos.x + 10, mousePos.y + 10), ImGuiCond_Always); - // UI render pass - { - auto scRes = static_cast(m_surface->getSwapchainResources()); - const IGPUCommandBuffer::SRenderpassBeginInfo renderpassInfo = - { - .framebuffer = scRes->getFramebuffer(m_currentImageAcquire.imageIndex), - .colorClearValues = &clear.color, - .depthStencilClearValues = nullptr, - .renderArea = currentRenderArea - }; - nbl::video::ISemaphore::SWaitInfo waitInfo = { .semaphore = m_semaphore.get(), .value = m_realFrameIx + 1u }; + ImGui::Begin("HoverOverlay", nullptr, + ImGuiWindowFlags_NoDecoration | + ImGuiWindowFlags_AlwaysAutoResize | + ImGuiWindowFlags_NoSavedSettings); - cb->beginRenderPass(renderpassInfo, 
IGPUCommandBuffer::SUBPASS_CONTENTS::INLINE); - const auto uiParams = pass.ui.manager->getCreationParameters(); - auto* pipeline = pass.ui.manager->getPipeline(); - cb->bindGraphicsPipeline(pipeline); - cb->bindDescriptorSets(EPBP_GRAPHICS, pipeline->getLayout(), uiParams.resources.texturesInfo.setIx, 1u, &pass.ui.descriptorSet.get()); // note that we use default UI pipeline layout where uiParams.resources.textures.setIx == uiParams.resources.samplers.setIx - - if (!keepRunning()) - return; - - if (!pass.ui.manager->render(cb,waitInfo)) - { - // TODO: need to present acquired image before bailing because its already acquired - return; + ImGui::Text("Right-click and drag on the gizmo to manipulate the object."); + + ImGui::End(); + + ImGui::PopStyleVar(); + ImGui::PopStyleColor(2); } - cb->endRenderPass(); } - cb->end(); + + ImGui::Separator(); + + if (!boundCameraToManipulate) { - const IQueue::SSubmitInfo::SSemaphoreInfo rendered[] = - { - { - .semaphore = m_semaphore.get(), - .value = ++m_realFrameIx, - .stageMask = PIPELINE_STAGE_FLAGS::COLOR_ATTACHMENT_OUTPUT_BIT - } + static const char* gcObjectTypeNames[] = { + "Cube", + "Sphere", + "Cylinder", + "Rectangle", + "Disk", + "Arrow", + "Cone", + "Icosphere" }; + if (ImGui::BeginCombo("Object Type", gcObjectTypeNames[gcIndex])) { + for (uint8_t i = 0; i < ObjectType::OT_COUNT; ++i) { - const IQueue::SSubmitInfo::SSemaphoreInfo acquired[] = - { - { - .semaphore = m_currentImageAcquire.semaphore, - .value = m_currentImageAcquire.acquireCount, - .stageMask = PIPELINE_STAGE_FLAGS::NONE - } - }; + bool isSelected = (static_cast(gcIndex) == static_cast(i)); + if (ImGui::Selectable(gcObjectTypeNames[i], isSelected)) + gcIndex = i; - const IQueue::SSubmitInfo infos[] = - { - { - .waitSemaphores = acquired, - .commandBuffers = commandBuffersInfo, - .signalSemaphores = rendered - } - }; - - const nbl::video::ISemaphore::SWaitInfo waitInfos[] = - { { - .semaphore = pass.scene->semaphore.progress.get(), - .value = pass.scene->semaphore.finishedValue - } }; - - m_device->blockForSemaphores(waitInfos); - - updateGUIDescriptorSet(); - - if (queue->submit(infos) != IQueue::RESULT::SUCCESS) - m_realFrameIx--; + if (isSelected) + ImGui::SetItemDefaultFocus(); } + ImGui::EndCombo(); } - - m_window->setCaption("[Nabla Engine] UI App Test Demo"); - m_surface->present(m_currentImageAcquire.imageIndex, rendered); } - } - inline bool keepRunning() override - { - if (m_surface->irrecoverable()) - return false; + addMatrixTable("Model (TRS) Matrix", "ModelMatrixTable", 4, 4, m16TRSmatrix); - return true; - } + if (ImGui::RadioButton("Translate", mCurrentGizmoOperation == ImGuizmo::TRANSLATE)) + mCurrentGizmoOperation = ImGuizmo::TRANSLATE; - inline bool onAppTerminated() override - { - return device_base_t::onAppTerminated(); - } + ImGui::SameLine(); + if (ImGui::RadioButton("Rotate", mCurrentGizmoOperation == ImGuizmo::ROTATE)) + mCurrentGizmoOperation = ImGuizmo::ROTATE; + ImGui::SameLine(); + if (ImGui::RadioButton("Scale", mCurrentGizmoOperation == ImGuizmo::SCALE)) + mCurrentGizmoOperation = ImGuizmo::SCALE; - inline void update() - { - camera.setMoveSpeed(moveSpeed); - camera.setRotateSpeed(rotateSpeed); + float32_t3 matrixTranslation, matrixRotation, matrixScale; + IGimbalController::input_imguizmo_event_t decomposed, recomposed; + imguizmoModel.outDeltaTRS = IGimbalController::input_imguizmo_event_t(1); - static std::chrono::microseconds previousEventTimestamp{}; + ImGuizmo::DecomposeMatrixToComponents(m16TRSmatrix, &matrixTranslation[0], 
&matrixRotation[0], &matrixScale[0]); + decomposed = *reinterpret_cast(m16TRSmatrix); + { + ImGuiInputTextFlags flags = 0; - m_inputSystem->getDefaultMouse(&mouse); - m_inputSystem->getDefaultKeyboard(&keyboard); + ImGui::InputFloat3("Tr", &matrixTranslation[0], "%.3f", flags); - auto updatePresentationTimestamp = [&]() - { - m_currentImageAcquire = m_surface->acquireNextImage(); + if (boundCameraToManipulate) // TODO: cameras are WiP here, imguizmo controller only works with translate manipulation + abs are banned currently + { + ImGui::PushStyleColor(ImGuiCol_FrameBg, ImVec4(1.0f, 0.0f, 0.0f, 0.5f)); + flags |= ImGuiInputTextFlags_ReadOnly; + } - oracle.reportEndFrameRecord(); - const auto timestamp = oracle.getNextPresentationTimeStamp(); - oracle.reportBeginFrameRecord(); + ImGui::InputFloat3("Rt", &matrixRotation[0], "%.3f", flags); + ImGui::InputFloat3("Sc", &matrixScale[0], "%.3f", flags); - return timestamp; - }; + if(boundCameraToManipulate) + ImGui::PopStyleColor(); + } + ImGuizmo::RecomposeMatrixFromComponents(&matrixTranslation[0], &matrixRotation[0], &matrixScale[0], m16TRSmatrix); + recomposed = *reinterpret_cast(m16TRSmatrix); - const auto nextPresentationTimestamp = updatePresentationTimestamp(); + // TODO AND NOTE: I only take care of translate part temporary! + imguizmoModel.outDeltaTRS[3] = recomposed[3] - decomposed[3]; - struct + if (mCurrentGizmoOperation != ImGuizmo::SCALE) { - std::vector mouse{}; - std::vector keyboard{}; - } capturedEvents; + if (ImGui::RadioButton("Local", mCurrentGizmoMode == ImGuizmo::LOCAL)) + mCurrentGizmoMode = ImGuizmo::LOCAL; + ImGui::SameLine(); + if (ImGui::RadioButton("World", mCurrentGizmoMode == ImGuizmo::WORLD)) + mCurrentGizmoMode = ImGuizmo::WORLD; + } - if (move) camera.beginInputProcessing(nextPresentationTimestamp); + ImGui::Checkbox(" ", &useSnap); + ImGui::SameLine(); + switch (mCurrentGizmoOperation) { - mouse.consumeEvents([&](const IMouseEventChannel::range_t& events) -> void - { - if (move) - camera.mouseProcess(events); // don't capture the events, only let camera handle them with its impl + case ImGuizmo::TRANSLATE: + ImGui::InputFloat3("Snap", &snap[0]); + break; + case ImGuizmo::ROTATE: + ImGui::InputFloat("Angle Snap", &snap[0]); + break; + case ImGuizmo::SCALE: + ImGui::InputFloat("Scale Snap", &snap[0]); + break; + } - for (const auto& e : events) // here capture + ImGui::End(); + { + // generate virtual events given delta TRS matrix + if (boundCameraToManipulate) + { { - if (e.timeStamp < previousEventTimestamp) - continue; + static std::vector virtualEvents(0x45); - previousEventTimestamp = e.timeStamp; - capturedEvents.mouse.emplace_back(e); + if (not enableActiveCameraMovement) + { + uint32_t vCount = {}; - if (e.type == nbl::ui::SMouseEvent::EET_SCROLL) - gcIndex = std::clamp(int16_t(gcIndex) + int16_t(core::sign(e.scrollEvent.verticalScroll)), int64_t(0), int64_t(OT_COUNT - (uint8_t)1u)); - } - }, m_logger.get()); + boundCameraToManipulate->beginInputProcessing(m_nextPresentationTimestamp); + { + boundCameraToManipulate->process(nullptr, vCount); - keyboard.consumeEvents([&](const IKeyboardEventChannel::range_t& events) -> void - { - if (move) - camera.keyboardProcess(events); // don't capture the events, only let camera handle them with its impl + if (virtualEvents.size() < vCount) + virtualEvents.resize(vCount); - for (const auto& e : events) // here capture - { - if (e.timeStamp < previousEventTimestamp) - continue; + IGimbalController::SUpdateParameters params; + params.imguizmoEvents = { { 
imguizmoModel.outDeltaTRS } }; + boundCameraToManipulate->process(virtualEvents.data(), vCount, params); + } + boundCameraToManipulate->endInputProcessing(); + + // I start to think controller should be able to set sensitivity to scale magnitudes of generated events + // in order for camera to not keep any magnitude scalars like move or rotation speed scales + + if (vCount) + { + const float pmSpeed = boundCameraToManipulate->getMoveSpeedScale(); + const float prSpeed = boundCameraToManipulate->getRotationSpeedScale(); - previousEventTimestamp = e.timeStamp; - capturedEvents.keyboard.emplace_back(e); + boundCameraToManipulate->setMoveSpeedScale(1); + boundCameraToManipulate->setRotationSpeedScale(1); + + // NOTE: generated events from ImGuizmo controller are always in world space! + boundCameraToManipulate->manipulate({ virtualEvents.data(), vCount }, ICamera::World); + + boundCameraToManipulate->setMoveSpeedScale(pmSpeed); + boundCameraToManipulate->setRotationSpeedScale(prSpeed); + } + } } - }, m_logger.get()); + } + else + { + // for scene demo model full affine transformation without limits is assumed + m_model = float32_t3x4(transpose(imguizmoModel.outTRS)); + } } - if (move) camera.endInputProcessing(nextPresentationTimestamp); - - const auto cursorPosition = m_window->getCursorControl()->getPosition(); + } - nbl::ext::imgui::UI::SUpdateParameters params = + inline void addMatrixTable(const char* topText, const char* tableName, int rows, int columns, const float* pointer, bool withSeparator = true) + { + ImGui::Text(topText); + ImGui::PushStyleColor(ImGuiCol_TableRowBg, ImGui::GetStyleColorVec4(ImGuiCol_ChildBg)); + ImGui::PushStyleColor(ImGuiCol_TableRowBgAlt, ImGui::GetStyleColorVec4(ImGuiCol_WindowBg)); + if (ImGui::BeginTable(tableName, columns, ImGuiTableFlags_Borders | ImGuiTableFlags_RowBg | ImGuiTableFlags_SizingStretchSame)) { - .mousePosition = nbl::hlsl::float32_t2(cursorPosition.x, cursorPosition.y) - nbl::hlsl::float32_t2(m_window->getX(), m_window->getY()), - .displaySize = { m_window->getWidth(), m_window->getHeight() }, - .mouseEvents = { capturedEvents.mouse.data(), capturedEvents.mouse.size() }, - .keyboardEvents = { capturedEvents.keyboard.data(), capturedEvents.keyboard.size() } - }; - - pass.ui.manager->update(params); + for (int y = 0; y < rows; ++y) + { + ImGui::TableNextRow(); + for (int x = 0; x < columns; ++x) + { + ImGui::TableSetColumnIndex(x); + if (pointer) + ImGui::Text("%.3f", *(pointer + (y * columns) + x)); + else + ImGui::Text("-"); + } + } + ImGui::EndTable(); + } + ImGui::PopStyleColor(2); + if (withSeparator) + ImGui::Separator(); } - private: - // Maximum frames which can be simultaneously submitted, used to cycle through our per-frame resources like command buffers - constexpr static inline uint32_t MaxFramesInFlight = 3u; + // TODO: need to inspect where I'm wrong, workaround + inline float32_t4x4 gimbalToImguizmoTRS(const float32_t3x4& nblGimbalTrs) + { + // *do not transpose whole matrix*, only the translate part + float32_t4x4 trs = getMatrix3x4As4x4(nblGimbalTrs); + trs[3] = float32_t4(nblGimbalTrs[0][3], nblGimbalTrs[1][3], nblGimbalTrs[2][3], 1.f); + trs[0][3] = 0.f; + trs[1][3] = 0.f; + trs[2][3] = 0.f; + + return trs; + }; + + std::chrono::seconds timeout = std::chrono::seconds(0x7fffFFFFu); + clock_t::time_point start; + //! 
One window & surface + smart_refctd_ptr> m_surface; smart_refctd_ptr m_window; - smart_refctd_ptr> m_surface; - smart_refctd_ptr m_pipeline; + // We can't use the same semaphore for acquire and present, because that would disable "Frames in Flight" by syncing previous present against next acquire. + // At least two timelines must be used. smart_refctd_ptr m_semaphore; - smart_refctd_ptr m_cmdPool; + // Maximum frames which can be simultaneously submitted, used to cycle through our per-frame resources like command buffers + constexpr static inline uint32_t MaxFramesInFlight = 3u; + // Use a separate counter to cycle through our resources because `getAcquireCount()` increases upon spontaneous resizes with immediate blit-presents uint64_t m_realFrameIx = 0; + // We'll write to the Triple Buffer with a Renderpass + core::smart_refctd_ptr m_renderpass = {}; + // These are atomic counters where the Surface lets us know what's the latest Blit timeline semaphore value which will be signalled on the resource + std::array m_blitWaitValues; + // Enough Command Buffers and other resources for all frames in flight! std::array, MaxFramesInFlight> m_cmdBufs; - ISimpleManagedSurface::SAcquireResult m_currentImageAcquire = {}; - + // Our own persistent images that don't get recreated with the swapchain + std::array, MaxFramesInFlight> m_tripleBuffers; + // Resources derived from the images + std::array, MaxFramesInFlight> m_framebuffers = {}; + // We will use it to get some asset stuff like geometry creator smart_refctd_ptr m_assetManager; + // Input system for capturing system events core::smart_refctd_ptr m_inputSystem; + // Handles mouse events InputSystem::ChannelReader mouse; + // Handles keyboard events InputSystem::ChannelReader keyboard; - - constexpr static inline auto TexturesAmount = 2u; + //! 
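Aside (illustrative sketch, not part of this patch): the members above follow the usual frames-in-flight pattern, where m_realFrameIx both selects the per-frame resource slot and drives the timeline-semaphore wait that throttles the CPU. A minimal sketch, assuming m_semaphore is signalled with the value m_realFrameIx+1 when a frame's work retires and that the app keeps its logical device in m_device as other examples do; only the members declared in this diff are taken from the source:

const auto resourceIx = m_realFrameIx % MaxFramesInFlight;
// don't touch this slot's command buffer / triple-buffer image again until the GPU
// has finished the frame that used it MaxFramesInFlight submits ago
if (m_realFrameIx >= MaxFramesInFlight)
{
    const nbl::video::ISemaphore::SWaitInfo retired[] =
    { {
        .semaphore = m_semaphore.get(),
        .value = m_realFrameIx + 1 - MaxFramesInFlight
    } };
    m_device->blockForSemaphores(retired);
}
auto* const cmdbuf = m_cmdBufs[resourceIx].get();
cmdbuf->reset(nbl::video::IGPUCommandBuffer::RESET_FLAGS::RELEASE_RESOURCES_BIT);
cmdbuf->begin(nbl::video::IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT);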
next presentation timestamp + std::chrono::microseconds m_nextPresentationTimestamp = {}; core::smart_refctd_ptr m_descriptorSetPool; - struct C_UI + struct CRenderUI { nbl::core::smart_refctd_ptr manager; @@ -774,25 +2177,90 @@ class UISampleApp final : public examples::SimpleWindowedApplication core::smart_refctd_ptr descriptorSet; }; - struct E_APP_PASS + // one model object in the world, testing multiuple cameraz for which view is rendered to separate frame buffers (so what they see) with new controller API including imguizmo + nbl::hlsl::float32_t3x4 m_model = nbl::hlsl::float32_t3x4(1.f); + + // if we had working IObjectTransform or something similar then it would be it instead, it is "last manipulated object" I need for TRS editor + // in reality we should store range of those IObjectTransforem interface range & index to object representing last manipulated one + nbl::core::smart_refctd_ptr boundCameraToManipulate = nullptr; + std::optional boundPlanarCameraIxToManipulate = std::nullopt; + + std::vector> m_planarProjections; + + bool enableActiveCameraMovement = false; + + struct windowControlBinding { nbl::core::smart_refctd_ptr scene; - C_UI ui; - } pass; - Camera camera = Camera(core::vectorSIMDf(0, 0, 0), core::vectorSIMDf(0, 0, 0), core::matrix4SIMD()); + uint32_t activePlanarIx = 0u; + bool allowGizmoAxesToFlip = false; + bool enableDebugGridDraw = true; + float aspectRatio = 16.f / 9.f; + bool leftHandedProjection = true; + + std::optional boundProjectionIx = std::nullopt, lastBoundPerspectivePresetProjectionIx = std::nullopt, lastBoundOrthoPresetProjectionIx = std::nullopt; + + inline void pickDefaultProjections(const planar_projections_range_t& projections) + { + auto init = [&](std::optional& presetix, IPlanarProjection::CProjection::ProjectionType requestedType) -> void + { + for (uint32_t i = 0u; i < projections.size(); ++i) + { + const auto& params = projections[i].getParameters(); + if (params.m_type == requestedType) + { + presetix = i; + break; + } + } + + assert(presetix.has_value()); + }; + + init(lastBoundPerspectivePresetProjectionIx = std::nullopt, IPlanarProjection::CProjection::Perspective); + init(lastBoundOrthoPresetProjectionIx = std::nullopt, IPlanarProjection::CProjection::Orthographic); + boundProjectionIx = lastBoundPerspectivePresetProjectionIx.value(); + } + }; + + static constexpr inline auto MaxSceneFBOs = 2u; + std::array windowControlBinding; + uint32_t activeRenderWindowIx = 0u; + + // UI font atlas + viewport FBO color attachment textures + constexpr static inline auto TotalUISampleTexturesAmount = 1u + MaxSceneFBOs; + + nbl::core::smart_refctd_ptr resources; + + CRenderUI m_ui; video::CDumbPresentationOracle oracle; + uint16_t gcIndex = {}; - uint16_t gcIndex = {}; // note: this is dirty however since I assume only single object in scene I can leave it now, when this example is upgraded to support multiple objects this needs to be changed + const bool flipGizmoY = true; - TransformRequestParams transformParams; - bool isPerspective = true, isLH = true, flipGizmoY = true, move = false; - float fov = 60.f, zNear = 0.1f, zFar = 10000.f, moveSpeed = 1.f, rotateSpeed = 1.f; - float viewWidth = 10.f; float camYAngle = 165.f / 180.f * 3.14159f; float camXAngle = 32.f / 180.f * 3.14159f; + float camDistance = 8.f; + bool useWindow = true, useSnap = false; + ImGuizmo::OPERATION mCurrentGizmoOperation = ImGuizmo::TRANSLATE; + ImGuizmo::MODE mCurrentGizmoMode = ImGuizmo::LOCAL; + float snap[3] = { 1.f, 1.f, 1.f }; bool firstFrame = true; + const 
float32_t2 iPaddingOffset = float32_t2(10, 10); + + struct ImWindowInit + { + float32_t2 iPos, iSize; + }; + + struct + { + ImWindowInit trsEditor; + ImWindowInit planars; + std::array renderWindows; + } wInit; }; NBL_MAIN_FUNC(UISampleApp) \ No newline at end of file diff --git a/61_UI/pipeline.groovy b/61_UI/pipeline.groovy deleted file mode 100644 index 7b7c9702a..000000000 --- a/61_UI/pipeline.groovy +++ /dev/null @@ -1,50 +0,0 @@ -import org.DevshGraphicsProgramming.Agent -import org.DevshGraphicsProgramming.BuilderInfo -import org.DevshGraphicsProgramming.IBuilder - -class CUIBuilder extends IBuilder -{ - public CUIBuilder(Agent _agent, _info) - { - super(_agent, _info) - } - - @Override - public boolean prepare(Map axisMapping) - { - return true - } - - @Override - public boolean build(Map axisMapping) - { - IBuilder.CONFIGURATION config = axisMapping.get("CONFIGURATION") - IBuilder.BUILD_TYPE buildType = axisMapping.get("BUILD_TYPE") - - def nameOfBuildDirectory = getNameOfBuildDirectory(buildType) - def nameOfConfig = getNameOfConfig(config) - - agent.execute("cmake --build ${info.rootProjectPath}/${nameOfBuildDirectory}/${info.targetProjectPathRelativeToRoot} --target ${info.targetBaseName} --config ${nameOfConfig} -j12 -v") - - return true - } - - @Override - public boolean test(Map axisMapping) - { - return true - } - - @Override - public boolean install(Map axisMapping) - { - return true - } -} - -def create(Agent _agent, _info) -{ - return new CUIBuilder(_agent, _info) -} - -return this \ No newline at end of file diff --git a/common/include/CCamera.hpp b/common/include/CCamera.hpp deleted file mode 100644 index 1b0fe9c0f..000000000 --- a/common/include/CCamera.hpp +++ /dev/null @@ -1,326 +0,0 @@ -// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. -// This file is part of the "Nabla Engine". 
-// For conditions of distribution and use, see copyright notice in nabla.h - -#ifndef _CAMERA_IMPL_ -#define _CAMERA_IMPL_ - -#include -#include -#include -#include -#include - -class Camera -{ -public: - Camera() = default; - Camera(const nbl::core::vectorSIMDf& position, const nbl::core::vectorSIMDf& lookat, const nbl::core::matrix4SIMD& projection, float moveSpeed = 1.0f, float rotateSpeed = 1.0f, const nbl::core::vectorSIMDf& upVec = nbl::core::vectorSIMDf(0.0f, 1.0f, 0.0f), const nbl::core::vectorSIMDf& backupUpVec = nbl::core::vectorSIMDf(0.5f, 1.0f, 0.0f)) - : position(position) - , initialPosition(position) - , target(lookat) - , initialTarget(lookat) - , firstUpdate(true) - , moveSpeed(moveSpeed) - , rotateSpeed(rotateSpeed) - , upVector(upVec) - , backupUpVector(backupUpVec) - { - initDefaultKeysMap(); - allKeysUp(); - setProjectionMatrix(projection); - recomputeViewMatrix(); - } - - ~Camera() = default; - - enum E_CAMERA_MOVE_KEYS : uint8_t - { - ECMK_MOVE_FORWARD = 0, - ECMK_MOVE_BACKWARD, - ECMK_MOVE_LEFT, - ECMK_MOVE_RIGHT, - ECMK_COUNT, - }; - - inline void mapKeysToWASD() - { - keysMap[ECMK_MOVE_FORWARD] = nbl::ui::EKC_W; - keysMap[ECMK_MOVE_BACKWARD] = nbl::ui::EKC_S; - keysMap[ECMK_MOVE_LEFT] = nbl::ui::EKC_A; - keysMap[ECMK_MOVE_RIGHT] = nbl::ui::EKC_D; - } - - inline void mapKeysToArrows() - { - keysMap[ECMK_MOVE_FORWARD] = nbl::ui::EKC_UP_ARROW; - keysMap[ECMK_MOVE_BACKWARD] = nbl::ui::EKC_DOWN_ARROW; - keysMap[ECMK_MOVE_LEFT] = nbl::ui::EKC_LEFT_ARROW; - keysMap[ECMK_MOVE_RIGHT] = nbl::ui::EKC_RIGHT_ARROW; - } - - inline void mapKeysCustom(std::array& map) { keysMap = map; } - - inline const nbl::core::matrix4SIMD& getProjectionMatrix() const { return projMatrix; } - inline const nbl::core::matrix3x4SIMD& getViewMatrix() const { return viewMatrix; } - inline const nbl::core::matrix4SIMD& getConcatenatedMatrix() const { return concatMatrix; } - - inline void setProjectionMatrix(const nbl::core::matrix4SIMD& projection) - { - projMatrix = projection; - - const auto hlslMatMap = *reinterpret_cast(&projMatrix); // TEMPORARY TILL THE CAMERA CLASS IS REFACTORED TO WORK WITH HLSL MATRICIES! 
- { - leftHanded = nbl::hlsl::determinant(hlslMatMap) < 0.f; - } - concatMatrix = nbl::core::matrix4SIMD::concatenateBFollowedByAPrecisely(projMatrix, nbl::core::matrix4SIMD(viewMatrix)); - } - - inline void setPosition(const nbl::core::vectorSIMDf& pos) - { - position.set(pos); - recomputeViewMatrix(); - } - - inline const nbl::core::vectorSIMDf& getPosition() const { return position; } - - inline void setTarget(const nbl::core::vectorSIMDf& pos) - { - target.set(pos); - recomputeViewMatrix(); - } - - inline const nbl::core::vectorSIMDf& getTarget() const { return target; } - - inline void setUpVector(const nbl::core::vectorSIMDf& up) { upVector = up; } - - inline void setBackupUpVector(const nbl::core::vectorSIMDf& up) { backupUpVector = up; } - - inline const nbl::core::vectorSIMDf& getUpVector() const { return upVector; } - - inline const nbl::core::vectorSIMDf& getBackupUpVector() const { return backupUpVector; } - - inline const float getMoveSpeed() const { return moveSpeed; } - - inline void setMoveSpeed(const float _moveSpeed) { moveSpeed = _moveSpeed; } - - inline const float getRotateSpeed() const { return rotateSpeed; } - - inline void setRotateSpeed(const float _rotateSpeed) { rotateSpeed = _rotateSpeed; } - - inline void recomputeViewMatrix() - { - nbl::core::vectorSIMDf pos = position; - nbl::core::vectorSIMDf localTarget = nbl::core::normalize(target - pos); - - // if upvector and vector to the target are the same, we have a - // problem. so solve this problem: - nbl::core::vectorSIMDf up = nbl::core::normalize(upVector); - nbl::core::vectorSIMDf cross = nbl::core::cross(localTarget, up); - bool upVectorNeedsChange = nbl::core::lengthsquared(cross)[0] == 0; - if (upVectorNeedsChange) - up = nbl::core::normalize(backupUpVector); - - if (leftHanded) - viewMatrix = nbl::core::matrix3x4SIMD::buildCameraLookAtMatrixLH(pos, target, up); - else - viewMatrix = nbl::core::matrix3x4SIMD::buildCameraLookAtMatrixRH(pos, target, up); - concatMatrix = nbl::core::matrix4SIMD::concatenateBFollowedByAPrecisely(projMatrix, nbl::core::matrix4SIMD(viewMatrix)); - } - - inline bool getLeftHanded() const { return leftHanded; } - -public: - - void mouseProcess(const nbl::ui::IMouseEventChannel::range_t& events) - { - for (auto eventIt=events.begin(); eventIt!=events.end(); eventIt++) - { - auto ev = *eventIt; - - if(ev.type == nbl::ui::SMouseEvent::EET_CLICK && ev.clickEvent.mouseButton == nbl::ui::EMB_LEFT_BUTTON) - if(ev.clickEvent.action == nbl::ui::SMouseEvent::SClickEvent::EA_PRESSED) - mouseDown = true; - else if (ev.clickEvent.action == nbl::ui::SMouseEvent::SClickEvent::EA_RELEASED) - mouseDown = false; - - if(ev.type == nbl::ui::SMouseEvent::EET_MOVEMENT && mouseDown) - { - nbl::core::vectorSIMDf pos = getPosition(); - nbl::core::vectorSIMDf localTarget = getTarget() - pos; - - // Get Relative Rotation for localTarget in Radians - float relativeRotationX, relativeRotationY; - relativeRotationY = atan2(localTarget.X, localTarget.Z); - const double z1 = nbl::core::sqrt(localTarget.X*localTarget.X + localTarget.Z*localTarget.Z); - relativeRotationX = atan2(z1, localTarget.Y) - nbl::core::PI()/2; - - constexpr float RotateSpeedScale = 0.003f; - relativeRotationX -= ev.movementEvent.relativeMovementY * rotateSpeed * RotateSpeedScale * -1.0f; - float tmpYRot = ev.movementEvent.relativeMovementX * rotateSpeed * RotateSpeedScale * -1.0f; - - if (leftHanded) - relativeRotationY -= tmpYRot; - else - relativeRotationY += tmpYRot; - - const double MaxVerticalAngle = nbl::core::radians(88.0f); - - if 
(relativeRotationX > MaxVerticalAngle*2 && relativeRotationX < 2 * nbl::core::PI()-MaxVerticalAngle) - relativeRotationX = 2 * nbl::core::PI()-MaxVerticalAngle; - else - if (relativeRotationX > MaxVerticalAngle && relativeRotationX < 2 * nbl::core::PI()-MaxVerticalAngle) - relativeRotationX = MaxVerticalAngle; - - localTarget.set(0,0, nbl::core::max(1.f, nbl::core::length(pos)[0]), 1.f); - - nbl::core::matrix3x4SIMD mat; - mat.setRotation(nbl::core::quaternion(relativeRotationX, relativeRotationY, 0)); - mat.transformVect(localTarget); - - setTarget(localTarget + pos); - } - } - } - - void keyboardProcess(const nbl::ui::IKeyboardEventChannel::range_t& events) - { - for(uint32_t k = 0; k < E_CAMERA_MOVE_KEYS::ECMK_COUNT; ++k) - perActionDt[k] = 0.0; - - /* - * If a Key was already being held down from previous frames - * Compute with this assumption that the key will be held down for this whole frame as well, - * And If an UP event was sent It will get subtracted it from this value. (Currently Disabled Because we Need better Oracle) - */ - - for(uint32_t k = 0; k < E_CAMERA_MOVE_KEYS::ECMK_COUNT; ++k) - if(keysDown[k]) - { - auto timeDiff = std::chrono::duration_cast(nextPresentationTimeStamp - lastVirtualUpTimeStamp).count(); - assert(timeDiff >= 0); - perActionDt[k] += timeDiff; - } - - for (auto eventIt=events.begin(); eventIt!=events.end(); eventIt++) - { - const auto ev = *eventIt; - - // accumulate the periods for which a key was down - const auto timeDiff = std::chrono::duration_cast(nextPresentationTimeStamp - ev.timeStamp).count(); - assert(timeDiff >= 0); - - // handle camera movement - for (const auto logicalKey : { ECMK_MOVE_FORWARD, ECMK_MOVE_BACKWARD, ECMK_MOVE_LEFT, ECMK_MOVE_RIGHT }) - { - const auto code = keysMap[logicalKey]; - - if (ev.keyCode == code) - { - if (ev.action == nbl::ui::SKeyboardEvent::ECA_PRESSED && !keysDown[logicalKey]) - { - perActionDt[logicalKey] += timeDiff; - keysDown[logicalKey] = true; - } - else if (ev.action == nbl::ui::SKeyboardEvent::ECA_RELEASED) - { - // perActionDt[logicalKey] -= timeDiff; - keysDown[logicalKey] = false; - } - } - } - - // handle reset to default state - if (ev.keyCode == nbl::ui::EKC_HOME) - if (ev.action == nbl::ui::SKeyboardEvent::ECA_RELEASED) - { - position = initialPosition; - target = initialTarget; - recomputeViewMatrix(); - } - } - } - - void beginInputProcessing(std::chrono::microseconds _nextPresentationTimeStamp) - { - nextPresentationTimeStamp = _nextPresentationTimeStamp; - return; - } - - void endInputProcessing(std::chrono::microseconds _nextPresentationTimeStamp) - { - nbl::core::vectorSIMDf pos = getPosition(); - nbl::core::vectorSIMDf localTarget = getTarget() - pos; - - if (!firstUpdate) - { - nbl::core::vectorSIMDf movedir = localTarget; - movedir.makeSafe3D(); - movedir = nbl::core::normalize(movedir); - - constexpr float MoveSpeedScale = 0.02f; - - pos += movedir * perActionDt[E_CAMERA_MOVE_KEYS::ECMK_MOVE_FORWARD] * moveSpeed * MoveSpeedScale; - pos -= movedir * perActionDt[E_CAMERA_MOVE_KEYS::ECMK_MOVE_BACKWARD] * moveSpeed * MoveSpeedScale; - - // strafing - - // if upvector and vector to the target are the same, we have a - // problem. 
so solve this problem: - nbl::core::vectorSIMDf up = nbl::core::normalize(upVector); - nbl::core::vectorSIMDf cross = nbl::core::cross(localTarget, up); - bool upVectorNeedsChange = nbl::core::lengthsquared(cross)[0] == 0; - if (upVectorNeedsChange) - { - up = nbl::core::normalize(backupUpVector); - } - - nbl::core::vectorSIMDf strafevect = localTarget; - if (leftHanded) - strafevect = nbl::core::cross(strafevect, up); - else - strafevect = nbl::core::cross(up, strafevect); - - strafevect = nbl::core::normalize(strafevect); - - pos += strafevect * perActionDt[E_CAMERA_MOVE_KEYS::ECMK_MOVE_LEFT] * moveSpeed * MoveSpeedScale; - pos -= strafevect * perActionDt[E_CAMERA_MOVE_KEYS::ECMK_MOVE_RIGHT] * moveSpeed * MoveSpeedScale; - } - else - firstUpdate = false; - - setPosition(pos); - setTarget(localTarget+pos); - - lastVirtualUpTimeStamp = nextPresentationTimeStamp; - } - -private: - - inline void initDefaultKeysMap() { mapKeysToWASD(); } - - inline void allKeysUp() - { - for (uint32_t i=0; i< E_CAMERA_MOVE_KEYS::ECMK_COUNT; ++i) - keysDown[i] = false; - - mouseDown = false; - } - -private: - nbl::core::vectorSIMDf initialPosition, initialTarget, position, target, upVector, backupUpVector; // TODO: make first 2 const + add default copy constructor - nbl::core::matrix3x4SIMD viewMatrix; - nbl::core::matrix4SIMD concatMatrix, projMatrix; - - float moveSpeed, rotateSpeed; - bool leftHanded, firstUpdate = true, mouseDown = false; - - std::array keysMap = { {nbl::ui::EKC_NONE} }; // map camera E_CAMERA_MOVE_KEYS to corresponding Nabla key codes, by default camera uses WSAD to move - // TODO: make them use std::array - bool keysDown[E_CAMERA_MOVE_KEYS::ECMK_COUNT] = {}; - double perActionDt[E_CAMERA_MOVE_KEYS::ECMK_COUNT] = {}; // durations for which the key was being held down from lastVirtualUpTimeStamp(=last "guessed" presentation time) to nextPresentationTimeStamp - - std::chrono::microseconds nextPresentationTimeStamp, lastVirtualUpTimeStamp; -}; - -#endif // _CAMERA_IMPL_ \ No newline at end of file diff --git a/common/include/CGeomtryCreatorScene.hpp b/common/include/CGeomtryCreatorScene.hpp index ab90f59b9..7180d4019 100644 --- a/common/include/CGeomtryCreatorScene.hpp +++ b/common/include/CGeomtryCreatorScene.hpp @@ -10,6 +10,21 @@ namespace nbl::scene::geometrycreator { +#define EXPOSE_NABLA_NAMESPACES() using namespace nbl; \ +using namespace core; \ +using namespace asset; \ +using namespace video; \ +using namespace scene; \ +using namespace system + +struct Traits +{ + static constexpr auto DefaultFramebufferW = 1280u, DefaultFramebufferH = 720u; + static constexpr auto ColorFboAttachmentFormat = nbl::asset::EF_R8G8B8A8_SRGB, DepthFboAttachmentFormat = nbl::asset::EF_D32_SFLOAT; + static constexpr auto Samples = nbl::video::IGPUImage::ESCF_1_BIT; + static constexpr nbl::video::IGPUCommandBuffer::SClearColorValue clearColor = { .float32 = {0.f,0.f,0.f,1.f} }; + static constexpr nbl::video::IGPUCommandBuffer::SClearDepthStencilValue clearDepth = { .depth = 0.f }; +}; enum ObjectType : uint8_t { @@ -32,38 +47,16 @@ struct ObjectMeta std::string_view name = "Unknown"; }; -constexpr static inline struct ClearValues -{ - nbl::video::IGPUCommandBuffer::SClearColorValue color = { .float32 = {0.f,0.f,0.f,1.f} }; - nbl::video::IGPUCommandBuffer::SClearDepthStencilValue depth = { .depth = 0.f }; -} clear; - -#define TYPES_IMPL_BOILERPLATE(WithConverter) struct Types \ -{ \ - using descriptor_set_layout_t = std::conditional_t; \ - using pipeline_layout_t = std::conditional_t; \ - using 
renderpass_t = std::conditional_t; \ - using image_view_t = std::conditional_t; \ - using image_t = std::conditional_t; \ - using buffer_t = std::conditional_t; \ - using shader_t = std::conditional_t; \ - using graphics_pipeline_t = std::conditional_t; \ - using descriptor_set = std::conditional_t; \ -} - -template -struct ResourcesBundleBase +struct ResourcesBundle : public virtual nbl::core::IReferenceCounted { - TYPES_IMPL_BOILERPLATE(withAssetConverter); - struct ReferenceObject { struct Bindings { - nbl::asset::SBufferBinding vertex, index; + nbl::asset::SBufferBinding vertex, index; }; - nbl::core::smart_refctd_ptr pipeline = nullptr; + nbl::core::smart_refctd_ptr pipeline = nullptr; Bindings bindings; nbl::asset::E_INDEX_TYPE indexType = nbl::asset::E_INDEX_TYPE::EIT_UNKNOWN; @@ -72,526 +65,129 @@ struct ResourcesBundleBase using ReferenceDrawHook = std::pair; - nbl::core::smart_refctd_ptr renderpass; std::array objects; - nbl::asset::SBufferBinding ubo; - - struct - { - nbl::core::smart_refctd_ptr color, depth; - } attachments; - - nbl::core::smart_refctd_ptr descriptorSet; -}; - -struct ResourcesBundle : public ResourcesBundleBase -{ - using base_t = ResourcesBundleBase; -}; - -#define EXPOSE_NABLA_NAMESPACES() using namespace nbl; \ -using namespace core; \ -using namespace asset; \ -using namespace video; \ -using namespace scene; \ -using namespace system - -template -class ResourceBuilder -{ -public: - TYPES_IMPL_BOILERPLATE(withAssetConverter); - - using this_t = ResourceBuilder; - - ResourceBuilder(nbl::video::IUtilities* const _utilities, nbl::video::IGPUCommandBuffer* const _commandBuffer, nbl::system::ILogger* const _logger, const nbl::asset::IGeometryCreator* const _geometryCreator) - : utilities(_utilities), commandBuffer(_commandBuffer), logger(_logger), geometries(_geometryCreator) - { - assert(utilities); - assert(logger); - } + nbl::core::smart_refctd_ptr renderpass; + nbl::core::smart_refctd_ptr dsLayout; - /* - if (withAssetConverter) then - -> .build cpu objects - else - -> .build gpu objects & record any resource update upload transfers into command buffer - */ - - inline bool build() + static inline nbl::core::smart_refctd_ptr create(nbl::video::ILogicalDevice* const device, nbl::system::ILogger* const logger, nbl::video::CThreadSafeQueueAdapter* transferCapableQueue, const nbl::asset::IGeometryCreator* gc) { EXPOSE_NABLA_NAMESPACES(); - if constexpr (!withAssetConverter) - { - commandBuffer->reset(IGPUCommandBuffer::RESET_FLAGS::RELEASE_RESOURCES_BIT); - commandBuffer->begin(IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT); - commandBuffer->beginDebugMarker("Resources builder's buffers upload [manual]"); - } - - using functor_t = std::function; - - auto work = std::to_array - ({ - functor_t(std::bind(&this_t::createDescriptorSetLayout, this)), - functor_t(std::bind(&this_t::createPipelineLayout, this)), - functor_t(std::bind(&this_t::createRenderpass, this)), - functor_t(std::bind(&this_t::createFramebufferAttachments, this)), - functor_t(std::bind(&this_t::createShaders, this)), - functor_t(std::bind(&this_t::createGeometries, this)), - functor_t(std::bind(&this_t::createViewParametersUboBuffer, this)), - functor_t(std::bind(&this_t::createDescriptorSet, this)) - }); - - for (auto& task : work) - if (!task()) - return false; - - if constexpr (!withAssetConverter) - commandBuffer->end(); - - return true; - } + if (!device) + return nullptr; - /* - if (withAssetConverter) then - -> .convert cpu objects to gpu & update gpu buffers - else - -> update gpu buffers - 
*/ + if (!logger) + return nullptr; - inline bool finalize(ResourcesBundle& output, nbl::video::CThreadSafeQueueAdapter* transferCapableQueue) - { - EXPOSE_NABLA_NAMESPACES(); + if (!transferCapableQueue) + return nullptr; - // TODO: use multiple command buffers - std::array commandBuffers = {}; - { - commandBuffers.front().cmdbuf = commandBuffer; - } + auto cPool = device->createCommandPool(transferCapableQueue->getFamilyIndex(), IGPUCommandPool::CREATE_FLAGS::RESET_COMMAND_BUFFER_BIT); - if constexpr (withAssetConverter) + if (!cPool) { - // note that asset converter records basic transfer uploads itself, we only begin the recording with ONE_TIME_SUBMIT_BIT - commandBuffer->reset(IGPUCommandBuffer::RESET_FLAGS::RELEASE_RESOURCES_BIT); - commandBuffer->begin(IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT); - commandBuffer->beginDebugMarker("Resources builder's buffers upload [asset converter]"); - - // asset converter - scratch at this point has ready to convert cpu resources - smart_refctd_ptr converter = CAssetConverter::create({ .device = utilities->getLogicalDevice(),.optimizer = {} }); - CAssetConverter::SInputs inputs = {}; - inputs.logger = logger; - - struct ProxyCpuHooks - { - using object_size_t = std::tuple_size; - - std::array renderpass; - std::array pipelines; - std::array buffers; - std::array attachments; - std::array descriptorSet; - } hooks; - - enum AttachmentIx - { - AI_COLOR = 0u, - AI_DEPTH = 1u, - - AI_COUNT - }; - - // gather CPU assets into span memory views - { - hooks.renderpass.front() = scratch.renderpass.get(); - for (uint32_t i = 0u; i < hooks.pipelines.size(); ++i) - { - auto& [reference, meta] = scratch.objects[static_cast(i)]; - hooks.pipelines[i] = reference.pipeline.get(); - - // [[ [vertex, index] [vertex, index] [vertex, index] ... [ubo] ]] - hooks.buffers[2u * i + 0u] = reference.bindings.vertex.buffer.get(); - hooks.buffers[2u * i + 1u] = reference.bindings.index.buffer.get(); - } - hooks.buffers.back() = scratch.ubo.buffer.get(); - hooks.attachments[AI_COLOR] = scratch.attachments.color.get(); - hooks.attachments[AI_DEPTH] = scratch.attachments.depth.get(); - hooks.descriptorSet.front() = scratch.descriptorSet.get(); - } - - // assign the CPU hooks to converter's inputs - { - std::get>(inputs.assets) = hooks.renderpass; - std::get>(inputs.assets) = hooks.pipelines; - std::get>(inputs.assets) = hooks.buffers; - // std::get>(inputs.assets) = hooks.attachments; // NOTE: THIS IS NOT IMPLEMENTED YET IN CONVERTER! - std::get>(inputs.assets) = hooks.descriptorSet; - } - - // reserve and create the GPU object handles - auto reservation = converter->reserve(inputs); - { - auto prepass = [&](const auto& references) -> bool - { - // retrieve the reserved handles - auto objects = reservation.getGPUObjects(); - - uint32_t counter = {}; - for (auto& object : objects) - { - // anything that fails to be reserved is a nullptr in the span of GPU Objects - auto gpu = object.value; - auto* reference = references[counter]; - - if (reference) - { - // validate - if (!gpu) // throw errors only if corresponding cpu hook was VALID (eg. 
we may have nullptr for some index buffers in the span for converter but it's OK, I'm too lazy to filter them before passing to the converter inputs and don't want to deal with dynamic alloc) - { - logger->log("Failed to convert a CPU object to GPU!", ILogger::ELL_ERROR); - return false; - } - } - - ++counter; - } - - return true; - }; - - prepass.template operator() < ICPURenderpass > (hooks.renderpass); - prepass.template operator() < ICPUGraphicsPipeline > (hooks.pipelines); - prepass.template operator() < ICPUBuffer > (hooks.buffers); - // validate.template operator() < ICPUImageView > (hooks.attachments); - prepass.template operator() < ICPUDescriptorSet > (hooks.descriptorSet); - } - - auto semaphore = utilities->getLogicalDevice()->createSemaphore(0u); - - // TODO: compute submit as well for the images' mipmaps - SIntendedSubmitInfo transfer = {}; - transfer.queue = transferCapableQueue; - transfer.scratchCommandBuffers = commandBuffers; - transfer.scratchSemaphore = { - .semaphore = semaphore.get(), - .value = 0u, - .stageMask = PIPELINE_STAGE_FLAGS::ALL_TRANSFER_BITS - }; - // issue the convert call - { - CAssetConverter::SConvertParams params = {}; - params.utilities = utilities; - params.transfer = &transfer; - - // basically it records all data uploads and submits them right away - auto future = reservation.convert(params); - if (future.copy()!=IQueue::RESULT::SUCCESS) - { - logger->log("Failed to await submission feature!", ILogger::ELL_ERROR); - return false; - } - - // assign gpu objects to output - auto& base = static_cast(output); - { - auto&& [renderpass, pipelines, buffers, descriptorSet] = std::make_tuple(reservation.getGPUObjects().front().value, reservation.getGPUObjects(), reservation.getGPUObjects(), reservation.getGPUObjects().front().value); - { - base.renderpass = renderpass; - for (uint32_t i = 0u; i < pipelines.size(); ++i) - { - const auto type = static_cast(i); - const auto& [rcpu, rmeta] = scratch.objects[type]; - auto& [gpu, meta] = base.objects[type]; - - gpu.pipeline = pipelines[i].value; - // [[ [vertex, index] [vertex, index] [vertex, index] ... [ubo] ]] - gpu.bindings.vertex = {.offset = 0u, .buffer = buffers[2u * i + 0u].value}; - gpu.bindings.index = {.offset = 0u, .buffer = buffers[2u * i + 1u].value}; - - gpu.indexCount = rcpu.indexCount; - gpu.indexType = rcpu.indexType; - meta.name = rmeta.name; - meta.type = rmeta.type; - } - base.ubo = {.offset = 0u, .buffer = buffers.back().value}; - base.descriptorSet = descriptorSet; - - /* - // base.attachments.color = attachments[AI_COLOR].value; - // base.attachments.depth = attachments[AI_DEPTH].value; - - note conversion of image views is not yet supported by the asset converter - - it's complicated, we have to kinda temporary ignore DRY a bit here to not break the design which is correct - - TEMPORARY: we patch attachments by allocating them ourselves here given cpu instances & parameters - TODO: remove following code once asset converter works with image views & update stuff - */ - - for (uint32_t i = 0u; i < AI_COUNT; ++i) - { - const auto* reference = hooks.attachments[i]; - auto& out = (i == AI_COLOR ? 
base.attachments.color : base.attachments.depth); - - const auto& viewParams = reference->getCreationParameters(); - const auto& imageParams = viewParams.image->getCreationParameters(); - - auto image = utilities->getLogicalDevice()->createImage - ( - IGPUImage::SCreationParams - ({ - .type = imageParams.type, - .samples = imageParams.samples, - .format = imageParams.format, - .extent = imageParams.extent, - .mipLevels = imageParams.mipLevels, - .arrayLayers = imageParams.arrayLayers, - .usage = imageParams.usage - }) - ); - - if (!image) - { - logger->log("Could not create image!", ILogger::ELL_ERROR); - return false; - } - - bool IS_DEPTH = isDepthOrStencilFormat(imageParams.format); - std::string_view DEBUG_NAME = IS_DEPTH ? "UI Scene Depth Attachment Image" : "UI Scene Color Attachment Image"; - image->setObjectDebugName(DEBUG_NAME.data()); - - if (!utilities->getLogicalDevice()->allocate(image->getMemoryReqs(), image.get()).isValid()) - { - logger->log("Could not allocate memory for an image!", ILogger::ELL_ERROR); - return false; - } - - out = utilities->getLogicalDevice()->createImageView - ( - IGPUImageView::SCreationParams - ({ - .flags = viewParams.flags, - .subUsages = viewParams.subUsages, - .image = std::move(image), - .viewType = viewParams.viewType, - .format = viewParams.format, - .subresourceRange = viewParams.subresourceRange - }) - ); - - if (!out) - { - logger->log("Could not create image view!", ILogger::ELL_ERROR); - return false; - } - } - - logger->log("Image View attachments has been allocated by hand after asset converter successful submit becasuse it doesn't support converting them yet!", ILogger::ELL_WARNING); - } - } - } + logger->log("Couldn't create command pool!", ILogger::ELL_ERROR); + return nullptr; } - else - { - auto completed = utilities->getLogicalDevice()->createSemaphore(0u); - - std::array signals; - { - auto& signal = signals.front(); - signal.value = 1; - signal.stageMask = bitflag(PIPELINE_STAGE_FLAGS::ALL_TRANSFER_BITS); - signal.semaphore = completed.get(); - } - - const IQueue::SSubmitInfo infos [] = - { - { - .waitSemaphores = {}, - .commandBuffers = commandBuffers, // note that here our command buffer is already recorded! 
- .signalSemaphores = signals - } - }; - if (transferCapableQueue->submit(infos) != IQueue::RESULT::SUCCESS) - { - logger->log("Failed to submit transfer upload operations!", ILogger::ELL_ERROR); - return false; - } - - const ISemaphore::SWaitInfo info [] = - { { - .semaphore = completed.get(), - .value = 1 - } }; - - utilities->getLogicalDevice()->blockForSemaphores(info); - - static_cast(output) = static_cast(scratch); // scratch has all ready to use allocated gpu resources with uploaded memory so now just assign resources to base output - } + nbl::core::smart_refctd_ptr cmd; - // write the descriptor set + if (!cPool->createCommandBuffers(IGPUCommandPool::BUFFER_LEVEL::PRIMARY, { &cmd , 1 })) { - // descriptor write ubo - IGPUDescriptorSet::SWriteDescriptorSet write; - write.dstSet = output.descriptorSet.get(); - write.binding = 0; - write.arrayElement = 0u; - write.count = 1u; - - IGPUDescriptorSet::SDescriptorInfo info; - { - info.desc = smart_refctd_ptr(output.ubo.buffer); - info.info.buffer.offset = output.ubo.offset; - info.info.buffer.size = output.ubo.buffer->getSize(); - } - - write.info = &info; - - if(!utilities->getLogicalDevice()->updateDescriptorSets(1u, &write, 0u, nullptr)) - { - logger->log("Could not write descriptor set!", ILogger::ELL_ERROR); - return false; - } + logger->log("Couldn't create command buffer!", ILogger::ELL_ERROR); + return nullptr; } - return true; - } + if (!cmd) + return nullptr; -private: - bool createDescriptorSetLayout() - { - EXPOSE_NABLA_NAMESPACES(); + cmd->reset(IGPUCommandBuffer::RESET_FLAGS::RELEASE_RESOURCES_BIT); + cmd->begin(IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT); + cmd->beginDebugMarker("GC Scene resources upload buffer"); - typename Types::descriptor_set_layout_t::SBinding bindings[] = + //! 
descriptor set layout + + IGPUDescriptorSetLayout::SBinding bindings[] = { { .binding = 0u, .type = IDescriptor::E_TYPE::ET_UNIFORM_BUFFER, - .createFlags = Types::descriptor_set_layout_t::SBinding::E_CREATE_FLAGS::ECF_NONE, + .createFlags = IGPUDescriptorSetLayout::SBinding::E_CREATE_FLAGS::ECF_NONE, .stageFlags = IShader::E_SHADER_STAGE::ESS_VERTEX | IShader::E_SHADER_STAGE::ESS_FRAGMENT, .count = 1u, } }; - if constexpr (withAssetConverter) - scratch.descriptorSetLayout = make_smart_refctd_ptr(bindings); - else - scratch.descriptorSetLayout = utilities->getLogicalDevice()->createDescriptorSetLayout(bindings); + auto dsLayout = device->createDescriptorSetLayout(bindings); - if (!scratch.descriptorSetLayout) + if (!dsLayout) { logger->log("Could not descriptor set layout!", ILogger::ELL_ERROR); - return false; - } - - return true; - } - - bool createDescriptorSet() - { - EXPOSE_NABLA_NAMESPACES(); - - if constexpr (withAssetConverter) - scratch.descriptorSet = make_smart_refctd_ptr(smart_refctd_ptr(scratch.descriptorSetLayout)); - else - { - const IGPUDescriptorSetLayout* const layouts[] = { scratch.descriptorSetLayout.get()}; - const uint32_t setCounts[] = { 1u }; - - // note descriptor set has back smart pointer to its pool, so we dont need to keep it explicitly - auto pool = utilities->getLogicalDevice()->createDescriptorPoolForDSLayouts(IDescriptorPool::E_CREATE_FLAGS::ECF_NONE, layouts, setCounts); - - if (!pool) - { - logger->log("Could not create Descriptor Pool!", ILogger::ELL_ERROR); - return false; - } - - pool->createDescriptorSets(layouts, &scratch.descriptorSet); - } - - if (!scratch.descriptorSet) - { - logger->log("Could not create Descriptor Set!", ILogger::ELL_ERROR); - return false; + return nullptr; } - return true; - } - - bool createPipelineLayout() - { - EXPOSE_NABLA_NAMESPACES(); - - const std::span range = {}; - - if constexpr (withAssetConverter) - scratch.pipelineLayout = make_smart_refctd_ptr(range, nullptr, smart_refctd_ptr(scratch.descriptorSetLayout), nullptr, nullptr); - else - scratch.pipelineLayout = utilities->getLogicalDevice()->createPipelineLayout(range, nullptr, smart_refctd_ptr(scratch.descriptorSetLayout), nullptr, nullptr); + //! pipeline layout + + auto pipelineLayout = device->createPipelineLayout({}, nullptr, smart_refctd_ptr(dsLayout), nullptr, nullptr); - if (!scratch.pipelineLayout) + if (!pipelineLayout) { logger->log("Could not create pipeline layout!", ILogger::ELL_ERROR); - return false; + return nullptr; } - return true; - } - - bool createRenderpass() - { - EXPOSE_NABLA_NAMESPACES(); - - static constexpr Types::renderpass_t::SCreationParams::SColorAttachmentDescription colorAttachments[] = + //! 
renderpass + + static constexpr IGPURenderpass::SCreationParams::SColorAttachmentDescription colorAttachments[] = { { { { - .format = ColorFboAttachmentFormat, - .samples = Samples, + .format = Traits::ColorFboAttachmentFormat, + .samples = Traits::Samples, .mayAlias = false }, - /* .loadOp = */ Types::renderpass_t::LOAD_OP::CLEAR, - /* .storeOp = */ Types::renderpass_t::STORE_OP::STORE, - /* .initialLayout = */ Types::image_t::LAYOUT::UNDEFINED, - /* .finalLayout = */ Types::image_t::LAYOUT::READ_ONLY_OPTIMAL + /* .loadOp = */ IGPURenderpass::LOAD_OP::CLEAR, + /* .storeOp = */ IGPURenderpass::STORE_OP::STORE, + /* .initialLayout = */ IGPUImage::LAYOUT::UNDEFINED, + /* .finalLayout = */ IGPUImage::LAYOUT::READ_ONLY_OPTIMAL } }, - Types::renderpass_t::SCreationParams::ColorAttachmentsEnd + IGPURenderpass::SCreationParams::ColorAttachmentsEnd }; - static constexpr Types::renderpass_t::SCreationParams::SDepthStencilAttachmentDescription depthAttachments[] = + static constexpr IGPURenderpass::SCreationParams::SDepthStencilAttachmentDescription depthAttachments[] = { { { { - .format = DepthFboAttachmentFormat, - .samples = Samples, + .format = Traits::DepthFboAttachmentFormat, + .samples = Traits::Samples, .mayAlias = false }, - /* .loadOp = */ {Types::renderpass_t::LOAD_OP::CLEAR}, - /* .storeOp = */ {Types::renderpass_t::STORE_OP::STORE}, - /* .initialLayout = */ {Types::image_t::LAYOUT::UNDEFINED}, - /* .finalLayout = */ {Types::image_t::LAYOUT::ATTACHMENT_OPTIMAL} + /* .loadOp = */ {IGPURenderpass::LOAD_OP::CLEAR}, + /* .storeOp = */ {IGPURenderpass::STORE_OP::STORE}, + /* .initialLayout = */ {IGPUImage::LAYOUT::UNDEFINED}, + /* .finalLayout = */ {IGPUImage::LAYOUT::ATTACHMENT_OPTIMAL} } }, - Types::renderpass_t::SCreationParams::DepthStencilAttachmentsEnd + IGPURenderpass::SCreationParams::DepthStencilAttachmentsEnd }; - typename Types::renderpass_t::SCreationParams::SSubpassDescription subpasses[] = + IGPURenderpass::SCreationParams::SSubpassDescription subpasses[] = { {}, - Types::renderpass_t::SCreationParams::SubpassesEnd + IGPURenderpass::SCreationParams::SubpassesEnd }; - subpasses[0].depthStencilAttachment.render = { .attachmentIndex = 0u,.layout = Types::image_t::LAYOUT::ATTACHMENT_OPTIMAL }; - subpasses[0].colorAttachments[0] = { .render = {.attachmentIndex = 0u, .layout = Types::image_t::LAYOUT::ATTACHMENT_OPTIMAL } }; + subpasses[0].depthStencilAttachment.render = { .attachmentIndex = 0u,.layout = IGPUImage::LAYOUT::ATTACHMENT_OPTIMAL }; + subpasses[0].colorAttachments[0] = { .render = {.attachmentIndex = 0u, .layout = IGPUImage::LAYOUT::ATTACHMENT_OPTIMAL } }; - static constexpr Types::renderpass_t::SCreationParams::SSubpassDependency dependencies[] = + static constexpr IGPURenderpass::SCreationParams::SSubpassDependency dependencies[] = { // wipe-transition of Color to ATTACHMENT_OPTIMAL { - .srcSubpass = Types::renderpass_t::SCreationParams::SSubpassDependency::External, + .srcSubpass = IGPURenderpass::SCreationParams::SSubpassDependency::External, .dstSubpass = 0, .memoryBarrier = { @@ -609,7 +205,7 @@ class ResourceBuilder // color from ATTACHMENT_OPTIMAL to PRESENT_SRC { .srcSubpass = 0, - .dstSubpass = Types::renderpass_t::SCreationParams::SSubpassDependency::External, + .dstSubpass = IGPURenderpass::SCreationParams::SSubpassDependency::External, .memoryBarrier = { // last place where the depth can get modified @@ -624,199 +220,117 @@ class ResourceBuilder } // leave view offsets and flags default }, - Types::renderpass_t::SCreationParams::DependenciesEnd + 
IGPURenderpass::SCreationParams::DependenciesEnd }; - typename Types::renderpass_t::SCreationParams params = {}; + IGPURenderpass::SCreationParams params = {}; params.colorAttachments = colorAttachments; params.depthStencilAttachments = depthAttachments; params.subpasses = subpasses; params.dependencies = dependencies; - if constexpr (withAssetConverter) - scratch.renderpass = ICPURenderpass::create(params); - else - scratch.renderpass = utilities->getLogicalDevice()->createRenderpass(params); + auto renderpass = device->createRenderpass(params); - if (!scratch.renderpass) + if (!renderpass) { logger->log("Could not create render pass!", ILogger::ELL_ERROR); - return false; + return nullptr; } - return true; - } - - bool createFramebufferAttachments() - { - EXPOSE_NABLA_NAMESPACES(); - - auto createImageView = [&](smart_refctd_ptr& outView) -> smart_refctd_ptr - { - constexpr bool IS_DEPTH = isDepthOrStencilFormat(); - constexpr auto USAGE = [](const bool isDepth) - { - bitflag usage = Types::image_t::EUF_RENDER_ATTACHMENT_BIT; - - if (!isDepth) - usage |= Types::image_t::EUF_SAMPLED_BIT; - - return usage; - }(IS_DEPTH); - constexpr auto ASPECT = IS_DEPTH ? IImage::E_ASPECT_FLAGS::EAF_DEPTH_BIT : IImage::E_ASPECT_FLAGS::EAF_COLOR_BIT; - constexpr std::string_view DEBUG_NAME = IS_DEPTH ? "UI Scene Depth Attachment Image" : "UI Scene Color Attachment Image"; - { - smart_refctd_ptr image; - { - auto params = typename Types::image_t::SCreationParams( - { - .type = Types::image_t::ET_2D, - .samples = Samples, - .format = format, - .extent = { FramebufferW, FramebufferH, 1u }, - .mipLevels = 1u, - .arrayLayers = 1u, - .usage = USAGE - }); - - if constexpr (withAssetConverter) - image = ICPUImage::create(params); - else - image = utilities->getLogicalDevice()->createImage(std::move(params)); - } - - if (!image) - { - logger->log("Could not create image!", ILogger::ELL_ERROR); - return nullptr; - } - - if constexpr (withAssetConverter) - { - auto dummyBuffer = make_smart_refctd_ptr(FramebufferW * FramebufferH * getTexelOrBlockBytesize()); - dummyBuffer->setContentHash(dummyBuffer->computeContentHash()); - - auto regions = make_refctd_dynamic_array>(1u); - auto& region = regions->front(); - - region.imageSubresource = { .aspectMask = ASPECT, .mipLevel = 0u, .baseArrayLayer = 0u, .layerCount = 0u }; - region.bufferOffset = 0u; - region.bufferRowLength = IImageAssetHandlerBase::calcPitchInBlocks(FramebufferW, getTexelOrBlockBytesize()); - region.bufferImageHeight = 0u; - region.imageOffset = { 0u, 0u, 0u }; - region.imageExtent = { FramebufferW, FramebufferH, 1u }; - - if (!image->setBufferAndRegions(std::move(dummyBuffer), regions)) - { - logger->log("Could not set image's regions!", ILogger::ELL_ERROR); - return nullptr; - } - image->setContentHash(image->computeContentHash()); - } - else - { - image->setObjectDebugName(DEBUG_NAME.data()); - - if (!utilities->getLogicalDevice()->allocate(image->getMemoryReqs(), image.get()).isValid()) - { - logger->log("Could not allocate memory for an image!", ILogger::ELL_ERROR); - return nullptr; - } - } - - auto params = typename Types::image_view_t::SCreationParams - ({ - .flags = Types::image_view_t::ECF_NONE, - .subUsages = USAGE, - .image = std::move(image), - .viewType = Types::image_view_t::ET_2D, - .format = format, - .subresourceRange = { .aspectMask = ASPECT, .baseMipLevel = 0u, .levelCount = 1u, .baseArrayLayer = 0u, .layerCount = 1u } - }); - - if constexpr (withAssetConverter) - outView = make_smart_refctd_ptr(std::move(params)); - else - outView = 
utilities->getLogicalDevice()->createImageView(std::move(params)); - - if (!outView) - { - logger->log("Could not create image view!", ILogger::ELL_ERROR); - return nullptr; - } - - return smart_refctd_ptr(outView); - } - }; - - const bool allocated = createImageView.template operator() < ColorFboAttachmentFormat > (scratch.attachments.color) && createImageView.template operator() < DepthFboAttachmentFormat > (scratch.attachments.depth); - - if (!allocated) - { - logger->log("Could not allocate frame buffer's attachments!", ILogger::ELL_ERROR); - return false; - } + //! shaders + + auto createShader = [&](IShader::E_SHADER_STAGE stage, smart_refctd_ptr& outShader) -> smart_refctd_ptr + { + const SBuiltinFile& in = ::geometry::creator::spirv::builtin::get_resource(); + const auto buffer = make_smart_refctd_ptr, true> >(in.size, (void*)in.contents, adopt_memory); + auto shader = make_smart_refctd_ptr(smart_refctd_ptr(buffer), stage, IShader::E_CONTENT_TYPE::ECT_SPIRV, ""); - return true; - } + outShader = device->createShader(shader.get()); - bool createShaders() - { - EXPOSE_NABLA_NAMESPACES(); + return outShader; + }; - auto createShader = [&](IShader::E_SHADER_STAGE stage, smart_refctd_ptr& outShader) -> smart_refctd_ptr + struct GeometriesCpu { - // TODO: use SPIRV loader & our ::system ns to get those cpu shaders, do not create myself (shit I forgot it exists) + enum GeometryShader + { + GP_BASIC = 0, + GP_CONE, + GP_ICO, - const SBuiltinFile& in = ::geometry::creator::spirv::builtin::get_resource(); - const auto buffer = make_smart_refctd_ptr, true> >(in.size, (void*)in.contents, adopt_memory); - auto shader = make_smart_refctd_ptr(smart_refctd_ptr(buffer), stage, IShader::E_CONTENT_TYPE::ECT_SPIRV, ""); // must create cpu instance regardless underlying type + GP_COUNT + }; + + struct ReferenceObjectCpu + { + ObjectMeta meta; + GeometryShader shadersType; + nbl::asset::CGeometryCreator::return_type data; + }; - if constexpr (withAssetConverter) + GeometriesCpu(const nbl::asset::IGeometryCreator* _gc) + : gc(_gc), + objects + ({ + ReferenceObjectCpu {.meta = {.type = OT_CUBE, .name = "Cube Mesh" }, .shadersType = GP_BASIC, .data = gc->createCubeMesh(nbl::core::vector3df(1.f, 1.f, 1.f)) }, + ReferenceObjectCpu {.meta = {.type = OT_SPHERE, .name = "Sphere Mesh" }, .shadersType = GP_BASIC, .data = gc->createSphereMesh(2, 16, 16) }, + ReferenceObjectCpu {.meta = {.type = OT_CYLINDER, .name = "Cylinder Mesh" }, .shadersType = GP_BASIC, .data = gc->createCylinderMesh(2, 2, 20) }, + ReferenceObjectCpu {.meta = {.type = OT_RECTANGLE, .name = "Rectangle Mesh" }, .shadersType = GP_BASIC, .data = gc->createRectangleMesh(nbl::core::vector2df_SIMD(1.5, 3)) }, + ReferenceObjectCpu {.meta = {.type = OT_DISK, .name = "Disk Mesh" }, .shadersType = GP_BASIC, .data = gc->createDiskMesh(2, 30) }, + ReferenceObjectCpu {.meta = {.type = OT_ARROW, .name = "Arrow Mesh" }, .shadersType = GP_BASIC, .data = gc->createArrowMesh() }, + ReferenceObjectCpu {.meta = {.type = OT_CONE, .name = "Cone Mesh" }, .shadersType = GP_CONE, .data = gc->createConeMesh(2, 3, 10) }, + ReferenceObjectCpu {.meta = {.type = OT_ICOSPHERE, .name = "Icoshpere Mesh" }, .shadersType = GP_ICO, .data = gc->createIcoSphere(1, 3, true) } + }) { - buffer->setContentHash(buffer->computeContentHash()); - outShader = std::move(shader); + gc = nullptr; // one shot } - else - outShader = utilities->getLogicalDevice()->createShader(shader.get()); - return outShader; + private: + const nbl::asset::IGeometryCreator* gc; + + public: + const std::array 
objects; + }; + + struct Shaders + { + nbl::core::smart_refctd_ptr vertex = nullptr, fragment = nullptr; }; - typename ResourcesBundleScratch::Shaders& basic = scratch.shaders[GeometriesCpu::GP_BASIC]; + GeometriesCpu geometries(gc); + std::array shaders; + + auto& basic = shaders[GeometriesCpu::GP_BASIC]; createShader.template operator() < NBL_CORE_UNIQUE_STRING_LITERAL_TYPE("geometryCreator/spirv/gc.basic.vertex.spv") > (IShader::E_SHADER_STAGE::ESS_VERTEX, basic.vertex); createShader.template operator() < NBL_CORE_UNIQUE_STRING_LITERAL_TYPE("geometryCreator/spirv/gc.basic.fragment.spv") > (IShader::E_SHADER_STAGE::ESS_FRAGMENT, basic.fragment); - typename ResourcesBundleScratch::Shaders& cone = scratch.shaders[GeometriesCpu::GP_CONE]; + auto& cone = shaders[GeometriesCpu::GP_CONE]; createShader.template operator() < NBL_CORE_UNIQUE_STRING_LITERAL_TYPE("geometryCreator/spirv/gc.cone.vertex.spv") > (IShader::E_SHADER_STAGE::ESS_VERTEX, cone.vertex); createShader.template operator() < NBL_CORE_UNIQUE_STRING_LITERAL_TYPE("geometryCreator/spirv/gc.basic.fragment.spv") > (IShader::E_SHADER_STAGE::ESS_FRAGMENT, cone.fragment); // note we reuse fragment from basic! - typename ResourcesBundleScratch::Shaders& ico = scratch.shaders[GeometriesCpu::GP_ICO]; + auto& ico = shaders[GeometriesCpu::GP_ICO]; createShader.template operator() < NBL_CORE_UNIQUE_STRING_LITERAL_TYPE("geometryCreator/spirv/gc.ico.vertex.spv") > (IShader::E_SHADER_STAGE::ESS_VERTEX, ico.vertex); createShader.template operator() < NBL_CORE_UNIQUE_STRING_LITERAL_TYPE("geometryCreator/spirv/gc.basic.fragment.spv") > (IShader::E_SHADER_STAGE::ESS_FRAGMENT, ico.fragment); // note we reuse fragment from basic! - - for (const auto& it : scratch.shaders) + + for (const auto& it : shaders) { if (!it.vertex || !it.fragment) { - logger->log("Could not create shaders!", ILogger::ELL_ERROR); - return false; + logger->log("Could not create a shader!", ILogger::ELL_ERROR); + return nullptr; } } - return true; - } + // geometries - bool createGeometries() - { - EXPOSE_NABLA_NAMESPACES(); + auto output = make_smart_refctd_ptr(); + output->renderpass = smart_refctd_ptr(renderpass); + output->dsLayout = smart_refctd_ptr(dsLayout); for (uint32_t i = 0; i < geometries.objects.size(); ++i) { const auto& inGeometry = geometries.objects[i]; - auto& [obj, meta] = scratch.objects[i]; - - bool status = true; + auto& [obj, meta] = output->objects[i]; meta.name = inGeometry.meta.name; meta.type = inGeometry.meta.type; @@ -825,7 +339,7 @@ class ResourceBuilder { SBlendParams blend; SRasterizationParams rasterization; - typename Types::graphics_pipeline_t::SCreationParams pipeline; + IGPUGraphicsPipeline::SCreationParams pipeline; } params; { @@ -843,35 +357,27 @@ class ResourceBuilder params.rasterization.faceCullingMode = EFCM_NONE; { - const typename Types::shader_t::SSpecInfo info [] = + const IGPUShader::SSpecInfo sInfo [] = { - {.entryPoint = "VSMain", .shader = scratch.shaders[inGeometry.shadersType].vertex.get() }, - {.entryPoint = "PSMain", .shader = scratch.shaders[inGeometry.shadersType].fragment.get() } + {.entryPoint = "VSMain", .shader = shaders[inGeometry.shadersType].vertex.get() }, + {.entryPoint = "PSMain", .shader = shaders[inGeometry.shadersType].fragment.get() } }; - params.pipeline.layout = scratch.pipelineLayout.get(); - params.pipeline.shaders = info; - params.pipeline.renderpass = scratch.renderpass.get(); + params.pipeline.layout = pipelineLayout.get(); + params.pipeline.shaders = sInfo; + params.pipeline.renderpass = 
renderpass.get(); params.pipeline.cached = { .vertexInput = inGeometry.data.inputParams, .primitiveAssembly = inGeometry.data.assemblyParams, .rasterization = params.rasterization, .blend = params.blend, .subpassIx = 0u }; obj.indexCount = inGeometry.data.indexCount; obj.indexType = inGeometry.data.indexType; - // TODO: cache pipeline & try lookup for existing one first maybe - - // similar issue like with shaders again, in this case gpu contructor allows for extra cache parameters + there is no constructor you can use to fire make_smart_refctd_ptr yourself for cpu - if constexpr (withAssetConverter) - obj.pipeline = ICPUGraphicsPipeline::create(params.pipeline); - else - { - const std::array info = { { params.pipeline } }; - utilities->getLogicalDevice()->createGraphicsPipelines(nullptr, info, &obj.pipeline); - } + const std::array pInfo = { { params.pipeline } }; + device->createGraphicsPipelines(nullptr, pInfo, &obj.pipeline); if (!obj.pipeline) { logger->log("Could not create graphics pipeline for [%s] object!", ILogger::ELL_ERROR, meta.name.data()); - status = false; + return nullptr; } // object buffers @@ -888,71 +394,48 @@ class ResourceBuilder constexpr static auto INDEX_USAGE = bitflag(ibuffer_t::EUF_INDEX_BUFFER_BIT) | ibuffer_t::EUF_VERTEX_BUFFER_BIT | ibuffer_t::EUF_TRANSFER_DST_BIT | ibuffer_t::EUF_INLINE_UPDATE_VIA_CMDBUF; obj.bindings.index.offset = 0u; - if constexpr (withAssetConverter) - { - if (!vBuffer) - return false; - - vBuffer->addUsageFlags(VERTEX_USAGE); - vBuffer->setContentHash(vBuffer->computeContentHash()); - obj.bindings.vertex = { .offset = 0u, .buffer = vBuffer }; - - if (inGeometry.data.indexType != EIT_UNKNOWN) - if (iBuffer) - { - iBuffer->addUsageFlags(INDEX_USAGE); - iBuffer->setContentHash(iBuffer->computeContentHash()); - } - else - return false; + auto vertexBuffer = device->createBuffer(IGPUBuffer::SCreationParams({ .size = vBuffer->getSize(), .usage = VERTEX_USAGE })); + auto indexBuffer = iBuffer ? device->createBuffer(IGPUBuffer::SCreationParams({ .size = iBuffer->getSize(), .usage = INDEX_USAGE })) : nullptr; - obj.bindings.index = { .offset = 0u, .buffer = iBuffer }; - } - else - { - auto vertexBuffer = utilities->getLogicalDevice()->createBuffer(IGPUBuffer::SCreationParams({ .size = vBuffer->getSize(), .usage = VERTEX_USAGE })); - auto indexBuffer = iBuffer ? 
utilities->getLogicalDevice()->createBuffer(IGPUBuffer::SCreationParams({ .size = iBuffer->getSize(), .usage = INDEX_USAGE })) : nullptr; + if (!vertexBuffer) + return false; - if (!vertexBuffer) + if (inGeometry.data.indexType != EIT_UNKNOWN) + if (!indexBuffer) return false; - if (inGeometry.data.indexType != EIT_UNKNOWN) - if (!indexBuffer) - return false; - - const auto mask = utilities->getLogicalDevice()->getPhysicalDevice()->getUpStreamingMemoryTypeBits(); - for (auto it : { vertexBuffer , indexBuffer }) + const auto mask = device->getPhysicalDevice()->getUpStreamingMemoryTypeBits(); + for (auto it : { vertexBuffer , indexBuffer }) + { + if (it) { - if (it) - { - auto reqs = it->getMemoryReqs(); - reqs.memoryTypeBits &= mask; + auto reqs = it->getMemoryReqs(); + reqs.memoryTypeBits &= mask; - utilities->getLogicalDevice()->allocate(reqs, it.get()); - } + device->allocate(reqs, it.get()); } + } - // record transfer uploads - obj.bindings.vertex = { .offset = 0u, .buffer = std::move(vertexBuffer) }; + // record transfer uploads + obj.bindings.vertex = { .offset = 0u, .buffer = std::move(vertexBuffer) }; + { + const SBufferRange range = { .offset = obj.bindings.vertex.offset, .size = obj.bindings.vertex.buffer->getSize(), .buffer = obj.bindings.vertex.buffer }; + if (!cmd->updateBuffer(range, vBuffer->getPointer())) { - const SBufferRange range = { .offset = obj.bindings.vertex.offset, .size = obj.bindings.vertex.buffer->getSize(), .buffer = obj.bindings.vertex.buffer }; - if (!commandBuffer->updateBuffer(range, vBuffer->getPointer())) - { - logger->log("Could not record vertex buffer transfer upload for [%s] object!", ILogger::ELL_ERROR, meta.name.data()); - status = false; - } + logger->log("Could not record vertex buffer transfer upload for [%s] object!", ILogger::ELL_ERROR, meta.name.data()); + return false; } - obj.bindings.index = { .offset = 0u, .buffer = std::move(indexBuffer) }; + } + obj.bindings.index = { .offset = 0u, .buffer = std::move(indexBuffer) }; + { + if (iBuffer) { - if (iBuffer) - { - const SBufferRange range = { .offset = obj.bindings.index.offset, .size = obj.bindings.index.buffer->getSize(), .buffer = obj.bindings.index.buffer }; + const SBufferRange range = { .offset = obj.bindings.index.offset, .size = obj.bindings.index.buffer->getSize(), .buffer = obj.bindings.index.buffer }; - if (!commandBuffer->updateBuffer(range, iBuffer->getPointer())) - { - logger->log("Could not record index buffer transfer upload for [%s] object!", ILogger::ELL_ERROR, meta.name.data()); - status = false; - } + if (!cmd->updateBuffer(range, iBuffer->getPointer())) + { + logger->log("Could not record index buffer transfer upload for [%s] object!", ILogger::ELL_ERROR, meta.name.data()); + return false; } } } @@ -963,208 +446,287 @@ class ResourceBuilder if (!createVIBuffers()) { logger->log("Could not create buffers for [%s] object!", ILogger::ELL_ERROR, meta.name.data()); - status = false; + return nullptr; } + } + } - if (!status) - { - logger->log("[%s] object will not be created!", ILogger::ELL_ERROR, meta.name.data()); + cmd->end(); + + // submit + { + std::array commandBuffers = {}; + { + commandBuffers.front().cmdbuf = cmd.get(); + } - obj.bindings.vertex = {}; - obj.bindings.index = {}; - obj.indexCount = 0u; - obj.indexType = E_INDEX_TYPE::EIT_UNKNOWN; - obj.pipeline = nullptr; + auto completed = device->createSemaphore(0u); + + std::array signals; + { + auto& signal = signals.front(); + signal.value = 1; + signal.stageMask = bitflag(PIPELINE_STAGE_FLAGS::ALL_TRANSFER_BITS); + 
signal.semaphore = completed.get(); + } - continue; + const IQueue::SSubmitInfo infos[] = + { + { + .waitSemaphores = {}, + .commandBuffers = commandBuffers, + .signalSemaphores = signals } + }; + + if (transferCapableQueue->submit(infos) != IQueue::RESULT::SUCCESS) + { + logger->log("Failed to submit transfer upload operations!", ILogger::ELL_ERROR); + return nullptr; } + + const ISemaphore::SWaitInfo info[] = + { { + .semaphore = completed.get(), + .value = 1 + } }; + + device->blockForSemaphores(info); } - return true; + return output; } +}; + +struct ObjectInstance +{ + nbl::asset::SBasicViewParameters viewParameters; + ObjectMeta meta; +}; + +class CScene final : public nbl::core::IReferenceCounted +{ +public: + ObjectInstance object; // optional TODO: MDI, allow for multiple objects on the scene -> read (*) bellow at private class members + + struct + { + static constexpr uint32_t startedValue = 0, finishedValue = 0x45; + nbl::core::smart_refctd_ptr progress; + } semaphore; - bool createViewParametersUboBuffer() + static inline nbl::core::smart_refctd_ptr create(nbl::core::smart_refctd_ptr device, nbl::core::smart_refctd_ptr logger, nbl::video::CThreadSafeQueueAdapter* const transferCapableQueue, const nbl::core::smart_refctd_ptr resources, const uint32_t framebufferW = Traits::DefaultFramebufferW, const uint32_t framebufferH = Traits::DefaultFramebufferH) { EXPOSE_NABLA_NAMESPACES(); - using ibuffer_t = ::nbl::asset::IBuffer; // seems to be ambigous, both asset & core namespaces has IBuffer - constexpr static auto UboUsage = bitflag(ibuffer_t::EUF_UNIFORM_BUFFER_BIT) | ibuffer_t::EUF_TRANSFER_DST_BIT | ibuffer_t::EUF_INLINE_UPDATE_VIA_CMDBUF; + if (!device) + return nullptr; - if constexpr (withAssetConverter) - { - auto uboBuffer = make_smart_refctd_ptr(sizeof(SBasicViewParameters)); - uboBuffer->addUsageFlags(UboUsage); - uboBuffer->setContentHash(uboBuffer->computeContentHash()); - scratch.ubo = { .offset = 0u, .buffer = std::move(uboBuffer) }; - } - else - { - const auto mask = utilities->getLogicalDevice()->getPhysicalDevice()->getUpStreamingMemoryTypeBits(); - auto uboBuffer = utilities->getLogicalDevice()->createBuffer(IGPUBuffer::SCreationParams({ .size = sizeof(SBasicViewParameters), .usage = UboUsage })); + if (!logger) + return nullptr; + + if (!transferCapableQueue) + return nullptr; - if (!uboBuffer) - return false; + if (!resources) + return nullptr; - for (auto it : { uboBuffer }) - { - IDeviceMemoryBacked::SDeviceMemoryRequirements reqs = it->getMemoryReqs(); - reqs.memoryTypeBits &= mask; + // cmd - utilities->getLogicalDevice()->allocate(reqs, it.get()); - } + auto cPool = device->createCommandPool(transferCapableQueue->getFamilyIndex(), IGPUCommandPool::CREATE_FLAGS::RESET_COMMAND_BUFFER_BIT); - scratch.ubo = { .offset = 0u, .buffer = std::move(uboBuffer) }; + if (!cPool) + { + logger->log("Couldn't create command pool!", ILogger::ELL_ERROR); + return nullptr; } - return true; - } + nbl::core::smart_refctd_ptr cmd; - struct GeometriesCpu - { - enum GeometryShader + if (!cPool->createCommandBuffers(IGPUCommandPool::BUFFER_LEVEL::PRIMARY, { &cmd , 1 })) { - GP_BASIC = 0, - GP_CONE, - GP_ICO, + logger->log("Couldn't create command buffer!", ILogger::ELL_ERROR); + return nullptr; + } - GP_COUNT - }; + if (!cmd) + return nullptr; - struct ReferenceObjectCpu - { - ObjectMeta meta; - GeometryShader shadersType; - nbl::asset::CGeometryCreator::return_type data; - }; + // UBO with basic view parameters + + using ibuffer_t = ::nbl::asset::IBuffer; + constexpr static auto 
UboUsage = bitflag(ibuffer_t::EUF_UNIFORM_BUFFER_BIT) | ibuffer_t::EUF_TRANSFER_DST_BIT | ibuffer_t::EUF_INLINE_UPDATE_VIA_CMDBUF; + + const auto mask = device->getPhysicalDevice()->getUpStreamingMemoryTypeBits(); + auto uboBuffer = device->createBuffer(IGPUBuffer::SCreationParams({ .size = sizeof(SBasicViewParameters), .usage = UboUsage })); - GeometriesCpu(const nbl::asset::IGeometryCreator* _gc) - : gc(_gc), - objects - ({ - ReferenceObjectCpu {.meta = {.type = OT_CUBE, .name = "Cube Mesh" }, .shadersType = GP_BASIC, .data = gc->createCubeMesh(nbl::core::vector3df(1.f, 1.f, 1.f)) }, - ReferenceObjectCpu {.meta = {.type = OT_SPHERE, .name = "Sphere Mesh" }, .shadersType = GP_BASIC, .data = gc->createSphereMesh(2, 16, 16) }, - ReferenceObjectCpu {.meta = {.type = OT_CYLINDER, .name = "Cylinder Mesh" }, .shadersType = GP_BASIC, .data = gc->createCylinderMesh(2, 2, 20) }, - ReferenceObjectCpu {.meta = {.type = OT_RECTANGLE, .name = "Rectangle Mesh" }, .shadersType = GP_BASIC, .data = gc->createRectangleMesh(nbl::core::vector2df_SIMD(1.5, 3)) }, - ReferenceObjectCpu {.meta = {.type = OT_DISK, .name = "Disk Mesh" }, .shadersType = GP_BASIC, .data = gc->createDiskMesh(2, 30) }, - ReferenceObjectCpu {.meta = {.type = OT_ARROW, .name = "Arrow Mesh" }, .shadersType = GP_BASIC, .data = gc->createArrowMesh() }, - ReferenceObjectCpu {.meta = {.type = OT_CONE, .name = "Cone Mesh" }, .shadersType = GP_CONE, .data = gc->createConeMesh(2, 3, 10) }, - ReferenceObjectCpu {.meta = {.type = OT_ICOSPHERE, .name = "Icoshpere Mesh" }, .shadersType = GP_ICO, .data = gc->createIcoSphere(1, 3, true) } - }) + if (!uboBuffer) + logger->log("Could not create UBO!", ILogger::ELL_ERROR); + + for (auto it : { uboBuffer }) { - gc = nullptr; // one shot + IDeviceMemoryBacked::SDeviceMemoryRequirements reqs = it->getMemoryReqs(); + reqs.memoryTypeBits &= mask; + + device->allocate(reqs, it.get()); } - private: - const nbl::asset::IGeometryCreator* gc; + nbl::asset::SBufferBinding ubo = { .offset = 0u, .buffer = std::move(uboBuffer) }; - public: - const std::array objects; - }; + // descriptor set for the resource + + const IGPUDescriptorSetLayout* const layouts[] = { resources->dsLayout.get() }; + const uint32_t setCounts[] = { 1u }; - using resources_bundle_base_t = ResourcesBundleBase; + // note descriptor set has back smart pointer to its pool, so we dont need to keep it explicitly + auto dPool = device->createDescriptorPoolForDSLayouts(IDescriptorPool::E_CREATE_FLAGS::ECF_NONE, layouts, setCounts); - struct ResourcesBundleScratch : public resources_bundle_base_t - { - using Types = resources_bundle_base_t::Types; + if (!dPool) + { + logger->log("Could not create Descriptor Pool!", ILogger::ELL_ERROR); + return nullptr; + } - ResourcesBundleScratch() - : resources_bundle_base_t() {} + nbl::core::smart_refctd_ptr ds; + dPool->createDescriptorSets(layouts, &ds); - struct Shaders + if (!ds) { - nbl::core::smart_refctd_ptr vertex = nullptr, fragment = nullptr; - }; + logger->log("Could not create Descriptor Set!", ILogger::ELL_ERROR); + return nullptr; + } - nbl::core::smart_refctd_ptr descriptorSetLayout; - nbl::core::smart_refctd_ptr pipelineLayout; - std::array shaders; - }; + // write the descriptor set + { + // descriptor write ubo + IGPUDescriptorSet::SWriteDescriptorSet write; + write.dstSet = ds.get(); + write.binding = 0; + write.arrayElement = 0u; + write.count = 1u; - // TODO: we could make those params templated with default values like below - static constexpr auto FramebufferW = 1280u, FramebufferH = 720u; - 
static constexpr auto ColorFboAttachmentFormat = nbl::asset::EF_R8G8B8A8_SRGB, DepthFboAttachmentFormat = nbl::asset::EF_D16_UNORM; - static constexpr auto Samples = nbl::video::IGPUImage::ESCF_1_BIT; + IGPUDescriptorSet::SDescriptorInfo info; + { + info.desc = smart_refctd_ptr(ubo.buffer); + info.info.buffer.offset = ubo.offset; + info.info.buffer.size = ubo.buffer->getSize(); + } - ResourcesBundleScratch scratch; + write.info = &info; - nbl::video::IUtilities* const utilities; - nbl::video::IGPUCommandBuffer* const commandBuffer; - nbl::system::ILogger* const logger; - GeometriesCpu geometries; -}; + if (!device->updateDescriptorSets(1u, &write, 0u, nullptr)) + { + logger->log("Could not write descriptor set!", ILogger::ELL_ERROR); + return nullptr; + } + } -#undef TYPES_IMPL_BOILERPLATE + // color & depth attachments + + auto createImageView = [&](smart_refctd_ptr&outView) -> smart_refctd_ptr + { + constexpr bool IS_DEPTH = isDepthOrStencilFormat(); + constexpr auto USAGE = [](const bool isDepth) + { + bitflag usage = IGPUImage::EUF_RENDER_ATTACHMENT_BIT; -struct ObjectDrawHookCpu -{ - nbl::core::matrix3x4SIMD model; - nbl::asset::SBasicViewParameters viewParameters; - ObjectMeta meta; -}; + if (!isDepth) + usage |= IGPUImage::EUF_SAMPLED_BIT; + + return usage; + }(IS_DEPTH); + constexpr auto ASPECT = IS_DEPTH ? IImage::E_ASPECT_FLAGS::EAF_DEPTH_BIT : IImage::E_ASPECT_FLAGS::EAF_COLOR_BIT; + constexpr std::string_view DEBUG_NAME = IS_DEPTH ? "GC Scene Depth Attachment Image" : "GC Scene Color Attachment Image"; + { + smart_refctd_ptr image; + { + auto params = IGPUImage::SCreationParams( + { + .type = IGPUImage::ET_2D, + .samples = Traits::Samples, + .format = format, + .extent = { framebufferW, framebufferH, 1u }, + .mipLevels = 1u, + .arrayLayers = 1u, + .usage = USAGE + }); + + image = device->createImage(std::move(params)); + } -/* - Rendering to offline framebuffer which we don't present, color - scene attachment texture we use for second UI renderpass - sampling it & rendering into desired GUI area. + if (!image) + { + logger->log("Could not create image!", ILogger::ELL_ERROR); + } - The scene can be created from simple geometry - using our Geomtry Creator class. 
-*/ + image->setObjectDebugName(DEBUG_NAME.data()); -class CScene final : public nbl::core::IReferenceCounted -{ -public: - ObjectDrawHookCpu object; // TODO: this could be a vector (to not complicate the example I leave it single object), we would need a better system for drawing then to make only 1 max 2 indirect draw calls (indexed and not indexed objects) + if (!device->allocate(image->getMemoryReqs(), image.get()).isValid()) + { + logger->log("Could not allocate memory for an image!", ILogger::ELL_ERROR); + return nullptr; + } - struct - { - const uint32_t startedValue = 0, finishedValue = 0x45; - nbl::core::smart_refctd_ptr progress; - } semaphore; + auto params = IGPUImageView::SCreationParams + ({ + .flags = IGPUImageView::ECF_NONE, + .subUsages = USAGE, + .image = std::move(image), + .viewType = IGPUImageView::ET_2D, + .format = format, + .subresourceRange = {.aspectMask = ASPECT, .baseMipLevel = 0u, .levelCount = 1u, .baseArrayLayer = 0u, .layerCount = 1u } + }); - struct CreateResourcesDirectlyWithDevice { using Builder = ResourceBuilder; }; - struct CreateResourcesWithAssetConverter { using Builder = ResourceBuilder; }; + outView = device->createImageView(std::move(params)); - ~CScene() {} + if (!outView) + { + logger->log("Could not create image view!", ILogger::ELL_ERROR); + return nullptr; + } - static inline nbl::core::smart_refctd_ptr createCommandBuffer(nbl::video::ILogicalDevice* const device, nbl::system::ILogger* const logger, const uint32_t familyIx) - { - EXPOSE_NABLA_NAMESPACES(); - auto pool = device->createCommandPool(familyIx, IGPUCommandPool::CREATE_FLAGS::RESET_COMMAND_BUFFER_BIT); + return smart_refctd_ptr(outView); + } + }; + + nbl::core::smart_refctd_ptr color, depth; + const bool allocated = createImageView.template operator() < Traits::ColorFboAttachmentFormat > (color) && createImageView.template operator() < Traits::DepthFboAttachmentFormat > (depth); - if (!pool) + if (!allocated) { - logger->log("Couldn't create Command Pool!", ILogger::ELL_ERROR); + logger->log("Could not allocate frame buffer's attachments!", ILogger::ELL_ERROR); return nullptr; } - nbl::core::smart_refctd_ptr cmd; + //! frame buffer + + const auto extent = color->getCreationParameters().image->getCreationParameters().extent; + + IGPUFramebuffer::SCreationParams params = + { + { + .renderpass = smart_refctd_ptr((IGPURenderpass*)resources->renderpass.get()), // NOTE: those creation params are to be corrected & this should take immutable renderpass (smart pointer OK but take const for type) + .depthStencilAttachments = &depth.get(), + .colorAttachments = &color.get(), + .width = extent.width, + .height = extent.height, + .layers = 1u + } + }; + + auto frameBuffer = device->createFramebuffer(std::move(params)); - if (!pool->createCommandBuffers(IGPUCommandPool::BUFFER_LEVEL::PRIMARY, { &cmd , 1 })) + if (!frameBuffer) { - logger->log("Couldn't create Command Buffer!", ILogger::ELL_ERROR); + logger->log("Could not create frame buffer!", ILogger::ELL_ERROR); return nullptr; } - return cmd; + auto output = new CScene(smart_refctd_ptr(device), smart_refctd_ptr(logger), smart_refctd_ptr(cmd), smart_refctd_ptr(frameBuffer), smart_refctd_ptr(ds), ubo, smart_refctd_ptr(color), smart_refctd_ptr(depth), smart_refctd_ptr(resources)); + return smart_refctd_ptr(output); } - template - static auto create(Args&&... 
args) -> decltype(auto) - { - EXPOSE_NABLA_NAMESPACES(); - - /* - user should call the constructor's args without last argument explicitly, this is a trick to make constructor templated, - eg.create(smart_refctd_ptr(device), smart_refctd_ptr(logger), queuePointer, geometryPointer) - */ - - auto* scene = new CScene(std::forward(args)..., CreateWith {}); - smart_refctd_ptr smart(scene, dont_grab); - - return smart; - } + ~CScene() {} inline void begin() { @@ -1174,12 +736,13 @@ class CScene final : public nbl::core::IReferenceCounted m_commandBuffer->begin(IGPUCommandBuffer::USAGE::ONE_TIME_SUBMIT_BIT); m_commandBuffer->beginDebugMarker("UISampleApp Offline Scene Frame"); - semaphore.progress = m_utilities->getLogicalDevice()->createSemaphore(semaphore.startedValue); + semaphore.progress = m_device->createSemaphore(semaphore.startedValue); } - inline void record() + inline bool record() { EXPOSE_NABLA_NAMESPACES(); + bool valid = true; const struct { @@ -1196,12 +759,13 @@ class CScene final : public nbl::core::IReferenceCounted viewport.height = fbo.height; } - m_commandBuffer->setViewport(0u, 1u, &viewport); + valid &= m_commandBuffer->setViewport(0u, 1u, &viewport); VkRect2D scissor = {}; scissor.offset = { 0, 0 }; scissor.extent = { fbo.width, fbo.height }; - m_commandBuffer->setScissor(0u, 1u, &scissor); + + valid &= m_commandBuffer->setScissor(0u, 1u, &scissor); const VkRect2D renderArea = { @@ -1212,31 +776,16 @@ class CScene final : public nbl::core::IReferenceCounted const IGPUCommandBuffer::SRenderpassBeginInfo info = { .framebuffer = m_frameBuffer.get(), - .colorClearValues = &clear.color, - .depthStencilClearValues = &clear.depth, + .colorClearValues = &Traits::clearColor, + .depthStencilClearValues = &Traits::clearDepth, .renderArea = renderArea }; - m_commandBuffer->beginRenderPass(info, IGPUCommandBuffer::SUBPASS_CONTENTS::INLINE); - - const auto& [hook, meta] = resources.objects[object.meta.type]; - auto* rawPipeline = hook.pipeline.get(); - - SBufferBinding vertex = hook.bindings.vertex, index = hook.bindings.index; - - m_commandBuffer->bindGraphicsPipeline(rawPipeline); - m_commandBuffer->bindDescriptorSets(EPBP_GRAPHICS, rawPipeline->getLayout(), 1, 1, &resources.descriptorSet.get()); - m_commandBuffer->bindVertexBuffers(0, 1, &vertex); - - if (index.buffer && hook.indexType != EIT_UNKNOWN) - { - m_commandBuffer->bindIndexBuffer(index, hook.indexType); - m_commandBuffer->drawIndexed(hook.indexCount, 1, 0, 0, 0); - } - else - m_commandBuffer->draw(hook.indexCount, 1, 0, 0); + valid &= m_commandBuffer->beginRenderPass(info, IGPUCommandBuffer::SUBPASS_CONTENTS::INLINE); + valid &= draw(m_commandBuffer.get()); + valid &= m_commandBuffer->endRenderPass(); - m_commandBuffer->endRenderPass(); + return valid; } inline void end() @@ -1244,10 +793,13 @@ class CScene final : public nbl::core::IReferenceCounted m_commandBuffer->end(); } - inline bool submit() + inline bool submit(nbl::video::CThreadSafeQueueAdapter* queue) { EXPOSE_NABLA_NAMESPACES(); + if (!queue) + return false; + const IQueue::SSubmitInfo::SCommandBufferInfo buffers[] = { { .cmdbuf = m_commandBuffer.get() } @@ -1267,78 +819,68 @@ class CScene final : public nbl::core::IReferenceCounted return queue->submit(infos) == IQueue::RESULT::SUCCESS; } - // note: must be updated outside render pass - inline void update() + inline bool update(nbl::video::IGPUCommandBuffer* cmdbuf = nullptr) { EXPOSE_NABLA_NAMESPACES(); SBufferRange range; - range.buffer = smart_refctd_ptr(resources.ubo.buffer); - range.size = 
resources.ubo.buffer->getSize(); + range.buffer = smart_refctd_ptr(m_ubo.buffer); + range.size = m_ubo.buffer->getSize(); - m_commandBuffer->updateBuffer(range, &object.viewParameters); - } + if(cmdbuf) + return cmdbuf->updateBuffer(range, &object.viewParameters); - inline decltype(auto) getResources() - { - return (resources); // note: do not remove "()" - it makes the return type lvalue reference instead of copy + return m_commandBuffer->updateBuffer(range, &object.viewParameters); } + inline auto getColorAttachment() { return nbl::core::smart_refctd_ptr(m_color); } + private: - template // TODO: enforce constraints, only those 2 above are valid - CScene(nbl::core::smart_refctd_ptr _utilities, nbl::core::smart_refctd_ptr _logger, nbl::video::CThreadSafeQueueAdapter* _graphicsQueue, const nbl::asset::IGeometryCreator* _geometryCreator, CreateWith createWith = {}) - : m_utilities(nbl::core::smart_refctd_ptr(_utilities)), m_logger(nbl::core::smart_refctd_ptr(_logger)), queue(_graphicsQueue) + inline bool draw(nbl::video::IGPUCommandBuffer* cmdbuf) { EXPOSE_NABLA_NAMESPACES(); - using Builder = typename CreateWith::Builder; + bool valid = true; - m_commandBuffer = createCommandBuffer(m_utilities->getLogicalDevice(), m_utilities->getLogger(), queue->getFamilyIndex()); - Builder builder(m_utilities.get(), m_commandBuffer.get(), m_logger.get(), _geometryCreator); + const auto& [hook, meta] = m_resources->objects[object.meta.type]; + const auto* rawPipeline = hook.pipeline.get(); - // gpu resources - if (builder.build()) + SBufferBinding vertex = hook.bindings.vertex, index = hook.bindings.index; + + valid &= cmdbuf->bindGraphicsPipeline(rawPipeline); + valid &= cmdbuf->bindDescriptorSets(EPBP_GRAPHICS, rawPipeline->getLayout(), 1, 1, &m_ds.get()); + valid &= cmdbuf->bindVertexBuffers(0, 1, &vertex); + + if (index.buffer && hook.indexType != EIT_UNKNOWN) { - if (!builder.finalize(resources, queue)) - m_logger->log("Could not finalize resource objects to gpu objects!", ILogger::ELL_ERROR); + valid &= cmdbuf->bindIndexBuffer(index, hook.indexType); + valid &= cmdbuf->drawIndexed(hook.indexCount, 1, 0, 0, 0); } else - m_logger->log("Could not build resource objects!", ILogger::ELL_ERROR); - - // frame buffer - { - const auto extent = resources.attachments.color->getCreationParameters().image->getCreationParameters().extent; - - IGPUFramebuffer::SCreationParams params = - { - { - .renderpass = smart_refctd_ptr(resources.renderpass), - .depthStencilAttachments = &resources.attachments.depth.get(), - .colorAttachments = &resources.attachments.color.get(), - .width = extent.width, - .height = extent.height, - .layers = 1u - } - }; - - m_frameBuffer = m_utilities->getLogicalDevice()->createFramebuffer(std::move(params)); + valid &= cmdbuf->draw(hook.indexCount, 1, 0, 0); - if (!m_frameBuffer) - { - m_logger->log("Could not create frame buffer!", ILogger::ELL_ERROR); - return; - } - } + return valid; } - nbl::core::smart_refctd_ptr m_utilities; + CScene(nbl::core::smart_refctd_ptr device, nbl::core::smart_refctd_ptr logger, nbl::core::smart_refctd_ptr commandBuffer, nbl::core::smart_refctd_ptr frameBuffer, nbl::core::smart_refctd_ptr ds, nbl::asset::SBufferBinding ubo, nbl::core::smart_refctd_ptr color, nbl::core::smart_refctd_ptr depth, const nbl::core::smart_refctd_ptr resources) + : m_device(nbl::core::smart_refctd_ptr(device)), m_logger(nbl::core::smart_refctd_ptr(logger)), m_commandBuffer(nbl::core::smart_refctd_ptr(commandBuffer)), m_frameBuffer(nbl::core::smart_refctd_ptr(frameBuffer)), 
m_ds(nbl::core::smart_refctd_ptr(ds)), m_ubo(ubo), m_color(nbl::core::smart_refctd_ptr(color)), m_depth(nbl::core::smart_refctd_ptr(depth)), m_resources(nbl::core::smart_refctd_ptr(resources)) {} + + nbl::core::smart_refctd_ptr m_device; nbl::core::smart_refctd_ptr m_logger; - nbl::video::CThreadSafeQueueAdapter* queue; - nbl::core::smart_refctd_ptr m_commandBuffer; + //! (*) I still make an assumption we have only one object on the scene, + //! I'm not going to make it ext-like and go with MDI + streaming buffer now, + //! but I want it to be easy to spam multiple instances of this class to have many + //! frame buffers we can render too given resource geometry buffers with Traits constraints + //! + //! optional TODO: make it ImGUI-ext-like -> renderpass as creation input, ST buffer, MDI, ds outside - nbl::core::smart_refctd_ptr m_frameBuffer; + nbl::core::smart_refctd_ptr m_commandBuffer = nullptr; + nbl::core::smart_refctd_ptr m_frameBuffer = nullptr; + nbl::core::smart_refctd_ptr m_ds = nullptr; + nbl::asset::SBufferBinding m_ubo = {}; + nbl::core::smart_refctd_ptr m_color = nullptr, m_depth = nullptr; - ResourcesBundle resources; + const nbl::core::smart_refctd_ptr m_resources; }; } // nbl::scene::geometrycreator diff --git a/common/include/camera/CCubeProjection.hpp b/common/include/camera/CCubeProjection.hpp new file mode 100644 index 000000000..d47c5c6b8 --- /dev/null +++ b/common/include/camera/CCubeProjection.hpp @@ -0,0 +1,94 @@ +#ifndef _NBL_CCUBE_PROJECTION_HPP_ +#define _NBL_CCUBE_PROJECTION_HPP_ + +#include "IRange.hpp" +#include "IPerspectiveProjection.hpp" + +namespace nbl::hlsl +{ + +/** +* @brief A projection where each cube face is a perspective quad we project onto. +* +* Represents a cube projection given direction vector where each face of +* the cube is treated as a quad. The projection onto the cube is done using +* these quads and each face has its own unique pre-transform and +* view-port linear matrix. +*/ +class CCubeProjection final : public IPerspectiveProjection, public IProjection +{ +public: + //! Represents six face identifiers of a cube. + enum CubeFaces : uint8_t + { + //! Cube face in the +X base direction + PositiveX = 0, + + //! Cube face in the -X base direction + NegativeX, + + //! Cube face in the +Y base direction + PositiveY, + + //! Cube face in the -Y base direction + NegativeY, + + //! Cube face in the +Z base direction + PositiveZ, + + //! Cube face in the -Z base direction + NegativeZ, + + CubeFacesCount + }; + + inline static core::smart_refctd_ptr create(core::smart_refctd_ptr&& camera) + { + if (!camera) + return nullptr; + + return core::smart_refctd_ptr(new CCubeProjection(core::smart_refctd_ptr(camera)), core::dont_grab); + } + + virtual std::span getLinearProjections() const override + { + return { reinterpret_cast(m_quads.data()), m_quads.size() }; + } + + void transformCube() + { + // TODO: update m_quads + } + + virtual ProjectionType getProjectionType() const override { return ProjectionType::Cube; } + + virtual void project(const projection_vector_t& vecToProjectionSpace, projection_vector_t& output) const override + { + auto direction = normalize(vecToProjectionSpace); + + // TODO: project onto cube using quads representing faces + } + + virtual bool unproject(const projection_vector_t& vecFromProjectionSpace, projection_vector_t& output) const override + { + // TODO: return back direction vector? 
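The project()/unproject() TODOs just above will eventually need to decide which of the six quads a direction vector lands on; the usual approach is dominant-axis face selection. A minimal sketch under that assumption (the helper name is hypothetical and it ignores the per-face pre-transform and viewport matrices mentioned in the class comment):

    CubeFaces pickFace(const float64_t3& dir)
    {
        const float64_t3 a = { std::abs(dir.x), std::abs(dir.y), std::abs(dir.z) };
        if (a.x >= a.y && a.x >= a.z)
            return dir.x >= 0.0 ? PositiveX : NegativeX;
        if (a.y >= a.z)
            return dir.y >= 0.0 ? PositiveY : NegativeY;
        return dir.z >= 0.0 ? PositiveZ : NegativeZ;
    }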
+ } + + template + requires (FaceIx != CubeFacesCount) + inline const CProjection& getProjectionQuad() + { + return m_quads[FaceIx]; + } + +private: + CCubeProjection(core::smart_refctd_ptr&& camera) + : IPerspectiveProjection(core::smart_refctd_ptr(camera)) {} + virtual ~CCubeProjection() = default; + + std::array m_quads; +}; + +} // nbl::hlsl namespace + +#endif // _NBL_CCUBE_PROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/CFPSCamera.hpp b/common/include/camera/CFPSCamera.hpp new file mode 100644 index 000000000..cb00fe442 --- /dev/null +++ b/common/include/camera/CFPSCamera.hpp @@ -0,0 +1,139 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_FPS_CAMERA_HPP_ +#define _C_FPS_CAMERA_HPP_ + +#include "ICamera.hpp" + +namespace nbl::hlsl // TODO: DIFFERENT NAMESPACE +{ + +// FPS Camera +class CFPSCamera final : public ICamera +{ +public: + using base_t = ICamera; + + CFPSCamera(const float64_t3& position, const glm::quat& orientation = glm::quat(1.0f, 0.0f, 0.0f, 0.0f)) + : base_t(), m_gimbal({ .position = position, .orientation = orientation }) {} + ~CFPSCamera() = default; + + const base_t::keyboard_to_virtual_events_t getKeyboardMappingPreset() const override { return m_keyboard_to_virtual_events_preset; } + const base_t::mouse_to_virtual_events_t getMouseMappingPreset() const override { return m_mouse_to_virtual_events_preset; } + const base_t::imguizmo_to_virtual_events_t getImguizmoMappingPreset() const override { return m_imguizmo_to_virtual_events_preset; } + + const typename base_t::CGimbal& getGimbal() override + { + return m_gimbal; + } + + virtual bool manipulate(std::span virtualEvents, base_t::ManipulationMode mode) override + { + constexpr double MaxVerticalAngle = glm::radians(88.0f), MinVerticalAngle = -MaxVerticalAngle; + + if (!virtualEvents.size()) + return false; + + const auto& gForward = m_gimbal.getZAxis(); + const float gPitch = atan2(glm::length(glm::vec2(gForward.x, gForward.z)), gForward.y) - glm::half_pi(), gYaw = atan2(gForward.x, gForward.z); + + glm::quat newOrientation; float64_t3 newPosition; + + // TODO: I make assumption what world base is now (at least temporary), I need to think of this but maybe each ITransformObject should know what its world is + // in ideal scenario we would define this crazy enum with all possible standard bases + const auto impulse = mode == base_t::Local ? 
m_gimbal.accumulate(virtualEvents) + : m_gimbal.accumulate(virtualEvents, { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }); + + const auto newPitch = std::clamp(gPitch + impulse.dVirtualRotation.x * m_rotationSpeedScale, MinVerticalAngle, MaxVerticalAngle), newYaw = gYaw + impulse.dVirtualRotation.y * m_rotationSpeedScale; + newOrientation = glm::quat(glm::vec3(newPitch, newYaw, 0.0f)); newPosition = m_gimbal.getPosition() + impulse.dVirtualTranslate * m_moveSpeedScale; + + bool manipulated = true; + + m_gimbal.begin(); + { + m_gimbal.setOrientation(newOrientation); + m_gimbal.setPosition(newPosition); + } + m_gimbal.end(); + + manipulated &= bool(m_gimbal.getManipulationCounter()); + + if(manipulated) + m_gimbal.updateView(); + + return manipulated; + } + + virtual const uint32_t getAllowedVirtualEvents(base_t::ManipulationMode mode) override + { + switch (mode) + { + case base_t::Local: return AllowedLocalVirtualEvents; + case base_t::World: return AllowedWorldVirtualEvents; + default: return CVirtualGimbalEvent::None; + } + } + + virtual const std::string_view getIdentifier() override + { + return "FPS Camera"; + } + +private: + typename base_t::CGimbal m_gimbal; + + static inline constexpr auto AllowedLocalVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::TiltUp | CVirtualGimbalEvent::TiltDown | CVirtualGimbalEvent::PanRight | CVirtualGimbalEvent::PanLeft; + static inline constexpr auto AllowedWorldVirtualEvents = AllowedLocalVirtualEvents; + + static inline const auto m_keyboard_to_virtual_events_preset = []() + { + typename base_t::keyboard_to_virtual_events_t preset; + + preset[ui::E_KEY_CODE::EKC_W] = CVirtualGimbalEvent::MoveForward; + preset[ui::E_KEY_CODE::EKC_S] = CVirtualGimbalEvent::MoveBackward; + preset[ui::E_KEY_CODE::EKC_A] = CVirtualGimbalEvent::MoveLeft; + preset[ui::E_KEY_CODE::EKC_D] = CVirtualGimbalEvent::MoveRight; + preset[ui::E_KEY_CODE::EKC_I] = CVirtualGimbalEvent::TiltDown; + preset[ui::E_KEY_CODE::EKC_K] = CVirtualGimbalEvent::TiltUp; + preset[ui::E_KEY_CODE::EKC_J] = CVirtualGimbalEvent::PanLeft; + preset[ui::E_KEY_CODE::EKC_L] = CVirtualGimbalEvent::PanRight; + + return preset; + }(); + + static inline const auto m_mouse_to_virtual_events_preset = []() + { + typename base_t::mouse_to_virtual_events_t preset; + + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_X] = CVirtualGimbalEvent::PanRight; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_X] = CVirtualGimbalEvent::PanLeft; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_Y] = CVirtualGimbalEvent::TiltUp; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y] = CVirtualGimbalEvent::TiltDown; + + return preset; + }(); + + static inline const auto m_imguizmo_to_virtual_events_preset = []() + { + typename base_t::imguizmo_to_virtual_events_t preset; + + preset[CVirtualGimbalEvent::MoveForward] = CVirtualGimbalEvent::MoveForward; + preset[CVirtualGimbalEvent::MoveBackward] = CVirtualGimbalEvent::MoveBackward; + preset[CVirtualGimbalEvent::MoveLeft] = CVirtualGimbalEvent::MoveLeft; + preset[CVirtualGimbalEvent::MoveRight] = CVirtualGimbalEvent::MoveRight; + preset[CVirtualGimbalEvent::MoveUp] = CVirtualGimbalEvent::MoveUp; + preset[CVirtualGimbalEvent::MoveDown] = CVirtualGimbalEvent::MoveDown; + preset[CVirtualGimbalEvent::TiltDown] = CVirtualGimbalEvent::TiltDown; + preset[CVirtualGimbalEvent::TiltUp] = CVirtualGimbalEvent::TiltUp; + preset[CVirtualGimbalEvent::PanLeft] = CVirtualGimbalEvent::PanLeft; + preset[CVirtualGimbalEvent::PanRight] = 
CVirtualGimbalEvent::PanRight; + + return preset; + }(); +}; + +} + +#endif // _C_FPS_CAMERA_HPP_ \ No newline at end of file diff --git a/common/include/camera/CFreeLockCamera.hpp b/common/include/camera/CFreeLockCamera.hpp new file mode 100644 index 000000000..0788bb048 --- /dev/null +++ b/common/include/camera/CFreeLockCamera.hpp @@ -0,0 +1,143 @@ +// Copyright (C) 2018-2024 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _C_FREE_CAMERA_HPP_ +#define _C_FREE_CAMERA_HPP_ + +#include "ICamera.hpp" +#include + +namespace nbl::hlsl // TODO: DIFFERENT NAMESPACE +{ + +// Free Lock Camera +class CFreeCamera final : public ICamera +{ +public: + using base_t = ICamera; + + CFreeCamera(const float64_t3& position, const glm::quat& orientation = glm::quat(1.0f, 0.0f, 0.0f, 0.0f)) + : base_t(), m_gimbal({ .position = position, .orientation = orientation }) {} + ~CFreeCamera() = default; + + const base_t::keyboard_to_virtual_events_t getKeyboardMappingPreset() const override { return m_keyboard_to_virtual_events_preset; } + const base_t::mouse_to_virtual_events_t getMouseMappingPreset() const override { return m_mouse_to_virtual_events_preset; } + const base_t::imguizmo_to_virtual_events_t getImguizmoMappingPreset() const override { return m_imguizmo_to_virtual_events_preset; } + + const typename base_t::CGimbal& getGimbal() override + { + return m_gimbal; + } + + virtual bool manipulate(std::span virtualEvents, base_t::ManipulationMode mode) override + { + if (virtualEvents.empty()) + return false; + + const auto gOrientation = m_gimbal.getOrientation(); + + // TODO: I make assumption what world base is now (at least temporary), I need to think of this but maybe each ITransformObject should know what its world is + // in ideal scenario we would define this crazy enum with all possible standard bases + const auto impulse = mode == base_t::Local ? 
m_gimbal.accumulate(virtualEvents) + : m_gimbal.accumulate(virtualEvents, { 1, 0, 0 }, { 0, 1, 0 }, { 0, 0, 1 }); + + const auto pitchQuat = glm::angleAxis(impulse.dVirtualRotation.x * m_rotationSpeedScale, glm::vec3(1.0f, 0.0f, 0.0f)); + const auto yawQuat = glm::angleAxis(impulse.dVirtualRotation.y * m_rotationSpeedScale, glm::vec3(0.0f, 1.0f, 0.0f)); + const auto rollQuat = glm::angleAxis(impulse.dVirtualRotation.z * m_rotationSpeedScale, glm::vec3(0.0f, 0.0f, 1.0f)); + + const auto newOrientation = glm::normalize(gOrientation * yawQuat * pitchQuat * rollQuat); + const float64_t3 newPosition = m_gimbal.getPosition() + impulse.dVirtualTranslate * m_moveSpeedScale; + + bool manipulated = true; + + m_gimbal.begin(); + { + m_gimbal.setOrientation(newOrientation); + m_gimbal.setPosition(newPosition); + } + m_gimbal.end(); + + manipulated &= bool(m_gimbal.getManipulationCounter()); + + if (manipulated) + m_gimbal.updateView(); + + return manipulated; + } + + virtual const uint32_t getAllowedVirtualEvents(base_t::ManipulationMode mode) override + { + switch (mode) + { + case base_t::Local: return AllowedLocalVirtualEvents; + case base_t::World: return AllowedWorldVirtualEvents; + default: return CVirtualGimbalEvent::None; + } + } + + virtual const std::string_view getIdentifier() override + { + return "Free-Look Camera"; + } + +private: + typename base_t::CGimbal m_gimbal; + + static inline constexpr auto AllowedLocalVirtualEvents = CVirtualGimbalEvent::Translate | CVirtualGimbalEvent::Rotate; + static inline constexpr auto AllowedWorldVirtualEvents = AllowedLocalVirtualEvents; + + static inline const auto m_keyboard_to_virtual_events_preset = []() + { + typename base_t::keyboard_to_virtual_events_t preset; + + preset[ui::E_KEY_CODE::EKC_W] = CVirtualGimbalEvent::MoveForward; + preset[ui::E_KEY_CODE::EKC_S] = CVirtualGimbalEvent::MoveBackward; + preset[ui::E_KEY_CODE::EKC_A] = CVirtualGimbalEvent::MoveLeft; + preset[ui::E_KEY_CODE::EKC_D] = CVirtualGimbalEvent::MoveRight; + preset[ui::E_KEY_CODE::EKC_I] = CVirtualGimbalEvent::TiltDown; + preset[ui::E_KEY_CODE::EKC_K] = CVirtualGimbalEvent::TiltUp; + preset[ui::E_KEY_CODE::EKC_J] = CVirtualGimbalEvent::PanLeft; + preset[ui::E_KEY_CODE::EKC_L] = CVirtualGimbalEvent::PanRight; + preset[ui::E_KEY_CODE::EKC_Q] = CVirtualGimbalEvent::RollLeft; + preset[ui::E_KEY_CODE::EKC_E] = CVirtualGimbalEvent::RollRight; + + return preset; + }(); + + static inline const auto m_mouse_to_virtual_events_preset = []() + { + typename base_t::mouse_to_virtual_events_t preset; + + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_X] = CVirtualGimbalEvent::PanRight; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_X] = CVirtualGimbalEvent::PanLeft; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_Y] = CVirtualGimbalEvent::TiltUp; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y] = CVirtualGimbalEvent::TiltDown; + + return preset; + }(); + + static inline const auto m_imguizmo_to_virtual_events_preset = []() + { + typename base_t::imguizmo_to_virtual_events_t preset; + + preset[CVirtualGimbalEvent::MoveForward] = CVirtualGimbalEvent::MoveForward; + preset[CVirtualGimbalEvent::MoveBackward] = CVirtualGimbalEvent::MoveBackward; + preset[CVirtualGimbalEvent::MoveLeft] = CVirtualGimbalEvent::MoveLeft; + preset[CVirtualGimbalEvent::MoveRight] = CVirtualGimbalEvent::MoveRight; + preset[CVirtualGimbalEvent::MoveUp] = CVirtualGimbalEvent::MoveUp; + preset[CVirtualGimbalEvent::MoveDown] = CVirtualGimbalEvent::MoveDown; + 
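One detail worth calling out in CFreeCamera::manipulate() above: composing the increments on the right of the current orientation (gOrientation * yawQuat * pitchQuat * rollQuat) applies them in the camera's local frame, which is what lets this camera accumulate roll freely, whereas CFPSCamera rebuilds absolute pitch and yaw every frame precisely so it can clamp pitch and never drift into roll. A tiny illustration of the convention (plain glm, not code from this PR):

    // dq applied in the body/local frame of q (free-look style accumulation)
    glm::quat localStep(const glm::quat& q, const glm::quat& dq) { return glm::normalize(q * dq); }
    // dq applied about the fixed world axes instead
    glm::quat worldStep(const glm::quat& q, const glm::quat& dq) { return glm::normalize(dq * q); }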
preset[CVirtualGimbalEvent::TiltDown] = CVirtualGimbalEvent::TiltDown; + preset[CVirtualGimbalEvent::TiltUp] = CVirtualGimbalEvent::TiltUp; + preset[CVirtualGimbalEvent::PanLeft] = CVirtualGimbalEvent::PanLeft; + preset[CVirtualGimbalEvent::PanRight] = CVirtualGimbalEvent::PanRight; + preset[CVirtualGimbalEvent::RollLeft] = CVirtualGimbalEvent::RollLeft; + preset[CVirtualGimbalEvent::RollRight] = CVirtualGimbalEvent::RollRight; + + return preset; + }(); +}; + +} + +#endif // _C_FREE_CAMERA_HPP_ \ No newline at end of file diff --git a/common/include/camera/CGeneralPurposeGimbal.hpp b/common/include/camera/CGeneralPurposeGimbal.hpp new file mode 100644 index 000000000..7e1c07096 --- /dev/null +++ b/common/include/camera/CGeneralPurposeGimbal.hpp @@ -0,0 +1,20 @@ +#ifndef _NBL_CGENERAL_PURPOSE_GIMBAL_HPP_ +#define _NBL_CGENERAL_PURPOSE_GIMBAL_HPP_ + +#include "IGimbal.hpp" + +// TODO: DIFFERENT NAMESPACE +namespace nbl::hlsl +{ + template + class CGeneralPurposeGimbal : public IGimbal + { + public: + using base_t = IGimbal; + + CGeneralPurposeGimbal(typename base_t::SCreationParameters&& parameters) : base_t(std::move(parameters)) {} + ~CGeneralPurposeGimbal() = default; + }; +} + +#endif // _NBL_IGIMBAL_HPP_ \ No newline at end of file diff --git a/common/include/camera/CLinearProjection.hpp b/common/include/camera/CLinearProjection.hpp new file mode 100644 index 000000000..791e9cb1b --- /dev/null +++ b/common/include/camera/CLinearProjection.hpp @@ -0,0 +1,45 @@ +#ifndef _NBL_C_LINEAR_PROJECTION_HPP_ +#define _NBL_C_LINEAR_PROJECTION_HPP_ + +#include "ILinearProjection.hpp" +#include "IRange.hpp" + +namespace nbl::hlsl +{ + template ProjectionsRange> + class CLinearProjection : public ILinearProjection + { + public: + using ILinearProjection::ILinearProjection; + + CLinearProjection() = default; + + inline static core::smart_refctd_ptr create(core::smart_refctd_ptr&& camera) + { + if (!camera) + return nullptr; + + return core::smart_refctd_ptr(new CLinearProjection(core::smart_refctd_ptr(camera)), core::dont_grab); + } + + virtual std::span getLinearProjections() const override + { + return std::span(m_projections.data(), m_projections.size()); + } + + inline std::span getLinearProjections() + { + return std::span(m_projections.data(), m_projections.size()); + } + + private: + CLinearProjection(core::smart_refctd_ptr&& camera) + : ILinearProjection(core::smart_refctd_ptr(camera)) {} + virtual ~CLinearProjection() = default; + + ProjectionsRange m_projections; + }; + +} // nbl::hlsl namespace + +#endif // _NBL_C_LINEAR_PROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/COrbitCamera.hpp b/common/include/camera/COrbitCamera.hpp new file mode 100644 index 000000000..936b1e51f --- /dev/null +++ b/common/include/camera/COrbitCamera.hpp @@ -0,0 +1,221 @@ +#ifndef _C_ORBIT_CAMERA_HPP_ +#define _C_ORBIT_CAMERA_HPP_ + +#include "ICamera.hpp" +#include + +namespace nbl::hlsl +{ + +class COrbitCamera final : public ICamera +{ +public: + using base_t = ICamera; + + COrbitCamera(const float64_t3& position, const float64_t3& target) + : base_t(), m_targetPosition(target), m_distance(length(m_targetPosition - position)), m_gimbal({ .position = position, .orientation = glm::quat(glm::vec3(0, 0, 0)) }) {} + ~COrbitCamera() = default; + + const base_t::keyboard_to_virtual_events_t getKeyboardMappingPreset() const override { return m_keyboard_to_virtual_events_preset; } + const base_t::mouse_to_virtual_events_t getMouseMappingPreset() const override { return 
m_mouse_to_virtual_events_preset; } + const base_t::imguizmo_to_virtual_events_t getImguizmoMappingPreset() const override { return m_imguizmo_to_virtual_events_preset; } + + const typename base_t::CGimbal& getGimbal() override { return m_gimbal; } + + inline bool setDistance(float d) + { + const auto clamped = std::clamp(d, MinDistance, MaxDistance); + const bool ok = clamped == d; + + m_distance = clamped; + + return ok; + } + + inline void target(const float64_t3& p) + { + m_targetPosition = p; + } + + virtual bool manipulate(std::span virtualEvents, base_t::ManipulationMode mode) override + { + // position on the sphere + auto S = [&](double u, double v) -> float64_t3 + { + return float64_t3 + { + std::cos(v) * std::cos(u), + std::cos(v) * std::sin(u), + std::sin(v) + } * (double) m_distance; + }; + + /* + // partial derivative of S with respect to u + auto Sdu = [&](double u, double v) -> float64_t3 + { + return float64_t3 + { + -std::cos(v) * std::sin(u), + std::cos(v)* std::cos(u), + 0 + } * (double) m_distance; + }; + */ + + double deltaU = {}, deltaV = {}, deltaDistance = {}; + + for (const auto& it : virtualEvents) + { + if (it.type == it.MoveForward) + deltaDistance += it.magnitude; + else if (it.type == it.MoveBackward) + deltaDistance -= it.magnitude; + else if (it.type == it.MoveUp) + deltaU += it.magnitude; + else if (it.type == it.MoveDown) + deltaU -= it.magnitude; + else if (it.type == it.MoveRight) + deltaV += it.magnitude; + else if (it.type == it.MoveLeft) + deltaV -= it.magnitude; + } + + // TODO! + constexpr auto nastyScalar = 0.01; + deltaU *= nastyScalar * m_moveSpeedScale; + deltaV *= nastyScalar * m_moveSpeedScale; + + u += deltaU; + v += deltaV; + + m_distance = std::clamp(m_distance += deltaDistance * nastyScalar, MinDistance, MaxDistance); + + // partial derivative of S with respect to v + auto Sdv = [&](double u, double v) -> float64_t3 + { + return float64_t3 + { + -std::sin(v) * std::cos(u), + -std::sin(v) * std::sin(u), + std::cos(v) + } * (double) m_distance; + }; + + const auto localSpherePostion = S(u, v); + const auto newPosition = localSpherePostion + m_targetPosition; + + // note we are not using Sdu (though we could!) 
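A supporting observation for the comment above: since S(u, v) has constant length m_distance, its partial derivatives are tangent to the sphere, so Sdv is automatically orthogonal to the radial direction that becomes the forward vector below, and the cross product then completes an orthonormal frame. A hypothetical sanity check of that invariant (illustration only, not in this PR):

    const auto radial  = S(u, v);   // target-to-camera offset, length m_distance
    const auto tangent = Sdv(u, v); // derivative along v, used as "up" below
    // analytically dot(S, Sdv) = d^2 * (-cos(v)sin(v)cos^2(u) - cos(v)sin(v)sin^2(u) + sin(v)cos(v)) = 0
    const double d = radial.x * tangent.x + radial.y * tangent.y + radial.z * tangent.z;
    assert(std::abs(d) < 1e-9 * m_distance * m_distance);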
+ // instead we benefit from forward we have for free when moving on sphere surface + // and given up vector obtained from partial derivative we can easily get right vector, this way + // we don't have frenet frame flip we would have with Sdu, however it could be adjusted anyway, less code + + const auto newUp = normalize(Sdv(u, v)); + const auto newForward = normalize(-localSpherePostion); + const auto newRight = normalize(cross(newUp, newForward)); + + const auto newOrientation = glm::quat_cast + ( + glm::dmat3 + { + newRight, + newUp, + newForward + } + ); + + m_gimbal.begin(); + { + m_gimbal.setPosition(newPosition); + m_gimbal.setOrientation(newOrientation); + } + m_gimbal.end(); + + bool manipulated = bool(m_gimbal.getManipulationCounter()); + + if (manipulated) + m_gimbal.updateView(); + + return manipulated; + } + + virtual const uint32_t getAllowedVirtualEvents(base_t::ManipulationMode mode) override + { + switch (mode) + { + case base_t::Local: return AllowedLocalVirtualEvents; + case base_t::World: return AllowedWorldVirtualEvents; + default: return CVirtualGimbalEvent::None; + } + } + + virtual const std::string_view getIdentifier() override + { + return "Orbit Camera"; + } + + inline float getDistance() { return m_distance; } + inline double getU() { return u; } + inline double getV() { return v; } + + static inline constexpr float MinDistance = 0.1f; + static inline constexpr float MaxDistance = 10000.f; + +private: + float64_t3 m_targetPosition; + float m_distance; + typename base_t::CGimbal m_gimbal; + + double u = {}, v = {}; + + static inline constexpr auto AllowedLocalVirtualEvents = CVirtualGimbalEvent::Translate; + static inline constexpr auto AllowedWorldVirtualEvents = CVirtualGimbalEvent::None; + + static inline const auto m_keyboard_to_virtual_events_preset = []() + { + typename base_t::keyboard_to_virtual_events_t preset; + + preset[ui::E_KEY_CODE::EKC_W] = CVirtualGimbalEvent::MoveUp; + preset[ui::E_KEY_CODE::EKC_S] = CVirtualGimbalEvent::MoveDown; + preset[ui::E_KEY_CODE::EKC_A] = CVirtualGimbalEvent::MoveLeft; + preset[ui::E_KEY_CODE::EKC_D] = CVirtualGimbalEvent::MoveRight; + preset[ui::E_KEY_CODE::EKC_E] = CVirtualGimbalEvent::MoveForward; + preset[ui::E_KEY_CODE::EKC_Q] = CVirtualGimbalEvent::MoveBackward; + + return preset; + }(); + + static inline const auto m_mouse_to_virtual_events_preset = []() + { + typename base_t::mouse_to_virtual_events_t preset; + + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_X] = CVirtualGimbalEvent::MoveRight; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_X] = CVirtualGimbalEvent::MoveLeft; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_POSITIVE_MOVEMENT_Y] = CVirtualGimbalEvent::MoveUp; + preset[ui::E_MOUSE_CODE::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y] = CVirtualGimbalEvent::MoveDown; + preset[ui::E_MOUSE_CODE::EMC_VERTICAL_POSITIVE_SCROLL] = CVirtualGimbalEvent::MoveForward; + preset[ui::E_MOUSE_CODE::EMC_HORIZONTAL_POSITIVE_SCROLL] = CVirtualGimbalEvent::MoveForward; + preset[ui::E_MOUSE_CODE::EMC_VERTICAL_NEGATIVE_SCROLL] = CVirtualGimbalEvent::MoveBackward; + preset[ui::E_MOUSE_CODE::EMC_HORIZONTAL_NEGATIVE_SCROLL] = CVirtualGimbalEvent::MoveBackward; + + return preset; + }(); + + static inline const auto m_imguizmo_to_virtual_events_preset = []() + { + typename base_t::imguizmo_to_virtual_events_t preset; + + preset[CVirtualGimbalEvent::MoveForward] = CVirtualGimbalEvent::MoveForward; + preset[CVirtualGimbalEvent::MoveBackward] = CVirtualGimbalEvent::MoveBackward; + preset[CVirtualGimbalEvent::MoveLeft] = 
CVirtualGimbalEvent::MoveLeft; + preset[CVirtualGimbalEvent::MoveRight] = CVirtualGimbalEvent::MoveRight; + preset[CVirtualGimbalEvent::MoveUp] = CVirtualGimbalEvent::MoveUp; + preset[CVirtualGimbalEvent::MoveDown] = CVirtualGimbalEvent::MoveDown; + + return preset; + }(); +}; + +} + +#endif // _C_ORBIT_CAMERA_HPP_ \ No newline at end of file diff --git a/common/include/camera/CPlanarProjection.hpp b/common/include/camera/CPlanarProjection.hpp new file mode 100644 index 000000000..a85cc39de --- /dev/null +++ b/common/include/camera/CPlanarProjection.hpp @@ -0,0 +1,42 @@ +#ifndef _NBL_C_PLANAR_PROJECTION_HPP_ +#define _NBL_C_PLANAR_PROJECTION_HPP_ + +#include "IPlanarProjection.hpp" +#include "IRange.hpp" + +namespace nbl::hlsl +{ + template ProjectionsRange> + class CPlanarProjection : public IPlanarProjection + { + public: + virtual ~CPlanarProjection() = default; + + inline static core::smart_refctd_ptr create(core::smart_refctd_ptr&& camera) + { + if (!camera) + return nullptr; + + return core::smart_refctd_ptr(new CPlanarProjection(core::smart_refctd_ptr(camera)), core::dont_grab); + } + + virtual std::span getLinearProjections() const override + { + return { reinterpret_cast(m_projections.data()), m_projections.size() }; + } + + inline ProjectionsRange& getPlanarProjections() + { + return m_projections; + } + + protected: + CPlanarProjection(core::smart_refctd_ptr&& camera) + : IPlanarProjection(core::smart_refctd_ptr(camera)) {} + + ProjectionsRange m_projections; + }; + +} // nbl::hlsl namespace + +#endif // _NBL_C_PLANAR_PROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/ICamera.hpp b/common/include/camera/ICamera.hpp new file mode 100644 index 000000000..9af3e27d9 --- /dev/null +++ b/common/include/camera/ICamera.hpp @@ -0,0 +1,112 @@ +// Copyright (C) 2018-2020 - DevSH Graphics Programming Sp. z O.O. +// This file is part of the "Nabla Engine". +// For conditions of distribution and use, see copyright notice in nabla.h + +#ifndef _I_CAMERA_HPP_ +#define _I_CAMERA_HPP_ + +#include "camera/IGimbalController.hpp" + +namespace nbl::hlsl // TODO: DIFFERENT NAMESPACE +{ + +class ICamera : public IGimbalController, virtual public core::IReferenceCounted +{ +public: + using IGimbalController::IGimbalController; + + //! Manipulation mode for virtual events + //! 
TODO: this should belong to IObjectTransform or something + enum ManipulationMode + { + // Interpret virtual events as accumulated impulse representing relative manipulation with respect to view gimbal base + Local, + + // Interpret virtual events as accumulated impulse representing relative manipulation with respect to world base + World + }; + + // Gimbal with view parameters representing a camera in world space + class CGimbal : public IGimbal + { + public: + using base_t = IGimbal; + + CGimbal(typename base_t::SCreationParameters&& parameters) : base_t(std::move(parameters)) { updateView(); } + ~CGimbal() = default; + + inline void updateView() + { + const auto& gRight = base_t::getXAxis(), gUp = base_t::getYAxis(), gForward = base_t::getZAxis(); + + auto isNormalized = [](const auto& v, precision_t epsilon) -> bool + { + return glm::epsilonEqual(glm::length(v), 1.0, epsilon); + }; + + auto isOrthogonal = [](const auto& a, const auto& b, precision_t epsilon) -> bool + { + return glm::epsilonEqual(glm::dot(a, b), 0.0, epsilon); + }; + + auto isOrthoBase = [&](const auto& x, const auto& y, const auto& z, precision_t epsilon = 1e-6) -> bool + { + return isNormalized(x, epsilon) && isNormalized(y, epsilon) && isNormalized(z, epsilon) && + isOrthogonal(x, y, epsilon) && isOrthogonal(x, z, epsilon) && isOrthogonal(y, z, epsilon); + }; + + assert(isOrthoBase(gRight, gUp, gForward)); + + const auto& position = base_t::getPosition(); + + m_viewMatrix[0u] = float64_t4(gRight, -glm::dot(gRight, position)); + m_viewMatrix[1u] = float64_t4(gUp, -glm::dot(gUp, position)); + m_viewMatrix[2u] = float64_t4(gForward, -glm::dot(gForward, position)); + } + + inline const float64_t3x4& getViewMatrix() const { return m_viewMatrix; } + + private: + float64_t3x4 m_viewMatrix; + }; + + ICamera() {} + virtual ~ICamera() = default; + + // Returns a gimbal which *models the camera view* + virtual const CGimbal& getGimbal() = 0u; + + // Manipulates camera with virtual events, returns true if *any* manipulation happens, it may fail partially or fully because each camera type has certain constraints which determine how it actually works + // TODO: this really needs to be moved to more abstract interface, eg. IObjectTransform or something and ICamera should inherit it (its also an object!) 
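A note on CGimbal::updateView() above: writing each row as (axis, -dot(axis, position)) is the closed form of inverting a rigid transform, i.e. view = [ R^T | -R^T * p ] with the gimbal's right/up/forward axes as the rows of R^T, which is also why the orthonormality assert matters. A quick hedged sanity check (hypothetical, not in this PR):

    // the gimbal's own position must land on the view-space origin
    const auto& V = gimbal.getViewMatrix(); // float64_t3x4, rows = (axis, -dot(axis, position))
    const auto& p = gimbal.getPosition();
    for (uint32_t r = 0u; r < 3u; ++r)
    {
        const double e = V[r].x * p.x + V[r].y * p.y + V[r].z * p.z + V[r].w; // homogeneous point (p, 1)
        assert(std::abs(e) < 1e-9);
    }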
+ virtual bool manipulate(std::span virtualEvents, ManipulationMode mode) = 0; + + // VirtualEventType bitmask for a camera view gimbal manipulation requests filtering + virtual const uint32_t getAllowedVirtualEvents(ManipulationMode mode) = 0u; + + // Identifier of a camera type + virtual const std::string_view getIdentifier() = 0u; + + // (***) + inline void setMoveSpeedScale(double scalar) + { + m_moveSpeedScale = scalar; + } + + // (***) + inline void setRotationSpeedScale(double scalar) + { + m_rotationSpeedScale = scalar; + } + + inline double getMoveSpeedScale() const { return m_moveSpeedScale; } + inline double getRotationSpeedScale() const { return m_rotationSpeedScale; } + +protected: + // (***) TODO: I need to think whether a camera should own this or controllers should be able + // to set sensitivity to scale magnitudes of generated events we put into manipulate method + double m_moveSpeedScale = 0.01, m_rotationSpeedScale = 0.003; +}; + +} + +#endif // _I_CAMERA_HPP_ \ No newline at end of file diff --git a/common/include/camera/IGimbal.hpp b/common/include/camera/IGimbal.hpp new file mode 100644 index 000000000..c70c2bbab --- /dev/null +++ b/common/include/camera/IGimbal.hpp @@ -0,0 +1,403 @@ +#ifndef _NBL_IGIMBAL_HPP_ +#define _NBL_IGIMBAL_HPP_ + +#include "glm/glm/ext/matrix_transform.hpp" // TODO: TEMPORARY!!! whatever used will be moved to cpp +#include "glm/glm/gtc/quaternion.hpp" +#include "nbl/builtin/hlsl/matrix_utils/transformation_matrix_utils.hlsl" + +// TODO: DIFFERENT NAMESPACE +namespace nbl::hlsl +{ + struct CVirtualGimbalEvent + { + enum VirtualEventType : uint32_t + { + None = 0, + + // Individual events + MoveForward = core::createBitmask({ 0 }), + MoveBackward = core::createBitmask({ 1 }), + MoveLeft = core::createBitmask({ 2 }), + MoveRight = core::createBitmask({ 3 }), + MoveUp = core::createBitmask({ 4 }), + MoveDown = core::createBitmask({ 5 }), + TiltUp = core::createBitmask({ 6 }), + TiltDown = core::createBitmask({ 7 }), + PanLeft = core::createBitmask({ 8 }), + PanRight = core::createBitmask({ 9 }), + RollLeft = core::createBitmask({ 10 }), + RollRight = core::createBitmask({ 11 }), + ScaleXInc = core::createBitmask({ 12 }), + ScaleXDec = core::createBitmask({ 13 }), + ScaleYInc = core::createBitmask({ 14 }), + ScaleYDec = core::createBitmask({ 15 }), + ScaleZInc = core::createBitmask({ 16 }), + ScaleZDec = core::createBitmask({ 17 }), + + EventsCount = 18, + + // Grouped bitmasks + Translate = MoveForward | MoveBackward | MoveLeft | MoveRight | MoveUp | MoveDown, + Rotate = TiltUp | TiltDown | PanLeft | PanRight | RollLeft | RollRight, + Scale = ScaleXInc | ScaleXDec | ScaleYInc | ScaleYDec | ScaleZInc | ScaleZDec, + + All = Translate | Rotate | Scale + }; + + using manipulation_encode_t = float64_t; + + VirtualEventType type = None; + manipulation_encode_t magnitude = {}; + + static constexpr std::string_view virtualEventToString(VirtualEventType event) + { + switch (event) + { + case MoveForward: return "MoveForward"; + case MoveBackward: return "MoveBackward"; + case MoveLeft: return "MoveLeft"; + case MoveRight: return "MoveRight"; + case MoveUp: return "MoveUp"; + case MoveDown: return "MoveDown"; + case TiltUp: return "TiltUp"; + case TiltDown: return "TiltDown"; + case PanLeft: return "PanLeft"; + case PanRight: return "PanRight"; + case RollLeft: return "RollLeft"; + case RollRight: return "RollRight"; + case ScaleXInc: return "ScaleXInc"; + case ScaleXDec: return "ScaleXDec"; + case ScaleYInc: return "ScaleYInc"; + case ScaleYDec: return 
"ScaleYDec"; + case ScaleZInc: return "ScaleZInc"; + case ScaleZDec: return "ScaleZDec"; + case Translate: return "Translate"; + case Rotate: return "Rotate"; + case Scale: return "Scale"; + case None: return "None"; + default: return "Unknown"; + } + } + + static constexpr VirtualEventType stringToVirtualEvent(std::string_view event) + { + if (event == "MoveForward") return MoveForward; + if (event == "MoveBackward") return MoveBackward; + if (event == "MoveLeft") return MoveLeft; + if (event == "MoveRight") return MoveRight; + if (event == "MoveUp") return MoveUp; + if (event == "MoveDown") return MoveDown; + if (event == "TiltUp") return TiltUp; + if (event == "TiltDown") return TiltDown; + if (event == "PanLeft") return PanLeft; + if (event == "PanRight") return PanRight; + if (event == "RollLeft") return RollLeft; + if (event == "RollRight") return RollRight; + if (event == "ScaleXInc") return ScaleXInc; + if (event == "ScaleXDec") return ScaleXDec; + if (event == "ScaleYInc") return ScaleYInc; + if (event == "ScaleYDec") return ScaleYDec; + if (event == "ScaleZInc") return ScaleZInc; + if (event == "ScaleZDec") return ScaleZDec; + if (event == "Translate") return Translate; + if (event == "Rotate") return Rotate; + if (event == "Scale") return Scale; + if (event == "None") return None; + return None; + } + + static inline constexpr auto VirtualEventsTypeTable = []() + { + std::array output; + + for (uint16_t i = 0u; i < EventsCount; ++i) + output[i] = static_cast(core::createBitmask({ i })); + + return output; + }(); + }; + + template + requires is_any_of_v + class IGimbal + { + public: + using precision_t = T; + //! underlying type for world matrix (TRS) + using model_matrix_t = matrix; + + struct VirtualImpulse + { + vector dVirtualTranslate { 0.0f }, dVirtualRotation { 0.0f }, dVirtualScale { 0.0f }; + }; + + // TODO: document it + template + VirtualImpulse accumulate(std::span virtualEvents, const vector& gRightOverride, const vector& gUpOverride, const vector& gForwardOverride) + { + VirtualImpulse impulse; + + const auto& gRight = gRightOverride, gUp = gUpOverride, gForward = gForwardOverride; + + for (const auto& event : virtualEvents) + { + // translation events + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveForward) + if (event.type == CVirtualGimbalEvent::MoveForward) + impulse.dVirtualTranslate += gForward * static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveBackward) + if (event.type == CVirtualGimbalEvent::MoveBackward) + impulse.dVirtualTranslate -= gForward * static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveRight) + if (event.type == CVirtualGimbalEvent::MoveRight) + impulse.dVirtualTranslate += gRight * static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveLeft) + if (event.type == CVirtualGimbalEvent::MoveLeft) + impulse.dVirtualTranslate -= gRight * static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveUp) + if (event.type == CVirtualGimbalEvent::MoveUp) + impulse.dVirtualTranslate += gUp * static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::MoveDown) + if (event.type == CVirtualGimbalEvent::MoveDown) + impulse.dVirtualTranslate -= gUp * static_cast(event.magnitude); + + // rotation events + if constexpr (AllowedEvents & CVirtualGimbalEvent::TiltUp) + if (event.type == CVirtualGimbalEvent::TiltUp) + impulse.dVirtualRotation.x += static_cast(event.magnitude); + + if constexpr 
(AllowedEvents & CVirtualGimbalEvent::TiltDown) + if (event.type == CVirtualGimbalEvent::TiltDown) + impulse.dVirtualRotation.x -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::PanRight) + if (event.type == CVirtualGimbalEvent::PanRight) + impulse.dVirtualRotation.y += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::PanLeft) + if (event.type == CVirtualGimbalEvent::PanLeft) + impulse.dVirtualRotation.y -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::RollRight) + if (event.type == CVirtualGimbalEvent::RollRight) + impulse.dVirtualRotation.z += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::RollLeft) + if (event.type == CVirtualGimbalEvent::RollLeft) + impulse.dVirtualRotation.z -= static_cast(event.magnitude); + + // scaling events + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleXInc) + if (event.type == CVirtualGimbalEvent::ScaleXInc) + impulse.dVirtualScale.x += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleXDec) + if (event.type == CVirtualGimbalEvent::ScaleXDec) + impulse.dVirtualScale.x -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleYInc) + if (event.type == CVirtualGimbalEvent::ScaleYInc) + impulse.dVirtualScale.y += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleYDec) + if (event.type == CVirtualGimbalEvent::ScaleYDec) + impulse.dVirtualScale.y -= static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleZInc) + if (event.type == CVirtualGimbalEvent::ScaleZInc) + impulse.dVirtualScale.z += static_cast(event.magnitude); + + if constexpr (AllowedEvents & CVirtualGimbalEvent::ScaleZDec) + if (event.type == CVirtualGimbalEvent::ScaleZDec) + impulse.dVirtualScale.z -= static_cast(event.magnitude); + } + + return impulse; + } + + template + VirtualImpulse accumulate(std::span virtualEvents) + { + return accumulate(virtualEvents, getXAxis(), getYAxis(), getZAxis()); + } + + struct SCreationParameters + { + vector position; + glm::quat orientation = glm::quat(1.0f, 0.0f, 0.0f, 0.0f); + }; + + IGimbal(const IGimbal&) = default; + IGimbal(IGimbal&&) noexcept = default; + IGimbal& operator=(const IGimbal&) = default; + IGimbal& operator=(IGimbal&&) noexcept = default; + + IGimbal(SCreationParameters&& parameters) + : m_position(parameters.position), m_orientation(parameters.orientation), m_id(reinterpret_cast(this)) + { + updateOrthonormalOrientationBase(); + } + + inline const uintptr_t getID() const { return m_id; } + + void begin() + { + m_isManipulating = true; + m_counter = 0u; + } + + inline void setPosition(const vector& position) + { + assert(m_isManipulating); // TODO: log error and return without doing nothing + + if (m_position != position) + m_counter++; + + m_position = position; + } + + inline void setScale(const vector& scale) + { + // we are not consider it a manipulation because it only impacts TRS world matrix we do not store + m_scale = scale; + } + + inline void setOrientation(const glm::quat& orientation) + { + assert(m_isManipulating); // TODO: log error and return without doing nothing + + if(m_orientation != orientation) + m_counter++; + + m_orientation = glm::normalize(orientation); + updateOrthonormalOrientationBase(); + } + + inline void rotate(const vector& axis, float dRadians) + { + assert(m_isManipulating); // TODO: log error and return 
without doing nothing + + if(dRadians) + m_counter++; + + glm::quat dRotation = glm::angleAxis(dRadians, axis); + m_orientation = glm::normalize(dRotation * m_orientation); + updateOrthonormalOrientationBase(); + } + + inline void move(vector delta) + { + assert(m_isManipulating); // TODO: log error and return without doing nothing + + auto newPosition = m_position + delta; + + if (newPosition != m_position) + m_counter++; + + m_position = newPosition; + } + + inline void strafe(precision_t distance) + { + move(getXAxis() * distance); + } + + inline void climb(precision_t distance) + { + move(getYAxis() * distance); + } + + inline void advance(precision_t distance) + { + move(getZAxis() * distance); + } + + inline void end() + { + m_isManipulating = false; + } + + //! Position of gimbal in world space + inline const auto& getPosition() const { return m_position; } + + //! Orientation of gimbal + inline const auto& getOrientation() const { return m_orientation; } + + //! Scale transform component + inline const auto& getScale() const { return m_scale; } + + //! World matrix (TRS) + inline const model_matrix_t operator()() const + { + const auto& position = getPosition(); + const auto& rotation = getOrthonornalMatrix(); + const auto& scale = getScale(); + + return + { + vector(rotation[0] * scale.x, position.x), + vector(rotation[1] * scale.y, position.y), + vector(rotation[2] * scale.z, position.z) + }; + } + + //! Orthonormal [getXAxis(), getYAxis(), getZAxis()] orientation matrix + inline const auto& getOrthonornalMatrix() const { return m_orthonormal; } + + //! Base "right" vector in orthonormal orientation basis (X-axis) + inline const auto& getXAxis() const { return m_orthonormal[0u]; } + + //! Base "up" vector in orthonormal orientation basis (Y-axis) + inline const auto& getYAxis() const { return m_orthonormal[1u]; } + + //! Base "forward" vector in orthonormal orientation basis (Z-axis) + inline const auto& getZAxis() const { return m_orthonormal[2u]; } + + //! Target vector in local space, alias for getZAxis() + inline const auto getLocalTarget() const { return getZAxis(); } + + //! Target vector in world space + inline const auto getWorldTarget() const { return getPosition() + getLocalTarget(); } + + //! Counts how many times a valid manipulation has been performed, the counter resets when begin() is called + inline const auto& getManipulationCounter() { return m_counter; } + + //! Returns true if gimbal records a manipulation + inline bool isManipulating() const { return m_isManipulating; } + + private: + inline void updateOrthonormalOrientationBase() + { + m_orthonormal = matrix(glm::mat3_cast(glm::normalize(m_orientation))); + } + + //! Position of a gimbal in world space + vector m_position; + + //! Normalized orientation of gimbal + //! TODO: precision + replace with our "quat at home" + glm::quat m_orientation; + + //! Scale transform component + vector m_scale = { 1.f, 1.f , 1.f }; + + //! Orthonormal base composed from "m_orientation" representing gimbal's "forward", "up" & "right" vectors in local space - basically it spans orientation space + matrix m_orthonormal; + + //! Counter that increments for each performed manipulation, resets with each begin() call + size_t m_counter = {}; + + //! Tracks whether gimbal is currently in manipulation mode + bool m_isManipulating = false; + + //! 
I don't like the fact that ImGuizmo has a global context; however, for IDs we can use a lifetime-tracking trick and cast object addresses, which are unique, so we don't need any global associative container to track them! + const uintptr_t m_id; + }; +} // namespace nbl::hlsl + +#endif // _NBL_IGIMBAL_HPP_ \ No newline at end of file diff --git a/common/include/camera/IGimbalController.hpp b/common/include/camera/IGimbalController.hpp new file mode 100644 index 000000000..7b4ba365e --- /dev/null +++ b/common/include/camera/IGimbalController.hpp @@ -0,0 +1,457 @@ +#ifndef _NBL_I_CAMERA_CONTROLLER_HPP_ +#define _NBL_I_CAMERA_CONTROLLER_HPP_ + +///////////////////////// +// TODO: TEMPORARY!!! +#include "common.hpp" +namespace ImGuizmo +{ + void DecomposeMatrixToComponents(const float*, float*, float*, float*); +} +///////////////////////// + +#include "IProjection.hpp" +#include "IGimbal.hpp" + +// TODO: DIFFERENT NAMESPACE +namespace nbl::hlsl +{ + +struct IGimbalManipulateEncoder +{ + IGimbalManipulateEncoder() {} + virtual ~IGimbalManipulateEncoder() {} + + //! output of any controller process method + using gimbal_event_t = CVirtualGimbalEvent; + + //! encode keyboard code used to translate to gimbal_event_t event + using encode_keyboard_code_t = ui::E_KEY_CODE; + + //! encode mouse code used to translate to gimbal_event_t event + using encode_mouse_code_t = ui::E_MOUSE_CODE; + + //! encode ImGuizmo code used to translate to gimbal_event_t event + using encode_imguizmo_code_t = gimbal_event_t::VirtualEventType; + + //! Encoder types, a controller takes encoder type events and outputs gimbal_event_t events + enum EncoderType : uint8_t + { + Keyboard, + Mouse, + Imguizmo, + + Count + }; + + //! A key in a hash map: one of the "encode_*_code_t" codes stored as a union, together with information about the EncoderType the encoded value was produced from + struct CKeyInfo + { + union + { + encode_keyboard_code_t keyboardCode; + encode_mouse_code_t mouseCode; + encode_imguizmo_code_t imguizmoCode; + }; + + CKeyInfo(encode_keyboard_code_t code) : keyboardCode(code), type(Keyboard) {} + CKeyInfo(encode_mouse_code_t code) : mouseCode(code), type(Mouse) {} + CKeyInfo(encode_imguizmo_code_t code) : imguizmoCode(code), type(Imguizmo) {} + + EncoderType type; + }; + + //! 
Hash value in hash map which is gimbal_event_t & state + struct CHashInfo + { + CHashInfo() {} + CHashInfo(gimbal_event_t::VirtualEventType _type) : event({ .type = _type }) {} + ~CHashInfo() = default; + + gimbal_event_t event = {}; + bool active = false; + }; + + using keyboard_to_virtual_events_t = std::unordered_map; + using mouse_to_virtual_events_t = std::unordered_map; + using imguizmo_to_virtual_events_t = std::unordered_map; + + virtual const keyboard_to_virtual_events_t getKeyboardMappingPreset() const { return {}; } + virtual const mouse_to_virtual_events_t getMouseMappingPreset() const { return {}; } + virtual const imguizmo_to_virtual_events_t getImguizmoMappingPreset() const { return {}; } + + virtual const keyboard_to_virtual_events_t& getKeyboardVirtualEventMap() const = 0; + virtual const mouse_to_virtual_events_t& getMouseVirtualEventMap() const = 0; + virtual const imguizmo_to_virtual_events_t& getImguizmoVirtualEventMap() const = 0; + + // Binds mouse key codes to virtual events, the mapKeys lambda will be executed with controller keyboard_to_virtual_events_t table + virtual void updateKeyboardMapping(const std::function& mapKeys) = 0; + + // Binds mouse key codes to virtual events, the mapKeys lambda will be executed with controller mouse_to_virtual_events_t table + virtual void updateMouseMapping(const std::function& mapKeys) = 0; + + // Binds imguizmo key codes to virtual events, the mapKeys lambda will be executed with controller imguizmo_to_virtual_events_t table + virtual void updateImguizmoMapping(const std::function& mapKeys) = 0; +}; + +class IGimbalController : public IGimbalManipulateEncoder +{ +public: + using IGimbalManipulateEncoder::IGimbalManipulateEncoder; + + IGimbalController() {} + virtual ~IGimbalController() {} + + //! input of keyboard gimbal controller process utility - Nabla UI event handler produces ui::SKeyboardEvent events + using input_keyboard_event_t = ui::SKeyboardEvent; + + //! input of mouse gimbal controller process utility - Nabla UI event handler produces ui::SMouseEvent events + using input_mouse_event_t = ui::SMouseEvent; + + //! input of ImGuizmo gimbal controller process utility - ImGuizmo manipulate utility produces "delta (TRS) matrix" events + using input_imguizmo_event_t = float32_t4x4; + + void beginInputProcessing(const std::chrono::microseconds nextPresentationTimeStamp) + { + m_nextPresentationTimeStamp = nextPresentationTimeStamp; + m_frameDeltaTime = std::chrono::duration_cast(m_nextPresentationTimeStamp - m_lastVirtualUpTimeStamp).count(); + assert(m_frameDeltaTime >= 0.f); + } + + void endInputProcessing() + { + m_lastVirtualUpTimeStamp = m_nextPresentationTimeStamp; + } + + virtual void updateKeyboardMapping(const std::function& mapKeys) override { mapKeys(m_keyboardVirtualEventMap); } + virtual void updateMouseMapping(const std::function& mapKeys) override { mapKeys(m_mouseVirtualEventMap); } + virtual void updateImguizmoMapping(const std::function& mapKeys) override { mapKeys(m_imguizmoVirtualEventMap); } + + struct SUpdateParameters + { + std::span keyboardEvents = {}; + std::span mouseEvents = {}; + std::span imguizmoEvents = {}; + }; + + /** + * @brief Processes combined events from SUpdateParameters to generate virtual manipulation events. + * + * @note This function combines the processing of events from keyboards, mouse and ImGuizmo. 
+ * It delegates the actual processing to the respective functions: + * - @ref processKeyboard for keyboard events + * - @ref processMouse for mouse events + * - @ref processImguizmo for ImGuizmo events + * The results are accumulated into the output array and the total count. + * + * @param "output" is a pointer to the array where all generated gimbal events will be stored. + * If nullptr, the function will only calculate the total count of potential + * output events without processing. + * + * @param "count" is a uint32_t reference to store the total count of generated gimbal events. + * + * @param "parameters" is an SUpdateParameters structure containing the individual event arrays + * for keyboard, mouse, and ImGuizmo inputs. + * + * @return void. If "count" > 0 and "output" is a valid pointer, use it to dereference your "output" + * containing "count" events. If "output" is nullptr, "count" tells you the total size of "output" + * you must guarantee to be valid. + */ + void process(gimbal_event_t* output, uint32_t& count, const SUpdateParameters parameters = {}) + { + count = 0u; + uint32_t vKeyboardEventsCount = {}, vMouseEventsCount = {}, vImguizmoEventsCount = {}; + + if (output) + { + processKeyboard(output, vKeyboardEventsCount, parameters.keyboardEvents); output += vKeyboardEventsCount; + processMouse(output, vMouseEventsCount, parameters.mouseEvents); output += vMouseEventsCount; + processImguizmo(output, vImguizmoEventsCount, parameters.imguizmoEvents); + } + else + { + processKeyboard(nullptr, vKeyboardEventsCount, {}); + processMouse(nullptr, vMouseEventsCount, {}); + processImguizmo(nullptr, vImguizmoEventsCount, {}); + } + + count = vKeyboardEventsCount + vMouseEventsCount + vImguizmoEventsCount; + } + + virtual const keyboard_to_virtual_events_t& getKeyboardVirtualEventMap() const override { return m_keyboardVirtualEventMap; } + virtual const mouse_to_virtual_events_t& getMouseVirtualEventMap() const override { return m_mouseVirtualEventMap; } + virtual const imguizmo_to_virtual_events_t& getImguizmoVirtualEventMap() const override { return m_imguizmoVirtualEventMap; } + + /** + * @brief Processes keyboard events to generate virtual manipulation events. + * + * @note This function maps keyboard events into virtual gimbal manipulation events + * based on predefined mappings. It supports event types such as key press and key release + * to trigger corresponding actions. + * + * @param "output" is a pointer to the array where generated gimbal events will be stored. + * If nullptr, the function will only calculate the count of potential + * output events without processing. + * + * @param "count" is a uint32_t reference to store the count of generated gimbal events. + * + * @param "events" is a span of input_keyboard_event_t. Each such event contains a key code and action, + * such as key press or release. + * + * @return void. If "count" > 0 and "output" is a valid pointer, use it to dereference your "output" + * containing "count" events. If "output" is nullptr, "count" tells you the size of "output" you must guarantee to be valid. 
+ */ + void processKeyboard(gimbal_event_t* output, uint32_t& count, std::span events) + { + count = 0u; + const auto mappedVirtualEventsCount = m_keyboardVirtualEventMap.size(); + + if (!output) + { + count = mappedVirtualEventsCount; + return; + } + + if (mappedVirtualEventsCount) + { + preprocess(m_keyboardVirtualEventMap); + + for (const auto& keyboardEvent : events) + { + auto request = m_keyboardVirtualEventMap.find(keyboardEvent.keyCode); + if (request != std::end(m_keyboardVirtualEventMap)) + { + auto& hash = request->second; + + if (keyboardEvent.action == input_keyboard_event_t::ECA_PRESSED) + { + if (!hash.active) + { + const auto keyDeltaTime = std::chrono::duration_cast(m_nextPresentationTimeStamp - keyboardEvent.timeStamp).count(); + assert(keyDeltaTime >= 0); + + hash.active = true; + hash.event.magnitude = keyDeltaTime; + } + } + else if (keyboardEvent.action == input_keyboard_event_t::ECA_RELEASED) + hash.active = false; + } + } + + postprocess(m_keyboardVirtualEventMap, output, count); + } + } + + /** + * @brief Processes mouse events to generate virtual manipulation events. + * + * @note This function processes mouse input, including clicks, scrolls, and movements, + * and maps them into virtual gimbal manipulation events. Mouse actions are processed + * using predefined mappings to determine corresponding gimbal manipulations. + * + * @param "output" is a pointer to the array where generated gimbal events will be stored. + * If nullptr, the function will only calculate the count of potential + * output events without processing. + * + * @param "count" is a uint32_t reference to store the count of generated gimbal events. + * + * @param "events" is a span of input_mouse_event_t. Each such event represents a mouse action, + * including clicks, scrolls, or movements. + * + * @return void. If "count" > 0 and "output" is a valid pointer, use it to dereference your "output" + * containing "count" events. If "output" is nullptr, "count" tells you the size of "output" you must guarantee to be valid. 
+ */ + void processMouse(gimbal_event_t* output, uint32_t& count, std::span events) + { + count = 0u; + const auto mappedVirtualEventsCount = m_mouseVirtualEventMap.size(); + + if (!output) + { + count = mappedVirtualEventsCount; + return; + } + + if (mappedVirtualEventsCount) + { + preprocess(m_mouseVirtualEventMap); + + for (const auto& mouseEvent : events) + { + ui::E_MOUSE_CODE mouseCode = ui::EMC_NONE; + + switch (mouseEvent.type) + { + case input_mouse_event_t::EET_CLICK: + { + switch (mouseEvent.clickEvent.mouseButton) + { + case ui::EMB_LEFT_BUTTON: mouseCode = ui::EMC_LEFT_BUTTON; break; + case ui::EMB_RIGHT_BUTTON: mouseCode = ui::EMC_RIGHT_BUTTON; break; + case ui::EMB_MIDDLE_BUTTON: mouseCode = ui::EMC_MIDDLE_BUTTON; break; + case ui::EMB_BUTTON_4: mouseCode = ui::EMC_BUTTON_4; break; + case ui::EMB_BUTTON_5: mouseCode = ui::EMC_BUTTON_5; break; + default: continue; + } + + auto request = m_mouseVirtualEventMap.find(mouseCode); + if (request != std::end(m_mouseVirtualEventMap)) + { + auto& hash = request->second; + + if (mouseEvent.clickEvent.action == input_mouse_event_t::SClickEvent::EA_PRESSED) + { + if (!hash.active) + { + const auto keyDeltaTime = std::chrono::duration_cast(m_nextPresentationTimeStamp - mouseEvent.timeStamp).count(); + assert(keyDeltaTime >= 0); + + hash.active = true; + hash.event.magnitude += keyDeltaTime; + } + } + else if (mouseEvent.clickEvent.action == input_mouse_event_t::SClickEvent::EA_RELEASED) + hash.active = false; + } + } break; + + case input_mouse_event_t::EET_SCROLL: + { + requestMagnitudeUpdateWithScalar(0.f, float(mouseEvent.scrollEvent.verticalScroll), float(std::abs(mouseEvent.scrollEvent.verticalScroll)), ui::EMC_VERTICAL_POSITIVE_SCROLL, ui::EMC_VERTICAL_NEGATIVE_SCROLL, m_mouseVirtualEventMap); + requestMagnitudeUpdateWithScalar(0.f, mouseEvent.scrollEvent.horizontalScroll, std::abs(mouseEvent.scrollEvent.horizontalScroll), ui::EMC_HORIZONTAL_POSITIVE_SCROLL, ui::EMC_HORIZONTAL_NEGATIVE_SCROLL, m_mouseVirtualEventMap); + } break; + + case input_mouse_event_t::EET_MOVEMENT: + { + requestMagnitudeUpdateWithScalar(0.f, mouseEvent.movementEvent.relativeMovementX, std::abs(mouseEvent.movementEvent.relativeMovementX), ui::EMC_RELATIVE_POSITIVE_MOVEMENT_X, ui::EMC_RELATIVE_NEGATIVE_MOVEMENT_X, m_mouseVirtualEventMap); + requestMagnitudeUpdateWithScalar(0.f, mouseEvent.movementEvent.relativeMovementY, std::abs(mouseEvent.movementEvent.relativeMovementY), ui::EMC_RELATIVE_POSITIVE_MOVEMENT_Y, ui::EMC_RELATIVE_NEGATIVE_MOVEMENT_Y, m_mouseVirtualEventMap); + } break; + + default: + break; + } + } + + postprocess(m_mouseVirtualEventMap, output, count); + } + } + + /** + * @brief Processes input events from ImGuizmo and generates virtual gimbal events. + * + * @note This function is intended to process transformations provided by ImGuizmo and convert + * them into virtual gimbal events for the ICamera::World mode (ICamera::Local is invalid!). + * The function computes translation, rotation, and scale deltas from ImGuizmo's delta matrix, + * which are then mapped to corresponding virtual events using a predefined mapping. + * + * @param "output" is pointer to the array where generated gimbal events will be stored. + * If nullptr, the function will only calculate the count of potential + * output events without processing. + * + * @param "count" is uint32_t reference to store the count of generated gimbal events. + * + * @param "events" is a span of input_imguizmo_event_t. 
Each such event contains a delta + * transformation matrix that represents changes in world space. + * + * @return void. If "count" > 0 & "output" was valid pointer then use it to dereference your "output" containing "count" events. + * If "output" is nullptr then "count" tells you about size of "output" you must guarantee to be valid. + */ + void processImguizmo(gimbal_event_t* output, uint32_t& count, std::span events) + { + count = 0u; + const auto mappedVirtualEventsCount = m_imguizmoVirtualEventMap.size(); + + if (!output) + { + count = mappedVirtualEventsCount; + return; + } + + if (mappedVirtualEventsCount) + { + preprocess(m_imguizmoVirtualEventMap); + + for (const auto& ev : events) + { + const auto& deltaWorldTRS = ev; + + struct + { + float32_t3 dTranslation, dRotation, dScale; + } world; + + // TODO: write it in Nabla, this is temp + ImGuizmo::DecomposeMatrixToComponents(&deltaWorldTRS[0][0], &world.dTranslation[0], &world.dRotation[0], &world.dScale[0]); + + // Delta translation impulse + requestMagnitudeUpdateWithScalar(0.f, world.dTranslation[0], std::abs(world.dTranslation[0]), gimbal_event_t::MoveRight, gimbal_event_t::MoveLeft, m_imguizmoVirtualEventMap); + requestMagnitudeUpdateWithScalar(0.f, world.dTranslation[1], std::abs(world.dTranslation[1]), gimbal_event_t::MoveUp, gimbal_event_t::MoveDown, m_imguizmoVirtualEventMap); + requestMagnitudeUpdateWithScalar(0.f, world.dTranslation[2], std::abs(world.dTranslation[2]), gimbal_event_t::MoveForward, gimbal_event_t::MoveBackward, m_imguizmoVirtualEventMap); + + // Delta rotation impulse + requestMagnitudeUpdateWithScalar(0.f, world.dRotation[0], std::abs(world.dRotation[0]), gimbal_event_t::TiltUp , gimbal_event_t::TiltDown, m_imguizmoVirtualEventMap); + requestMagnitudeUpdateWithScalar(0.f, world.dRotation[1], std::abs(world.dRotation[1]), gimbal_event_t::PanRight, gimbal_event_t::PanLeft, m_imguizmoVirtualEventMap); + requestMagnitudeUpdateWithScalar(0.f, world.dRotation[2], std::abs(world.dRotation[2]), gimbal_event_t::RollRight, gimbal_event_t::RollLeft, m_imguizmoVirtualEventMap); + + // Delta scale impulse + requestMagnitudeUpdateWithScalar(1.f, world.dScale[0], std::abs(world.dScale[0]), gimbal_event_t::ScaleXInc, gimbal_event_t::ScaleXDec, m_imguizmoVirtualEventMap); + requestMagnitudeUpdateWithScalar(1.f, world.dScale[1], std::abs(world.dScale[1]), gimbal_event_t::ScaleYInc, gimbal_event_t::ScaleYDec, m_imguizmoVirtualEventMap); + requestMagnitudeUpdateWithScalar(1.f, world.dScale[2], std::abs(world.dScale[2]), gimbal_event_t::ScaleZInc, gimbal_event_t::ScaleZDec, m_imguizmoVirtualEventMap); + } + + postprocess(m_imguizmoVirtualEventMap, output, count); + } + } + +private: + + //! helper utility, for any controller this should be called before any update of hash map + void preprocess(auto& map) + { + for (auto& [key, hash] : map) + { + hash.event.magnitude = 0.0f; + + if (hash.active) + hash.event.magnitude = m_frameDeltaTime; + } + } + + //! helper utility, for any controller this should be called after updating a hash map + void postprocess(const auto& map, gimbal_event_t* output, uint32_t& count) + { + for (const auto& [key, hash] : map) + if (hash.event.magnitude) + { + auto* virtualEvent = output + count; + virtualEvent->type = hash.event.type; + virtualEvent->magnitude = hash.event.magnitude; + ++count; + } + } + + //! 
helper utility, it *doesn't* assume we keep requested events alive but only increase their magnitude + template + void requestMagnitudeUpdateWithScalar(float signPivot, float dScalar, float dMagnitude, EncodeType positive, EncodeType negative, Map& map) + { + if (dScalar != signPivot) + { + auto code = (dScalar > signPivot) ? positive : negative; + auto request = map.find(code); + if (request != map.end()) + request->second.event.magnitude += dMagnitude; + } + } + + keyboard_to_virtual_events_t m_keyboardVirtualEventMap; + mouse_to_virtual_events_t m_mouseVirtualEventMap; + imguizmo_to_virtual_events_t m_imguizmoVirtualEventMap; + + size_t m_frameDeltaTime = {}; + std::chrono::microseconds m_nextPresentationTimeStamp = {}, m_lastVirtualUpTimeStamp = {}; +}; + +} // nbl::hlsl namespace + +#endif // _NBL_I_CAMERA_CONTROLLER_HPP_ \ No newline at end of file diff --git a/common/include/camera/ILinearProjection.hpp b/common/include/camera/ILinearProjection.hpp new file mode 100644 index 000000000..fc0115610 --- /dev/null +++ b/common/include/camera/ILinearProjection.hpp @@ -0,0 +1,180 @@ +#ifndef _NBL_I_LINEAR_PROJECTION_HPP_ +#define _NBL_I_LINEAR_PROJECTION_HPP_ + +#include "IProjection.hpp" +#include "ICamera.hpp" + +namespace nbl::hlsl +{ + +/** + * @brief Interface class for any custom linear projection transformation (matrix elements are already evaluated scalars) + * referencing a camera, great for Perspective, Orthographic, Oblique, Axonometric and Shear projections + */ +class ILinearProjection : virtual public core::IReferenceCounted +{ +protected: + ILinearProjection(core::smart_refctd_ptr&& camera) + : m_camera(core::smart_refctd_ptr(camera)) {} + virtual ~ILinearProjection() = default; + + core::smart_refctd_ptr m_camera; +public: + //! underlying type for linear world TRS matrix + using model_matrix_t = typename decltype(m_camera)::pointee::CGimbal::model_matrix_t; + + //! underlying type for linear concatenated matrix + using concatenated_matrix_t = float64_t4x4; + + //! underlying type for linear inverse of concatenated matrix + using inv_concatenated_matrix_t = std::optional; + + struct CProjection : public IProjection + { + using IProjection::IProjection; + using projection_matrix_t = concatenated_matrix_t; + using inv_projection_matrix_t = inv_concatenated_matrix_t; + + CProjection() : CProjection(projection_matrix_t(1)) {} + CProjection(const projection_matrix_t& matrix) { setProjectionMatrix(matrix); } + + //! Returns P (Projection matrix) + inline const projection_matrix_t& getProjectionMatrix() const { return m_projectionMatrix; } + + //! 
Returns P⁻¹ (Inverse of Projection matrix) *if it exists* + inline const inv_projection_matrix_t& getInvProjectionMatrix() const { return m_invProjectionMatrix; } + + inline const std::optional& isProjectionLeftHanded() const { return m_isProjectionLeftHanded; } + inline bool isProjectionSingular() const { return m_isProjectionSingular; } + virtual ProjectionType getProjectionType() const override { return ProjectionType::Linear; } + + virtual void project(const projection_vector_t& vecToProjectionSpace, projection_vector_t& output) const override + { + output = mul(m_projectionMatrix, vecToProjectionSpace); + } + + virtual bool unproject(const projection_vector_t& vecFromProjectionSpace, projection_vector_t& output) const override + { + if (m_isProjectionSingular) + return false; + + output = mul(m_invProjectionMatrix.value(), vecFromProjectionSpace); + + return true; + } + + protected: + inline void setProjectionMatrix(const projection_matrix_t& matrix) + { + m_projectionMatrix = matrix; + const auto det = hlsl::determinant(m_projectionMatrix); + + // we will allow you to lose a dimension since such a projection itself *may* + // be valid, however then you cannot un-project because the inverse doesn't exist! + m_isProjectionSingular = not det; + + if (m_isProjectionSingular) + { + m_isProjectionLeftHanded = std::nullopt; + m_invProjectionMatrix = std::nullopt; + } + else + { + m_isProjectionLeftHanded = det < 0.0; + m_invProjectionMatrix = inverse(m_projectionMatrix); + } + } + + private: + projection_matrix_t m_projectionMatrix; + inv_projection_matrix_t m_invProjectionMatrix; + std::optional m_isProjectionLeftHanded; + bool m_isProjectionSingular; + }; + + virtual std::span getLinearProjections() const = 0; + + inline bool setCamera(core::smart_refctd_ptr&& camera) + { + if (camera) + { + m_camera = camera; + return true; + } + + return false; + } + + inline ICamera* getCamera() + { + return m_camera.get(); + } + + /** + * @brief Computes Model View (MV) matrix + * @param "model" is world TRS matrix + * @return Returns MV matrix + */ + inline concatenated_matrix_t getMV(const model_matrix_t& model) const + { + const auto& v = m_camera->getGimbal().getViewMatrix(); + return mul(getMatrix3x4As4x4(v), getMatrix3x4As4x4(model)); + } + + /** + * @brief Computes Model View Projection (MVP) matrix + * @param "projection" is linear projection + * @param "model" is world TRS matrix + * @return Returns MVP matrix + */ + inline concatenated_matrix_t getMVP(const CProjection& projection, const model_matrix_t& model) const + { + const auto& v = m_camera->getGimbal().getViewMatrix(); + const auto& p = projection.getProjectionMatrix(); + auto mv = mul(getMatrix3x4As4x4(v), getMatrix3x4As4x4(model)); + return mul(p, mv); + } + + /** + * @brief Computes Model View Projection (MVP) matrix + * @param "projection" is linear projection + * @param "mv" is Model View (MV) matrix + * @return Returns MVP matrix + */ + inline concatenated_matrix_t getMVP(const CProjection& projection, const concatenated_matrix_t& mv) const + { + const auto& p = projection.getProjectionMatrix(); + return mul(p, mv); + } + + /** + * @brief Computes Inverse of Model View ((MV)⁻¹) matrix + * @param "mv" is Model View (MV) matrix + * @return Returns ((MV)⁻¹) matrix *if it exists*, otherwise returns std::nullopt + */ + inline inv_concatenated_matrix_t getMVInverse(const model_matrix_t& model) const + { + const auto mv = getMV(model); + if (auto det = determinant(mv); det) + return inverse(mv); + return std::nullopt; + } + + /** + * 
@brief Computes Inverse of Model View Projection ((MVP)⁻¹) matrix + * @param "projection" is linear projection + * @param "model" is world TRS matrix + * @return Returns ((MVP)⁻¹) matrix *if it exists*, otherwise returns std::nullopt + */ + inline inv_concatenated_matrix_t getMVPInverse(const CProjection& projection, const model_matrix_t& model) const + { + const auto mvp = getMVP(projection, model); + if (auto det = determinant(mvp); det) + return inverse(mvp); + return std::nullopt; + } +}; + +} // nbl::hlsl namespace + +#endif // _NBL_I_LINEAR_PROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/IPerspectiveProjection.hpp b/common/include/camera/IPerspectiveProjection.hpp new file mode 100644 index 000000000..479db170e --- /dev/null +++ b/common/include/camera/IPerspectiveProjection.hpp @@ -0,0 +1,62 @@ +#ifndef _NBL_I_QUAD_PROJECTION_HPP_ +#define _NBL_I_QUAD_PROJECTION_HPP_ + +#include "ILinearProjection.hpp" + +namespace nbl::hlsl +{ + +/** +* @brief Interface class for quad projections. +* +* This projection transforms a vector into the **model space of a perspective quad** +* (defined by the pre-transform matrix) and then projects it onto the perspective quad +* using the linear view-port transform. +* +* A perspective quad projection is represented by: +* - A **pre-transform matrix** (non-linear/skewed transformation). +* - A **linear view-port transform matrix**. +* +* The final projection matrix is the concatenation of the pre-transform and the linear view-port transform. +* +* @note Single perspective quad projection can represent a face quad of a CAVE-like system. +*/ +class IPerspectiveProjection : public ILinearProjection +{ +public: + struct CProjection : ILinearProjection::CProjection + { + using base_t = ILinearProjection::CProjection; + + CProjection() = default; + CProjection(const ILinearProjection::model_matrix_t& pretransform, ILinearProjection::concatenated_matrix_t viewport) + { + setQuadTransform(pretransform, viewport); + } + + inline void setQuadTransform(const ILinearProjection::model_matrix_t& pretransform, ILinearProjection::concatenated_matrix_t viewport) + { + auto concatenated = mul(getMatrix3x4As4x4(pretransform), viewport); + base_t::setProjectionMatrix(concatenated); + + m_pretransform = pretransform; + m_viewport = viewport; + } + + inline const ILinearProjection::model_matrix_t& getPretransform() const { return m_pretransform; } + inline const ILinearProjection::concatenated_matrix_t& getViewportProjection() const { return m_viewport; } + + private: + ILinearProjection::model_matrix_t m_pretransform = ILinearProjection::model_matrix_t(1); + ILinearProjection::concatenated_matrix_t m_viewport = ILinearProjection::concatenated_matrix_t(1); + }; + +protected: + IPerspectiveProjection(core::smart_refctd_ptr&& camera) + : ILinearProjection(core::smart_refctd_ptr(camera)) {} + virtual ~IPerspectiveProjection() = default; +}; + +} // nbl::hlsl namespace + +#endif // _NBL_I_QUAD_PROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/IPlanarProjection.hpp b/common/include/camera/IPlanarProjection.hpp new file mode 100644 index 000000000..1fccf0ef4 --- /dev/null +++ b/common/include/camera/IPlanarProjection.hpp @@ -0,0 +1,120 @@ +#ifndef _NBL_I_PLANAR_PROJECTION_HPP_ +#define _NBL_I_PLANAR_PROJECTION_HPP_ + +#include "ILinearProjection.hpp" + +namespace nbl::hlsl +{ + +class IPlanarProjection : public ILinearProjection +{ +public: + struct CProjection : public ILinearProjection::CProjection, public 
IGimbalController + { + using base_t = ILinearProjection::CProjection; + + enum ProjectionType : uint8_t + { + Perspective, + Orthographic, + + Count + }; + + template + static CProjection create(Args&&... args) + requires (T != Count) + { + CProjection output; + + if constexpr (T == Perspective) output.setPerspective(std::forward(args)...); + else if (T == Orthographic) output.setOrthographic(std::forward(args)...); + + return output; + } + + CProjection(const CProjection& other) = default; + CProjection(CProjection&& other) noexcept = default; + + struct ProjectionParameters + { + ProjectionType m_type; + + union PlanarParameters + { + struct + { + float fov; + } perspective; + + struct + { + float orthoWidth; + } orthographic; + + PlanarParameters() {} + ~PlanarParameters() {} + } m_planar; + + float m_zNear; + float m_zFar; + }; + + inline void update(bool leftHanded, float aspectRatio) + { + switch (m_parameters.m_type) + { + case Perspective: + { + const auto& fov = m_parameters.m_planar.perspective.fov; + + if (leftHanded) + base_t::setProjectionMatrix(buildProjectionMatrixPerspectiveFovLH(glm::radians(fov), aspectRatio, m_parameters.m_zNear, m_parameters.m_zFar)); + else + base_t::setProjectionMatrix(buildProjectionMatrixPerspectiveFovRH(glm::radians(fov), aspectRatio, m_parameters.m_zNear, m_parameters.m_zFar)); + } break; + + case Orthographic: + { + const auto& orthoW = m_parameters.m_planar.orthographic.orthoWidth; + const auto viewHeight = orthoW * core::reciprocal(aspectRatio); + + if (leftHanded) + base_t::setProjectionMatrix(buildProjectionMatrixOrthoLH(orthoW, viewHeight, m_parameters.m_zNear, m_parameters.m_zFar)); + else + base_t::setProjectionMatrix(buildProjectionMatrixOrthoRH(orthoW, viewHeight, m_parameters.m_zNear, m_parameters.m_zFar)); + } break; + } + } + + inline void setPerspective(float zNear = 0.1f, float zFar = 100.f, float fov = 60.f) + { + m_parameters.m_type = Perspective; + m_parameters.m_planar.perspective.fov = fov; + m_parameters.m_zNear = zNear; + m_parameters.m_zFar = zFar; + } + + inline void setOrthographic(float zNear = 0.1f, float zFar = 100.f, float orthoWidth = 10.f) + { + m_parameters.m_type = Orthographic; + m_parameters.m_planar.orthographic.orthoWidth = orthoWidth; + m_parameters.m_zNear = zNear; + m_parameters.m_zFar = zFar; + } + + inline const ProjectionParameters& getParameters() const { return m_parameters; } + private: + CProjection() = default; + ProjectionParameters m_parameters; + }; + +protected: + IPlanarProjection(core::smart_refctd_ptr&& camera) + : ILinearProjection(core::smart_refctd_ptr(camera)) {} + virtual ~IPlanarProjection() = default; +}; + +} // nbl::hlsl namespace + +#endif // _NBL_I_PLANAR_PROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/IProjection.hpp b/common/include/camera/IProjection.hpp new file mode 100644 index 000000000..cb6facdcc --- /dev/null +++ b/common/include/camera/IProjection.hpp @@ -0,0 +1,70 @@ +#ifndef _NBL_I_PROJECTION_HPP_ +#define _NBL_I_PROJECTION_HPP_ + +#include + +namespace nbl::hlsl +{ + +//! Interface class for any type of projection +class IProjection +{ +public: + //! underlying type for all vectors we project or un-project (inverse), projections *may* transform vectors in less dimensions + using projection_vector_t = float64_t4; + + enum class ProjectionType + { + //! Any raw linear transformation, for example it may represent Perspective, Orthographic, Oblique, Axonometric, Shear projections + Linear, + + //! 
Specialized linear projection for planar projections with parameters + Planar, + + //! Extension of planar projection represented by pre-transform & planar transform combined projecting onto R3 cave quad + CaveQuad, + + //! Specialized CaveQuad projection, represents planar projections onto cube with 6 quad cube faces + Cube, + + Spherical, + ThinLens, + + Count + }; + + IProjection() = default; + virtual ~IProjection() = default; + + /** + * @brief Transforms a vector from its input space into the projection space. + * + * @param "vecToProjectionSpace" is a vector to transform from its space into projection space. + * @param "output" is a vector which is "vecToProjectionSpace" transformed into projection space. + * @return void. "output" is the vector in projection space. + */ + virtual void project(const projection_vector_t& vecToProjectionSpace, projection_vector_t& output) const = 0; + + /** + * @brief Transforms a vector from the projection space back to the original space. + * Note the inverse transform may fail because original projection may be singular. + * + * @param "vecFromProjectionSpace" is a vector in the projection space to transform back to original space. + * @param "output" is a vector which is "vecFromProjectionSpace" transformed back to its original space. + * @return true if inverse succeeded and then "output" is the vector in the original space. False otherwise. + */ + virtual bool unproject(const projection_vector_t& vecFromProjectionSpace, projection_vector_t& output) const = 0; + + /** + * @brief Returns the specific type of the projection + * (e.g., linear, spherical, thin-lens) as defined by the + * ProjectionType enumeration. + * + * @return The type of this projection. + */ + virtual ProjectionType getProjectionType() const = 0; +}; + +} // namespace nbl::hlsl + +#endif // _NBL_IPROJECTION_HPP_ \ No newline at end of file diff --git a/common/include/camera/IRange.hpp b/common/include/camera/IRange.hpp new file mode 100644 index 000000000..a6ed29270 --- /dev/null +++ b/common/include/camera/IRange.hpp @@ -0,0 +1,20 @@ +#ifndef _NBL_IRANGE_HPP_ +#define _NBL_IRANGE_HPP_ + +namespace nbl::hlsl +{ + +template +concept GeneralPurposeRange = requires +{ + typename std::ranges::range_value_t; +}; + +template +concept ContiguousGeneralPurposeRangeOf = GeneralPurposeRange && +std::ranges::contiguous_range && +std::same_as, T>; + +} // namespace nbl::hlsl + +#endif // _NBL_IRANGE_HPP_ \ No newline at end of file diff --git a/common/src/geometry/creator/CMakeLists.txt b/common/src/geometry/creator/CMakeLists.txt index 336d32fe5..f0e824ebe 100644 --- a/common/src/geometry/creator/CMakeLists.txt +++ b/common/src/geometry/creator/CMakeLists.txt @@ -12,10 +12,6 @@ set(NBL_THIS_EXAMPLE_INPUT_SHADERS "${NBL_THIS_EXAMPLE_INPUT_SHADERS_DIRECTORY}/gc.basic.vertex.hlsl" "${NBL_THIS_EXAMPLE_INPUT_SHADERS_DIRECTORY}/gc.cone.vertex.hlsl" "${NBL_THIS_EXAMPLE_INPUT_SHADERS_DIRECTORY}/gc.ico.vertex.hlsl" - - # grid - "${NBL_THIS_EXAMPLE_INPUT_SHADERS_DIRECTORY}/grid.vertex.hlsl" - "${NBL_THIS_EXAMPLE_INPUT_SHADERS_DIRECTORY}/grid.fragment.hlsl" ) file(GLOB_RECURSE NBL_THIS_EXAMPLE_INPUT_COMMONS CONFIGURE_DEPENDS "${NBL_THIS_EXAMPLE_INPUT_SHADERS_DIRECTORY}/template/*.hlsl") diff --git a/common/src/geometry/creator/shaders/gc.basic.fragment.hlsl b/common/src/geometry/creator/shaders/gc.basic.fragment.hlsl index 3dc9b9f1d..fd5cd4d34 100644 --- a/common/src/geometry/creator/shaders/gc.basic.fragment.hlsl +++ b/common/src/geometry/creator/shaders/gc.basic.fragment.hlsl @@ -3,4 +3,4 @@ 
float4 PSMain(PSInput input) : SV_Target0 { return input.color; -} \ No newline at end of file +} diff --git a/common/src/geometry/creator/shaders/gc.basic.vertex.hlsl b/common/src/geometry/creator/shaders/gc.basic.vertex.hlsl index 1afd468d9..4ca66e4ab 100644 --- a/common/src/geometry/creator/shaders/gc.basic.vertex.hlsl +++ b/common/src/geometry/creator/shaders/gc.basic.vertex.hlsl @@ -1,6 +1,9 @@ -#include "template/gc.basic.vertex.input.hlsl" -#include "template/gc.vertex.hlsl" +struct VSInput +{ + [[vk::location(0)]] float3 position : POSITION; + [[vk::location(1)]] float4 color : COLOR; + [[vk::location(2)]] float2 uv : TEXCOORD; + [[vk::location(3)]] float3 normal : NORMAL; +}; -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ +#include "template/gc.vertex.hlsl" diff --git a/common/src/geometry/creator/shaders/gc.cone.vertex.hlsl b/common/src/geometry/creator/shaders/gc.cone.vertex.hlsl index ee0c42431..0552842a5 100644 --- a/common/src/geometry/creator/shaders/gc.cone.vertex.hlsl +++ b/common/src/geometry/creator/shaders/gc.cone.vertex.hlsl @@ -1,6 +1,8 @@ -#include "template/gc.cone.vertex.input.hlsl" -#include "template/gc.vertex.hlsl" +struct VSInput +{ + [[vk::location(0)]] float3 position : POSITION; + [[vk::location(1)]] float4 color : COLOR; + [[vk::location(2)]] float3 normal : NORMAL; +}; -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ +#include "template/gc.vertex.hlsl" diff --git a/common/src/geometry/creator/shaders/gc.ico.vertex.hlsl b/common/src/geometry/creator/shaders/gc.ico.vertex.hlsl index d63fdc809..917bd2677 100644 --- a/common/src/geometry/creator/shaders/gc.ico.vertex.hlsl +++ b/common/src/geometry/creator/shaders/gc.ico.vertex.hlsl @@ -1,6 +1,8 @@ -#include "template/gc.ico.vertex.input.hlsl" -#include "template/gc.vertex.hlsl" +struct VSInput +{ + [[vk::location(0)]] float3 position : POSITION; + [[vk::location(1)]] float3 normal : NORMAL; + [[vk::location(2)]] float2 uv : TEXCOORD; +}; -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ +#include "template/gc.vertex.hlsl" diff --git a/common/src/geometry/creator/shaders/grid.fragment.hlsl b/common/src/geometry/creator/shaders/grid.fragment.hlsl deleted file mode 100644 index 4b4c1e691..000000000 --- a/common/src/geometry/creator/shaders/grid.fragment.hlsl +++ /dev/null @@ -1,12 +0,0 @@ -#include "template/grid.common.hlsl" - -float4 PSMain(PSInput input) : SV_Target0 -{ - float2 uv = (input.uv - float2(0.5, 0.5)) + 0.5 / 30.0; - float grid = gridTextureGradBox(uv, ddx(input.uv), ddy(input.uv)); - float4 fragColor = float4(1.0 - grid, 1.0 - grid, 1.0 - grid, 1.0); - fragColor *= 0.25; - fragColor *= 0.3 + 0.6 * smoothstep(0.0, 0.1, 1.0 - length(input.uv) / 5.5); - - return fragColor; -} \ No newline at end of file diff --git a/common/src/geometry/creator/shaders/grid.vertex.hlsl b/common/src/geometry/creator/shaders/grid.vertex.hlsl deleted file mode 100644 index 167b981d3..000000000 --- a/common/src/geometry/creator/shaders/grid.vertex.hlsl +++ /dev/null @@ -1,17 +0,0 @@ -#include "template/grid.common.hlsl" - -// set 1, binding 0 -[[vk::binding(0, 1)]] -cbuffer CameraData -{ - SBasicViewParameters params; -}; - -PSInput VSMain(VSInput input) -{ - PSInput output; - output.position = mul(params.MVP, float4(input.position, 1.0)); - output.uv = (input.uv - float2(0.5, 0.5)) * abs(input.position.xy); - - return output; -} \ No newline at end of file diff --git 
a/common/src/geometry/creator/shaders/template/gc.basic.vertex.input.hlsl b/common/src/geometry/creator/shaders/template/gc.basic.vertex.input.hlsl deleted file mode 100644 index d9e2fa172..000000000 --- a/common/src/geometry/creator/shaders/template/gc.basic.vertex.input.hlsl +++ /dev/null @@ -1,16 +0,0 @@ -#ifndef _THIS_EXAMPLE_GC_BASIC_VERTEX_INPUT_HLSL_ -#define _THIS_EXAMPLE_GC_BASIC_VERTEX_INPUT_HLSL_ - -struct VSInput -{ - [[vk::location(0)]] float3 position : POSITION; - [[vk::location(1)]] float4 color : COLOR; - [[vk::location(2)]] float2 uv : TEXCOORD; - [[vk::location(3)]] float3 normal : NORMAL; -}; - -#endif // _THIS_EXAMPLE_GC_BASIC_VERTEX_INPUT_HLSL_ - -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ diff --git a/common/src/geometry/creator/shaders/template/gc.common.hlsl b/common/src/geometry/creator/shaders/template/gc.common.hlsl index 4590cd4a3..d0f9aa116 100644 --- a/common/src/geometry/creator/shaders/template/gc.common.hlsl +++ b/common/src/geometry/creator/shaders/template/gc.common.hlsl @@ -12,7 +12,3 @@ #include "SBasicViewParameters.hlsl" #endif // _THIS_EXAMPLE_GC_COMMON_HLSL_ - -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ \ No newline at end of file diff --git a/common/src/geometry/creator/shaders/template/gc.cone.vertex.input.hlsl b/common/src/geometry/creator/shaders/template/gc.cone.vertex.input.hlsl deleted file mode 100644 index 66221fef1..000000000 --- a/common/src/geometry/creator/shaders/template/gc.cone.vertex.input.hlsl +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _THIS_EXAMPLE_GC_CONE_VERTEX_INPUT_HLSL_ -#define _THIS_EXAMPLE_GC_CONE_VERTEX_INPUT_HLSL_ - -struct VSInput -{ - [[vk::location(0)]] float3 position : POSITION; - [[vk::location(1)]] float4 color : COLOR; - [[vk::location(2)]] float3 normal : NORMAL; -}; - -#endif // _THIS_EXAMPLE_GC_CONE_VERTEX_INPUT_HLSL_ - -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ diff --git a/common/src/geometry/creator/shaders/template/gc.ico.vertex.input.hlsl b/common/src/geometry/creator/shaders/template/gc.ico.vertex.input.hlsl deleted file mode 100644 index 6b85486d9..000000000 --- a/common/src/geometry/creator/shaders/template/gc.ico.vertex.input.hlsl +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef _THIS_EXAMPLE_GC_ICO_VERTEX_INPUT_HLSL_ -#define _THIS_EXAMPLE_GC_ICO_VERTEX_INPUT_HLSL_ - -struct VSInput -{ - [[vk::location(0)]] float3 position : POSITION; - [[vk::location(1)]] float3 normal : NORMAL; - [[vk::location(2)]] float2 uv : TEXCOORD; -}; - -#endif // _THIS_EXAMPLE_GC_ICO_VERTEX_INPUT_HLSL_ - -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ diff --git a/common/src/geometry/creator/shaders/template/gc.vertex.hlsl b/common/src/geometry/creator/shaders/template/gc.vertex.hlsl index 5a8f26722..783421613 100644 --- a/common/src/geometry/creator/shaders/template/gc.vertex.hlsl +++ b/common/src/geometry/creator/shaders/template/gc.vertex.hlsl @@ -16,7 +16,3 @@ PSInput VSMain(VSInput input) return output; } - -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ diff --git a/common/src/geometry/creator/shaders/template/grid.common.hlsl b/common/src/geometry/creator/shaders/template/grid.common.hlsl deleted file mode 100644 index bc6516600..000000000 --- a/common/src/geometry/creator/shaders/template/grid.common.hlsl +++ /dev/null @@ -1,40 +0,0 @@ -#ifndef 
_THIS_EXAMPLE_GRID_COMMON_HLSL_ -#define _THIS_EXAMPLE_GRID_COMMON_HLSL_ - -#ifdef __HLSL_VERSION - struct VSInput - { - [[vk::location(0)]] float3 position : POSITION; - [[vk::location(1)]] float4 color : COLOR; - [[vk::location(2)]] float2 uv : TEXCOORD; - [[vk::location(3)]] float3 normal : NORMAL; - }; - - struct PSInput - { - float4 position : SV_Position; - float2 uv : TEXCOORD0; - }; - - float gridTextureGradBox(float2 p, float2 ddx, float2 ddy) - { - float N = 30.0; // grid ratio - float2 w = max(abs(ddx), abs(ddy)) + 0.01; // filter kernel - - // analytic (box) filtering - float2 a = p + 0.5 * w; - float2 b = p - 0.5 * w; - float2 i = (floor(a) + min(frac(a) * N, 1.0) - floor(b) - min(frac(b) * N, 1.0)) / (N * w); - - // pattern - return (1.0 - i.x) * (1.0 - i.y); - } -#endif // __HLSL_VERSION - -#include "SBasicViewParameters.hlsl" - -#endif // _THIS_EXAMPLE_GRID_COMMON_HLSL_ - -/* - do not remove this text, WAVE is so bad that you can get errors if no proper ending xD -*/ \ No newline at end of file
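
For readers skimming the controller API introduced above, the following is a minimal, hedged usage sketch of the two-pass count/fill pattern documented on IGimbalController::process(). It is not part of the PR; the include path and the exact element types of the SUpdateParameters spans are assumptions based on the surrounding headers.

#include <chrono>
#include <cstdint>
#include <span>
#include <vector>

#include "camera/IGimbalController.hpp" // header added by this PR (include path assumed)

// Collects this frame's virtual gimbal events from a controller.
std::vector<nbl::hlsl::IGimbalController::gimbal_event_t> gatherVirtualEvents(
    nbl::hlsl::IGimbalController& controller,
    std::span<const nbl::ui::SKeyboardEvent> keyboardEvents,
    std::span<const nbl::ui::SMouseEvent> mouseEvents,
    std::chrono::microseconds nextPresentationTimeStamp)
{
    using controller_t = nbl::hlsl::IGimbalController;

    controller.beginInputProcessing(nextPresentationTimeStamp);

    // pass 1: with a null output pointer, process() only reports how many
    // virtual events the caller must guarantee storage for
    uint32_t count = 0u;
    controller.process(nullptr, count);

    // pass 2: hand over storage for "count" events together with the raw inputs
    std::vector<controller_t::gimbal_event_t> virtualEvents(count);
    controller_t::SUpdateParameters parameters;
    parameters.keyboardEvents = keyboardEvents; // member span element types assumed to be const ui::S*Event
    parameters.mouseEvents = mouseEvents;
    controller.process(virtualEvents.data(), count, parameters);
    virtualEvents.resize(count); // only the first "count" entries were written

    controller.endInputProcessing();

    // the result can then drive a gimbal manipulation, accumulated between
    // IGimbal::begin() and IGimbal::end()
    return virtualEvents;
}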