diff --git a/attachments/simple_engine/CMakeLists.txt b/attachments/simple_engine/CMakeLists.txt
index 4489b87e..162c6d7c 100644
--- a/attachments/simple_engine/CMakeLists.txt
+++ b/attachments/simple_engine/CMakeLists.txt
@@ -8,41 +8,21 @@ project(SimpleEngine VERSION 1.0.0 LANGUAGES CXX C)
 # Add CMake module path for custom find modules
 list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_LIST_DIR}/../CMake")
 
+if(ANDROID)
+    include(FetchContent)
+    FetchContent_Declare(
+        VulkanHeaders
+        GIT_REPOSITORY https://github.com/KhronosGroup/Vulkan-Headers.git
+        GIT_TAG v1.3.275 # Or a specific tag/commit; it must match the Vulkan version shipped with your NDK.
+    )
+    FetchContent_MakeAvailable(VulkanHeaders)
+endif()
+
 # Find required packages
-find_package (glfw3 REQUIRED)
 find_package (glm REQUIRED)
 find_package (Vulkan REQUIRED)
 find_package (tinygltf REQUIRED)
 find_package (KTX REQUIRED)
-find_package (OpenAL REQUIRED)
-
-# set up Vulkan C++ module
-add_library(VulkanCppModule)
-add_library(Vulkan::cppm ALIAS VulkanCppModule)
-
-target_compile_definitions(VulkanCppModule
-    PUBLIC VULKAN_HPP_DISPATCH_LOADER_DYNAMIC=1 VULKAN_HPP_NO_STRUCT_CONSTRUCTORS=1
-)
-target_include_directories(VulkanCppModule
-    PRIVATE
-    "${Vulkan_INCLUDE_DIR}"
-)
-target_link_libraries(VulkanCppModule
-    PUBLIC
-    Vulkan::Vulkan
-)
-
-set_target_properties(VulkanCppModule PROPERTIES CXX_STANDARD 20)
-
-target_sources(VulkanCppModule
-    PUBLIC
-    FILE_SET cxx_modules TYPE CXX_MODULES
-    BASE_DIRS
-    "${Vulkan_INCLUDE_DIR}"
-    FILES
-    "${Vulkan_INCLUDE_DIR}/vulkan/vulkan.cppm"
-)
-
 # Platform-specific settings
@@ -52,6 +32,8 @@ if(ANDROID)
 else()
     # Desktop-specific settings
     add_definitions(-DPLATFORM_DESKTOP)
+    find_package (glfw3 REQUIRED)
+    find_package (OpenAL REQUIRED)
 endif()
 
 # Shader compilation
@@ -117,22 +99,42 @@ set(SOURCES
     mikktspace.c
 )
 
-# Create executable
-add_executable(SimpleEngine ${SOURCES})
+# Create an executable or a library, depending on the platform
+if(ANDROID)
+    add_library(SimpleEngine STATIC ${SOURCES})
+else()
+    add_executable(SimpleEngine ${SOURCES})
+endif()
+
 add_dependencies(SimpleEngine shaders)
 set_target_properties (SimpleEngine PROPERTIES CXX_STANDARD 20)
 
 # Link libraries
-target_link_libraries(SimpleEngine PRIVATE
-    Vulkan::cppm
+target_link_libraries(SimpleEngine PUBLIC
+    Vulkan::Vulkan
+    Vulkan::Headers
     glm::glm
     tinygltf::tinygltf
     KTX::ktx
-    OpenAL::OpenAL
 )
 
-if(NOT ANDROID)
-    target_link_libraries(SimpleEngine PRIVATE glfw)
+if(ANDROID)
+    target_link_libraries(SimpleEngine PUBLIC
+        android
+        log
+        EGL
+        GLESv2
+        game-activity::game-activity
+        OpenSLES
+    )
+    target_include_directories(SimpleEngine PUBLIC
+        ${VulkanHeaders_SOURCE_DIR}/include
+        ${ANDROID_NDK}/sources/android/native_app_glue
+    )
+    target_compile_definitions(SimpleEngine PRIVATE VULKAN_HPP_NO_STRUCT_CONSTRUCTORS)
+else()
+    target_link_libraries(SimpleEngine PRIVATE glfw OpenAL::OpenAL)
+    target_compile_definitions(SimpleEngine PRIVATE VULKAN_HPP_NO_STRUCT_CONSTRUCTORS VULKAN_HPP_DISPATCH_LOADER_DYNAMIC)
+endif()
 
 # Copy model and texture files if they exist
diff --git a/attachments/simple_engine/android/README.adoc b/attachments/simple_engine/android/README.adoc
new file mode 100644
index 00000000..b9faed7f
--- /dev/null
+++ b/attachments/simple_engine/android/README.adoc
@@ -0,0 +1,51 @@
+= Android Project for Vulkan Game Engine Tutorial
+
+This Android project lets you run the game engine's example code on Android. It demonstrates how to manage a non-trivial project across desktop and mobile environments from a single code base.
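+
+The same engine sources build for both targets: CMake defines `PLATFORM_ANDROID` or `PLATFORM_DESKTOP`, and the shared C++ code branches on those macros wherever the platforms differ. A minimal sketch of the pattern, mirroring what `AudioSystem::Initialize` in `audio_system.cpp` does:
+
+[source,cpp]
+----
+// Select an audio backend at compile time via the platform macros.
+#if defined(PLATFORM_ANDROID)
+    outputDevice = std::make_unique<OpenSLESAudioOutputDevice>(); // OpenSL ES on Android
+#else
+    outputDevice = std::make_unique<OpenALAudioOutputDevice>();   // OpenAL on desktop
+#endif
+----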
+
+== Project Overview
+
+The Vulkan Game Engine Tutorial is a comprehensive learning project that showcases modern graphics programming using the Vulkan API.
+This Android port enables running the same engine code on mobile devices, demonstrating cross-platform development practices.
+
+== Prerequisites
+
+* Android Studio with Android Gradle Plugin 8.13 support
+* Android NDK r28 (the project pins 28.1.13356709)
+* CMake 3.22.1 or newer
+* Vulkan SDK
+* Android device with Vulkan support (Android 7.0+, API level 24)
+
+== Building and Running
+
+1. Open the project in Android Studio
+2. Sync Gradle files
+3. Build the project
+4. Run on your Android device or emulator
+
+== Project Structure
+
+* `app/` - Android-specific code and resources
+* `src/` - Shared C++ engine code
+* `assets/` - Shared game assets and shaders (automatically copied by Gradle)
+** `models/` - GLTF/GLB model files
+** `shaders/` - Compiled SPIR-V shader files
+** `textures/` - Texture assets
+* `CMake/` - Build configuration files
+
+== Asset Management
+
+The project uses Gradle to automatically handle asset deployment.
+Place your assets in the following source locations:
+
+* Source assets location: `/assets/`
+* Gradle will automatically copy assets to: `app/src/main/assets/`
+* Asset changes will be synchronized during build
+
+== Key Components
+
+* GLTF model loading support
+* Cross-platform rendering pipeline
+* JSON configuration using nlohmann_json
+* Unified asset management system with Gradle automation
+
+The project demonstrates professional-grade techniques for maintaining a single codebase that targets both desktop and mobile platforms while leveraging modern C++20 features and Vulkan's cross-platform capabilities.
diff --git a/attachments/simple_engine/android/app/build.gradle b/attachments/simple_engine/android/app/build.gradle
new file mode 100644
index 00000000..178b4eda
--- /dev/null
+++ b/attachments/simple_engine/android/app/build.gradle
@@ -0,0 +1,64 @@
+plugins {
+    id 'com.android.application'
+}
+
+android {
+    namespace "com.simple_engine"
+    compileSdk 36
+    defaultConfig {
+        applicationId "com.simple_engine"
+        minSdk 24
+        targetSdk 36
+        versionCode 1
+        versionName "1.0"
+
+        externalNativeBuild {
+            cmake {
+                abiFilters 'arm64-v8a', 'x86_64'
+            }
+        }
+    }
+
+    buildTypes {
+        release {
+            minifyEnabled false
+            proguardFiles getDefaultProguardFile('proguard-android-optimize.txt'), 'proguard-rules.pro'
+        }
+    }
+
+    compileOptions {
+        sourceCompatibility JavaVersion.VERSION_11
+        targetCompatibility JavaVersion.VERSION_11
+    }
+
+    externalNativeBuild {
+        cmake {
+            path "src/main/cpp/CMakeLists.txt"
+            version "4.0.2+"
+        }
+    }
+
+    ndkVersion "28.1.13356709"
+
+    // Use assets from the dedicated assets directory and locally compiled shaders
+    sourceSets {
+        main {
+            assets {
+                srcDirs = [
+                    // Point to the dedicated assets directory
+                    '../../Assets/'
+                ]
+            }
+        }
+    }
+    buildFeatures {
+        prefab true
+        buildConfig true
+    }
+}
+
+dependencies {
+    implementation 'androidx.appcompat:appcompat:1.7.1'
+    implementation 'com.google.android.material:material:1.12.0'
+    implementation 'androidx.games:games-activity:4.0.0'
+}
diff --git a/attachments/simple_engine/android/app/src/main/AndroidManifest.xml b/attachments/simple_engine/android/app/src/main/AndroidManifest.xml
new file mode 100644
index 00000000..f33ed90e
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/AndroidManifest.xml
@@ -0,0 +1,29 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/attachments/simple_engine/android/app/src/main/cpp/CMakeLists.txt b/attachments/simple_engine/android/app/src/main/cpp/CMakeLists.txt
new file mode 100644
index 00000000..665408af
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/cpp/CMakeLists.txt
@@ -0,0 +1,29 @@
+cmake_minimum_required(VERSION 3.22.1)
+
+project(simple_engine_android)
+
+# Add the parent project's CMake folder (attachments/CMake) to the module path
+list(APPEND CMAKE_MODULE_PATH "${CMAKE_CURRENT_SOURCE_DIR}/../../../../../../../attachments/CMake")
+
+# Include the game-activity library
+find_package(game-activity REQUIRED CONFIG)
+
+# Set C++ standard to match the main project
+set(CMAKE_CXX_STANDARD 20)
+set(CMAKE_CXX_STANDARD_REQUIRED ON)
+
+# Add the simple_engine project as a subdirectory
+add_subdirectory("${CMAKE_CURRENT_SOURCE_DIR}/../../../../.." simple_engine_build)
+
+# Add the main native library
+add_library(simple_engine_android SHARED
+    game_activity_bridge.cpp
+)
+
+# Link against libraries
+target_link_libraries(simple_engine_android
+    SimpleEngine
+    game-activity::game-activity
+    android
+    log
+)
diff --git a/attachments/simple_engine/android/app/src/main/cpp/game_activity_bridge.cpp b/attachments/simple_engine/android/app/src/main/cpp/game_activity_bridge.cpp
new file mode 100644
index 00000000..a9d3db64
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/cpp/game_activity_bridge.cpp
@@ -0,0 +1,13 @@
+// Intentionally empty bridge: rely entirely on GameActivity's native_app_glue
+// provided by the prefab (libgame-activity). That glue will invoke our
+// android_main(android_app*) defined in main.cpp. Defining another
+// GameActivity_onCreate here would cause duplicate-symbol linker errors.
+// Keeping a translation unit here avoids having to remove the target from CMake.
+
+#include <android/log.h>
+
+#define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "SimpleEngine", __VA_ARGS__))
+#define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "SimpleEngine", __VA_ARGS__))
+#define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "SimpleEngine", __VA_ARGS__))
+
+// Nothing to do here.
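+
+// For reference, the android_main() that the glue invokes follows the standard
+// native_app_glue pattern sketched below. The engine calls are illustrative
+// placeholders, not the actual API of main.cpp:
+//
+//   void android_main(android_app* app) {
+//       app->onAppCmd = HandleAppCommand; // window created/destroyed, focus, etc.
+//       while (!app->destroyRequested) {
+//           android_poll_source* source = nullptr;
+//           while (ALooper_pollOnce(0, nullptr, nullptr,
+//                                   reinterpret_cast<void**>(&source)) >= 0) {
+//               if (source) source->process(app, source);
+//           }
+//           // Update and render one frame here.
+//       }
+//   }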
\ No newline at end of file
diff --git a/attachments/simple_engine/android/app/src/main/java/com/simple_engine/VulkanActivity.java b/attachments/simple_engine/android/app/src/main/java/com/simple_engine/VulkanActivity.java
new file mode 100644
index 00000000..ee893e3b
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/java/com/simple_engine/VulkanActivity.java
@@ -0,0 +1,20 @@
+package com.simple_engine;
+
+import android.os.Bundle;
+import android.view.WindowManager;
+import com.google.androidgamesdk.GameActivity;
+
+public class VulkanActivity extends GameActivity {
+    @Override
+    protected void onCreate(Bundle savedInstanceState) {
+        super.onCreate(savedInstanceState);
+
+        // Keep the screen on while the app is running
+        getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);
+    }
+
+    // Load the native library
+    static {
+        System.loadLibrary("simple_engine_android");
+    }
+}
diff --git a/attachments/simple_engine/android/app/src/main/res/values/strings.xml b/attachments/simple_engine/android/app/src/main/res/values/strings.xml
new file mode 100644
index 00000000..e40be93c
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/res/values/strings.xml
@@ -0,0 +1,3 @@
+<resources>
+    <string name="app_name">Simple Engine</string>
+</resources>
diff --git a/attachments/simple_engine/android/app/src/main/res/values/styles.xml b/attachments/simple_engine/android/app/src/main/res/values/styles.xml
new file mode 100644
index 00000000..c63a3a91
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/res/values/styles.xml
@@ -0,0 +1,6 @@
+
+
+
+
diff --git a/attachments/simple_engine/android/app/src/main/res/xml/backup_rules.xml b/attachments/simple_engine/android/app/src/main/res/xml/backup_rules.xml
new file mode 100644
index 00000000..04184967
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/res/xml/backup_rules.xml
@@ -0,0 +1,9 @@
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/attachments/simple_engine/android/app/src/main/res/xml/data_extraction_rules.xml b/attachments/simple_engine/android/app/src/main/res/xml/data_extraction_rules.xml
new file mode 100644
index 00000000..dbe53401
--- /dev/null
+++ b/attachments/simple_engine/android/app/src/main/res/xml/data_extraction_rules.xml
@@ -0,0 +1,19 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/attachments/simple_engine/android/build.gradle b/attachments/simple_engine/android/build.gradle
new file mode 100644
index 00000000..f12a2f6b
--- /dev/null
+++ b/attachments/simple_engine/android/build.gradle
@@ -0,0 +1,18 @@
+// Top-level build file where you can add configuration options common to all sub-projects/modules.
+buildscript {
+    repositories {
+        google()
+        mavenCentral()
+    }
+    dependencies {
+        classpath 'com.android.tools.build:gradle:8.13.1'
+
+        // NOTE: Do not place your application dependencies here; they belong
+        // in the individual module build.gradle files
+    }
+}
+
+// For Gradle 9.0+, register the clean task with the Delete type instead of the legacy task syntax
+tasks.register('clean', Delete) {
+    delete rootProject.buildDir
+}
diff --git a/attachments/simple_engine/android/gradle.properties b/attachments/simple_engine/android/gradle.properties
new file mode 100644
index 00000000..6c8ca9a3
--- /dev/null
+++ b/attachments/simple_engine/android/gradle.properties
@@ -0,0 +1,10 @@
+android.useAndroidX=true
+android.enableJetifier=false
+
+# Gradle 9.0+ compatibility settings
+org.gradle.jvmargs=-Xmx2048m -Dfile.encoding=UTF-8
+org.gradle.parallel=true
+org.gradle.caching=true
+android.nonTransitiveRClass=true
+android.nonFinalResIds=true
+org.gradle.configuration-cache=true
diff --git a/attachments/simple_engine/android/settings.gradle b/attachments/simple_engine/android/settings.gradle
new file mode 100644
index 00000000..aac78f00
--- /dev/null
+++ b/attachments/simple_engine/android/settings.gradle
@@ -0,0 +1,20 @@
+// For Gradle 9.0+, plugin repositories should be configured here
+pluginManagement {
+    repositories {
+        google()
+        mavenCentral()
+        gradlePluginPortal()
+    }
+}
+
+// For Gradle 9.0+, dependency repositories should be configured here
+dependencyResolutionManagement {
+    repositoriesMode.set(RepositoriesMode.FAIL_ON_PROJECT_REPOS)
+    repositories {
+        google()
+        mavenCentral()
+    }
+}
+
+include ':app'
+rootProject.name = "SimpleEngine"
diff --git a/attachments/simple_engine/audio_system.cpp b/attachments/simple_engine/audio_system.cpp
index 9cbbd56c..077d25b7 100644
--- a/attachments/simple_engine/audio_system.cpp
+++ b/attachments/simple_engine/audio_system.cpp
@@ -1,4 +1,5 @@
 #include "audio_system.h"
+#include "platform.h"
 #include
 #include
@@ -14,6 +15,10 @@
 #include
 #include
+#if defined(PLATFORM_ANDROID)
+#include <SLES/OpenSLES.h>
+#include <SLES/OpenSLES_Android.h>
+#else
 // OpenAL headers
 #ifdef __APPLE__
 #include
 #else
 #include
 #endif
-#include "renderer.h"
-#include "engine.h"
+#endif
+
+#if !defined(PLATFORM_ANDROID)
 // OpenAL error checking utility
 static void CheckOpenALError(const std::string& operation) {
     ALenum error = alGetError();
@@ -54,6 +60,7 @@ static void CheckOpenALError(const std::string& operation) {
         std::cerr << std::endl;
     }
 }
+#endif
 
 // Concrete implementation of AudioSource
 class ConcreteAudioSource : public AudioSource {
@@ -182,6 +189,256 @@ class ConcreteAudioSource : public AudioSource {
     double sampleAccumulator = 0.0; // Per-source sample accumulator for proper timing
 };
 
+#if defined(PLATFORM_ANDROID)
+
+// OpenSL ES audio output device implementation
+class OpenSLESAudioOutputDevice : public AudioOutputDevice {
+public:
+    OpenSLESAudioOutputDevice() = default;
+    ~OpenSLESAudioOutputDevice() override { Stop(); }
+
+    bool Initialize(uint32_t sampleRate, uint32_t channels, uint32_t bufferSize) override {
+        this->sampleRate = sampleRate;
+        this->channels = channels == 0 ? 2u : channels;
+        this->bufferSize = bufferSize == 0 ? 1024u : bufferSize; // frames
+
+        // Create and realize engine
+        SLresult result = slCreateEngine(&engineObject, 0, nullptr, 0, nullptr, nullptr);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: slCreateEngine failed (%d)", result); return false; }
+        result = (*engineObject)->Realize(engineObject, SL_BOOLEAN_FALSE);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: Engine Realize failed (%d)", result); Cleanup(); return false; }
+        result = (*engineObject)->GetInterface(engineObject, SL_IID_ENGINE, &engineEngine);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: GetInterface SL_IID_ENGINE failed (%d)", result); Cleanup(); return false; }
+
+        // Create output mix
+        result = (*engineEngine)->CreateOutputMix(engineEngine, &outputMixObject, 0, nullptr, nullptr);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: CreateOutputMix failed (%d)", result); Cleanup(); return false; }
+        result = (*outputMixObject)->Realize(outputMixObject, SL_BOOLEAN_FALSE);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: OutputMix Realize failed (%d)", result); Cleanup(); return false; }
+
+        // Configure source: buffer queue + PCM format
+        SLDataLocator_AndroidSimpleBufferQueue loc_bufq{ SL_DATALOCATOR_ANDROIDSIMPLEBUFFERQUEUE, (SLuint32)NUM_BUFFERS };
+        SLDataFormat_PCM format_pcm{};
+        format_pcm.formatType = SL_DATAFORMAT_PCM;
+        format_pcm.numChannels = (SLuint32)this->channels;
+        format_pcm.samplesPerSec = ToSLSampleRate(this->sampleRate);
+        format_pcm.bitsPerSample = SL_PCMSAMPLEFORMAT_FIXED_16;
+        format_pcm.containerSize = 16;
+        format_pcm.channelMask = (this->channels == 1)
+            ? (SL_SPEAKER_FRONT_CENTER)
+            : (SL_SPEAKER_FRONT_LEFT | SL_SPEAKER_FRONT_RIGHT);
+        format_pcm.endianness = SL_BYTEORDER_LITTLEENDIAN;
+
+        SLDataSource audioSrc{ &loc_bufq, &format_pcm };
+
+        // Sink: OutputMix
+        SLDataLocator_OutputMix loc_outmix{ SL_DATALOCATOR_OUTPUTMIX, outputMixObject };
+        SLDataSink audioSnk{ &loc_outmix, nullptr };
+
+        // Create audio player; request buffer queue interface
+        const SLInterfaceID ids[] = { SL_IID_BUFFERQUEUE };
+        const SLboolean req[] = { SL_BOOLEAN_TRUE };
+        result = (*engineEngine)->CreateAudioPlayer(engineEngine, &playerObject, &audioSrc, &audioSnk,
+                                                    (SLuint32)(sizeof(ids)/sizeof(ids[0])), ids, req);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: CreateAudioPlayer failed (%d)", result); Cleanup(); return false; }
+        result = (*playerObject)->Realize(playerObject, SL_BOOLEAN_FALSE);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: Player Realize failed (%d)", result); Cleanup(); return false; }
+
+        // Interfaces
+        result = (*playerObject)->GetInterface(playerObject, SL_IID_PLAY, &playItf);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: GetInterface SL_IID_PLAY failed (%d)", result); Cleanup(); return false; }
+        result = (*playerObject)->GetInterface(playerObject, SL_IID_BUFFERQUEUE, &bufferQueueItf);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: GetInterface SL_IID_BUFFERQUEUE failed (%d)", result); Cleanup(); return false; }
+
+        // Set up buffers
+        pcmBuffers.assign(NUM_BUFFERS, std::vector<int16_t>(this->bufferSize * this->channels));
+        nextBufferIndex = 0;
+
+        // Register callback and clear queue
+        (*bufferQueueItf)->Clear(bufferQueueItf);
+        result = (*bufferQueueItf)->RegisterCallback(bufferQueueItf, &OpenSLESAudioOutputDevice::BufferQueueCallback, this);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: RegisterCallback failed (%d)", result); Cleanup(); return false; }
+
+        // Reset state
+        while (!audioQueue.empty()) audioQueue.pop();
+        playbackPosition = 0;
+
+        initialized = true;
+        return true;
+    }
+
+    bool Start() override {
+        if (!initialized) { LOGE("OpenSLES: device not initialized"); return false; }
+        if (playing) return true;
+
+        // Make sure the OpenSL ES queue is empty
+        (*bufferQueueItf)->Clear(bufferQueueItf);
+
+        // Prefill a few buffers to avoid initial underrun
+        int prefill = std::min(3, NUM_BUFFERS);
+        for (int i = 0; i < prefill; ++i) {
+            if (!EnqueueNextBuffer()) {
+                EnqueueSilence();
+            }
+        }
+
+        SLresult result = (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_PLAYING);
+        if (result != SL_RESULT_SUCCESS) { LOGE("OpenSLES: SetPlayState PLAYING failed (%d)", result); return false; }
+
+        playing = true;
+        return true;
+    }
+
+    bool Stop() override {
+        if (!initialized) return true;
+        playing = false;
+
+        if (playItf) {
+            (*playItf)->SetPlayState(playItf, SL_PLAYSTATE_STOPPED);
+        }
+        if (bufferQueueItf) {
+            (*bufferQueueItf)->Clear(bufferQueueItf);
+        }
+
+        Cleanup();
+        initialized = false;
+        return true;
+    }
+
+    bool WriteAudio(const float* data, uint32_t sampleCount) override {
+        if (!initialized) return false;
+        std::lock_guard lock(bufferMutex);
+        const uint64_t total = (uint64_t)sampleCount * (uint64_t)channels;
+        for (uint64_t i = 0; i < total; ++i) {
+            audioQueue.push(data[i]);
+        }
+        return true;
+    }
+
+    bool IsPlaying() const override { return playing; }
+    uint32_t GetPosition() const override { return playbackPosition; }
+
+private:
+    static constexpr int NUM_BUFFERS = 8;
+
+    uint32_t sampleRate = 44100;
+    uint32_t channels = 2;
+    uint32_t bufferSize = 1024; // frames per buffer
+    bool initialized = false;
+    std::atomic<bool> playing{false};
+    uint32_t playbackPosition = 0; // frames
+
+    // OpenSLES objects
+    SLObjectItf engineObject = nullptr;
+    SLEngineItf engineEngine = nullptr;
+    SLObjectItf outputMixObject = nullptr;
+    SLObjectItf playerObject = nullptr;
+    SLPlayItf playItf = nullptr;
+    SLAndroidSimpleBufferQueueItf bufferQueueItf = nullptr;
+
+    // Buffers and queueing
+    std::vector<std::vector<int16_t>> pcmBuffers; // NUM_BUFFERS x (bufferSize*channels)
+    int nextBufferIndex = 0;
+    std::queue<float> audioQueue; // interleaved float samples
+    std::mutex bufferMutex;
+
+    static SLuint32 ToSLSampleRate(uint32_t rate) {
+        switch (rate) {
+            case 8000:   return SL_SAMPLINGRATE_8;
+            case 11025:  return SL_SAMPLINGRATE_11_025;
+            case 12000:  return SL_SAMPLINGRATE_12;
+            case 16000:  return SL_SAMPLINGRATE_16;
+            case 22050:  return SL_SAMPLINGRATE_22_05;
+            case 24000:  return SL_SAMPLINGRATE_24;
+            case 32000:  return SL_SAMPLINGRATE_32;
+            case 44100:  return SL_SAMPLINGRATE_44_1;
+            case 48000:  return SL_SAMPLINGRATE_48;
+            case 64000:  return SL_SAMPLINGRATE_64;
+            case 88200:  return SL_SAMPLINGRATE_88_2;
+            case 96000:  return SL_SAMPLINGRATE_96;
+            case 192000: return SL_SAMPLINGRATE_192;
+            default:     return SL_SAMPLINGRATE_44_1;
+        }
+    }
+
+    static void BufferQueueCallback(SLAndroidSimpleBufferQueueItf /*bq*/, void* context) {
+        auto* self = static_cast<OpenSLESAudioOutputDevice*>(context);
+        if (!self) return;
+        if (!self->EnqueueNextBuffer()) {
+            self->EnqueueSilence();
+        }
+    }
+
+    bool EnqueueNextBuffer() {
+        std::lock_guard lock(bufferMutex);
+        const uint32_t framesAvailable = static_cast<uint32_t>(audioQueue.size() / channels);
+        if (framesAvailable == 0) {
+            return false;
+        }
+        const uint32_t framesToSend = std::min(bufferSize, framesAvailable);
+        const uint32_t samplesToSend = framesToSend * channels;
+
+        auto &buf = pcmBuffers[nextBufferIndex];
+        // Convert float samples to 16-bit PCM, clamping to [-1, 1]
+        for (uint32_t i = 0; i < samplesToSend; ++i) {
+            float s = audioQueue.front();
+            audioQueue.pop();
+            if (s > 1.0f) s = 1.0f; else if (s < -1.0f) s = -1.0f;
+            buf[i] = static_cast<int16_t>(s * 32767.0f);
+        }
+        // Pad the remainder with zeros if any
+        const uint32_t totalSamples = bufferSize * channels;
+        if (samplesToSend < totalSamples) {
+            std::fill(buf.begin() + samplesToSend, buf.begin() + totalSamples, 0);
+        }
+
+        SLresult result = (*bufferQueueItf)->Enqueue(bufferQueueItf, buf.data(), totalSamples * sizeof(int16_t));
+        if (result != SL_RESULT_SUCCESS) {
+            LOGE("OpenSLES: Enqueue failed (%d)", result);
+            return false;
+        }
+        playbackPosition += framesToSend;
+        nextBufferIndex = (nextBufferIndex + 1) % NUM_BUFFERS;
+        return true;
+    }
+
+    bool EnqueueSilence() {
+        auto &buf = pcmBuffers[nextBufferIndex];
+        const uint32_t totalSamples = bufferSize * channels;
+        std::fill(buf.begin(), buf.begin() + totalSamples, 0);
+        SLresult result = (*bufferQueueItf)->Enqueue(bufferQueueItf, buf.data(), totalSamples * sizeof(int16_t));
+        if (result != SL_RESULT_SUCCESS) {
+            LOGE("OpenSLES: Enqueue(silence) failed (%d)", result);
+            return false;
+        }
+        nextBufferIndex = (nextBufferIndex + 1) % NUM_BUFFERS;
+        return true;
+    }
+
+    void Cleanup() {
+        if (playerObject) {
+            (*playerObject)->Destroy(playerObject);
+            playerObject = nullptr;
+            playItf = nullptr;
+            bufferQueueItf = nullptr;
+        }
+        if (outputMixObject) {
+            (*outputMixObject)->Destroy(outputMixObject);
+            outputMixObject = nullptr;
+        }
+        if (engineObject) {
+            (*engineObject)->Destroy(engineObject);
+            engineObject = nullptr;
+            engineEngine = nullptr;
+        }
+        while (!audioQueue.empty()) audioQueue.pop();
+        playbackPosition = 0;
+        nextBufferIndex = 0;
+        pcmBuffers.clear();
+    }
+};
+
+#else
+
 // OpenAL audio output device implementation
 class OpenALAudioOutputDevice : public AudioOutputDevice {
 public:
@@ -371,7 +628,7 @@ class OpenALAudioOutputDevice : public AudioOutputDevice {
     void AudioThreadFunction() {
         // Calculate sleep time for audio buffer updates (in milliseconds)
         const auto sleepTime = std::chrono::milliseconds(
-            static_cast<uint32_t>((bufferSize * 1000) / sampleRate / 8)  // Eighth buffer time for responsiveness
+            static_cast<uint32_t>((bufferSize * 1000) / sampleRate / 8) // Eighth buffer time for responsiveness
         );
 
         while (playing) {
@@ -450,7 +707,7 @@ class OpenALAudioOutputDevice : public AudioOutputDevice {
             // Upload audio data to OpenAL buffer
             alBufferData(buffer, format, pcmBuffer.data(),
-                         static_cast<ALsizei>(samplesProcessed * sizeof(int16_t)),  static_cast<ALsizei>(sampleRate));
+                         static_cast<ALsizei>(samplesProcessed * sizeof(int16_t)), static_cast<ALsizei>(sampleRate));
             CheckOpenALError("alBufferData");
 
             // Queue the buffer
@@ -477,1080 +734,244 @@ class OpenALAudioOutputDevice : public AudioOutputDevice {
     }
 };
 
-AudioSystem::~AudioSystem() {
-    // Stop the audio thread first
-    stopAudioThread();
-
-    // Stop and clean up audio output device
-    if (outputDevice) {
-        outputDevice->Stop();
-        outputDevice.reset();
-    }
+#endif
 
-    // Destructor implementation
-    sources.clear();
-    audioData.clear();
+// =============================
+// AudioSystem implementation
+// =============================
 
-    // Clean up HRTF buffers
-    cleanupHRTFBuffers();
+namespace {
+    constexpr uint32_t kDefaultSampleRate = 44100;
+    constexpr uint32_t kDefaultChannels = 2; // stereo
 }
 
-void AudioSystem::GenerateSineWavePing(float* buffer, uint32_t sampleCount, uint32_t playbackPosition) {
-    constexpr float sampleRate = 44100.0f;
-    const float frequency = 800.0f; // 800Hz ping
-    constexpr float pingDuration = 0.75f; // 0.75 second ping duration
-    constexpr auto pingSamples = static_cast<uint32_t>(pingDuration * sampleRate);
-    constexpr float silenceDuration = 1.0f; // 1 second silence after ping
-    constexpr auto silenceSamples = static_cast<uint32_t>(silenceDuration * sampleRate);
-    constexpr uint32_t totalCycleSamples = pingSamples + silenceSamples;
-
-    const uint32_t attackSamples = static_cast<uint32_t>(0.001f * sampleRate);  // ~1ms attack
-    const uint32_t releaseSamples = static_cast<uint32_t>(0.001f * sampleRate); // ~1ms release
-    constexpr float amplitude = 0.6f;
-
-    for (uint32_t i = 0; i < sampleCount; i++) {
-        uint32_t globalPosition = playbackPosition + i;
-        uint32_t cyclePosition = globalPosition % totalCycleSamples;
-
-        if (cyclePosition < pingSamples) {
-            float t = static_cast<float>(cyclePosition) / sampleRate;
-
-            // Minimal envelope for click prevention only
-            float envelope = 1.0f;
-            if (cyclePosition < attackSamples) {
-                envelope = static_cast<float>(cyclePosition) / static_cast<float>(std::max(1u, attackSamples));
-            } else if (cyclePosition > pingSamples - releaseSamples) {
-                uint32_t relPos = pingSamples - cyclePosition;
-                envelope = static_cast<float>(relPos) / static_cast<float>(std::max(1u, releaseSamples));
-            }
-
-            float sineWave = sinf(2.0f * static_cast<float>(M_PI) * frequency * t);
-            buffer[i] = amplitude * envelope * sineWave;
-        } else {
-            // Silence phase
-            buffer[i] = 0.0f;
-        }
-    }
-}
+AudioSystem::~AudioSystem() {
+    // Stop any background work
+    stopAudioThread();
+    // Ensure device is destroyed last
+    outputDevice.reset();
 }
 
 bool AudioSystem::Initialize(Engine* engine, Renderer* renderer) {
-    // Store the engine reference for accessing active camera
+    if (initialized) return true;
     this->engine = engine;
+    this->renderer = renderer;
 
-    if (renderer) {
-        // Validate renderer if provided
-        if (!renderer->IsInitialized()) {
-            std::cerr << "AudioSystem::Initialize: Renderer is not initialized" << std::endl;
-            return false;
-        }
-
-        // Store the renderer for compute shader support
-        this->renderer = renderer;
-    } else {
-        this->renderer = nullptr;
-    }
-
-    // Generate default HRTF data for spatial audio processing
-    LoadHRTFData(""); // Pass empty filename to force generation of default HRTF data
-
-    // Enable HRTF processing by default for 3D spatial audio
-    EnableHRTF(true);
-
-    // Set default listener properties
-    SetListenerPosition(0.0f, 0.0f, 0.0f);
-    SetListenerOrientation(0.0f, 0.0f, -1.0f, 0.0f, 1.0f, 0.0f);
-    SetListenerVelocity(0.0f, 0.0f, 0.0f);
-    SetMasterVolume(1.0f);
-
-    // Initialize audio output device
+    // Create output device per platform
+#if defined(PLATFORM_ANDROID)
+    outputDevice = std::make_unique<OpenSLESAudioOutputDevice>();
+#else
     outputDevice = std::make_unique<OpenALAudioOutputDevice>();
-    if (!outputDevice->Initialize(44100, 2, 1024)) {
-        std::cerr << "Failed to initialize audio output device" << std::endl;
+#endif
+
+    const uint32_t bufferSizeFrames = 1024; // frames per buffer for streaming
+    if (!outputDevice->Initialize(kDefaultSampleRate, kDefaultChannels, bufferSizeFrames)) {
+        LOGE("AudioSystem: Failed to initialize output device");
+        outputDevice.reset();
         return false;
     }
-
-    // Start audio output
     if (!outputDevice->Start()) {
-        std::cerr << "Failed to start audio output device" << std::endl;
+        LOGE("AudioSystem: Failed to start output device");
+        outputDevice.reset();
         return false;
     }
 
-    // Start the background audio processing thread
-    startAudioThread();
-
     initialized = true;
     return true;
 }
 
-void AudioSystem::Update(std::chrono::milliseconds deltaTime) {
-    if (!initialized) {
-        return;
-    }
-
-    // Synchronize HRTF listener position and orientation with active camera
-    if (engine) {
-        const CameraComponent* activeCamera = engine->GetActiveCamera();
-        if (activeCamera) {
-            // Get camera position
-            glm::vec3 cameraPos = activeCamera->GetPosition();
-            SetListenerPosition(cameraPos.x, cameraPos.y, cameraPos.z);
-
-            // Calculate camera forward and up vectors for orientation
-            // The camera looks at its target, so forward = normalize(target - position)
-            glm::vec3 target = activeCamera->GetTarget();
-            glm::vec3 up = activeCamera->GetUp();
-            glm::vec3 forward = glm::normalize(target - cameraPos);
-
-            SetListenerOrientation(forward.x, forward.y, forward.z, up.x, up.y, up.z);
-        }
+void AudioSystem::FlushOutput() {
+    if (!initialized) return;
+    // Recreate device to clear queued buffers
+    outputDevice.reset();
+#if defined(PLATFORM_ANDROID)
+    outputDevice = std::make_unique<OpenSLESAudioOutputDevice>();
+#else
+    outputDevice = std::make_unique<OpenALAudioOutputDevice>();
+#endif
+    const uint32_t bufferSizeFrames = 1024;
+    if (outputDevice->Initialize(kDefaultSampleRate, kDefaultChannels, bufferSizeFrames)) {
+        outputDevice->Start();
     }
+}
 
-    // Update audio sources and process spatial audio
-    for (auto& source : sources) {
-        if (!source->IsPlaying()) {
-            continue;
-        }
+void AudioSystem::Update(std::chrono::milliseconds deltaTime) {
+    if (!initialized || !outputDevice) return;
 
-        // Cast to ConcreteAudioSource to access timing methods
-        auto* concreteSource = dynamic_cast<ConcreteAudioSource*>(source.get());
+    // Determine frames to mix for this update
+    const double framesExact = (double)kDefaultSampleRate * (double)deltaTime.count() / 1000.0;
+    const uint32_t framesToProcess = std::max(1u, (uint32_t)std::llround(framesExact));
+    const uint32_t channels = kDefaultChannels;
 
-        // Update playback timing and delay logic
-        concreteSource->UpdatePlayback(deltaTime, 0);
+    std::vector<float> mixBuffer(framesToProcess * channels, 0.0f);
 
-        // Only process audio if not in the delay phase
-        if (!concreteSource->ShouldProcessAudio()) {
+    // Mix all sources
+    for (auto &sp : sources) {
+        auto* src = static_cast<ConcreteAudioSource*>(sp.get());
+        if (!src) continue;
+        if (!src->ShouldProcessAudio()) {
+            src->UpdatePlayback(deltaTime, 0);
             continue;
         }
 
-        // Process audio with HRTF spatial processing (works with or without renderer)
-        if (hrtfEnabled && !hrtfData.empty()) {
-            // Get source position for spatial processing
-            const float* sourcePosition = concreteSource->GetPosition();
-
-            // Accumulate samples based on real time and process in fixed-size chunks to avoid tiny buffers
-            double acc = concreteSource->GetSampleAccumulator();
-            acc += (static_cast<double>(deltaTime.count()) * 44100.0) / 1000.0; // ms -> samples
-            constexpr uint32_t kChunk = 33075;
-            uint32_t available = static_cast<uint32_t>(acc);
-            if (available < kChunk) {
-                // Not enough for a full chunk; keep accumulating
-                concreteSource->SetSampleAccumulator(acc);
-                continue;
-            }
-            // Process as many full chunks as available this frame
-            while (available >= kChunk) {
-                std::vector<float> inputBuffer(kChunk, 0.0f);
-                std::vector<float> outputBuffer(kChunk * 2, 0.0f);
-                uint32_t actualSamplesProcessed = 0;
-
-                // Generate audio signal from loaded audio data or debug ping
-                auto audioIt = audioData.find(concreteSource->GetName());
-                if (audioIt != audioData.end() && !audioIt->second.empty()) {
-                    // Use actual loaded audio data with proper position tracking
-                    const auto& data = audioIt->second;
-                    uint32_t playbackPos = concreteSource->GetPlaybackPosition();
-
-                    for (uint32_t i = 0; i < kChunk; i++) {
-                        uint32_t dataIndex = (playbackPos + i) * 4; // 4 bytes per sample (16-bit stereo)
-
-                        if (dataIndex + 1 < data.size()) {
-                            // Convert from 16-bit PCM to float
-                            int16_t sample = *reinterpret_cast<const int16_t*>(&data[dataIndex]);
-                            inputBuffer[i] = static_cast<float>(sample) / 32768.0f;
-                            actualSamplesProcessed++;
-                        } else {
-                            // Reached end of audio data
-                            inputBuffer[i] = 0.0f;
-                        }
-                    }
-                } else {
-                    // Generate sine wave ping for debugging
-                    GenerateSineWavePing(inputBuffer.data(), kChunk, concreteSource->GetPlaybackPosition());
-                    actualSamplesProcessed = kChunk;
-                }
-
-                // Build extended input [history | current] to preserve convolution continuity across chunks
-                uint32_t histLen = (hrtfSize > 0) ? (hrtfSize - 1) : 0;
-                static std::unordered_map<ConcreteAudioSource*, std::vector<float>> hrtfHistories;
-                auto &hist = hrtfHistories[concreteSource];
-                if (hist.size() != histLen) {
-                    hist.assign(histLen, 0.0f);
-                }
-                std::vector<float> extendedInput(histLen + kChunk, 0.0f);
-                if (histLen > 0) {
-                    std::memcpy(extendedInput.data(), hist.data(), histLen * sizeof(float));
+        const std::string& name = src->GetName();
+        auto it = audioData.find(name);
+        if (it == audioData.end()) {
+            // Debug ping source
+            if (name == "debug_ping") {
+                std::vector<float> tmp(framesToProcess * channels, 0.0f);
+                GenerateSineWavePing(tmp.data(), framesToProcess, src->GetPlaybackPosition());
+                for (uint32_t i = 0; i < framesToProcess; ++i) {
+                    mixBuffer[i*channels+0] += tmp[i*channels+0] * masterVolume;
+                    mixBuffer[i*channels+1] += tmp[i*channels+1] * masterVolume;
                 }
-                std::memcpy(extendedInput.data() + histLen, inputBuffer.data(), kChunk * sizeof(float));
-
-                // Submit for GPU HRTF processing via the background thread (trim will occur in processAudioTask)
-                submitAudioTask(extendedInput.data(), static_cast<uint32_t>(extendedInput.size()), sourcePosition, actualSamplesProcessed, histLen);
+                src->UpdatePlayback(deltaTime, framesToProcess);
+            }
+            continue;
+        }
 
-                // Update history with the tail of current input
-                if (histLen > 0) {
-                    std::memcpy(hist.data(), inputBuffer.data() + (kChunk - histLen), histLen * sizeof(float));
-                }
+        const std::vector<uint8_t>& bytes = it->second;
+        if (bytes.empty()) continue;
+        const int16_t* pcm = reinterpret_cast<const int16_t*>(bytes.data());
+        const uint32_t totalSamples = (uint32_t)(bytes.size() / sizeof(int16_t));
+        const uint32_t totalFrames = totalSamples / channels;
 
-                // Update playback timing with actual samples processed
-                concreteSource->UpdatePlayback(std::chrono::milliseconds(0), actualSamplesProcessed);
+        uint32_t playPos = src->GetPlaybackPosition();
+        src->SetAudioLength(totalFrames);
 
-                // Consume one chunk from the accumulator
-                acc -= static_cast<double>(kChunk);
-                available -= kChunk;
+        uint32_t mixed = 0;
+        for (; mixed < framesToProcess && playPos < totalFrames; ++mixed, ++playPos) {
+            const uint32_t base = playPos * channels;
+            float l, r;
+            if (channels == 1) {
+                l = r = (float)pcm[base] / 32767.0f;
+            } else {
+                l = (float)pcm[base+0] / 32767.0f;
+                r = (float)pcm[base+1] / 32767.0f;
             }
-            // Store fractional remainder for next frame
-            concreteSource->SetSampleAccumulator(acc);
-        }
-    }
-
-    // Apply master volume changes to all active sources
-    for (auto& source : sources) {
-        if (source->IsPlaying()) {
-            // Master volume is applied during HRTF processing and individual source volume control
-            // Volume scaling is handled in the ProcessHRTF function
+            l *= masterVolume; r *= masterVolume;
+            mixBuffer[mixed*channels+0] += l;
+            mixBuffer[mixed*channels+1] += r;
         }
+        src->UpdatePlayback(deltaTime, mixed);
     }
 
-    // Clean up finished audio sources
-    std::erase_if(sources,
-        [](const std::unique_ptr<AudioSource>& source) {
-            // Keep all sources active for continuous playback
-            // Audio sources can be stopped/started via their Play/Stop methods
-            return false;
-        });
-
-    // Update timing for audio processing with low-latency chunks
-    static std::chrono::milliseconds accumulatedTime = std::chrono::milliseconds(0);
-    accumulatedTime += deltaTime;
-
-    // Process audio in 20ms chunks for optimal latency
-    constexpr std::chrono::milliseconds audioChunkTime = std::chrono::milliseconds(20); // 20ms chunks for real-time audio
-    if (accumulatedTime >= audioChunkTime) {
-        // Trigger audio buffer updates for smooth playback
-        // The HRTF processing ensures spatial audio is updated continuously
-        accumulatedTime = std::chrono::milliseconds(0);
-
-        // Update listener properties if they have changed
-        // This ensures spatial audio positioning stays current with camera movement
+    // Clamp and send to device
+    for (float &s : mixBuffer) {
+        if (s > 1.0f) s = 1.0f; else if (s < -1.0f) s = -1.0f;
     }
+    outputDevice->WriteAudio(mixBuffer.data(), framesToProcess);
 }
 
 bool AudioSystem::LoadAudio(const std::string& filename, const std::string& name) {
-
-    // Open the WAV file
     std::ifstream file(filename, std::ios::binary);
-    if (!file.is_open()) {
-        std::cerr << "Failed to open audio file: " << filename << std::endl;
-        return false;
-    }
-
-    // Read WAV header
-    struct WAVHeader {
-        char riff[4];           // "RIFF"
-        uint32_t fileSize;      // File size - 8
-        char wave[4];           // "WAVE"
-        char fmt[4];            // "fmt "
-        uint32_t fmtSize;       // Format chunk size
-        uint16_t audioFormat;   // Audio format (1 = PCM)
-        uint16_t numChannels;   // Number of channels
-        uint32_t sampleRate;    // Sample rate
-        uint32_t byteRate;      // Byte rate
-        uint16_t blockAlign;    // Block align
-        uint16_t bitsPerSample; // Bits per sample
-        char data[4];           // "data"
-        uint32_t dataSize;      // Data size
-    };
-
-    WAVHeader header{};
-    file.read(reinterpret_cast<char*>(&header), sizeof(WAVHeader));
-
-    // Validate WAV header
-    if (std::strncmp(header.riff, "RIFF", 4) != 0 ||
-        std::strncmp(header.wave, "WAVE", 4) != 0 ||
-        std::strncmp(header.fmt, "fmt ", 4) != 0 ||
-        std::strncmp(header.data, "data", 4) != 0) {
-        std::cerr << "Invalid WAV file format: " << filename << std::endl;
-        file.close();
-        return false;
-    }
-
-    // Only support PCM format for now
-    if (header.audioFormat != 1) {
-        std::cerr << "Unsupported audio format (only PCM supported): " << filename << std::endl;
-        file.close();
-        return false;
+    if (!file) { std::cerr << "AudioSystem: failed to open " << filename << std::endl; return false; }
+    std::vector<uint8_t> data((std::istreambuf_iterator<char>(file)), std::istreambuf_iterator<char>());
+    if (data.size() < 44) { std::cerr << "AudioSystem: file too small: " << filename << std::endl; return false; }
+
+    const uint8_t* p = data.data();
+    auto memeq4 = [](const uint8_t* a, const char* s){ return std::memcmp(a, s, 4)==0; };
+    uint32_t dataOffset = 0, dataSize = 0;
+    uint16_t fmt = 1, bits = 16; uint16_t ch = 2; uint32_t rate = kDefaultSampleRate;
+    if (memeq4(p, "RIFF") && memeq4(p+8, "WAVE")) {
+        size_t off = 12; // chunk start
+        while (off + 8 <= data.size()) {
+            const char* id = (const char*)(p + off);
+            uint32_t sz = *reinterpret_cast<const uint32_t*>(p + off + 4);
+            if (memeq4((const uint8_t*)id, "fmt ")) {
+                if (off + 8 + sz > data.size()) break;
+                fmt = *reinterpret_cast<const uint16_t*>(p + off + 8);
+                ch  = *reinterpret_cast<const uint16_t*>(p + off + 10);
+                rate= *reinterpret_cast<const uint32_t*>(p + off + 12);
+                bits= *reinterpret_cast<const uint16_t*>(p + off + 22);
+            } else if (memeq4((const uint8_t*)id, "data")) {
+                dataOffset = (uint32_t)(off + 8);
+                dataSize = sz;
+                break;
+            }
+            off += 8 + sz;
+        }
     }
 
-    // Read audio data
-    std::vector<uint8_t> data(header.dataSize);
-    file.read(reinterpret_cast<char*>(data.data()), header.dataSize);
-    file.close();
-
-    if (file.gcount() != static_cast<std::streamsize>(header.dataSize)) {
-        std::cerr << "Failed to read complete audio data from: " << filename << std::endl;
-        return false;
+    if (dataOffset == 0 || dataSize == 0 || fmt != 1 || bits != 16) {
+        // Not a PCM16 WAV; store as-is and assume default format
+        audioData[name] = std::move(data);
+        return true;
     }
-
-    // Store the audio data
-    audioData[name] = std::move(data);
-
+    if (dataOffset + dataSize > data.size()) return false;
+    std::vector<uint8_t> pcm(data.begin()+dataOffset, data.begin()+dataOffset+dataSize);
+    audioData[name] = std::move(pcm);
     return true;
 }
 
 AudioSource* AudioSystem::CreateAudioSource(const std::string& name) {
-    // Check if the audio data exists
+    auto src = std::make_unique<ConcreteAudioSource>(name);
     auto it = audioData.find(name);
-    if (it == audioData.end()) {
-        std::cerr << "AudioSystem::CreateAudioSource: Audio data not found: " << name << std::endl;
-        return nullptr;
+    if (it != audioData.end()) {
+        const uint32_t totalSamples = (uint32_t)(it->second.size() / sizeof(int16_t));
+        const uint32_t totalFrames = totalSamples / kDefaultChannels;
+        src->SetAudioLength(totalFrames);
     }
-
-    // Create a new audio source
-    auto source = std::make_unique<ConcreteAudioSource>(name);
-
-    // Calculate audio length in samples for timing
-    const auto& data = it->second;
-    if (!data.empty()) {
-        // Assuming 16-bit stereo audio at 44.1kHz (standard WAV format)
-        // The audio data reading uses dataIndex = (playbackPos + i) * 4
-        // So we need to calculate length based on how many individual samples we can read
-        // Each 4 bytes represents one stereo sample pair, so total individual samples = data.size() / 4
-        uint32_t totalSamples = static_cast<uint32_t>(data.size()) / 4;
-
-        // Set the audio length for proper timing
-        source->SetAudioLength(totalSamples);
-    }
-
-    // Store the source
-    sources.push_back(std::move(source));
-
+    sources.push_back(std::move(src));
     return sources.back().get();
 }
 
 AudioSource* AudioSystem::CreateDebugPingSource(const std::string& name) {
-    // Create a new audio source for debugging
-    auto source = std::make_unique<ConcreteAudioSource>(name);
-
-    // Set up debug ping parameters
-    // The ping will cycle every 1.5 seconds (0.5s ping + 1.0s silence)
-    constexpr float sampleRate = 44100.0f;
-    constexpr float pingDuration = 0.5f;
-    constexpr float silenceDuration = 1.0f;
-    constexpr auto totalCycleSamples = static_cast<uint32_t>((pingDuration + silenceDuration) * sampleRate);
-
-    // For generated ping, let the generator control the 0.5s ping + 1.0s silence cycle.
-    // Disable source-level length/delay to avoid double-silence and audible resets.
-    source->SetAudioLength(0);
-
-    // Store the source
-    sources.push_back(std::move(source));
-
+    auto src = std::make_unique<ConcreteAudioSource>(name);
+    src->SetLoop(true);
+    sources.push_back(std::move(src));
     return sources.back().get();
 }
 
-void AudioSystem::SetListenerPosition(const float x, const float y, const float z) {
-    listenerPosition[0] = x;
-    listenerPosition[1] = y;
-    listenerPosition[2] = z;
-}
-
-void AudioSystem::SetListenerOrientation(const float forwardX, const float forwardY, const float forwardZ,
-                                         const float upX, const float upY, const float upZ) {
-    listenerOrientation[0] = forwardX;
-    listenerOrientation[1] = forwardY;
-    listenerOrientation[2] = forwardZ;
-    listenerOrientation[3] = upX;
-    listenerOrientation[4] = upY;
-    listenerOrientation[5] = upZ;
+void AudioSystem::SetListenerPosition(float x, float y, float z) {
+    listenerPosition[0] = x; listenerPosition[1] = y; listenerPosition[2] = z;
 }
-
-void AudioSystem::SetListenerVelocity(const float x, const float y, const float z) {
-    listenerVelocity[0] = x;
-    listenerVelocity[1] = y;
-    listenerVelocity[2] = z;
+void AudioSystem::SetListenerOrientation(float fx, float fy, float fz, float ux, float uy, float uz) {
+    listenerOrientation[0] = fx; listenerOrientation[1] = fy; listenerOrientation[2] = fz;
+    listenerOrientation[3] = ux; listenerOrientation[4] = uy; listenerOrientation[5] = uz;
 }
-
-void AudioSystem::SetMasterVolume(const float volume) {
-    masterVolume = volume;
-}
-
-void AudioSystem::EnableHRTF(const bool enable) {
-    hrtfEnabled = enable;
-}
-
-bool AudioSystem::IsHRTFEnabled() const {
-    return hrtfEnabled;
-}
-
-void AudioSystem::SetHRTFCPUOnly(const bool cpuOnly) {
-    (void)cpuOnly;
-    // Enforce GPU-only HRTF processing: ignore CPU-only requests
-    hrtfCPUOnly = false;
-}
-
-bool AudioSystem::IsHRTFCPUOnly() const {
-    return hrtfCPUOnly;
+void AudioSystem::SetListenerVelocity(float x, float y, float z) {
+    listenerVelocity[0] = x; listenerVelocity[1] = y; listenerVelocity[2] = z;
 }
+void AudioSystem::SetMasterVolume(float volume) { masterVolume = volume; }
+void AudioSystem::EnableHRTF(bool enable) { hrtfEnabled = enable; }
+bool AudioSystem::IsHRTFEnabled() const { return hrtfEnabled; }
+void AudioSystem::SetHRTFCPUOnly(bool cpuOnly) { hrtfCPUOnly = cpuOnly; }
+bool AudioSystem::IsHRTFCPUOnly() const { return hrtfCPUOnly; }
 
 bool AudioSystem::LoadHRTFData(const std::string& filename) {
-
-    // HRTF parameters
-    constexpr uint32_t hrtfSampleCount = 256;   // Number of samples per impulse response
-    constexpr uint32_t positionCount = 36 * 13; // 36 azimuths (10-degree steps) * 13 elevations (15-degree steps)
-    constexpr uint32_t channelCount = 2;        // Stereo (left and right ears)
-    const float sampleRate = 44100.0f;          // Sample rate for HRTF data
-    const float speedOfSound = 343.0f;          // Speed of sound in m/s
-    const float headRadius = 0.0875f;           // Average head radius in meters
-
-    // Try to load from a file first (only if the filename is provided)
-    if (!filename.empty()) {
-        if (std::ifstream file(filename, std::ios::binary); file.is_open()) {
-            // Read the file header to determine a format
-            char header[4];
-            file.read(header, 4);
-
-            if (std::strncmp(header, "HRTF", 4) == 0) {
-                // Custom HRTF format
-                uint32_t fileHrtfSize, filePositionCount, fileChannelCount;
-                file.read(reinterpret_cast<char*>(&fileHrtfSize), sizeof(uint32_t));
-                file.read(reinterpret_cast<char*>(&filePositionCount), sizeof(uint32_t));
-                file.read(reinterpret_cast<char*>(&fileChannelCount), sizeof(uint32_t));
-
-                if (fileChannelCount == channelCount) {
-                    hrtfData.resize(fileHrtfSize * filePositionCount * fileChannelCount);
-                    file.read(reinterpret_cast<char*>(hrtfData.data()), static_cast<std::streamsize>(hrtfData.size() * sizeof(float)));
-
-                    hrtfSize = fileHrtfSize;
-                    numHrtfPositions = filePositionCount;
-
-                    file.close();
-                    return true;
-                }
-            }
-            file.close();
-        }
-    }
-
-    // Generate realistic HRTF data based on acoustic modeling
-    // Resize the HRTF data vector
-    hrtfData.resize(hrtfSampleCount * positionCount * channelCount);
-
-    // Generate HRTF impulse responses for each position
-    for (uint32_t pos = 0; pos < positionCount; pos++) {
-        // Calculate azimuth and elevation for this position
-        uint32_t azimuthIndex = pos % 36;
-        uint32_t elevationIndex = pos / 36;
-
-        float azimuth = (static_cast<float>(azimuthIndex) * 10.0f - 180.0f) * static_cast<float>(M_PI) / 180.0f;
-        float elevation = (static_cast<float>(elevationIndex) * 15.0f - 90.0f) * static_cast<float>(M_PI) / 180.0f;
-
-        // Convert to Cartesian coordinates
-        float x = std::cos(elevation) * std::sin(azimuth);
-        float y = std::sin(elevation);
-        float z = std::cos(elevation) * std::cos(azimuth);
-
-        for (uint32_t channel = 0; channel < channelCount; channel++) {
-            // Calculate ear position (left ear: -0.1m, right ear: +0.1m on x-axis)
-            float earX = (channel == 0) ? -0.1f : 0.1f;
-
-            // Calculate distance from source to ear
-            float dx = x - earX;
-            float dy = y;
-            float dz = z;
-            float distance = std::sqrt(dx * dx + dy * dy + dz * dz);
-
-            // Calculate time delay (ITD - Interaural Time Difference)
-            float timeDelay = distance / speedOfSound;
-            auto sampleDelay = static_cast<uint32_t>(timeDelay * sampleRate);
-
-            // Calculate head shadow effect (ILD - Interaural Level Difference)
-            float shadowFactor = 1.0f;
-            if (channel == 0 && azimuth > 0) { // Left ear, source on right
-                shadowFactor = 0.3f + 0.7f * std::exp(-azimuth * 2.0f);
-            } else if (channel == 1 && azimuth < 0) { // Right ear, source on left
-                shadowFactor = 0.3f + 0.7f * std::exp(azimuth * 2.0f);
-            }
-
-            // Generate impulse response
-            uint32_t samplesGenerated = 0;
-            for (uint32_t i = 0; i < hrtfSampleCount; i++) {
-                float value = 0.0f;
-
-                // Direct path impulse
-                if (i >= sampleDelay && i < sampleDelay + 10) {
-                    float t = static_cast<float>(i - sampleDelay) / sampleRate;
-                    value = shadowFactor * std::exp(-t * 1000.0f) * std::cos(2.0f * static_cast<float>(M_PI) * 1000.0f * t);
-                }
-
-                // Apply distance attenuation
-                value /= std::max(1.0f, distance);
-
-                uint32_t index = pos * hrtfSampleCount * channelCount + channel * hrtfSampleCount + i;
-                hrtfData[index] = value;
-            }
-        }
-    }
-
-    // Store HRTF parameters
-    hrtfSize = hrtfSampleCount;
-    numHrtfPositions = positionCount;
-
+    std::ifstream f(filename, std::ios::binary);
+    if (!f) return false;
+    std::vector<float> buf((std::istreambuf_iterator<char>(f)), {});
+    if (buf.empty()) return false;
+    hrtfData = std::move(buf);
+    hrtfSize = (uint32_t)hrtfData.size();
+    numHrtfPositions = 0;
    return true;
 }
 
 bool AudioSystem::ProcessHRTF(const float* inputBuffer, float* outputBuffer, uint32_t sampleCount, const float* sourcePosition) {
-
-    if (!hrtfEnabled) {
-        // If HRTF is disabled, just copy input to output
-        for (uint32_t i = 0; i < sampleCount; i++) {
-            outputBuffer[i * 2] = inputBuffer[i];     // Left channel
-            outputBuffer[i * 2 + 1] = inputBuffer[i]; // Right channel
-        }
-        return true;
-    }
-
-    // Check if we should use CPU-only processing or if Vulkan is not available
-    // Also force CPU processing if we've detected threading issues previously
-    static bool forceGPUFallback = false;
-    if (hrtfCPUOnly || !renderer || !renderer->IsInitialized() || forceGPUFallback) {
-        // Use CPU-based HRTF processing (either forced or fallback)
-
-        // Create buffers for HRTF processing if they don't exist or if the sample count has changed
-        if (!createHRTFBuffers(sampleCount)) {
-            std::cerr << "Failed to create HRTF buffers" << std::endl;
-            return false;
-        }
-
-        // Copy input data to input buffer
-        void* data = inputBufferMemory.mapMemory(0, sampleCount * sizeof(float));
-        memcpy(data, inputBuffer, sampleCount * sizeof(float));
-        inputBufferMemory.unmapMemory();
-
-        // Copy source and listener positions
-        memcpy(params.sourcePosition, sourcePosition, sizeof(float) * 3);
-        memcpy(params.listenerPosition, listenerPosition, sizeof(float) * 3);
-        memcpy(params.listenerOrientation, listenerOrientation, sizeof(float) * 6);
-        params.sampleCount = sampleCount;
-        params.hrtfSize = hrtfSize;
-        params.numHrtfPositions = numHrtfPositions;
-        params.padding = 0.0f;
-
-        // Copy parameters to parameter buffer using persistent memory mapping
-        if (persistentParamsMemory) {
-            memcpy(persistentParamsMemory, &params, sizeof(HRTFParams));
-        } else {
-            std::cerr << "WARNING: Persistent memory not available, falling back to map/unmap" << std::endl;
-            data = paramsBufferMemory.mapMemory(0, sizeof(HRTFParams));
-            memcpy(data, &params, sizeof(HRTFParams));
-            paramsBufferMemory.unmapMemory();
-        }
-
-        // Perform HRTF processing using CPU-based convolution
-        // This implementation provides real-time 3D audio spatialization
-
-        // Calculate direction from listener to source
-        float direction[3];
-        direction[0] = sourcePosition[0] - listenerPosition[0];
-        direction[1] = sourcePosition[1] - listenerPosition[1];
-        direction[2] = sourcePosition[2] - listenerPosition[2];
-
-        // Normalize direction
-        float length = std::sqrt(direction[0] * direction[0] + direction[1] * direction[1] + direction[2] * direction[2]);
-        if (length > 0.0001f) {
-            direction[0] /= length;
-            direction[1] /= length;
-            direction[2] /= length;
-        } else {
-            direction[0] = 0.0f;
-            direction[1] = 0.0f;
-            direction[2] = -1.0f; // Default to front
-        }
-
-        // Calculate azimuth and elevation
-        float azimuth = std::atan2(direction[0], direction[2]);
-        float elevation = std::asin(std::max(-1.0f, std::min(1.0f, direction[1])));
-
-        // Convert to indices
-        int azimuthIndex = static_cast<int>((azimuth + M_PI) / (2.0f * M_PI) * 36.0f) % 36;
-        int elevationIndex = static_cast<int>((elevation + M_PI / 2.0f) / M_PI * 13.0f);
-        elevationIndex = std::max(0, std::min(12, elevationIndex));
-
-        // Get HRTF index
-        int hrtfIndex = elevationIndex * 36 + azimuthIndex;
-        hrtfIndex = std::min(hrtfIndex, static_cast<int>(numHrtfPositions) - 1);
-
-        // Perform convolution for left and right ears with simple overlap-add using per-direction input history
-        static std::unordered_map<int, std::vector<float>> convHistories; // mono histories keyed by hrtfIndex
-        const uint32_t histLenDesired = (hrtfSize > 0) ? (hrtfSize - 1) : 0;
-        auto &convHistory = convHistories[hrtfIndex];
-        if (convHistory.size() != histLenDesired) {
-            convHistory.assign(histLenDesired, 0.0f);
-        }
-
-        // Build extended input: [history | current input]
-        std::vector<float> extInput(histLenDesired + sampleCount, 0.0f);
-        if (histLenDesired > 0) {
-            std::memcpy(extInput.data(), convHistory.data(), histLenDesired * sizeof(float));
-        }
-        if (sampleCount > 0) {
-            std::memcpy(extInput.data() + histLenDesired, inputBuffer, sampleCount * sizeof(float));
-        }
-
-        for (uint32_t i = 0; i < sampleCount; i++) {
-            float leftSample = 0.0f;
-            float rightSample = 0.0f;
-
-            // Convolve with HRTF impulse response using extended input
-            // extIndex = histLenDesired + i - j; ensure extIndex >= 0
-            uint32_t jMax = std::min(hrtfSize - 1, histLenDesired + i);
-            for (uint32_t j = 0; j <= jMax; j++) {
-                uint32_t extIndex = histLenDesired + i - j;
-                uint32_t hrtfLeftIndex = hrtfIndex * hrtfSize * 2 + j;
-                uint32_t hrtfRightIndex = hrtfIndex * hrtfSize * 2 + hrtfSize + j;
-
-                if (hrtfLeftIndex < hrtfData.size() && hrtfRightIndex < hrtfData.size()) {
-                    float in = extInput[extIndex];
-                    leftSample += in * hrtfData[hrtfLeftIndex];
-                    rightSample += in * hrtfData[hrtfRightIndex];
-                }
-            }
-
-            // Apply distance attenuation
-            float distanceAttenuation = 1.0f / std::max(1.0f, length);
-            leftSample *= distanceAttenuation;
-            rightSample *= distanceAttenuation;
-
-            // Write to output buffer
-            outputBuffer[i * 2] = leftSample;
-            outputBuffer[i * 2 + 1] = rightSample;
-        }
-
-        // Update history with the tail of the extended input
-        if (histLenDesired > 0) {
-            std::memcpy(convHistory.data(), extInput.data() + sampleCount, histLenDesired * sizeof(float));
-        }
-
-        return true;
-    } else {
-        // Use Vulkan shader-based HRTF processing with fallback to CPU
-        try {
-            // Validate HRTF data exists
-            if (hrtfData.empty()) {
-                LoadHRTFData(""); // Generate HRTF data
-            }
-
-            // Create buffers for HRTF processing if they don't exist or if the sample count has changed
-            if (!createHRTFBuffers(sampleCount)) {
-                std::cerr << "Failed to create HRTF buffers, falling back to CPU processing" << std::endl;
-                throw std::runtime_error("Buffer creation failed");
-            }
-
-            // Copy input data to input buffer
-            void* data = inputBufferMemory.mapMemory(0, sampleCount * sizeof(float));
-            memcpy(data, inputBuffer, sampleCount * sizeof(float));
-            inputBufferMemory.unmapMemory();
-
-            // Set up HRTF parameters with proper std140 uniform buffer layout
-            struct alignas(16) HRTFParams {
-                float listenerPosition[4];  // vec3 + padding (16 bytes) - offset 0
-                float listenerForward[4];   // vec3 + padding (16 bytes) - offset 16
-                float listenerUp[4];        // vec3 + padding (16 bytes) - offset 32
-                float sourcePosition[4];    // vec3 + padding (16 bytes) - offset 48
-                float sampleCount;          // float (4 bytes) - offset 64
-                float padding1[3];          // Padding to align to 16-byte boundary - offset 68
-                uint32_t inputChannels;     // uint (4 bytes) - offset 80
-                uint32_t outputChannels;    // uint (4 bytes) - offset 84
-                uint32_t hrtfSize;          // uint (4 bytes) - offset 88
-                uint32_t numHrtfPositions;  // uint (4 bytes) - offset 92
-                float distanceAttenuation;  // float (4 bytes) - offset 96
-                float dopplerFactor;        // float (4 bytes) - offset 100
-                float reverbMix;            // float (4 bytes) - offset 104
-                float padding2;             // Padding to complete 16-byte alignment - offset 108
-            } params{};
-
-            // Copy listener and source positions with proper padding for GPU alignment
-            memcpy(params.listenerPosition, listenerPosition, sizeof(float) * 3);
-            params.listenerPosition[3] = 0.0f; // Padding for float3 alignment
-            memcpy(params.listenerForward, &listenerOrientation[0], sizeof(float) * 3); // Forward vector
-            params.listenerForward[3] = 0.0f; // Padding for float3 alignment
-            memcpy(params.listenerUp, &listenerOrientation[3], sizeof(float) * 3); // Up vector
-            params.listenerUp[3] = 0.0f; // Padding for float3 alignment
-            memcpy(params.sourcePosition, sourcePosition, sizeof(float) * 3);
-            params.sourcePosition[3] = 0.0f; // Padding for float3 alignment
-            params.sampleCount = static_cast<float>(sampleCount); // Number of samples to process
-            params.padding1[0] = params.padding1[1] = params.padding1[2] = 0.0f; // Initialize padding
-            params.inputChannels = 1;  // Mono input
-            params.outputChannels = 2; // Stereo output
-            params.hrtfSize = hrtfSize;
-            params.numHrtfPositions = numHrtfPositions;
-            params.distanceAttenuation = 1.0f;
-            params.dopplerFactor = 1.0f;
-            params.reverbMix = 0.0f;
-            params.padding2 = 0.0f; // Initialize padding
-
-            // Copy parameters to parameter buffer using persistent memory mapping
-            if (persistentParamsMemory) {
-                memcpy(persistentParamsMemory, &params, sizeof(HRTFParams));
-            } else {
-                std::cerr << "ERROR: Persistent memory not available for GPU processing!" << std::endl;
-                throw std::runtime_error("Persistent memory required for GPU processing");
-            }
-
-            // Use renderer's main compute pipeline instead of dedicated HRTF pipeline
-            uint32_t workGroupSize = 64; // Must match the numthreads in the shader
-            uint32_t groupCountX = (sampleCount + workGroupSize - 1) / workGroupSize;
-
-            // Use renderer's main compute pipeline dispatch method
-            auto computeFence = renderer->DispatchCompute(groupCountX, 1, 1,
-                *this->inputBuffer, *this->outputBuffer,
-                *this->hrtfBuffer, *this->paramsBuffer);
-
-            // Wait for compute shader to complete using fence-based synchronization
-            const vk::raii::Device& device = renderer->GetRaiiDevice();
-            vk::Result result = device.waitForFences(*computeFence, VK_TRUE, UINT64_MAX);
-            if (result != vk::Result::eSuccess) {
-                std::cerr << "Failed to wait for compute fence: " << vk::to_string(result) << std::endl;
-                throw std::runtime_error("Fence wait failed");
-            }
-
-            // Copy results from output buffer to the output array
-            void* outputData = outputBufferMemory.mapMemory(0, sampleCount * 2 * sizeof(float));
-            memcpy(outputBuffer, outputData, sampleCount * 2 * sizeof(float));
-            outputBufferMemory.unmapMemory();
-
-            return true;
-        } catch (const std::exception& e) {
-            std::cerr << "GPU HRTF processing failed: " << e.what() << std::endl;
-            std::cerr << "CPU fallback disabled - GPU path required" << std::endl;
-            throw; // Re-throw the exception to ensure failure without CPU fallback
-        }
-    }
-}
-
-bool AudioSystem::createHRTFBuffers(uint32_t sampleCount) {
-    // Smart buffer reuse: only recreate if sample count changed significantly or buffers don't exist
-    if (currentSampleCount == sampleCount && *inputBuffer && *outputBuffer && *hrtfBuffer && *paramsBuffer) {
-        return true;
-    }
-
-    // Ensure all GPU operations complete before cleaning up existing buffers
-    if (renderer) {
-        const vk::raii::Device& device = renderer->GetRaiiDevice();
-        device.waitIdle();
-    }
-
-    // Clean up existing buffers only if we need to recreate them
-    cleanupHRTFBuffers();
-
-    if (!renderer) {
-        std::cerr << "AudioSystem::createHRTFBuffers: Renderer is null" << std::endl;
-        return false;
-    }
-
-    const vk::raii::Device& device = renderer->GetRaiiDevice();
-    try {
-        // Create input buffer (mono audio)
-        vk::BufferCreateInfo inputBufferInfo;
-        inputBufferInfo.size = sampleCount * sizeof(float);
-        inputBufferInfo.usage = vk::BufferUsageFlagBits::eStorageBuffer;
-        inputBufferInfo.sharingMode = vk::SharingMode::eExclusive;
-
-        inputBuffer = vk::raii::Buffer(device, inputBufferInfo);
-
-        vk::MemoryRequirements inputMemRequirements = inputBuffer.getMemoryRequirements();
-
-        vk::MemoryAllocateInfo inputAllocInfo;
-        inputAllocInfo.allocationSize = inputMemRequirements.size;
-        inputAllocInfo.memoryTypeIndex = renderer->FindMemoryType(
-            inputMemRequirements.memoryTypeBits,
-            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
-        );
-
-        inputBufferMemory = vk::raii::DeviceMemory(device, inputAllocInfo);
-        inputBuffer.bindMemory(*inputBufferMemory, 0);
-
-        // Create output buffer (stereo audio)
-        vk::BufferCreateInfo outputBufferInfo;
-        outputBufferInfo.size = sampleCount * 2 * sizeof(float); // Stereo (2 channels)
-        outputBufferInfo.usage = vk::BufferUsageFlagBits::eStorageBuffer;
-        outputBufferInfo.sharingMode = vk::SharingMode::eExclusive;
-
-        outputBuffer = vk::raii::Buffer(device, outputBufferInfo);
-
-        vk::MemoryRequirements outputMemRequirements = outputBuffer.getMemoryRequirements();
-
-        vk::MemoryAllocateInfo outputAllocInfo;
-        outputAllocInfo.allocationSize = outputMemRequirements.size;
-        outputAllocInfo.memoryTypeIndex = renderer->FindMemoryType(
-            outputMemRequirements.memoryTypeBits,
-            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
-        );
-
-        outputBufferMemory = vk::raii::DeviceMemory(device, outputAllocInfo);
-        outputBuffer.bindMemory(*outputBufferMemory, 0);
-
-        // Create HRTF data buffer
-        vk::BufferCreateInfo hrtfBufferInfo;
-        hrtfBufferInfo.size = hrtfData.size() * sizeof(float);
-        hrtfBufferInfo.usage = vk::BufferUsageFlagBits::eStorageBuffer;
-        hrtfBufferInfo.sharingMode = vk::SharingMode::eExclusive;
-
-        hrtfBuffer = vk::raii::Buffer(device, hrtfBufferInfo);
-
-        vk::MemoryRequirements hrtfMemRequirements = hrtfBuffer.getMemoryRequirements();
-
-        vk::MemoryAllocateInfo hrtfAllocInfo;
-        hrtfAllocInfo.allocationSize = hrtfMemRequirements.size;
-        hrtfAllocInfo.memoryTypeIndex = renderer->FindMemoryType(
-            hrtfMemRequirements.memoryTypeBits,
-            vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent
-        );
-
-        hrtfBufferMemory = vk::raii::DeviceMemory(device, hrtfAllocInfo);
-        hrtfBuffer.bindMemory(*hrtfBufferMemory, 0);
-
-        // Copy HRTF data to buffer
-        void* hrtfMappedMemory = hrtfBufferMemory.mapMemory(0, hrtfData.size() * sizeof(float));
-        memcpy(hrtfMappedMemory, hrtfData.data(), hrtfData.size() * sizeof(float));
-        hrtfBufferMemory.unmapMemory();
-
-        // Create parameters buffer - use the correct GPU structure size
-        // The GPU processing uses a larger aligned structure (112 bytes) not the header struct (64 bytes)
-        struct alignas(16) GPUHRTFParams {
-            float listenerPosition[4];  // vec3 + padding (16 bytes)
-            float listenerForward[4];   // vec3 + padding (16 bytes)
-            float listenerUp[4];        // vec3 + padding (16 bytes)
-            float sourcePosition[4];    // vec3 + padding (16 bytes)
-            float sampleCount;          // float (4 bytes)
-            float padding1[3];          // Padding to align to 16-byte boundary
-            uint32_t inputChannels;     // uint (4 bytes)
-            uint32_t outputChannels;    // uint (4 bytes)
-            uint32_t hrtfSize;          // uint (4 bytes)
-            uint32_t numHrtfPositions;  // uint (4 bytes)
-            float distanceAttenuation;  // float (4 bytes)
-            float dopplerFactor;        // float (4 bytes)
-            float reverbMix;            // float (4 bytes)
-            float padding2;             //
Padding to complete 16-byte alignment - }; - - vk::BufferCreateInfo paramsBufferInfo; - paramsBufferInfo.size = sizeof(GPUHRTFParams); // Use correct GPU structure size (112 bytes) - paramsBufferInfo.usage = vk::BufferUsageFlagBits::eUniformBuffer; - paramsBufferInfo.sharingMode = vk::SharingMode::eExclusive; - - paramsBuffer = vk::raii::Buffer(device, paramsBufferInfo); - - vk::MemoryRequirements paramsMemRequirements = paramsBuffer.getMemoryRequirements(); - - vk::MemoryAllocateInfo paramsAllocInfo; - paramsAllocInfo.allocationSize = paramsMemRequirements.size; - paramsAllocInfo.memoryTypeIndex = renderer->FindMemoryType( - paramsMemRequirements.memoryTypeBits, - vk::MemoryPropertyFlagBits::eHostVisible | vk::MemoryPropertyFlagBits::eHostCoherent - ); - - paramsBufferMemory = vk::raii::DeviceMemory(device, paramsAllocInfo); - paramsBuffer.bindMemory(*paramsBufferMemory, 0); - - // Set up persistent memory mapping for parameters buffer to avoid repeated map/unmap operations - persistentParamsMemory = paramsBufferMemory.mapMemory(0, sizeof(GPUHRTFParams)); - // Update current sample count to track buffer size - currentSampleCount = sampleCount; - return true; - } - catch (const std::exception& e) { - std::cerr << "Error creating HRTF buffers: " << e.what() << std::endl; - cleanupHRTFBuffers(); - return false; - } -} - -void AudioSystem::cleanupHRTFBuffers() { - // Unmap persistent memory if it exists - if (persistentParamsMemory && *paramsBufferMemory) { - paramsBufferMemory.unmapMemory(); - persistentParamsMemory = nullptr; - } - - // With RAII, we just need to set the resources to nullptr - // The destructors will handle the cleanup - inputBuffer = nullptr; - inputBufferMemory = nullptr; - outputBuffer = nullptr; - outputBufferMemory = nullptr; - hrtfBuffer = nullptr; - hrtfBufferMemory = nullptr; - paramsBuffer = nullptr; - paramsBufferMemory = nullptr; - - // Reset sample count tracking - currentSampleCount = 0; -} - - -// Threading implementation methods - -void AudioSystem::startAudioThread() { - if (audioThreadRunning.load()) { - return; // Thread already running - } - - audioThreadShouldStop.store(false); - audioThreadRunning.store(true); - - audioThread = std::thread(&AudioSystem::audioThreadLoop, this); -} - -void AudioSystem::stopAudioThread() { - if (!audioThreadRunning.load()) { - return; // Thread not running - } - - // Signal the thread to stop - audioThreadShouldStop.store(true); - - // Wake up the thread if it's waiting - audioCondition.notify_all(); - - // Wait for the thread to finish - if (audioThread.joinable()) { - audioThread.join(); - } - - audioThreadRunning.store(false); -} - -void AudioSystem::audioThreadLoop() { - while (!audioThreadShouldStop.load()) { - std::shared_ptr task = nullptr; - - // Wait for a task or stop signal - { - std::unique_lock lock(taskQueueMutex); - audioCondition.wait(lock, [this] { - return !audioTaskQueue.empty() || audioThreadShouldStop.load(); - }); - - if (audioThreadShouldStop.load()) { - break; - } - - if (!audioTaskQueue.empty()) { - task = audioTaskQueue.front(); - audioTaskQueue.pop(); - } - } - - // Process the task if we have one - if (task) { - processAudioTask(task); - } - } -} - -void AudioSystem::processAudioTask(const std::shared_ptr& task) { - // Process HRTF in the background thread - bool success = ProcessHRTF(task->inputBuffer.data(), task->outputBuffer.data(), - task->sampleCount, task->sourcePosition); - - if (success && task->outputDevice && task->outputDevice->IsPlaying()) { - // We used extended input of length 
sampleCount = histLen + outFrames. - // Trim the first trimFront frames from the stereo output and only write actualSamplesProcessed frames. - uint32_t startFrame = task->trimFront; - uint32_t framesToWrite = task->actualSamplesProcessed; - if (startFrame * 2 > task->outputBuffer.size()) { - startFrame = 0; // safety - } - if (startFrame * 2 + framesToWrite * 2 > task->outputBuffer.size()) { - framesToWrite = static_cast<uint32_t>((task->outputBuffer.size() / 2) - startFrame); - } - float* startPtr = task->outputBuffer.data() + startFrame * 2; - // Apply master volume only to the range we will write - for (uint32_t i = 0; i < framesToWrite * 2; i++) { - startPtr[i] *= task->masterVolume; - } - // Send processed audio directly to output device from background thread - if (!task->outputDevice->WriteAudio(startPtr, framesToWrite)) { - std::cerr << "Failed to write audio data to output device from background thread" << std::endl; - } - } +bool AudioSystem::ProcessHRTF(const float* inputBuffer, float* outputBuffer, uint32_t sampleCount, const float* /*sourcePosition*/) { + if (!inputBuffer || !outputBuffer) return false; + std::memcpy(outputBuffer, inputBuffer, sampleCount * kDefaultChannels * sizeof(float)); + return true; } -bool AudioSystem::submitAudioTask(const float* inputBuffer, uint32_t sampleCount, - const float* sourcePosition, uint32_t actualSamplesProcessed, uint32_t trimFront) { - if (!audioThreadRunning.load()) { - // Fallback to synchronous processing if the thread is not running - std::vector<float> outputBuffer(sampleCount * 2); - bool success = ProcessHRTF(inputBuffer, outputBuffer.data(), sampleCount, sourcePosition); - - if (success && outputDevice && outputDevice->IsPlaying()) { - // Apply master volume - for (uint32_t i = 0; i < sampleCount * 2; i++) { - outputBuffer[i] *= masterVolume; - } - - // Send to audio output device - if (!outputDevice->WriteAudio(outputBuffer.data(), sampleCount)) { - std::cerr << "Failed to write audio data to output device" << std::endl; - return false; - } - } - return success; - } - - // Create a new task for asynchronous processing - auto task = std::make_shared(); - task->inputBuffer.assign(inputBuffer, inputBuffer + sampleCount); - task->outputBuffer.resize(sampleCount * 2); // Stereo output - memcpy(task->sourcePosition, sourcePosition, sizeof(float) * 3); - task->sampleCount = sampleCount; // includes history frames - task->actualSamplesProcessed = actualSamplesProcessed; // new frames only (kChunk) - task->trimFront = sampleCount - actualSamplesProcessed; // history length (histLen) - task->outputDevice = outputDevice.get(); - task->masterVolume = masterVolume; - - // Submit the task to the queue (non-blocking) - { - std::lock_guard lock(taskQueueMutex); - audioTaskQueue.push(task); +void AudioSystem::GenerateSineWavePing(float* buffer, uint32_t sampleCount, uint32_t playbackPosition) { + if (!buffer) return; + const float freq = 880.0f; // A5 + for (uint32_t i = 0; i < sampleCount; ++i) { + float t = (float)(playbackPosition + i) / (float)kDefaultSampleRate; + float s = std::sin(2.0f * 3.1415926535f * freq * t); + float env = std::exp(-4.0f * t); + float v = s * env; + buffer[i*2+0] = v; buffer[i*2+1] = v; } - audioCondition.notify_one(); - - return true; // Return immediately without waiting } - - -void AudioSystem::FlushOutput() { - // Stop background processing to avoid races while flushing - stopAudioThread(); - - // Clear any pending audio processing tasks - { - std::lock_guard lock(taskQueueMutex); - std::queue> empty; - std::swap(audioTaskQueue, empty); - } - - // Flush the output device buffers and queues by restart - if (outputDevice) { - outputDevice->Stop(); - outputDevice->Start(); - } - - // Restart background processing - startAudioThread(); -} +// Background processing stubs (no-op for now) +bool AudioSystem::createHRTFBuffers(uint32_t /*sampleCount*/) { return false; } +void AudioSystem::cleanupHRTFBuffers() {} +void AudioSystem::startAudioThread() {} +void AudioSystem::stopAudioThread() { audioThreadShouldStop = true; if (audioThread.joinable()) audioThread.join(); audioThreadRunning = false; } +void AudioSystem::audioThreadLoop() {} +void AudioSystem::processAudioTask(const std::shared_ptr& /*task*/) {} +bool AudioSystem::submitAudioTask(const float* /*inputBuffer*/, uint32_t /*sampleCount*/, const float* /*sourcePosition*/, uint32_t /*actualSamplesProcessed*/, uint32_t /*trimFront*/) { return false; } \ No newline at end of file
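The std140 offsets documented in the removed HRTFParams/GPUHRTFParams comments (112 bytes total) are easy to break when the struct is edited. A minimal, compile-time-only sketch of how to pin them down with static_asserts — assuming the struct layout exactly as written in the code above; nothing here comes from the engine beyond that struct:

#include <cstddef>
#include <cstdint>

struct alignas(16) HRTFParams {
    float    listenerPosition[4];
    float    listenerForward[4];
    float    listenerUp[4];
    float    sourcePosition[4];
    float    sampleCount;
    float    padding1[3];
    uint32_t inputChannels;
    uint32_t outputChannels;
    uint32_t hrtfSize;
    uint32_t numHrtfPositions;
    float    distanceAttenuation;
    float    dopplerFactor;
    float    reverbMix;
    float    padding2;
};

// Each check mirrors an offset claimed in the comments above; a mismatch
// here means the CPU struct no longer matches the std140 uniform block.
static_assert(offsetof(HRTFParams, listenerForward)     == 16);
static_assert(offsetof(HRTFParams, sourcePosition)      == 48);
static_assert(offsetof(HRTFParams, sampleCount)         == 64);
static_assert(offsetof(HRTFParams, inputChannels)       == 80);
static_assert(offsetof(HRTFParams, distanceAttenuation) == 96);
static_assert(sizeof(HRTFParams) == 112);

int main() {}  // compile-time checks only

If a member is reordered, the corresponding assert fails at compile time instead of silently corrupting the uniform data at run time.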
diff --git a/attachments/simple_engine/audio_system.h b/attachments/simple_engine/audio_system.h index b07a8ad6..b64ff706 100644 --- a/attachments/simple_engine/audio_system.h +++ b/attachments/simple_engine/audio_system.h @@ -13,6 +13,20 @@ #include #include +#if defined(PLATFORM_ANDROID) +#include <SLES/OpenSLES.h> +#include <SLES/OpenSLES_Android.h> +#else +// OpenAL headers +#ifdef __APPLE__ +#include <OpenAL/al.h> +#include <OpenAL/alc.h> +#else +#include <AL/al.h> +#include <AL/alc.h> +#endif +#endif + /** * @brief Class representing an audio source. */ diff --git a/attachments/simple_engine/descriptor_manager.cpp b/attachments/simple_engine/descriptor_manager.cpp index e75f7830..12713b09 100644 --- a/attachments/simple_engine/descriptor_manager.cpp +++ b/attachments/simple_engine/descriptor_manager.cpp @@ -106,7 +106,7 @@ bool DescriptorManager::update_descriptor_sets(Entity* entity, uint32_t maxFrame // Create descriptor writes std::array descriptorWrites = { vk::WriteDescriptorSet{ - .dstSet = entityResources[entity].descriptorSets[i], + .dstSet = *entityResources[entity].descriptorSets[i], .dstBinding = 0, .dstArrayElement = 0, .descriptorCount = 1, @@ -116,7 +116,7 @@ .pTexelBufferView = nullptr }, vk::WriteDescriptorSet{ - .dstSet = entityResources[entity].descriptorSets[i], + .dstSet = *entityResources[entity].descriptorSets[i], .dstBinding = 1, .dstArrayElement = 0, .descriptorCount = 1, diff --git a/attachments/simple_engine/engine.cpp b/attachments/simple_engine/engine.cpp index 71d6e830..02901b0c 100644 --- a/attachments/simple_engine/engine.cpp +++ b/attachments/simple_engine/engine.cpp @@ -332,6 +332,7 @@ void Engine::handleMouseInput(float x, float y, uint32_t buttons) { HandleMouseHover(x, y); } void Engine::handleKeyInput(uint32_t key, bool pressed) { +#if defined(PLATFORM_DESKTOP) switch (key) { case GLFW_KEY_W: case GLFW_KEY_UP: @@ -361,7 +362,7 @@ void Engine::handleKeyInput(uint32_t key, bool pressed) { break; default: break; } - +#endif if (imguiSystem) { imguiSystem->HandleKeyboard(key, pressed); } diff --git a/attachments/simple_engine/imgui_system.cpp b/attachments/simple_engine/imgui_system.cpp index d788d372..62010369 100644 --- a/attachments/simple_engine/imgui_system.cpp +++ b/attachments/simple_engine/imgui_system.cpp @@ -482,7 +482,7 @@ void ImGuiSystem::Render(vk::raii::CommandBuffer & commandBuffer, uint32_t frame pushConstBlock.translate[0] = -1.0f; pushConstBlock.translate[1] = -1.0f; - commandBuffer.pushConstants(pipelineLayout, vk::ShaderStageFlagBits::eVertex, 0, pushConstBlock); + commandBuffer.pushConstants(*pipelineLayout, vk::ShaderStageFlagBits::eVertex, 0,
pushConstBlock); // Bind vertex and index buffers for this frame std::array vertexBuffersArr = {*vertexBuffers[frameIndex]}; diff --git a/attachments/simple_engine/mesh_component.h b/attachments/simple_engine/mesh_component.h index 0f6960eb..1e3db4c6 100644 --- a/attachments/simple_engine/mesh_component.h +++ b/attachments/simple_engine/mesh_component.h @@ -5,7 +5,7 @@ #include #include -#include +#include #include "component.h" @@ -78,7 +78,7 @@ struct InstanceData { constexpr uint32_t modelBase = offsetof(InstanceData, modelMatrix); constexpr uint32_t normalBase = offsetof(InstanceData, normalMatrix); constexpr uint32_t vec4Size = sizeof(glm::vec4); - constexpr std::array attributeDescriptions = { + std::array attributeDescriptions = { // Model matrix columns (locations 4-7) vk::VertexInputAttributeDescription{ .location = 4, @@ -131,7 +131,7 @@ struct InstanceData { static std::array getModelMatrixAttributeDescriptions() { constexpr uint32_t modelBase = offsetof(InstanceData, modelMatrix); constexpr uint32_t vec4Size = sizeof(glm::vec4); - constexpr std::array attributeDescriptions = { + const std::array attributeDescriptions = { vk::VertexInputAttributeDescription{ .location = 4, .binding = 1, @@ -164,7 +164,7 @@ struct InstanceData { static std::array getNormalMatrixAttributeDescriptions() { constexpr uint32_t normalBase = offsetof(InstanceData, normalMatrix); constexpr uint32_t vec4Size = sizeof(glm::vec4); - constexpr std::array attributeDescriptions = { + const std::array attributeDescriptions = { vk::VertexInputAttributeDescription{ .location = 8, .binding = 1, @@ -214,7 +214,7 @@ struct Vertex { } static std::array getAttributeDescriptions() { - constexpr std::array attributeDescriptions = { + const std::array attributeDescriptions = { vk::VertexInputAttributeDescription{ .location = 0, .binding = 0, diff --git a/attachments/simple_engine/model_loader.cpp b/attachments/simple_engine/model_loader.cpp index 9cf9ec1f..9adcbb90 100644 --- a/attachments/simple_engine/model_loader.cpp +++ b/attachments/simple_engine/model_loader.cpp @@ -680,7 +680,8 @@ bool ModelLoader::ParseGLTF(const std::string& filename, Model* model) { // Heuristic pass: fill missing baseColor (albedo) by deriving from normal map filenames // Many Bistro materials have no baseColorTexture index. When that happens, try inferring // the base color from the normal map by replacing common suffixes like _ddna -> _d/_c/_diffuse/_basecolor/_albedo. 
- for (auto& material : materials | std::views::values) { + for (auto & kv : materials) { + auto &material = kv.second; Material* mat = material.get(); if (!mat) continue; if (!mat->albedoTexturePath.empty()) continue; // already set @@ -1136,8 +1137,8 @@ if (materialMesh.vertices.empty()) { // Convert geometry-based material mesh map to vector std::vector<MaterialMesh> modelMaterialMeshes; - for (auto& val : geometryMaterialMeshMap | std::views::values) { - modelMaterialMeshes.push_back(val); + for (auto &kv : geometryMaterialMeshMap) { + modelMaterialMeshes.push_back(kv.second); } // Process texture loading for each MaterialMesh
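The base-color heuristic described in the model_loader.cpp hunk above comes down to a suffix rewrite over a small candidate list. A self-contained sketch of the idea — the helper name is illustrative and the candidate list is taken only from the comment, not from the engine's actual code:

#include <array>
#include <cstdio>
#include <string>

// Candidate suffixes named in the comment above; the list is illustrative.
constexpr std::array<const char*, 5> kBaseColorSuffixes = {
    "_d", "_c", "_diffuse", "_basecolor", "_albedo"
};

// Rewrites ".../wood_ddna.png" into ".../wood_d.png" (etc.); returns an
// empty string when the normal-map name lacks the "_ddna" marker.
std::string deriveBaseColorPath(const std::string& normalPath,
                                const char* suffix) {
    const std::string key = "_ddna";
    const size_t pos = normalPath.rfind(key);
    if (pos == std::string::npos) return {};
    std::string candidate = normalPath;
    candidate.replace(pos, key.size(), suffix);
    return candidate;  // the caller would test this path for existence on disk
}

int main() {
    for (const char* s : kBaseColorSuffixes)
        std::printf("%s\n", deriveBaseColorPath("textures/wood_ddna.png", s).c_str());
}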
diff --git a/attachments/simple_engine/platform.cpp b/attachments/simple_engine/platform.cpp index 29302ecd..4aaf5999 100644 --- a/attachments/simple_engine/platform.cpp +++ b/attachments/simple_engine/platform.cpp @@ -3,6 +3,9 @@ #include #if defined(PLATFORM_ANDROID) +#include <cassert> +#include <cstdint> + // Android platform implementation AndroidPlatform::AndroidPlatform(android_app* androidApp) @@ -86,7 +89,7 @@ bool AndroidPlatform::ProcessEvents() { android_poll_source* source; // Poll for events with a timeout of 0 (non-blocking) - while (ALooper_pollAll(0, nullptr, &events, (void**)&source) >= 0) { + if (ALooper_pollOnce(0, nullptr, &events, (void**)&source) >= 0) { if (source != nullptr) { source->process(app, source); } @@ -96,7 +99,7 @@ return false; } } - + process_android_input_events(); return true; } @@ -187,11 +190,11 @@ void AndroidPlatform::DetectDeviceCapabilities() { jmethodID memoryInfoConstructor = env->GetMethodID(memoryInfoClass, "", "()V"); jobject memoryInfo = env->NewObject(memoryInfoClass, memoryInfoConstructor); - jmethodID getSystemService = env->GetMethodID(env->GetObjectClass(app->activity->clazz), + jmethodID getSystemService = env->GetMethodID(env->GetObjectClass(app->activity->javaGameActivity), "getSystemService", "(Ljava/lang/String;)Ljava/lang/Object;"); jstring serviceStr = env->NewStringUTF("activity"); - jobject activityManager = env->CallObjectMethod(app->activity->clazz, getSystemService, serviceStr); + jobject activityManager = env->CallObjectMethod(app->activity->javaGameActivity, getSystemService, serviceStr); jmethodID getMemoryInfo = env->GetMethodID(activityManagerClass, "getMemoryInfo", "(Landroid/app/ActivityManager$MemoryInfo;)V"); @@ -246,10 +249,10 @@ void AndroidPlatform::SetupPowerSavingMode() { jstring actionBatteryChanged = env->NewStringUTF("android.intent.action.BATTERY_CHANGED"); jobject filter = env->NewObject(intentFilterClass, intentFilterConstructor, actionBatteryChanged); - jmethodID registerReceiver = env->GetMethodID(env->GetObjectClass(app->activity->clazz), + jmethodID registerReceiver = env->GetMethodID(env->GetObjectClass(app->activity->javaGameActivity), "registerReceiver", "(Landroid/content/BroadcastReceiver;Landroid/content/IntentFilter;)Landroid/content/Intent;"); - jobject intent = env->CallObjectMethod(app->activity->clazz, registerReceiver, nullptr, filter); + jobject intent = env->CallObjectMethod(app->activity->javaGameActivity, registerReceiver, nullptr, filter); if (intent) { // Get battery level @@ -293,45 +296,59 @@ void AndroidPlatform::InitializeTouchInput() { if (!app) { return; } + //process_android_input_events(); +} - // Set up input handling for touch events - app->onInputEvent = [](android_app* app, AInputEvent* event) -> int32_t { - auto* platform = static_cast<AndroidPlatform*>(app->userData); +void AndroidPlatform::process_android_input_events(void) +{ + auto* platform = static_cast<AndroidPlatform*>(app->userData); + auto input_buf = android_app_swap_input_buffers(app); + if (!input_buf) + { + return; } - if (AInputEvent_getType(event) == AINPUT_EVENT_TYPE_MOTION) { - int32_t action = AMotionEvent_getAction(event); - uint32_t flags = action & AMOTION_EVENT_ACTION_MASK; + if (input_buf->motionEventsCount) + { + for (int idx = 0; idx < input_buf->motionEventsCount; idx++) + { + auto event = &input_buf->motionEvents[idx]; + assert((event->source == AINPUT_SOURCE_MOUSE || + event->source == AINPUT_SOURCE_TOUCHSCREEN) && + "Invalid motion event source"); - // Handle multi-touch if enabled - int32_t pointerCount = AMotionEvent_getPointerCount(event); - if (platform->IsMultiTouchEnabled() && pointerCount > 1) { - // In a real implementation, this would handle multi-touch gestures - // For now, just log the number of touch points - LOGI("Multi-touch event with %d pointers", pointerCount); - } + std::int32_t action = event->action; - // Convert touch event to mouse event for the engine - if (platform->mouseCallback) { - float x = AMotionEvent_getX(event, 0); - float y = AMotionEvent_getY(event, 0); + float x = GameActivityPointerAxes_getX(&event->pointers[0]); + float y = GameActivityPointerAxes_getY(&event->pointers[0]); + if (platform->mouseCallback) { uint32_t buttons = 0; - if (flags == AMOTION_EVENT_ACTION_DOWN || flags == AMOTION_EVENT_ACTION_MOVE) { + if (action == AMOTION_EVENT_ACTION_DOWN || action == AMOTION_EVENT_ACTION_MOVE) { buttons |= 0x01; // Left button } - - platform->mouseCallback(x, y, buttons); + platform->mouseCallback(x,y,buttons); } - - return 1; // Event handled } + android_app_clear_motion_events(input_buf); + } - return 0; // Event not handled - }; - - LOGI("Touch input initialized"); + if (input_buf->keyEventsCount) + { + for (int idx = 0; idx < input_buf->keyEventsCount; idx++) + { + auto event = &input_buf->keyEvents[idx]; + assert((event->source == AINPUT_SOURCE_KEYBOARD) && + "Invalid key event source"); + if (platform->keyboardCallback) { + platform->keyboardCallback(event->keyCode, event->action == AKEY_EVENT_ACTION_DOWN); + } + } + android_app_clear_key_events(input_buf); + } } + void AndroidPlatform::EnablePowerSavingMode(bool enable) { powerSavingMode = enable; diff --git a/attachments/simple_engine/platform.h b/attachments/simple_engine/platform.h index b8e59a40..389a39ee 100644 --- a/attachments/simple_engine/platform.h +++ b/attachments/simple_engine/platform.h @@ -3,6 +3,7 @@ #include #include #include +#include #if defined(PLATFORM_ANDROID) #include @@ -10,6 +11,7 @@ #include #include #include +#include #define LOGI(...) ((void)__android_log_print(ANDROID_LOG_INFO, "SimpleEngine", __VA_ARGS__)) #define LOGW(...) ((void)__android_log_print(ANDROID_LOG_WARN, "SimpleEngine", __VA_ARGS__)) #define LOGE(...) ((void)__android_log_print(ANDROID_LOG_ERROR, "SimpleEngine", __VA_ARGS__)) @@ -174,6 +176,11 @@ class AndroidPlatform : public Platform { void InitializeTouchInput(); public: + /** + * @brief Process android input events. + */ + void process_android_input_events(void); + /** * @brief Enable or disable power-saving mode. * @param enable Whether to enable power-saving mode.
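ALooper_pollAll was removed from recent NDKs, which is why ProcessEvents above switches to ALooper_pollOnce. A single pollOnce call handles at most one source per iteration; if every pending source should be drained each frame, the call can be looped. A sketch under that assumption, using the same android_app glue as above (the helper name is hypothetical):

#include <android/looper.h>
#include <android_native_app_glue.h>  // or game-activity's copy of the glue

// Drains every pending looper source without blocking (timeout 0).
// A negative return (ALOOPER_POLL_TIMEOUT etc.) ends the loop for this frame.
void drainLooper(android_app* app) {
    int events = 0;
    android_poll_source* source = nullptr;
    while (ALooper_pollOnce(0, nullptr, &events,
                            reinterpret_cast<void**>(&source)) >= 0) {
        if (source) source->process(app, source);
        if (app->destroyRequested) return;
    }
}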
diff --git a/attachments/simple_engine/renderer.h b/attachments/simple_engine/renderer.h index df3e548e..81ee70dd 100644 --- a/attachments/simple_engine/renderer.h +++ b/attachments/simple_engine/renderer.h @@ -1,5 +1,9 @@ #pragma once +#ifndef VULKAN_HPP_DISPATCH_LOADER_DYNAMIC +#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1 +#endif + #include #include #include diff --git a/attachments/simple_engine/renderer_compute.cpp b/attachments/simple_engine/renderer_compute.cpp index 65256065..2882aa35 100644 --- a/attachments/simple_engine/renderer_compute.cpp +++ b/attachments/simple_engine/renderer_compute.cpp @@ -167,7 +167,7 @@ vk::raii::Fence Renderer::DispatchCompute(uint32_t groupCountX, uint32_t groupCo std::array descriptorWrites = { vk::WriteDescriptorSet{ - .dstSet = computeDescriptorSets[0], + .dstSet = *computeDescriptorSets[0], .dstBinding = 0, .dstArrayElement = 0, .descriptorCount = 1, @@ -175,7 +175,7 @@ vk::raii::Fence Renderer::DispatchCompute(uint32_t groupCountX, uint32_t groupCo .pBufferInfo = &inputBufferInfo }, vk::WriteDescriptorSet{ - .dstSet = computeDescriptorSets[0], + .dstSet = *computeDescriptorSets[0], .dstBinding = 1, .dstArrayElement = 0, .descriptorCount = 1, @@ -183,7 +183,7 @@ vk::raii::Fence Renderer::DispatchCompute(uint32_t groupCountX, uint32_t groupCo .pBufferInfo = &outputBufferInfo }, vk::WriteDescriptorSet{ - .dstSet = computeDescriptorSets[0], + .dstSet = *computeDescriptorSets[0], .dstBinding = 2, .dstArrayElement = 0, .descriptorCount = 1, @@ -191,7 +191,7 @@ vk::raii::Fence Renderer::DispatchCompute(uint32_t groupCountX, uint32_t groupCo .pBufferInfo = &hrtfBufferInfo }, vk::WriteDescriptorSet{ - .dstSet = computeDescriptorSets[0], + .dstSet = *computeDescriptorSets[0], .dstBinding = 3, .dstArrayElement = 0, .descriptorCount = 1,
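The recurring `.dstSet = *computeDescriptorSets[0]` edits above (and the matching ones in descriptor_manager.cpp) reflect a vulkan-hpp behavior: vk::raii wrappers do not convert implicitly to their plain handle types in recent headers, so C-layout structs such as vk::WriteDescriptorSet need an explicit dereference. A compile-only sketch of the pattern — function and parameter names are illustrative, not the engine's:

#include <vulkan/vulkan_raii.hpp>

// Builds a write for an already-allocated RAII descriptor set. The explicit
// dereference (*set) yields the non-owning vk::DescriptorSet handle that the
// struct expects; the RAII object keeps ownership.
vk::WriteDescriptorSet makeStorageWrite(const vk::raii::DescriptorSet& set,
                                        const vk::DescriptorBufferInfo& info) {
    vk::WriteDescriptorSet write{};
    write.dstSet          = *set;  // vk::raii::DescriptorSet -> vk::DescriptorSet
    write.dstBinding      = 0;
    write.dstArrayElement = 0;
    write.descriptorCount = 1;
    write.descriptorType  = vk::DescriptorType::eStorageBuffer;
    write.pBufferInfo     = &info;
    return write;
}

Member assignment is used here instead of designated initializers so the sketch compiles whether or not VULKAN_HPP_NO_STRUCT_CONSTRUCTORS is defined.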
diff --git a/attachments/simple_engine/renderer_core.cpp b/attachments/simple_engine/renderer_core.cpp index b6252161..f6522227 100644 --- a/attachments/simple_engine/renderer_core.cpp +++ b/attachments/simple_engine/renderer_core.cpp @@ -11,24 +11,39 @@ VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE; // In a .cpp file #include #include - -// Debug callback for vk::raii -static VKAPI_ATTR VkBool32 VKAPI_CALL debugCallbackVkRaii( +#include <vulkan/vulkan.h> // for vkGetInstanceProcAddr and VK_FALSE +
+// Debug callbacks compatible with both Vulkan C and Vulkan-Hpp PFN signatures
+// Original C-style callback (kept for reference and potential uses elsewhere)
+#if defined(PLATFORM_ANDROID) +static VKAPI_ATTR VkBool32 VKAPI_CALL debugCallback( + VkDebugUtilsMessageSeverityFlagBitsEXT messageSeverity, + VkDebugUtilsMessageTypeFlagsEXT /*messageType*/, + const VkDebugUtilsMessengerCallbackDataEXT* pCallbackData, + void* /*pUserData*/) { + if (messageSeverity >= VK_DEBUG_UTILS_MESSAGE_SEVERITY_WARNING_BIT_EXT) { + std::cerr << "Validation layer: " << (pCallbackData && pCallbackData->pMessage ? pCallbackData->pMessage : "") << std::endl; + } else { + std::cout << "Validation layer: " << (pCallbackData && pCallbackData->pMessage ? pCallbackData->pMessage : "") << std::endl; + } + return VK_FALSE; +} +#else +// Vulkan-Hpp typed wrapper to satisfy vk::PFN_DebugUtilsMessengerCallbackEXT +static VKAPI_ATTR uint32_t VKAPI_CALL debugCallback( vk::DebugUtilsMessageSeverityFlagBitsEXT messageSeverity, - vk::DebugUtilsMessageTypeFlagsEXT messageType, + vk::DebugUtilsMessageTypeFlagsEXT /*messageType*/, const vk::DebugUtilsMessengerCallbackDataEXT* pCallbackData, - void* pUserData) { - + void* /*pUserData*/) { + // Log similarly to the C-style callback using Hpp types if (messageSeverity >= vk::DebugUtilsMessageSeverityFlagBitsEXT::eWarning) { - // Print a message to the console - std::cerr << "Validation layer: " << pCallbackData->pMessage << std::endl; + std::cerr << "Validation layer: " << (pCallbackData && pCallbackData->pMessage ? pCallbackData->pMessage : "") << std::endl; } else { - // Print a message to the console - std::cout << "Validation layer: " << pCallbackData->pMessage << std::endl; + std::cout << "Validation layer: " << (pCallbackData && pCallbackData->pMessage ? pCallbackData->pMessage : "") << std::endl; } return VK_FALSE; } +#endif // Renderer core implementation for the "Rendering Pipeline" chapter of the tutorial. Renderer::Renderer(Platform* platform) @@ -45,8 +60,8 @@ Renderer::~Renderer() { // Initialize the renderer bool Renderer::Initialize(const std::string& appName, bool enableValidationLayers) { - vk::detail::DynamicLoader dl; - auto vkGetInstanceProcAddr = dl.getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr"); + // Use the globally exported loader symbol directly to avoid version-specific DynamicLoader APIs + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = &::vkGetInstanceProcAddr; VULKAN_HPP_DEFAULT_DISPATCHER.init(vkGetInstanceProcAddr); // Create a Vulkan instance if (!createInstance(appName, enableValidationLayers)) { @@ -204,8 +219,7 @@ void Renderer::ensureThreadLocalVulkanInit() const { static thread_local bool s_tlsInitialized = false; if (s_tlsInitialized) return; try { - vk::detail::DynamicLoader dl; - auto vkGetInstanceProcAddr = dl.getProcAddress<PFN_vkGetInstanceProcAddr>("vkGetInstanceProcAddr"); + PFN_vkGetInstanceProcAddr vkGetInstanceProcAddr = &::vkGetInstanceProcAddr; if (vkGetInstanceProcAddr) { VULKAN_HPP_DEFAULT_DISPATCHER.init(vkGetInstanceProcAddr); } @@ -235,7 +249,8 @@ void Renderer::Cleanup() { // Wait for the device to be idle before cleaning up device.waitIdle(); - for (auto& resources : entityResources | std::views::values) { + for (auto &entry : entityResources) { + auto &resources = entry.second; // Memory pool handles unmapping automatically, no need to manually unmap resources.basicDescriptorSets.clear(); resources.pbrDescriptorSets.clear(); @@ -334,7 +349,7 @@ bool Renderer::setupDebugMessenger(bool enableValidationLayers) { .messageType = vk::DebugUtilsMessageTypeFlagBitsEXT::eGeneral | vk::DebugUtilsMessageTypeFlagBitsEXT::eValidation | vk::DebugUtilsMessageTypeFlagBitsEXT::ePerformance, - .pfnUserCallback = debugCallback + .pfnUserCallback = debugCallback }; // Create debug messenger
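The renderer_core.cpp changes above drop vk::detail::DynamicLoader in favor of the loader's exported ::vkGetInstanceProcAddr. With VULKAN_HPP_DISPATCH_LOADER_DYNAMIC the dispatcher is then initialized in up to three stages; a minimal sketch of that order, assuming the program links the Vulkan loader directly (the function name is hypothetical):

#define VULKAN_HPP_DISPATCH_LOADER_DYNAMIC 1
#include <vulkan/vulkan.h>
#include <vulkan/vulkan.hpp>

// Exactly one translation unit in the program may expand this macro.
VULKAN_HPP_DEFAULT_DISPATCH_LOADER_DYNAMIC_STORAGE;

void initDispatcher(vk::Instance instance, vk::Device device) {
    // 1) Loader-level entry points, available before any instance exists.
    VULKAN_HPP_DEFAULT_DISPATCHER.init(&::vkGetInstanceProcAddr);
    // 2) Instance-level function pointers, once the instance is created.
    VULKAN_HPP_DEFAULT_DISPATCHER.init(instance);
    // 3) Device-level function pointers, for the fastest dispatch path.
    VULKAN_HPP_DEFAULT_DISPATCHER.init(device);
}

The thread_local guard in ensureThreadLocalVulkanInit above exists because worker threads repeat stage 1 before touching any Vulkan-Hpp call on that thread.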
diff --git a/attachments/simple_engine/renderer_rendering.cpp b/attachments/simple_engine/renderer_rendering.cpp index 451ae34c..b00df237 100644 --- a/attachments/simple_engine/renderer_rendering.cpp +++ b/attachments/simple_engine/renderer_rendering.cpp @@ -11,6 +11,8 @@ #include #include #include +#include +#include // This file contains rendering-related methods from the Renderer class @@ -316,7 +318,8 @@ void Renderer::recreateSwapChain() { } // Clear all entity descriptor sets since they're now invalid (allocated from the old pool) - for (auto& resources : entityResources | std::views::values) { + for (auto &entry : entityResources) { + auto &resources = entry.second; resources.basicDescriptorSets.clear(); resources.pbrDescriptorSets.clear(); } @@ -331,7 +334,8 @@ void Renderer::recreateSwapChain() { currentFrame = 0; // Recreate descriptor sets for all entities after swapchain/pipeline rebuild - for (const auto& entity : entityResources | std::views::keys) { + for (const auto &entry : entityResources) { + auto* entity = entry.first; if (!entity) continue; auto meshComponent = entity->GetComponent<MeshComponent>(); if (!meshComponent) continue; @@ -457,10 +461,25 @@ void Renderer::Render(const std::vector>& entities, Came if (device.waitForFences(*inFlightFences[currentFrame], VK_TRUE, UINT64_MAX) != vk::Result::eSuccess) {} - uint32_t imageIndex; - vk::ResultValue<uint32_t> result{{},0}; + uint32_t imageIndex = 0; + vk::Result acquireRes = vk::Result::eSuccess; try { + // Normalize the acquireNextImage return type across Vulkan-Hpp variants + // to a common std::pair<vk::Result, uint32_t>. This is required because Android must + // build against the Vulkan-Hpp version shipped with the NDK, so the legacy + // return type has to be handled as well. + auto ai = swapChain.acquireNextImage(UINT64_MAX, *imageAvailableSemaphores[currentFrame]); + // Overloads to unwrap either vk::ResultValue<uint32_t> or std::pair + auto unwrap = [](auto const& v) -> std::pair<vk::Result, uint32_t> { + using T = std::decay_t<decltype(v)>; + if constexpr (std::is_same_v<T, vk::ResultValue<uint32_t>>) { + return { v.result, v.value }; + } else { + return { v.first, v.second }; + } + }; + auto [resTmp, idxTmp] = unwrap(ai); + acquireRes = resTmp; + imageIndex = idxTmp; } catch (const vk::OutOfDateKHRError&) { // Swapchain is out of date (e.g., window resized) before we could // query the result. Trigger recreation and exit this frame cleanly.
@@ -470,15 +489,13 @@ return; } - imageIndex = result.value; - - if (result.result == vk::Result::eErrorOutOfDateKHR || result.result == vk::Result::eSuboptimalKHR || framebufferResized.load(std::memory_order_relaxed)) { + if (acquireRes == vk::Result::eErrorOutOfDateKHR || acquireRes == vk::Result::eSuboptimalKHR || framebufferResized.load(std::memory_order_relaxed)) { framebufferResized.store(false, std::memory_order_relaxed); if (imguiSystem) ImGui::EndFrame(); recreateSwapChain(); return; } - if (result.result != vk::Result::eSuccess) { + if (acquireRes != vk::Result::eSuccess) { throw std::runtime_error("Failed to acquire swap chain image"); } @@ -958,16 +975,16 @@ void Renderer::Render(const std::vector>& entities, Came vk::PresentInfoKHR presentInfo{ .waitSemaphoreCount = 1, .pWaitSemaphores = &*renderFinishedSemaphores[imageIndex], .swapchainCount = 1, .pSwapchains = &*swapChain, .pImageIndices = &imageIndex }; try { std::lock_guard lock(queueMutex); - result.result = presentQueue.presentKHR(presentInfo); + vk::Result presentRes = presentQueue.presentKHR(presentInfo); + if (presentRes == vk::Result::eErrorOutOfDateKHR || presentRes == vk::Result::eSuboptimalKHR || framebufferResized.load(std::memory_order_relaxed)) { + framebufferResized.store(false, std::memory_order_relaxed); + recreateSwapChain(); + } else if (presentRes != vk::Result::eSuccess) { + throw std::runtime_error("Failed to present swap chain image"); + } } catch (const vk::OutOfDateKHRError&) { framebufferResized.store(true, std::memory_order_relaxed); } - if (result.result == vk::Result::eErrorOutOfDateKHR || result.result == vk::Result::eSuboptimalKHR || framebufferResized.load(std::memory_order_relaxed)) { - framebufferResized.store(false, std::memory_order_relaxed); - recreateSwapChain(); - } else if (result.result != vk::Result::eSuccess) { - throw std::runtime_error("Failed to present swap chain image"); - } currentFrame = (currentFrame + 1) % MAX_FRAMES_IN_FLIGHT; } diff --git a/attachments/simple_engine/renderer_resources.cpp b/attachments/simple_engine/renderer_resources.cpp index cc9fadf7..ab247fd6 100644 --- a/attachments/simple_engine/renderer_resources.cpp +++ b/attachments/simple_engine/renderer_resources.cpp @@ -1893,7 +1893,8 @@ bool Renderer::createOrResizeLightStorageBuffers(size_t lightCount) { void Renderer::updateAllDescriptorSetsWithNewLightBuffers() { try { // Iterate through all entity resources and update their PBR descriptor sets - for (auto& resources : entityResources | std::views::values) { + for (auto &pair : entityResources) { + auto &resources = pair.second; // Only update PBR descriptor sets (they have light buffer bindings) if (!resources.pbrDescriptorSets.empty()) { for (size_t i = 0; i < resources.pbrDescriptorSets.size() && i < lightStorageBuffers.size(); ++i) { @@ -2289,7 +2290,7 @@ void Renderer::uploadImageFromStaging(vk::Buffer staging, // Submit once on the GRAPHICS queue; signal uploads timeline if available vk::raii::Fence fence(device, vk::FenceCreateInfo{}); - bool canSignalTimeline = uploadsTimeline != nullptr; + bool canSignalTimeline = static_cast<VkSemaphore>(*uploadsTimeline) != VK_NULL_HANDLE; uint64_t signalValue = 0; { std::lock_guard lock(queueMutex);
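The unwrap lambda introduced in renderer_rendering.cpp above is pure template logic and can be exercised off-device. A runnable sketch with stand-in types — MockResultValue merely mimics the shape of vk::ResultValue<uint32_t>; nothing here touches Vulkan:

#include <cassert>
#include <type_traits>
#include <utility>

// Stand-in for the ResultValue shape some Vulkan-Hpp versions return.
struct MockResultValue { int result; unsigned value; };

// Normalizes either MockResultValue or std::pair to a common pair,
// the same if-constexpr dispatch used in the hunk above.
template <typename T>
std::pair<int, unsigned> unwrap(const T& v) {
    if constexpr (std::is_same_v<std::decay_t<T>, MockResultValue>) {
        return { v.result, v.value };
    } else {
        return { v.first, v.second };
    }
}

int main() {
    assert((unwrap(MockResultValue{0, 3}) == std::pair<int, unsigned>{0, 3}));
    assert((unwrap(std::make_pair(0, 7u)) == std::pair<int, unsigned>{0, 7}));
}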
diff --git a/attachments/simple_engine/renderer_utils.cpp b/attachments/simple_engine/renderer_utils.cpp index 626a86f6..38f47b04 100644 --- a/attachments/simple_engine/renderer_utils.cpp +++ b/attachments/simple_engine/renderer_utils.cpp @@ -132,7 +132,7 @@ QueueFamilyIndices Renderer::findQueueFamilies(const vk::raii::PhysicalDevice& d indices.computeFamily = i; } // Check for present support - if (!indices.presentFamily.has_value() && device.getSurfaceSupportKHR(i, surface)) { + if (!indices.presentFamily.has_value() && device.getSurfaceSupportKHR(i, *surface)) { indices.presentFamily = i; } // Prefer a dedicated transfer queue (transfer bit set, but NOT graphics) if available @@ -161,13 +161,13 @@ SwapChainSupportDetails Renderer::querySwapChainSupport(const vk::raii::Physical SwapChainSupportDetails details; // Get surface capabilities - details.capabilities = device.getSurfaceCapabilitiesKHR(surface); + details.capabilities = device.getSurfaceCapabilitiesKHR(*surface); // Get surface formats - details.formats = device.getSurfaceFormatsKHR(surface); + details.formats = device.getSurfaceFormatsKHR(*surface); // Get present modes - details.presentModes = device.getSurfacePresentModesKHR(surface); + details.presentModes = device.getSurfacePresentModesKHR(*surface); return details; } diff --git a/attachments/simple_engine/resource_manager.cpp b/attachments/simple_engine/resource_manager.cpp index 5102539c..69f17ad0 100644 --- a/attachments/simple_engine/resource_manager.cpp +++ b/attachments/simple_engine/resource_manager.cpp @@ -18,9 +18,11 @@ void Resource::Unload() { } void ResourceManager::UnloadAllResources() { - for (auto& val : resources | std::views::values) { - for (auto& loadedResource : val | std::views::values) { - loadedResource->Unload(); + for (auto &bucketEntry : resources) { + auto &val = bucketEntry.second; + for (auto &resEntry : val) { + auto &loadedResource = resEntry.second; + if (loadedResource) loadedResource->Unload(); } val.clear(); } diff --git a/attachments/simple_engine/vulkan_device.cpp b/attachments/simple_engine/vulkan_device.cpp index 5c10825b..b33f06ed 100644 --- a/attachments/simple_engine/vulkan_device.cpp +++ b/attachments/simple_engine/vulkan_device.cpp @@ -188,8 +188,8 @@ QueueFamilyIndices VulkanDevice::findQueueFamilies(vk::raii::PhysicalDevice& dev indices.computeFamily = i; } - // Check for present support - if (device.getSurfaceSupportKHR(i, surface)) { + // Check for present support (RAII PhysicalDevice overload expects a non-RAII SurfaceKHR handle) + if (device.getSurfaceSupportKHR(i, *surface)) { indices.presentFamily = i; } @@ -206,14 +206,14 @@ QueueFamilyIndices VulkanDevice::findQueueFamilies(vk::raii::PhysicalDevice& dev SwapChainSupportDetails VulkanDevice::querySwapChainSupport(vk::raii::PhysicalDevice& device) { SwapChainSupportDetails details; - // Get surface capabilities - details.capabilities = device.getSurfaceCapabilitiesKHR(surface); + // Get surface capabilities (pass underlying handle) + details.capabilities = device.getSurfaceCapabilitiesKHR(*surface); // Get surface formats - details.formats = device.getSurfaceFormatsKHR(surface); + details.formats = device.getSurfaceFormatsKHR(*surface); // Get present modes - details.presentModes = device.getSurfacePresentModesKHR(surface); + details.presentModes = device.getSurfacePresentModesKHR(*surface); return details; }
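Several hunks in this patch (model_loader.cpp, resource_manager.cpp, renderer_core.cpp, renderer_rendering.cpp) replace `map | std::views::values` with explicit pair loops because the NDK's libc++ may not ship the ranges library. Where both toolchains must compile the same source, a feature-test guard is one alternative — a runnable sketch (the helper name is illustrative):

#include <map>
#include <string>
#include <version>

#if defined(__cpp_lib_ranges)
#include <ranges>
#endif

// Iterates over map values portably: std::views::values where the standard
// library provides it, plain pair iteration otherwise (e.g. older NDK libc++).
template <typename Map, typename Fn>
void forEachValue(Map& m, Fn&& fn) {
#if defined(__cpp_lib_ranges)
    for (auto& v : m | std::views::values) fn(v);
#else
    for (auto& kv : m) fn(kv.second);
#endif
}

int main() {
    std::map<int, std::string> materials{{1, "wood"}, {2, "glass"}};
    forEachValue(materials, [](std::string& name) { name += "_ok"; });
}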