Dynamically load FFmpeg and libfdk-aac if available. (#6570)
This commit is contained in:
parent d807cdfe62
commit 38435e9b3e

38 changed files with 1311 additions and 877 deletions
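Summary (reviewer note): the FFmpeg/fdk-aac build switches (ENABLE_FFMPEG_AUDIO_DECODER, ENABLE_FFMPEG_VIDEO_DUMPER, ENABLE_FDK), the bundled FindFFmpeg.cmake module, and the old ffmpeg_dl loader are removed. Headers now come from the new externals/library-headers submodule, and every libavcodec / libfdk-aac call goes through the new DynamicLibrary::FFmpeg and DynamicLibrary::FdkAac wrappers, which resolve the symbols at runtime, so Citra builds without these libraries installed and simply skips the AAC decoder and the video dumper when loading fails. The wrapper sources under common/dynamic_library/ are not part of this excerpt; what follows is a minimal, POSIX-only sketch of the pattern they presumably follow (the soname and all names here are illustrative assumptions, not the actual Citra implementation):

    // Hypothetical sketch only; not the real common/dynamic_library/ffmpeg.cpp.
    extern "C" {
    #include <libavcodec/avcodec.h>
    }
    #include <dlfcn.h>

    namespace DynamicLibrary::FFmpeg {

    // Function pointer mirroring the libavcodec entry point used by the decoder.
    int (*avcodec_open2)(AVCodecContext*, const AVCodec*, AVDictionary**) = nullptr;

    bool LoadFFmpeg() {
        // Assumed soname; a real loader would try platform-specific names.
        static void* handle = dlopen("libavcodec.so.58", RTLD_LAZY);
        if (handle == nullptr) {
            return false; // caller falls back to another decoder backend
        }
        avcodec_open2 =
            reinterpret_cast<decltype(avcodec_open2)>(dlsym(handle, "avcodec_open2"));
        return avcodec_open2 != nullptr;
    }

    } // namespace DynamicLibrary::FFmpeg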
@@ -3,7 +3,7 @@
 #Building Citra
 mkdir build
 cd build
-cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DENABLE_QT_TRANSLATION=ON -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_FFMPEG_VIDEO_DUMPER=ON
+cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DENABLE_QT_TRANSLATION=ON -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON
 ninja

 ctest -VV -C Release

@@ -1,7 +1,7 @@
 #!/bin/bash -ex

 mkdir build && cd build
-cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DENABLE_QT_TRANSLATION=ON -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_FFMPEG_VIDEO_DUMPER=ON
+cmake .. -G Ninja -DCMAKE_BUILD_TYPE=Release -DCMAKE_C_COMPILER=/usr/lib/ccache/gcc -DCMAKE_CXX_COMPILER=/usr/lib/ccache/g++ -DENABLE_QT_TRANSLATION=ON -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON
 ninja

 ctest -VV -C Release

@@ -5,7 +5,7 @@ mkdir -p "$HOME/.ccache/"
 echo 'max_size = 3.0G' > "$HOME/.ccache/ccache.conf"

 mkdir build && cd build
-cmake .. -G Ninja -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DCITRA_USE_CCACHE=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_QT_TRANSLATION=ON -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_MF=ON -DENABLE_FFMPEG_VIDEO_DUMPER=ON -DCMAKE_NO_SYSTEM_FROM_IMPORTED=TRUE -DCOMPILE_WITH_DWARF=OFF
+cmake .. -G Ninja -DCMAKE_TOOLCHAIN_FILE="$(pwd)/../CMakeModules/MinGWCross.cmake" -DCITRA_USE_CCACHE=ON -DCMAKE_BUILD_TYPE=Release -DENABLE_QT_TRANSLATION=ON -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON -DUSE_DISCORD_PRESENCE=ON -DENABLE_MF=ON -DCMAKE_NO_SYSTEM_FROM_IMPORTED=TRUE -DCOMPILE_WITH_DWARF=OFF
 ninja

 echo "Tests skipped"

@@ -22,7 +22,6 @@ cmake .. -DCMAKE_BUILD_TYPE=Release \
     -DCITRA_ENABLE_COMPATIBILITY_REPORTING=${ENABLE_COMPATIBILITY_REPORTING:-"OFF"} \
     -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
     -DUSE_DISCORD_PRESENCE=ON \
-    -DENABLE_FFMPEG_VIDEO_DUMPER=ON \
     -DENABLE_ASM=OFF \
     -GNinja
 ninja

@@ -11,7 +11,6 @@ cmake .. \
     -DENABLE_COMPATIBILITY_LIST_DOWNLOAD=ON \
     -DUSE_DISCORD_PRESENCE=ON \
     -DENABLE_MF=ON \
-    -DENABLE_FFMPEG_VIDEO_DUMPER=ON \
     -DOPENSSL_DLL_DIR="C:\Program Files\OpenSSL\bin"

 ninja

.gitmodules (vendored, 3 changed lines)
@@ -79,3 +79,6 @@
 [submodule "sirit"]
     path = externals/sirit
     url = https://github.com/yuzu-emu/sirit
+[submodule "library-headers"]
+    path = externals/library-headers
+    url = https://github.com/citra-emu/ext-library-headers.git

@@ -48,15 +48,6 @@ option(ENABLE_OPENAL "Enables the OpenAL audio backend" ON)

 CMAKE_DEPENDENT_OPTION(ENABLE_LIBUSB "Enable libusb for GameCube Adapter support" ON "NOT IOS" OFF)

-option(ENABLE_FFMPEG_AUDIO_DECODER "Enable FFmpeg audio (AAC) decoder" OFF)
-option(ENABLE_FFMPEG_VIDEO_DUMPER "Enable FFmpeg video dumper" OFF)
-
-if (ENABLE_FFMPEG_AUDIO_DECODER OR ENABLE_FFMPEG_VIDEO_DUMPER)
-    set(ENABLE_FFMPEG ON)
-endif()
-
-CMAKE_DEPENDENT_OPTION(CITRA_USE_BUNDLED_FFMPEG "Download bundled FFmpeg binaries" ON "ENABLE_FFMPEG;MSVC OR APPLE" OFF)
-
 option(USE_DISCORD_PRESENCE "Enables Discord Rich Presence" OFF)

 option(CITRA_USE_PRECOMPILED_HEADERS "Use precompiled headers" ON)

@@ -68,7 +59,6 @@ CMAKE_DEPENDENT_OPTION(COMPILE_WITH_DWARF "Add DWARF debugging information" ON "

 option(USE_SYSTEM_BOOST "Use the system Boost libs (instead of the bundled ones)" OFF)

-CMAKE_DEPENDENT_OPTION(ENABLE_FDK "Use FDK AAC decoder" OFF "NOT ENABLE_FFMPEG_AUDIO_DECODER;NOT ENABLE_MF" OFF)

 CMAKE_DEPENDENT_OPTION(CITRA_USE_BUNDLED_MOLTENVK "Download the bundled MoltenVK" ON "APPLE" OFF)

@@ -200,6 +190,8 @@ message(STATUS "Target architecture: ${ARCHITECTURE}")

 # boost asio's concept usage doesn't play nicely with some compilers yet.
 add_definitions(-DBOOST_ASIO_DISABLE_CONCEPTS)
+# boost can have issues compiling with C++17 and up on newer versions of Clang.
+add_definitions(-DBOOST_NO_CXX98_FUNCTION_BASE)
 set(CMAKE_CXX_STANDARD 20)
 set(CMAKE_CXX_STANDARD_REQUIRED ON)

@@ -250,43 +242,6 @@ if (ENABLE_LIBUSB)
     endif()
 endif()

-if (ENABLE_FFMPEG)
-    if (CITRA_USE_BUNDLED_FFMPEG)
-        if ((MSVC_VERSION GREATER_EQUAL 1920 AND MSVC_VERSION LESS 1940) AND "x86_64" IN_LIST ARCHITECTURE)
-            set(FFmpeg_VER "ffmpeg-4.1-win64")
-        elseif (APPLE)
-            set(FFmpeg_VER "ffmpeg-6.0")
-        else()
-            message(FATAL_ERROR "No bundled FFmpeg binaries for your toolchain. Disable CITRA_USE_BUNDLED_FFMPEG and provide your own.")
-        endif()
-
-        if (DEFINED FFmpeg_VER)
-            download_bundled_external("ffmpeg/" ${FFmpeg_VER} FFmpeg_PREFIX)
-            set(FFMPEG_DIR "${FFmpeg_PREFIX}")
-        endif()
-    endif()
-
-    if (ENABLE_FFMPEG_VIDEO_DUMPER)
-        find_package(FFmpeg REQUIRED COMPONENTS avcodec avfilter avformat avutil swresample)
-    else()
-        find_package(FFmpeg REQUIRED COMPONENTS avcodec)
-    endif()
-    if ("${FFmpeg_avcodec_VERSION}" VERSION_LESS "58.4.100")
-        message(FATAL_ERROR "Found version for libavcodec is too low. The required version is at least 58.4.100 (included in FFmpeg 4.0 and later).")
-    endif()
-endif()
-
-if (ENABLE_FFMPEG_VIDEO_DUMPER)
-    add_definitions(-DENABLE_FFMPEG_VIDEO_DUMPER)
-endif()
-
-if (ENABLE_FDK)
-    find_library(FDK_AAC fdk-aac DOC "The path to fdk_aac library")
-    if(FDK_AAC STREQUAL "FDK_AAC-NOTFOUND")
-        message(FATAL_ERROR "fdk_aac library not found.")
-    endif()
-endif()
-
 # Use system tsl::robin_map if available (otherwise we fallback to version bundled with dynarmic)
 find_package(tsl-robin-map QUIET)

@@ -1,13 +0,0 @@
-function(copy_citra_FFmpeg_deps target_dir)
-    include(WindowsCopyFiles)
-    set(DLL_DEST "${CMAKE_BINARY_DIR}/bin/$<CONFIG>/")
-    windows_copy_files(${target_dir} ${FFMPEG_DIR}/bin ${DLL_DEST}
-        avcodec*.dll
-        avfilter*.dll
-        avformat*.dll
-        avutil*.dll
-        postproc*.dll
-        swresample*.dll
-        swscale*.dll
-    )
-endfunction(copy_citra_FFmpeg_deps)

externals/CMakeLists.txt (vendored, 3 changed lines)
@@ -117,6 +117,9 @@ endif()
 # Open Source Archives
 add_subdirectory(open_source_archives)

+# Dynamic library headers
+add_subdirectory(library-headers EXCLUDE_FROM_ALL)
+
 # SoundTouch
 set(INTEGER_SAMPLES ON CACHE BOOL "")
 set(SOUNDSTRETCH OFF CACHE BOOL "")

externals/cmake-modules/FindFFmpeg.cmake (vendored, 192 changed lines)
@@ -1,192 +0,0 @@
-# FindFFmpeg
-# ----------
-#
-# Find the native FFmpeg includes and libraries
-#
-# This module defines the following variables:
-#
-# FFmpeg_INCLUDE_<component>: where to find <component>.h
-# FFmpeg_LIBRARY_<component>: where to find the <component> library
-# FFmpeg_INCLUDES: aggregate all the include paths
-# FFmpeg_LIBRARIES: aggregate all the paths to the libraries
-# FFmpeg_FOUND: True if all components have been found
-#
-# This module defines the following targets, which are prefered over variables:
-#
-# FFmpeg::<component>: Target to use <component> directly, with include path,
-# library and dependencies set up. If you are using a static build, you are
-# responsible for adding any external dependencies (such as zlib, bzlib...).
-#
-# <component> can be one of:
-# avcodec
-# avdevice
-# avfilter
-# avformat
-# postproc
-# swresample
-# swscale
-#
-
-set(_FFmpeg_ALL_COMPONENTS
-    avcodec
-    avdevice
-    avfilter
-    avformat
-    avutil
-    postproc
-    swresample
-    swscale
-)
-
-set(_FFmpeg_DEPS_avcodec avutil)
-set(_FFmpeg_DEPS_avdevice avcodec avformat avutil)
-set(_FFmpeg_DEPS_avfilter avutil)
-set(_FFmpeg_DEPS_avformat avcodec avutil)
-set(_FFmpeg_DEPS_postproc avutil)
-set(_FFmpeg_DEPS_swresample avutil)
-set(_FFmpeg_DEPS_swscale avutil)
-
-function(find_ffmpeg LIBNAME)
-    if(DEFINED ENV{FFMPEG_DIR})
-        set(FFMPEG_DIR $ENV{FFMPEG_DIR})
-    endif()
-
-    if(FFMPEG_DIR)
-        list(APPEND INCLUDE_PATHS
-            ${FFMPEG_DIR}
-            ${FFMPEG_DIR}/ffmpeg
-            ${FFMPEG_DIR}/lib${LIBNAME}
-            ${FFMPEG_DIR}/include/lib${LIBNAME}
-            ${FFMPEG_DIR}/include/ffmpeg
-            ${FFMPEG_DIR}/include
-            NO_DEFAULT_PATH
-            NO_CMAKE_FIND_ROOT_PATH
-        )
-        list(APPEND LIB_PATHS
-            ${FFMPEG_DIR}
-            ${FFMPEG_DIR}/lib
-            ${FFMPEG_DIR}/lib${LIBNAME}
-            NO_DEFAULT_PATH
-            NO_CMAKE_FIND_ROOT_PATH
-        )
-    else()
-        list(APPEND INCLUDE_PATHS
-            /usr/local/include/ffmpeg
-            /usr/local/include/lib${LIBNAME}
-            /usr/include/ffmpeg
-            /usr/include/lib${LIBNAME}
-            /usr/include/ffmpeg/lib${LIBNAME}
-        )
-
-        list(APPEND LIB_PATHS
-            /usr/local/lib
-            /usr/lib
-        )
-    endif()
-
-    find_path(FFmpeg_INCLUDE_${LIBNAME} lib${LIBNAME}/${LIBNAME}.h
-        HINTS ${INCLUDE_PATHS}
-    )
-
-    find_library(FFmpeg_LIBRARY_${LIBNAME} ${LIBNAME}
-        HINTS ${LIB_PATHS}
-    )
-
-    if(NOT FFMPEG_DIR AND (NOT FFmpeg_LIBRARY_${LIBNAME} OR NOT FFmpeg_INCLUDE_${LIBNAME}))
-        # Didn't find it in the usual paths, try pkg-config
-        find_package(PkgConfig QUIET)
-        pkg_check_modules(FFmpeg_PKGCONFIG_${LIBNAME} QUIET lib${LIBNAME})
-
-        find_path(FFmpeg_INCLUDE_${LIBNAME} lib${LIBNAME}/${LIBNAME}.h
-            ${FFmpeg_PKGCONFIG_${LIBNAME}_INCLUDE_DIRS}
-        )
-
-        find_library(FFmpeg_LIBRARY_${LIBNAME} ${LIBNAME}
-            ${FFmpeg_PKGCONFIG_${LIBNAME}_LIBRARY_DIRS}
-        )
-    endif()
-
-    if(FFmpeg_INCLUDE_${LIBNAME} AND FFmpeg_LIBRARY_${LIBNAME})
-        set(FFmpeg_INCLUDE_${LIBNAME} "${FFmpeg_INCLUDE_${LIBNAME}}" PARENT_SCOPE)
-        set(FFmpeg_LIBRARY_${LIBNAME} "${FFmpeg_LIBRARY_${LIBNAME}}" PARENT_SCOPE)
-
-        # Extract FFmpeg version from version.h
-        foreach(v MAJOR MINOR MICRO)
-            set(FFmpeg_${LIBNAME}_VERSION_${v} 0)
-        endforeach()
-        string(TOUPPER ${LIBNAME} LIBNAME_UPPER)
-        file(STRINGS "${FFmpeg_INCLUDE_${LIBNAME}}/lib${LIBNAME}/version.h" _FFmpeg_VERSION_H_CONTENTS REGEX "#define LIB${LIBNAME_UPPER}_VERSION_(MAJOR|MINOR|MICRO) ")
-        if (EXISTS "${FFmpeg_INCLUDE_${LIBNAME}}/lib${LIBNAME}/version_major.h")
-            file(STRINGS "${FFmpeg_INCLUDE_${LIBNAME}}/lib${LIBNAME}/version_major.h" _FFmpeg_MAJOR_VERSION_H_CONTENTS REGEX "#define LIB${LIBNAME_UPPER}_VERSION_MAJOR ")
-            string(APPEND _FFmpeg_VERSION_H_CONTENTS "\n" ${_FFmpeg_MAJOR_VERSION_H_CONTENTS})
-        endif()
-        set(_FFmpeg_VERSION_REGEX "([0-9]+)")
-        foreach(v MAJOR MINOR MICRO)
-            if("${_FFmpeg_VERSION_H_CONTENTS}" MATCHES "#define LIB${LIBNAME_UPPER}_VERSION_${v}[\\t ]+${_FFmpeg_VERSION_REGEX}")
-                set(FFmpeg_${LIBNAME}_VERSION_${v} "${CMAKE_MATCH_1}")
-            endif()
-        endforeach()
-        set(FFmpeg_${LIBNAME}_VERSION "${FFmpeg_${LIBNAME}_VERSION_MAJOR}.${FFmpeg_${LIBNAME}_VERSION_MINOR}.${FFmpeg_${LIBNAME}_VERSION_MICRO}")
-        set(FFmpeg_${c}_VERSION "${FFmpeg_${LIBNAME}_VERSION}" PARENT_SCOPE)
-        unset(_FFmpeg_VERSION_REGEX)
-        unset(_FFmpeg_VERSION_H_CONTENTS)
-
-        set(FFmpeg_${c}_FOUND TRUE PARENT_SCOPE)
-        if(NOT FFmpeg_FIND_QUIETLY)
-            message("-- Found ${LIBNAME}: ${FFmpeg_INCLUDE_${LIBNAME}} ${FFmpeg_LIBRARY_${LIBNAME}} (version: ${FFmpeg_${LIBNAME}_VERSION})")
-        endif()
-    endif()
-endfunction()
-
-foreach(c ${_FFmpeg_ALL_COMPONENTS})
-    find_ffmpeg(${c})
-endforeach()
-
-foreach(c ${_FFmpeg_ALL_COMPONENTS})
-    if(FFmpeg_${c}_FOUND)
-        list(APPEND FFmpeg_INCLUDES ${FFmpeg_INCLUDE_${c}})
-        list(APPEND FFmpeg_LIBRARIES ${FFmpeg_LIBRARY_${c}})
-
-        add_library(FFmpeg::${c} IMPORTED UNKNOWN)
-        set_target_properties(FFmpeg::${c} PROPERTIES
-            IMPORTED_LOCATION ${FFmpeg_LIBRARY_${c}}
-            INTERFACE_INCLUDE_DIRECTORIES ${FFmpeg_INCLUDE_${c}}
-        )
-        if(APPLE)
-            set_target_properties(FFmpeg::${c} PROPERTIES
-                MACOSX_RPATH 1
-            )
-        endif()
-        if(_FFmpeg_DEPS_${c})
-            set(deps)
-            foreach(dep ${_FFmpeg_DEPS_${c}})
-                list(APPEND deps FFmpeg::${dep})
-            endforeach()
-
-            set_target_properties(FFmpeg::${c} PROPERTIES
-                INTERFACE_LINK_LIBRARIES "${deps}"
-            )
-            unset(deps)
-        endif()
-    endif()
-endforeach()
-
-if(FFmpeg_INCLUDES)
-    list(REMOVE_DUPLICATES FFmpeg_INCLUDES)
-endif()
-
-foreach(c ${FFmpeg_FIND_COMPONENTS})
-    list(APPEND _FFmpeg_REQUIRED_VARS FFmpeg_INCLUDE_${c} FFmpeg_LIBRARY_${c})
-endforeach()
-
-include(FindPackageHandleStandardArgs)
-find_package_handle_standard_args(FFmpeg
-    REQUIRED_VARS ${_FFmpeg_REQUIRED_VARS}
-    HANDLE_COMPONENTS
-)
-
-foreach(c ${_FFmpeg_ALL_COMPONENTS})
-    unset(_FFmpeg_DEPS_${c})
-endforeach()
-unset(_FFmpeg_ALL_COMPONENTS)
-unset(_FFmpeg_REQUIRED_VARS)

externals/library-headers (vendored submodule, 1 changed line)
@@ -0,0 +1 @@
+Subproject commit 071bc4282ca29ec255ab2dae32c978481ca5dfea

@@ -9,6 +9,10 @@ add_library(audio_core STATIC
     hle/common.h
     hle/decoder.cpp
     hle/decoder.h
+    hle/fdk_decoder.cpp
+    hle/fdk_decoder.h
+    hle/ffmpeg_decoder.cpp
+    hle/ffmpeg_decoder.h
     hle/filter.cpp
     hle/filter.h
     hle/hle.cpp

@@ -67,26 +71,6 @@ elseif(ENABLE_AUDIOTOOLBOX)
     find_library(AUDIOTOOLBOX AudioToolbox)
    target_link_libraries(audio_core PRIVATE ${AUDIOTOOLBOX})
     target_compile_definitions(audio_core PUBLIC HAVE_AUDIOTOOLBOX)
-elseif(ENABLE_FFMPEG_AUDIO_DECODER)
-    target_sources(audio_core PRIVATE
-        hle/ffmpeg_decoder.cpp
-        hle/ffmpeg_decoder.h
-        hle/ffmpeg_dl.cpp
-        hle/ffmpeg_dl.h
-    )
-    if(UNIX)
-        target_link_libraries(audio_core PRIVATE FFmpeg::avcodec)
-    else()
-        target_include_directories(audio_core PRIVATE ${FFMPEG_DIR}/include)
-    endif()
-    target_compile_definitions(audio_core PUBLIC HAVE_FFMPEG)
-elseif(ENABLE_FDK)
-    target_sources(audio_core PRIVATE
-        hle/fdk_decoder.cpp
-        hle/fdk_decoder.h
-    )
-    target_link_libraries(audio_core PRIVATE ${FDK_AAC})
-    target_compile_definitions(audio_core PUBLIC HAVE_FDK)
 endif()

 if(ANDROID)

@@ -47,8 +47,9 @@ void DspInterface::OutputFrame(StereoFrame16 frame) {

     fifo.Push(frame.data(), frame.size());

-    if (Core::System::GetInstance().VideoDumper().IsDumping()) {
-        Core::System::GetInstance().VideoDumper().AddAudioFrame(std::move(frame));
+    auto video_dumper = Core::System::GetInstance().GetVideoDumper();
+    if (video_dumper && video_dumper->IsDumping()) {
+        video_dumper->AddAudioFrame(std::move(frame));
     }
 }

@@ -58,8 +59,9 @@ void DspInterface::OutputSample(std::array<s16, 2> sample) {

     fifo.Push(&sample, 1);

-    if (Core::System::GetInstance().VideoDumper().IsDumping()) {
-        Core::System::GetInstance().VideoDumper().AddAudioSample(std::move(sample));
+    auto video_dumper = Core::System::GetInstance().GetVideoDumper();
+    if (video_dumper && video_dumper->IsDumping()) {
+        video_dumper->AddAudioSample(std::move(sample));
     }
 }

@@ -2,8 +2,10 @@
 // Licensed under GPLv2 or any later version
 // Refer to the license.txt file included.

-#include <fdk-aac/aacdecoder_lib.h>
 #include "audio_core/hle/fdk_decoder.h"
+#include "common/dynamic_library/fdk-aac.h"
+
+using namespace DynamicLibrary;

 namespace AudioCore::HLE {

@@ -29,13 +31,17 @@ private:
 };

 FDKDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
+    if (!FdkAac::LoadFdkAac()) {
+        return;
+    }
+
     // allocate an array of LIB_INFO structures
     // if we don't pre-fill the whole segment with zeros, when we call `aacDecoder_GetLibInfo`
     // it will segfault, upon investigation, there is some code in fdk_aac depends on your initial
     // values in this array
     LIB_INFO decoder_info[FDK_MODULE_LAST] = {};
     // get library information and fill the struct
-    if (aacDecoder_GetLibInfo(decoder_info) != 0) {
+    if (FdkAac::aacDecoder_GetLibInfo(decoder_info) != 0) {
         LOG_ERROR(Audio_DSP, "Failed to retrieve fdk_aac library information!");
         return;
     }

@@ -44,14 +50,14 @@ FDKDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
              decoder_info[0].build_date);

     // choose the input format when initializing: 1 layer of ADTS
-    decoder = aacDecoder_Open(TRANSPORT_TYPE::TT_MP4_ADTS, 1);
+    decoder = FdkAac::aacDecoder_Open(TRANSPORT_TYPE::TT_MP4_ADTS, 1);
     // set maximum output channel to two (stereo)
     // if the input samples have more channels, fdk_aac will perform a downmix
-    AAC_DECODER_ERROR ret = aacDecoder_SetParam(decoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
+    AAC_DECODER_ERROR ret = FdkAac::aacDecoder_SetParam(decoder, AAC_PCM_MAX_OUTPUT_CHANNELS, 2);
     if (ret != AAC_DEC_OK) {
         // unable to set this parameter reflects the decoder implementation might be broken
         // we'd better shuts down everything
-        aacDecoder_Close(decoder);
+        FdkAac::aacDecoder_Close(decoder);
         decoder = nullptr;
         LOG_ERROR(Audio_DSP, "Unable to set downmix parameter: {}", ret);
         return;

@@ -73,8 +79,9 @@ std::optional<BinaryMessage> FDKDecoder::Impl::Initalize(const BinaryMessage& re
 }

 FDKDecoder::Impl::~Impl() {
-    if (decoder)
-        aacDecoder_Close(decoder);
+    if (decoder) {
+        FdkAac::aacDecoder_Close(decoder);
+    }
 }

 void FDKDecoder::Impl::Clear() {

@@ -84,9 +91,10 @@ void FDKDecoder::Impl::Clear() {
     // FLUSH - flush internal buffer
     // INTR - treat the current internal buffer as discontinuous
     // CONCEAL - try to interpolate and smooth out the samples
-    if (decoder)
-        aacDecoder_DecodeFrame(decoder, decoder_output, 8192,
+    if (decoder) {
+        FdkAac::aacDecoder_DecodeFrame(decoder, decoder_output, 8192,
                                AACDEC_FLUSH & AACDEC_INTR & AACDEC_CONCEAL);
+    }
 }

 std::optional<BinaryMessage> FDKDecoder::Impl::ProcessRequest(const BinaryMessage& request) {

|
@ -140,7 +148,7 @@ std::optional<BinaryMessage> FDKDecoder::Impl::Decode(const BinaryMessage& reque
|
||||||
|
|
||||||
std::array<std::vector<s16>, 2> out_streams;
|
std::array<std::vector<s16>, 2> out_streams;
|
||||||
|
|
||||||
std::size_t data_size = request.decode_aac_request.size;
|
u32 data_size = request.decode_aac_request.size;
|
||||||
|
|
||||||
// decoding loops
|
// decoding loops
|
||||||
AAC_DECODER_ERROR result = AAC_DEC_OK;
|
AAC_DECODER_ERROR result = AAC_DEC_OK;
|
||||||
|
@@ -156,18 +164,18 @@ std::optional<BinaryMessage> FDKDecoder::Impl::Decode(const BinaryMessage& reque
     while (buffer_remaining) {
         // queue the input buffer, fdk_aac will automatically slice out the buffer it needs
         // from the input buffer
-        result = aacDecoder_Fill(decoder, &data, &input_size, &buffer_remaining);
+        result = FdkAac::aacDecoder_Fill(decoder, &data, &input_size, &buffer_remaining);
         if (result != AAC_DEC_OK) {
             // there are some issues when queuing the input buffer
             LOG_ERROR(Audio_DSP, "Failed to enqueue the input samples");
             return std::nullopt;
         }
         // get output from decoder
-        result = aacDecoder_DecodeFrame(decoder, decoder_output,
+        result = FdkAac::aacDecoder_DecodeFrame(decoder, decoder_output,
                                         sizeof(decoder_output) / sizeof(s16), 0);
         if (result == AAC_DEC_OK) {
             // get the stream information
-            stream_info = aacDecoder_GetStreamInfo(decoder);
+            stream_info = FdkAac::aacDecoder_GetStreamInfo(decoder);
             // fill the stream information for binary response
             response.decode_aac_response.sample_rate = GetSampleRateEnum(stream_info->sampleRate);
             response.decode_aac_response.num_channels = stream_info->numChannels;

@@ -3,7 +3,9 @@
 // Refer to the license.txt file included.

 #include "audio_core/hle/ffmpeg_decoder.h"
-#include "audio_core/hle/ffmpeg_dl.h"
+#include "common/dynamic_library/ffmpeg.h"
+
+using namespace DynamicLibrary;

 namespace AudioCore::HLE {

@@ -25,25 +27,25 @@ private:

 struct AVPacketDeleter {
     void operator()(AVPacket* packet) const {
-        av_packet_free_dl(&packet);
+        FFmpeg::av_packet_free(&packet);
     }
 };

 struct AVCodecContextDeleter {
     void operator()(AVCodecContext* context) const {
-        avcodec_free_context_dl(&context);
+        FFmpeg::avcodec_free_context(&context);
     }
 };

 struct AVCodecParserContextDeleter {
     void operator()(AVCodecParserContext* parser) const {
-        av_parser_close_dl(parser);
+        FFmpeg::av_parser_close(parser);
     }
 };

 struct AVFrameDeleter {
     void operator()(AVFrame* frame) const {
-        av_frame_free_dl(&frame);
+        FFmpeg::av_frame_free(&frame);
     }
 };

|
||||||
};
|
};
|
||||||
|
|
||||||
FFMPEGDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
|
FFMPEGDecoder::Impl::Impl(Memory::MemorySystem& memory) : memory(memory) {
|
||||||
have_ffmpeg_dl = InitFFmpegDL();
|
have_ffmpeg_dl = FFmpeg::LoadFFmpeg();
|
||||||
}
|
}
|
||||||
|
|
||||||
FFMPEGDecoder::Impl::~Impl() = default;
|
FFMPEGDecoder::Impl::~Impl() = default;
|
||||||
|
@ -102,27 +104,27 @@ std::optional<BinaryMessage> FFMPEGDecoder::Impl::Initalize(const BinaryMessage&
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
av_packet.reset(av_packet_alloc_dl());
|
av_packet.reset(FFmpeg::av_packet_alloc());
|
||||||
|
|
||||||
codec = avcodec_find_decoder_dl(AV_CODEC_ID_AAC);
|
codec = FFmpeg::avcodec_find_decoder(AV_CODEC_ID_AAC);
|
||||||
if (!codec) {
|
if (!codec) {
|
||||||
LOG_ERROR(Audio_DSP, "Codec not found\n");
|
LOG_ERROR(Audio_DSP, "Codec not found\n");
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
parser.reset(av_parser_init_dl(codec->id));
|
parser.reset(FFmpeg::av_parser_init(codec->id));
|
||||||
if (!parser) {
|
if (!parser) {
|
||||||
LOG_ERROR(Audio_DSP, "Parser not found\n");
|
LOG_ERROR(Audio_DSP, "Parser not found\n");
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
av_context.reset(avcodec_alloc_context3_dl(codec));
|
av_context.reset(FFmpeg::avcodec_alloc_context3(codec));
|
||||||
if (!av_context) {
|
if (!av_context) {
|
||||||
LOG_ERROR(Audio_DSP, "Could not allocate audio codec context\n");
|
LOG_ERROR(Audio_DSP, "Could not allocate audio codec context\n");
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (avcodec_open2_dl(av_context.get(), codec, nullptr) < 0) {
|
if (FFmpeg::avcodec_open2(av_context.get(), codec, nullptr) < 0) {
|
||||||
LOG_ERROR(Audio_DSP, "Could not open codec\n");
|
LOG_ERROR(Audio_DSP, "Could not open codec\n");
|
||||||
return response;
|
return response;
|
||||||
}
|
}
|
||||||
|
@@ -170,16 +172,16 @@ std::optional<BinaryMessage> FFMPEGDecoder::Impl::Decode(const BinaryMessage& re
     std::size_t data_size = request.decode_aac_request.size;
     while (data_size > 0) {
         if (!decoded_frame) {
-            decoded_frame.reset(av_frame_alloc_dl());
+            decoded_frame.reset(FFmpeg::av_frame_alloc());
             if (!decoded_frame) {
                 LOG_ERROR(Audio_DSP, "Could not allocate audio frame");
                 return {};
             }
         }

-        int ret =
-            av_parser_parse2_dl(parser.get(), av_context.get(), &av_packet->data, &av_packet->size,
-                                data, data_size, AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
+        int ret = FFmpeg::av_parser_parse2(parser.get(), av_context.get(), &av_packet->data,
+                                           &av_packet->size, data, static_cast<int>(data_size),
+                                           AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
         if (ret < 0) {
             LOG_ERROR(Audio_DSP, "Error while parsing");
             return {};

@@ -187,7 +189,7 @@ std::optional<BinaryMessage> FFMPEGDecoder::Impl::Decode(const BinaryMessage& re
         data += ret;
         data_size -= ret;

-        ret = avcodec_send_packet_dl(av_context.get(), av_packet.get());
+        ret = FFmpeg::avcodec_send_packet(av_context.get(), av_packet.get());
         if (ret < 0) {
             LOG_ERROR(Audio_DSP, "Error submitting the packet to the decoder");
             return {};

@@ -195,33 +197,39 @@ std::optional<BinaryMessage> FFMPEGDecoder::Impl::Decode(const BinaryMessage& re

         if (av_packet->size) {
             while (ret >= 0) {
-                ret = avcodec_receive_frame_dl(av_context.get(), decoded_frame.get());
+                ret = FFmpeg::avcodec_receive_frame(av_context.get(), decoded_frame.get());
                 if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                     break;
                 else if (ret < 0) {
                     LOG_ERROR(Audio_DSP, "Error during decoding");
                     return {};
                 }
-                int bytes_per_sample = av_get_bytes_per_sample_dl(av_context->sample_fmt);
+                int bytes_per_sample = FFmpeg::av_get_bytes_per_sample(av_context->sample_fmt);
                 if (bytes_per_sample < 0) {
                     LOG_ERROR(Audio_DSP, "Failed to calculate data size");
                     return {};
                 }

-                ASSERT(decoded_frame->channels <= out_streams.size());
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
+                auto num_channels = static_cast<u32>(decoded_frame->ch_layout.nb_channels);
+#else
+                auto num_channels = static_cast<u32>(decoded_frame->channels);
+#endif
+
+                ASSERT(num_channels <= out_streams.size());

                 std::size_t size = bytes_per_sample * (decoded_frame->nb_samples);

                 response.decode_aac_response.sample_rate =
                     GetSampleRateEnum(decoded_frame->sample_rate);
-                response.decode_aac_response.num_channels = decoded_frame->channels;
+                response.decode_aac_response.num_channels = num_channels;
                 response.decode_aac_response.num_samples += decoded_frame->nb_samples;

                 // FFmpeg converts to 32 signed floating point PCM, we need s16 PCM so we need to
                 // convert it
                 f32 val_float;
                 for (std::size_t current_pos(0); current_pos < size;) {
-                    for (std::size_t channel(0); channel < decoded_frame->channels; channel++) {
+                    for (std::size_t channel(0); channel < num_channels; channel++) {
                         std::memcpy(&val_float, decoded_frame->data[channel] + current_pos,
                                     sizeof(val_float));
                         val_float = std::clamp(val_float, -1.0f, 1.0f);

@@ -1,178 +0,0 @@
-// Copyright 2018 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#ifdef _WIN32
-
-#include <memory>
-#include "audio_core/hle/ffmpeg_dl.h"
-#include "common/file_util.h"
-#include "common/logging/log.h"
-#include "common/string_util.h"
-
-namespace {
-
-struct LibraryDeleter {
-    using pointer = HMODULE;
-    void operator()(HMODULE h) const {
-        if (h != nullptr)
-            FreeLibrary(h);
-    }
-};
-
-std::unique_ptr<HMODULE, LibraryDeleter> dll_util{nullptr};
-std::unique_ptr<HMODULE, LibraryDeleter> dll_codec{nullptr};
-
-} // namespace
-
-FuncDL<int(AVSampleFormat)> av_get_bytes_per_sample_dl;
-FuncDL<AVFrame*(void)> av_frame_alloc_dl;
-FuncDL<void(AVFrame**)> av_frame_free_dl;
-FuncDL<AVCodecContext*(const AVCodec*)> avcodec_alloc_context3_dl;
-FuncDL<void(AVCodecContext**)> avcodec_free_context_dl;
-FuncDL<int(AVCodecContext*, const AVCodec*, AVDictionary**)> avcodec_open2_dl;
-FuncDL<AVPacket*(void)> av_packet_alloc_dl;
-FuncDL<void(AVPacket**)> av_packet_free_dl;
-FuncDL<AVCodec*(AVCodecID)> avcodec_find_decoder_dl;
-FuncDL<int(AVCodecContext*, const AVPacket*)> avcodec_send_packet_dl;
-FuncDL<int(AVCodecContext*, AVFrame*)> avcodec_receive_frame_dl;
-FuncDL<AVCodecParserContext*(int)> av_parser_init_dl;
-FuncDL<int(AVCodecParserContext*, AVCodecContext*, uint8_t**, int*, const uint8_t*, int, int64_t,
-           int64_t, int64_t)>
-    av_parser_parse2_dl;
-FuncDL<void(AVCodecParserContext*)> av_parser_close_dl;
-
-bool InitFFmpegDL() {
-    std::string dll_path = FileUtil::GetUserPath(FileUtil::UserPath::DLLDir);
-    FileUtil::CreateDir(dll_path);
-    std::wstring w_dll_path = Common::UTF8ToUTF16W(dll_path);
-    SetDllDirectoryW(w_dll_path.c_str());
-
-    dll_util.reset(LoadLibrary("avutil-56.dll"));
-    if (!dll_util) {
-        DWORD error_message_id = GetLastError();
-        LPSTR message_buffer = nullptr;
-        size_t size =
-            FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
-                               FORMAT_MESSAGE_IGNORE_INSERTS,
-                           nullptr, error_message_id, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
-                           reinterpret_cast<LPSTR>(&message_buffer), 0, nullptr);
-
-        std::string message(message_buffer, size);
-
-        LocalFree(message_buffer);
-        LOG_ERROR(Audio_DSP, "Could not load avutil-56.dll: {}", message);
-        return false;
-    }
-
-    dll_codec.reset(LoadLibrary("avcodec-58.dll"));
-    if (!dll_codec) {
-        DWORD error_message_id = GetLastError();
-        LPSTR message_buffer = nullptr;
-        size_t size =
-            FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
-                               FORMAT_MESSAGE_IGNORE_INSERTS,
-                           nullptr, error_message_id, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
-                           reinterpret_cast<LPSTR>(&message_buffer), 0, nullptr);
-
-        std::string message(message_buffer, size);
-
-        LocalFree(message_buffer);
-        LOG_ERROR(Audio_DSP, "Could not load avcodec-58.dll: {}", message);
-        return false;
-    }
-    av_get_bytes_per_sample_dl =
-        FuncDL<int(AVSampleFormat)>(dll_util.get(), "av_get_bytes_per_sample");
-    if (!av_get_bytes_per_sample_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_get_bytes_per_sample");
-        return false;
-    }
-
-    av_frame_alloc_dl = FuncDL<AVFrame*()>(dll_util.get(), "av_frame_alloc");
-    if (!av_frame_alloc_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_frame_alloc");
-        return false;
-    }
-
-    av_frame_free_dl = FuncDL<void(AVFrame**)>(dll_util.get(), "av_frame_free");
-    if (!av_frame_free_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_frame_free");
-        return false;
-    }
-
-    avcodec_alloc_context3_dl =
-        FuncDL<AVCodecContext*(const AVCodec*)>(dll_codec.get(), "avcodec_alloc_context3");
-    if (!avcodec_alloc_context3_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function avcodec_alloc_context3");
-        return false;
-    }
-
-    avcodec_free_context_dl =
-        FuncDL<void(AVCodecContext**)>(dll_codec.get(), "avcodec_free_context");
-    if (!av_get_bytes_per_sample_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function avcodec_free_context");
-        return false;
-    }
-
-    avcodec_open2_dl = FuncDL<int(AVCodecContext*, const AVCodec*, AVDictionary**)>(
-        dll_codec.get(), "avcodec_open2");
-    if (!avcodec_open2_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function avcodec_open2");
-        return false;
-    }
-    av_packet_alloc_dl = FuncDL<AVPacket*(void)>(dll_codec.get(), "av_packet_alloc");
-    if (!av_packet_alloc_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_packet_alloc");
-        return false;
-    }
-
-    av_packet_free_dl = FuncDL<void(AVPacket**)>(dll_codec.get(), "av_packet_free");
-    if (!av_packet_free_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_packet_free");
-        return false;
-    }
-
-    avcodec_find_decoder_dl = FuncDL<AVCodec*(AVCodecID)>(dll_codec.get(), "avcodec_find_decoder");
-    if (!avcodec_find_decoder_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function avcodec_find_decoder");
-        return false;
-    }
-
-    avcodec_send_packet_dl =
-        FuncDL<int(AVCodecContext*, const AVPacket*)>(dll_codec.get(), "avcodec_send_packet");
-    if (!avcodec_send_packet_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function avcodec_send_packet");
-        return false;
-    }
-
-    avcodec_receive_frame_dl =
-        FuncDL<int(AVCodecContext*, AVFrame*)>(dll_codec.get(), "avcodec_receive_frame");
-    if (!avcodec_receive_frame_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function avcodec_receive_frame");
-        return false;
-    }
-
-    av_parser_init_dl = FuncDL<AVCodecParserContext*(int)>(dll_codec.get(), "av_parser_init");
-    if (!av_parser_init_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_parser_init");
-        return false;
-    }
-
-    av_parser_parse2_dl =
-        FuncDL<int(AVCodecParserContext*, AVCodecContext*, uint8_t**, int*, const uint8_t*, int,
-                   int64_t, int64_t, int64_t)>(dll_codec.get(), "av_parser_parse2");
-    if (!av_parser_parse2_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_parser_parse2");
-        return false;
-    }
-
-    av_parser_close_dl = FuncDL<void(AVCodecParserContext*)>(dll_codec.get(), "av_parser_close");
-    if (!av_parser_close_dl) {
-        LOG_ERROR(Audio_DSP, "Can not load function av_parser_close");
-        return false;
-    }
-
-    return true;
-}
-
-#endif // _Win32

@@ -1,79 +0,0 @@
-// Copyright 2018 Citra Emulator Project
-// Licensed under GPLv2 or any later version
-// Refer to the license.txt file included.
-
-#pragma once
-
-#ifdef _WIN32
-#include <windows.h>
-#endif // _WIN32
-
-extern "C" {
-#include <libavcodec/avcodec.h>
-}
-
-#ifdef _WIN32
-
-template <typename T>
-struct FuncDL {
-    FuncDL() = default;
-    FuncDL(HMODULE dll, const char* name) {
-        if (dll) {
-            ptr_function = reinterpret_cast<T*>(GetProcAddress(dll, name));
-        }
-    }
-
-    operator T*() const {
-        return ptr_function;
-    }
-
-    explicit operator bool() const {
-        return ptr_function != nullptr;
-    }
-
-    T* ptr_function = nullptr;
-};
-
-extern FuncDL<int(AVSampleFormat)> av_get_bytes_per_sample_dl;
-extern FuncDL<AVFrame*(void)> av_frame_alloc_dl;
-extern FuncDL<void(AVFrame**)> av_frame_free_dl;
-extern FuncDL<AVCodecContext*(const AVCodec*)> avcodec_alloc_context3_dl;
-extern FuncDL<void(AVCodecContext**)> avcodec_free_context_dl;
-extern FuncDL<int(AVCodecContext*, const AVCodec*, AVDictionary**)> avcodec_open2_dl;
-extern FuncDL<AVPacket*(void)> av_packet_alloc_dl;
-extern FuncDL<void(AVPacket**)> av_packet_free_dl;
-extern FuncDL<AVCodec*(AVCodecID)> avcodec_find_decoder_dl;
-extern FuncDL<int(AVCodecContext*, const AVPacket*)> avcodec_send_packet_dl;
-extern FuncDL<int(AVCodecContext*, AVFrame*)> avcodec_receive_frame_dl;
-extern FuncDL<AVCodecParserContext*(int)> av_parser_init_dl;
-extern FuncDL<int(AVCodecParserContext*, AVCodecContext*, uint8_t**, int*, const uint8_t*, int,
-                  int64_t, int64_t, int64_t)>
-    av_parser_parse2_dl;
-extern FuncDL<void(AVCodecParserContext*)> av_parser_close_dl;
-
-bool InitFFmpegDL();
-
-#else // _Win32
-
-// No dynamic loading for Unix and Apple
-
-const auto av_get_bytes_per_sample_dl = &av_get_bytes_per_sample;
-const auto av_frame_alloc_dl = &av_frame_alloc;
-const auto av_frame_free_dl = &av_frame_free;
-const auto avcodec_alloc_context3_dl = &avcodec_alloc_context3;
-const auto avcodec_free_context_dl = &avcodec_free_context;
-const auto avcodec_open2_dl = &avcodec_open2;
-const auto av_packet_alloc_dl = &av_packet_alloc;
-const auto av_packet_free_dl = &av_packet_free;
-const auto avcodec_find_decoder_dl = &avcodec_find_decoder;
-const auto avcodec_send_packet_dl = &avcodec_send_packet;
-const auto avcodec_receive_frame_dl = &avcodec_receive_frame;
-const auto av_parser_init_dl = &av_parser_init;
-const auto av_parser_parse2_dl = &av_parser_parse2;
-const auto av_parser_close_dl = &av_parser_close;
-
-bool InitFFmpegDL() {
-    return true;
-}
-
-#endif // _Win32

@@ -12,15 +12,13 @@
 #include "audio_core/hle/wmf_decoder.h"
 #elif HAVE_AUDIOTOOLBOX
 #include "audio_core/hle/audiotoolbox_decoder.h"
-#elif HAVE_FFMPEG
-#include "audio_core/hle/ffmpeg_decoder.h"
 #elif ANDROID
 #include "audio_core/hle/mediandk_decoder.h"
-#elif HAVE_FDK
-#include "audio_core/hle/fdk_decoder.h"
 #endif
 #include "audio_core/hle/common.h"
 #include "audio_core/hle/decoder.h"
+#include "audio_core/hle/fdk_decoder.h"
+#include "audio_core/hle/ffmpeg_decoder.h"
 #include "audio_core/hle/hle.h"
 #include "audio_core/hle/mixers.h"
 #include "audio_core/hle/shared_memory.h"

@@ -120,6 +118,31 @@ private:
     friend class boost::serialization::access;
 };

+static std::vector<std::function<std::unique_ptr<HLE::DecoderBase>(Memory::MemorySystem&)>>
+    decoder_backends = {
+#if defined(HAVE_MF)
+        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
+            return std::make_unique<HLE::WMFDecoder>(memory);
+        },
+#endif
+#if defined(HAVE_AUDIOTOOLBOX)
+        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
+            return std::make_unique<HLE::AudioToolboxDecoder>(memory);
+        },
+#endif
+#if ANDROID
+        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
+            return std::make_unique<HLE::MediaNDKDecoder>(memory);
+        },
+#endif
+        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
+            return std::make_unique<HLE::FDKDecoder>(memory);
+        },
+        [](Memory::MemorySystem& memory) -> std::unique_ptr<HLE::DecoderBase> {
+            return std::make_unique<HLE::FFMPEGDecoder>(memory);
+        },
+    };
+
 DspHle::Impl::Impl(DspHle& parent_, Memory::MemorySystem& memory, Core::Timing& timing)
     : parent(parent_), core_timing(timing) {
     dsp_memory.raw_memory.fill(0);

@@ -128,28 +151,14 @@ DspHle::Impl::Impl(DspHle& parent_, Memory::MemorySystem& memory, Core::Timing&
         source.SetMemory(memory);
     }

-#if defined(HAVE_MF) && defined(HAVE_FFMPEG)
-    decoder = std::make_unique<HLE::WMFDecoder>(memory);
-    if (!decoder->IsValid()) {
-        LOG_WARNING(Audio_DSP, "Unable to load MediaFoundation. Attempting to load FFMPEG instead");
-        decoder = std::make_unique<HLE::FFMPEGDecoder>(memory);
-    }
-#elif defined(HAVE_MF)
-    decoder = std::make_unique<HLE::WMFDecoder>(memory);
-#elif defined(HAVE_AUDIOTOOLBOX)
-    decoder = std::make_unique<HLE::AudioToolboxDecoder>(memory);
-#elif defined(HAVE_FFMPEG)
-    decoder = std::make_unique<HLE::FFMPEGDecoder>(memory);
-#elif ANDROID
-    decoder = std::make_unique<HLE::MediaNDKDecoder>(memory);
-#elif defined(HAVE_FDK)
-    decoder = std::make_unique<HLE::FDKDecoder>(memory);
-#else
-    LOG_WARNING(Audio_DSP, "No decoder found, this could lead to missing audio");
-    decoder = std::make_unique<HLE::NullDecoder>();
-#endif // HAVE_MF
+    for (auto& factory : decoder_backends) {
+        decoder = factory(memory);
+        if (decoder && decoder->IsValid()) {
+            break;
+        }
+    }

-    if (!decoder->IsValid()) {
+    if (!decoder || !decoder->IsValid()) {
         LOG_WARNING(Audio_DSP,
                     "Unable to load any decoders, this could cause missing audio in some games");
         decoder = std::make_unique<HLE::NullDecoder>();

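Reviewer note: decoder selection is now data-driven. decoder_backends holds one factory per compiled-in backend, the constructor walks the list until a factory returns a decoder whose IsValid() passes (FFMPEGDecoder and FDKDecoder report invalid when their LoadFFmpeg()/LoadFdkAac() calls fail at runtime), and NullDecoder remains the final fallback. A self-contained model of that loop, with the HLE types replaced by a toy interface purely for illustration:

    #include <functional>
    #include <memory>
    #include <vector>

    struct Decoder {
        virtual ~Decoder() = default;
        virtual bool IsValid() const = 0;
    };

    struct NullDecoder : Decoder {
        bool IsValid() const override { return true; }
    };

    std::unique_ptr<Decoder> PickDecoder(
        const std::vector<std::function<std::unique_ptr<Decoder>()>>& backends) {
        std::unique_ptr<Decoder> decoder;
        for (const auto& factory : backends) {
            decoder = factory();
            if (decoder && decoder->IsValid()) {
                break; // first backend whose library actually loaded wins
            }
        }
        if (!decoder || !decoder->IsValid()) {
            decoder = std::make_unique<NullDecoder>(); // same fallback as the patch
        }
        return decoder;
    }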
@@ -27,6 +27,7 @@
 #include "common/string_util.h"
 #include "core/core.h"
 #include "core/dumping/backend.h"
+#include "core/dumping/ffmpeg_backend.h"
 #include "core/file_sys/cia_container.h"
 #include "core/frontend/applets/default_applets.h"
 #include "core/frontend/framebuffer_layout.h"

@@ -445,10 +446,13 @@ int main(int argc, char** argv) {
     if (!movie_record.empty()) {
         Core::Movie::GetInstance().StartRecording(movie_record, movie_record_author);
     }
-    if (!dump_video.empty()) {
+    if (!dump_video.empty() && DynamicLibrary::FFmpeg::LoadFFmpeg()) {
         Layout::FramebufferLayout layout{Layout::FrameLayoutFromResolutionScale(
             VideoCore::g_renderer->GetResolutionScaleFactor())};
-        system.VideoDumper().StartDumping(dump_video, layout);
+        auto dumper = std::make_shared<VideoDumper::FFmpegBackend>();
+        if (dumper->StartDumping(dump_video, layout)) {
+            Core::System::GetInstance().RegisterVideoDumper(dumper);
+        }
     }

     std::thread main_render_thread([&emu_window] { emu_window->Present(); });

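Reviewer note: the frontend now owns dumper creation. The SDL frontend builds a VideoDumper::FFmpegBackend only when DynamicLibrary::FFmpeg::LoadFFmpeg() succeeds and registers it with the core; consumers such as DspInterface::OutputFrame fetch it with GetVideoDumper() and must null-check, since no dumper may be registered. The Core::System side of this API is not shown in the excerpt; its presumed shape, inferred only from the call sites in this diff:

    // Assumed sketch of the accessor pair; not copied from core/core.h.
    #include <memory>
    #include <utility>

    namespace VideoDumper { class Backend; }

    class System {
    public:
        // Called by a frontend once a dumper has successfully started.
        void RegisterVideoDumper(std::shared_ptr<VideoDumper::Backend> dumper) {
            video_dumper = std::move(dumper);
        }
        // May return null: callers must check before use.
        std::shared_ptr<VideoDumper::Backend> GetVideoDumper() const {
            return video_dumper;
        }

    private:
        std::shared_ptr<VideoDumper::Backend> video_dumper;
    };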
|
@ -491,8 +495,10 @@ int main(int argc, char** argv) {
|
||||||
secondary_render_thread.join();
|
secondary_render_thread.join();
|
||||||
|
|
||||||
Core::Movie::GetInstance().Shutdown();
|
Core::Movie::GetInstance().Shutdown();
|
||||||
if (system.VideoDumper().IsDumping()) {
|
|
||||||
system.VideoDumper().StopDumping();
|
auto video_dumper = system.GetVideoDumper();
|
||||||
|
if (video_dumper && video_dumper->IsDumping()) {
|
||||||
|
video_dumper->StopDumping();
|
||||||
}
|
}
|
||||||
|
|
||||||
Network::Shutdown();
|
Network::Shutdown();
|
||||||
|
|
|
@@ -120,6 +120,15 @@ add_executable(citra-qt
     debugger/wait_tree.cpp
     debugger/wait_tree.h
     discord.h
+    dumping/dumping_dialog.cpp
+    dumping/dumping_dialog.h
+    dumping/dumping_dialog.ui
+    dumping/option_set_dialog.cpp
+    dumping/option_set_dialog.h
+    dumping/option_set_dialog.ui
+    dumping/options_dialog.cpp
+    dumping/options_dialog.h
+    dumping/options_dialog.ui
     game_list.cpp
     game_list.h
     game_list_p.h

|
||||||
util/util.h
|
util/util.h
|
||||||
)
|
)
|
||||||
|
|
||||||
if (ENABLE_FFMPEG_VIDEO_DUMPER)
|
|
||||||
target_sources(citra-qt PRIVATE
|
|
||||||
dumping/dumping_dialog.cpp
|
|
||||||
dumping/dumping_dialog.h
|
|
||||||
dumping/dumping_dialog.ui
|
|
||||||
dumping/option_set_dialog.cpp
|
|
||||||
dumping/option_set_dialog.h
|
|
||||||
dumping/option_set_dialog.ui
|
|
||||||
dumping/options_dialog.cpp
|
|
||||||
dumping/options_dialog.h
|
|
||||||
dumping/options_dialog.ui
|
|
||||||
)
|
|
||||||
endif()
|
|
||||||
|
|
||||||
file(GLOB COMPAT_LIST
|
file(GLOB COMPAT_LIST
|
||||||
${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.qrc
|
${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.qrc
|
||||||
${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.json)
|
${PROJECT_BINARY_DIR}/dist/compatibility_list/compatibility_list.json)
|
||||||
|
@@ -377,11 +372,6 @@ if (MSVC)
         include(CopyCitraOpensslDeps)
         copy_citra_openssl_deps(citra-qt)
     endif()
-
-    if (ENABLE_FFMPEG)
-        include(CopyCitraFFmpegDeps)
-        copy_citra_FFmpeg_deps(citra-qt)
-    endif()
 endif()

 if (NOT APPLE)

@@ -10,10 +10,6 @@
 #include "common/string_util.h"
 #include "ui_option_set_dialog.h"

-extern "C" {
-#include <libavutil/pixdesc.h>
-}
-
 static const std::unordered_map<AVOptionType, const char*> TypeNameMap{{
     {AV_OPT_TYPE_BOOL, QT_TR_NOOP("boolean")},
     {AV_OPT_TYPE_FLAGS, QT_TR_NOOP("flags")},

@ -56,21 +52,14 @@ std::vector<std::pair<QString, QString>> GetPresetValues(const VideoDumper::Opti
|
||||||
}
|
}
|
||||||
case AV_OPT_TYPE_PIXEL_FMT: {
|
case AV_OPT_TYPE_PIXEL_FMT: {
|
||||||
std::vector<std::pair<QString, QString>> out{{QObject::tr("none"), QStringLiteral("none")}};
|
std::vector<std::pair<QString, QString>> out{{QObject::tr("none"), QStringLiteral("none")}};
|
||||||
// List all pixel formats
|
for (const auto& name : VideoDumper::GetPixelFormats()) {
|
||||||
const AVPixFmtDescriptor* current = nullptr;
|
out.emplace_back(QString::fromUtf8(name), QString::fromUtf8(name));
|
||||||
while ((current = av_pix_fmt_desc_next(current))) {
|
|
||||||
out.emplace_back(QString::fromUtf8(current->name), QString::fromUtf8(current->name));
|
|
||||||
}
|
}
|
||||||
return out;
|
return out;
|
||||||
}
|
}
|
||||||
case AV_OPT_TYPE_SAMPLE_FMT: {
|
case AV_OPT_TYPE_SAMPLE_FMT: {
|
||||||
std::vector<std::pair<QString, QString>> out{{QObject::tr("none"), QStringLiteral("none")}};
|
std::vector<std::pair<QString, QString>> out{{QObject::tr("none"), QStringLiteral("none")}};
|
||||||
// List all sample formats
|
for (const auto& name : VideoDumper::GetSampleFormats()) {
|
||||||
int current = 0;
|
|
||||||
while (true) {
|
|
||||||
const char* name = av_get_sample_fmt_name(static_cast<AVSampleFormat>(current));
|
|
||||||
if (name == nullptr)
|
|
||||||
break;
|
|
||||||
out.emplace_back(QString::fromUtf8(name), QString::fromUtf8(name));
|
out.emplace_back(QString::fromUtf8(name), QString::fromUtf8(name));
|
||||||
}
|
}
|
||||||
return out;
|
return out;
|
||||||
|
|
|
@ -49,6 +49,7 @@
|
||||||
#include "citra_qt/debugger/registers.h"
|
#include "citra_qt/debugger/registers.h"
|
||||||
#include "citra_qt/debugger/wait_tree.h"
|
#include "citra_qt/debugger/wait_tree.h"
|
||||||
#include "citra_qt/discord.h"
|
#include "citra_qt/discord.h"
|
||||||
|
#include "citra_qt/dumping/dumping_dialog.h"
|
||||||
#include "citra_qt/game_list.h"
|
#include "citra_qt/game_list.h"
|
||||||
#include "citra_qt/hotkeys.h"
|
#include "citra_qt/hotkeys.h"
|
||||||
#include "citra_qt/loading_screen.h"
|
#include "citra_qt/loading_screen.h"
|
||||||
|
@ -102,10 +103,6 @@
|
||||||
#include "citra_qt/discord_impl.h"
|
#include "citra_qt/discord_impl.h"
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
|
|
||||||
#include "citra_qt/dumping/dumping_dialog.h"
|
|
||||||
#endif
|
|
||||||
|
|
||||||
#ifdef QT_STATICPLUGIN
|
#ifdef QT_STATICPLUGIN
|
||||||
Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin);
|
Q_IMPORT_PLUGIN(QWindowsIntegrationPlugin);
|
||||||
#endif
|
#endif
|
||||||
|
@ -834,18 +831,7 @@ void GMainWindow::ConnectMenuEvents() {
|
||||||
}
|
}
|
||||||
});
|
});
|
||||||
connect_menu(ui->action_Capture_Screenshot, &GMainWindow::OnCaptureScreenshot);
|
connect_menu(ui->action_Capture_Screenshot, &GMainWindow::OnCaptureScreenshot);
|
||||||
|
connect_menu(ui->action_Dump_Video, &GMainWindow::OnDumpVideo);
|
||||||
#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
|
|
||||||
connect_menu(ui->action_Dump_Video, [this] {
|
|
||||||
if (ui->action_Dump_Video->isChecked()) {
|
|
||||||
OnStartVideoDumping();
|
|
||||||
} else {
|
|
||||||
OnStopVideoDumping();
|
|
||||||
}
|
|
||||||
});
|
|
||||||
#else
|
|
||||||
ui->action_Dump_Video->setEnabled(false);
|
|
||||||
#endif
|
|
||||||
|
|
||||||
// Help
|
// Help
|
||||||
connect_menu(ui->action_Open_Citra_Folder, &GMainWindow::OnOpenCitraFolder);
|
connect_menu(ui->action_Open_Citra_Folder, &GMainWindow::OnOpenCitraFolder);
|
||||||
|
@ -1203,15 +1189,7 @@ void GMainWindow::BootGame(const QString& filename) {
|
||||||
}
|
}
|
||||||
|
|
||||||
if (video_dumping_on_start) {
|
if (video_dumping_on_start) {
|
||||||
Layout::FramebufferLayout layout{Layout::FrameLayoutFromResolutionScale(
|
StartVideoDumping(video_dumping_path);
|
||||||
VideoCore::g_renderer->GetResolutionScaleFactor())};
|
|
||||||
if (!system.VideoDumper().StartDumping(video_dumping_path.toStdString(), layout)) {
|
|
||||||
|
|
||||||
QMessageBox::critical(
|
|
||||||
this, tr("Citra"),
|
|
||||||
tr("Could not start video dumping.<br>Refer to the log for details."));
|
|
||||||
ui->action_Dump_Video->setChecked(false);
|
|
||||||
}
|
|
||||||
video_dumping_on_start = false;
|
video_dumping_on_start = false;
|
||||||
video_dumping_path.clear();
|
video_dumping_path.clear();
|
||||||
}
|
}
|
||||||
|
@ -1279,13 +1257,12 @@ void GMainWindow::ShutdownGame() {
|
||||||
HideFullscreen();
|
HideFullscreen();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
|
auto video_dumper = system.GetVideoDumper();
|
||||||
if (system.VideoDumper().IsDumping()) {
|
if (video_dumper && video_dumper->IsDumping()) {
|
||||||
game_shutdown_delayed = true;
|
game_shutdown_delayed = true;
|
||||||
OnStopVideoDumping();
|
OnStopVideoDumping();
|
||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
AllowOSSleep();
|
AllowOSSleep();
|
||||||
|
|
||||||
|
@ -2215,7 +2192,97 @@ void GMainWindow::OnCaptureScreenshot() {
|
||||||
OnStartGame();
|
OnStartGame();
|
||||||
}
|
}
|
||||||
|
|
||||||
#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
|
void GMainWindow::OnDumpVideo() {
|
||||||
|
if (DynamicLibrary::FFmpeg::LoadFFmpeg()) {
|
||||||
|
if (ui->action_Dump_Video->isChecked()) {
|
||||||
|
OnStartVideoDumping();
|
||||||
|
} else {
|
||||||
|
OnStopVideoDumping();
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
ui->action_Dump_Video->setChecked(false);
|
||||||
|
|
||||||
|
QMessageBox message_box;
|
||||||
|
message_box.setWindowTitle(tr("Could not load video dumper"));
|
||||||
|
message_box.setText(
|
||||||
|
tr("FFmpeg could not be loaded. Make sure you have a compatible version installed."
|
||||||
|
#ifdef _WIN32
|
||||||
|
"\n\nTo install FFmpeg to Citra, press Open and select your FFmpeg directory."
|
||||||
|
#endif
|
||||||
|
"\n\nTo view a guide on how to install FFmpeg, press Help."));
|
||||||
|
message_box.setStandardButtons(QMessageBox::Ok | QMessageBox::Help
|
||||||
|
#ifdef _WIN32
|
||||||
|
| QMessageBox::Open
|
||||||
|
#endif
|
||||||
|
);
|
||||||
|
auto result = message_box.exec();
|
||||||
|
if (result == QMessageBox::Help) {
|
||||||
|
QDesktopServices::openUrl(QUrl(QStringLiteral(
|
||||||
|
"https://citra-emu.org/wiki/installing-ffmpeg-for-the-video-dumper/")));
|
||||||
|
#ifdef _WIN32
|
||||||
|
} else if (result == QMessageBox::Open) {
|
||||||
|
OnOpenFFmpeg();
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
#ifdef _WIN32
|
||||||
|
void GMainWindow::OnOpenFFmpeg() {
|
||||||
|
auto filename =
|
||||||
|
QFileDialog::getExistingDirectory(this, tr("Select FFmpeg Directory")).toStdString();
|
||||||
|
if (filename.empty()) {
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
// Check for a bin directory if they chose the FFmpeg root directory.
|
||||||
|
auto bin_dir = filename + DIR_SEP + "bin";
|
||||||
|
if (!FileUtil::Exists(bin_dir)) {
|
||||||
|
// Otherwise, assume the user directly selected the directory containing the DLLs.
|
||||||
|
bin_dir = filename;
|
||||||
|
}
|
||||||
|
|
||||||
|
static const std::array library_names = {
|
||||||
|
DynamicLibrary::DynamicLibrary::GetLibraryName("avcodec", LIBAVCODEC_VERSION_MAJOR),
|
||||||
|
DynamicLibrary::DynamicLibrary::GetLibraryName("avfilter", LIBAVFILTER_VERSION_MAJOR),
|
||||||
|
DynamicLibrary::DynamicLibrary::GetLibraryName("avformat", LIBAVFORMAT_VERSION_MAJOR),
|
||||||
|
DynamicLibrary::DynamicLibrary::GetLibraryName("avutil", LIBAVUTIL_VERSION_MAJOR),
|
||||||
|
DynamicLibrary::DynamicLibrary::GetLibraryName("swresample", LIBSWRESAMPLE_VERSION_MAJOR),
|
||||||
|
};
|
||||||
|
|
||||||
|
for (auto& library_name : library_names) {
|
||||||
|
if (!FileUtil::Exists(bin_dir + DIR_SEP + library_name)) {
|
||||||
|
QMessageBox::critical(this, tr("Citra"),
|
||||||
|
tr("The provided FFmpeg directory is missing %1. Please make "
|
||||||
|
"sure the correct directory was selected.")
|
||||||
|
.arg(QString::fromStdString(library_name)));
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
std::atomic<bool> success(true);
|
||||||
|
auto process_file = [&success](u64* num_entries_out, const std::string& directory,
|
||||||
|
const std::string& virtual_name) -> bool {
|
||||||
|
auto file_path = directory + DIR_SEP + virtual_name;
|
||||||
|
if (file_path.ends_with(".dll")) {
|
||||||
|
auto destination_path = FileUtil::GetExeDirectory() + DIR_SEP + virtual_name;
|
||||||
|
if (!FileUtil::Copy(file_path, destination_path)) {
|
||||||
|
success.store(false);
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return true;
|
||||||
|
};
|
||||||
|
FileUtil::ForeachDirectoryEntry(nullptr, bin_dir, process_file);
|
||||||
|
|
||||||
|
if (success.load()) {
|
||||||
|
QMessageBox::information(this, tr("Citra"), tr("FFmpeg has been successfully installed."));
|
||||||
|
} else {
|
||||||
|
QMessageBox::critical(this, tr("Citra"),
|
||||||
|
tr("Installation of FFmpeg failed. Check the log file for details."));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
|
||||||
void GMainWindow::OnStartVideoDumping() {
|
void GMainWindow::OnStartVideoDumping() {
|
||||||
DumpingDialog dialog(this);
|
DumpingDialog dialog(this);
|
||||||
if (dialog.exec() != QDialog::DialogCode::Accepted) {
|
if (dialog.exec() != QDialog::DialogCode::Accepted) {
|
||||||
|
@ -2224,20 +2291,28 @@ void GMainWindow::OnStartVideoDumping() {
|
||||||
}
|
}
|
||||||
const auto path = dialog.GetFilePath();
|
const auto path = dialog.GetFilePath();
|
||||||
if (emulation_running) {
|
if (emulation_running) {
|
||||||
Layout::FramebufferLayout layout{Layout::FrameLayoutFromResolutionScale(
|
StartVideoDumping(path);
|
||||||
VideoCore::g_renderer->GetResolutionScaleFactor())};
|
|
||||||
if (!system.VideoDumper().StartDumping(path.toStdString(), layout)) {
|
|
||||||
QMessageBox::critical(
|
|
||||||
this, tr("Citra"),
|
|
||||||
tr("Could not start video dumping.<br>Refer to the log for details."));
|
|
||||||
ui->action_Dump_Video->setChecked(false);
|
|
||||||
}
|
|
||||||
} else {
|
} else {
|
||||||
video_dumping_on_start = true;
|
video_dumping_on_start = true;
|
||||||
video_dumping_path = path;
|
video_dumping_path = path;
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
void GMainWindow::StartVideoDumping(const QString& path) {
|
||||||
|
Layout::FramebufferLayout layout{
|
||||||
|
Layout::FrameLayoutFromResolutionScale(VideoCore::g_renderer->GetResolutionScaleFactor())};
|
||||||
|
|
||||||
|
auto dumper = std::make_shared<VideoDumper::FFmpegBackend>();
|
||||||
|
if (dumper->StartDumping(path.toStdString(), layout)) {
|
||||||
|
system.RegisterVideoDumper(dumper);
|
||||||
|
} else {
|
||||||
|
QMessageBox::critical(
|
||||||
|
this, tr("Citra"),
|
||||||
|
tr("Could not start video dumping.<br>Refer to the log for details."));
|
||||||
|
ui->action_Dump_Video->setChecked(false);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
void GMainWindow::OnStopVideoDumping() {
|
void GMainWindow::OnStopVideoDumping() {
|
||||||
ui->action_Dump_Video->setChecked(false);
|
ui->action_Dump_Video->setChecked(false);
|
||||||
|
|
||||||
|
@ -2245,14 +2320,15 @@ void GMainWindow::OnStopVideoDumping() {
|
||||||
video_dumping_on_start = false;
|
video_dumping_on_start = false;
|
||||||
video_dumping_path.clear();
|
video_dumping_path.clear();
|
||||||
} else {
|
} else {
|
||||||
const bool was_dumping = system.VideoDumper().IsDumping();
|
auto dumper = system.GetVideoDumper();
|
||||||
if (!was_dumping)
|
if (!dumper || !dumper->IsDumping()) {
|
||||||
return;
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
game_paused_for_dumping = emu_thread->IsRunning();
|
game_paused_for_dumping = emu_thread->IsRunning();
|
||||||
OnPauseGame();
|
OnPauseGame();
|
||||||
|
|
||||||
auto future = QtConcurrent::run([this] { system.VideoDumper().StopDumping(); });
|
auto future = QtConcurrent::run([dumper] { dumper->StopDumping(); });
|
||||||
auto* future_watcher = new QFutureWatcher<void>(this);
|
auto* future_watcher = new QFutureWatcher<void>(this);
|
||||||
connect(future_watcher, &QFutureWatcher<void>::finished, this, [this] {
|
connect(future_watcher, &QFutureWatcher<void>::finished, this, [this] {
|
||||||
if (game_shutdown_delayed) {
|
if (game_shutdown_delayed) {
|
||||||
|
@ -2266,7 +2342,6 @@ void GMainWindow::OnStopVideoDumping() {
|
||||||
future_watcher->setFuture(future);
|
future_watcher->setFuture(future);
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
#endif
|
|
||||||
|
|
||||||
void GMainWindow::UpdateStatusBar() {
|
void GMainWindow::UpdateStatusBar() {
|
||||||
if (!emu_thread) [[unlikely]] {
|
if (!emu_thread) [[unlikely]] {
|
||||||
|
|
|
@ -242,10 +242,13 @@ private slots:
|
||||||
void OnCloseMovie();
|
void OnCloseMovie();
|
||||||
void OnSaveMovie();
|
void OnSaveMovie();
|
||||||
void OnCaptureScreenshot();
|
void OnCaptureScreenshot();
|
||||||
#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
|
void OnDumpVideo();
|
||||||
void OnStartVideoDumping();
|
#ifdef _WIN32
|
||||||
void OnStopVideoDumping();
|
void OnOpenFFmpeg();
|
||||||
#endif
|
#endif
|
||||||
|
void OnStartVideoDumping();
|
||||||
|
void StartVideoDumping(const QString& path);
|
||||||
|
void OnStopVideoDumping();
|
||||||
void OnCoreError(Core::System::ResultStatus, std::string);
|
void OnCoreError(Core::System::ResultStatus, std::string);
|
||||||
/// Called whenever a user selects Help->About Citra
|
/// Called whenever a user selects Help->About Citra
|
||||||
void OnMenuAboutCitra();
|
void OnMenuAboutCitra();
|
||||||
|
|
|
@ -65,6 +65,12 @@ add_library(citra_common STATIC
|
||||||
common_precompiled_headers.h
|
common_precompiled_headers.h
|
||||||
common_types.h
|
common_types.h
|
||||||
construct.h
|
construct.h
|
||||||
|
dynamic_library/dynamic_library.cpp
|
||||||
|
dynamic_library/dynamic_library.h
|
||||||
|
dynamic_library/fdk-aac.cpp
|
||||||
|
dynamic_library/fdk-aac.h
|
||||||
|
dynamic_library/ffmpeg.cpp
|
||||||
|
dynamic_library/ffmpeg.h
|
||||||
error.cpp
|
error.cpp
|
||||||
error.h
|
error.h
|
||||||
file_util.cpp
|
file_util.cpp
|
||||||
|
@ -153,7 +159,7 @@ endif()
|
||||||
|
|
||||||
create_target_directory_groups(citra_common)
|
create_target_directory_groups(citra_common)
|
||||||
|
|
||||||
target_link_libraries(citra_common PUBLIC fmt::fmt microprofile Boost::boost Boost::serialization Boost::iostreams)
|
target_link_libraries(citra_common PUBLIC fmt::fmt library-headers microprofile Boost::boost Boost::serialization Boost::iostreams)
|
||||||
target_link_libraries(citra_common PRIVATE libzstd_static)
|
target_link_libraries(citra_common PRIVATE libzstd_static)
|
||||||
set_target_properties(citra_common PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO})
|
set_target_properties(citra_common PROPERTIES INTERPROCEDURAL_OPTIMIZATION ${ENABLE_LTO})
|
||||||
|
|
||||||
|
|
87
src/common/dynamic_library/dynamic_library.cpp
Normal file
|
@ -0,0 +1,87 @@
|
||||||
|
// Copyright 2023 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include <fmt/format.h>
|
||||||
|
#if defined(_WIN32)
|
||||||
|
#include <windows.h>
|
||||||
|
#else
|
||||||
|
#include <dlfcn.h>
|
||||||
|
#endif
|
||||||
|
#include "dynamic_library.h"
|
||||||
|
|
||||||
|
namespace DynamicLibrary {
|
||||||
|
|
||||||
|
DynamicLibrary::DynamicLibrary(std::string_view name, int major, int minor) {
|
||||||
|
auto full_name = GetLibraryName(name, major, minor);
|
||||||
|
#if defined(_WIN32)
|
||||||
|
handle = reinterpret_cast<void*>(LoadLibraryA(full_name.c_str()));
|
||||||
|
if (!handle) {
|
||||||
|
DWORD error_message_id = GetLastError();
|
||||||
|
LPSTR message_buffer = nullptr;
|
||||||
|
size_t size =
|
||||||
|
FormatMessageA(FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM |
|
||||||
|
FORMAT_MESSAGE_IGNORE_INSERTS,
|
||||||
|
nullptr, error_message_id, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
|
||||||
|
reinterpret_cast<LPSTR>(&message_buffer), 0, nullptr);
|
||||||
|
std::string message(message_buffer, size);
|
||||||
|
load_error = message;
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
handle = dlopen(full_name.c_str(), RTLD_LAZY);
|
||||||
|
if (!handle) {
|
||||||
|
load_error = dlerror();
|
||||||
|
}
|
||||||
|
#endif // defined(_WIN32)
|
||||||
|
}
|
||||||
|
|
||||||
|
DynamicLibrary::~DynamicLibrary() {
|
||||||
|
if (handle) {
|
||||||
|
#if defined(_WIN32)
|
||||||
|
FreeLibrary(reinterpret_cast<HMODULE>(handle));
|
||||||
|
#else
|
||||||
|
dlclose(handle);
|
||||||
|
#endif // defined(_WIN32)
|
||||||
|
handle = nullptr;
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
void* DynamicLibrary::GetRawSymbol(std::string_view name) {
|
||||||
|
#if defined(_WIN32)
|
||||||
|
return reinterpret_cast<void*>(GetProcAddress(reinterpret_cast<HMODULE>(handle), name.data()));
|
||||||
|
#else
|
||||||
|
return dlsym(handle, name.data());
|
||||||
|
#endif // defined(_WIN32)
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string DynamicLibrary::GetLibraryName(std::string_view name, int major, int minor) {
|
||||||
|
#if defined(_WIN32)
|
||||||
|
if (major >= 0 && minor >= 0) {
|
||||||
|
return fmt::format("{}-{}-{}.dll", name, major, minor);
|
||||||
|
} else if (major >= 0) {
|
||||||
|
return fmt::format("{}-{}.dll", name, major);
|
||||||
|
} else {
|
||||||
|
return fmt::format("{}.dll", name);
|
||||||
|
}
|
||||||
|
#elif defined(__APPLE__)
|
||||||
|
auto prefix = name.starts_with("lib") ? "" : "lib";
|
||||||
|
if (major >= 0 && minor >= 0) {
|
||||||
|
return fmt::format("{}{}.{}.{}.dylib", prefix, name, major, minor);
|
||||||
|
} else if (major >= 0) {
|
||||||
|
return fmt::format("{}{}.{}.dylib", prefix, name, major);
|
||||||
|
} else {
|
||||||
|
return fmt::format("{}{}.dylib", prefix, name);
|
||||||
|
}
|
||||||
|
#else
|
||||||
|
auto prefix = name.starts_with("lib") ? "" : "lib";
|
||||||
|
if (major >= 0 && minor >= 0) {
|
||||||
|
return fmt::format("{}{}.so.{}.{}", prefix, name, major, minor);
|
||||||
|
} else if (major >= 0) {
|
||||||
|
return fmt::format("{}{}.so.{}", prefix, name, major);
|
||||||
|
} else {
|
||||||
|
return fmt::format("{}{}.so", prefix, name);
|
||||||
|
}
|
||||||
|
#endif
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace DynamicLibrary
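For reference, a minimal sketch (not part of this change) of what the name resolution above produces; the major version 60 is purely illustrative:

#include <string>
#include "common/dynamic_library/dynamic_library.h"

// Resolves to "avcodec-60.dll" on Windows, "libavcodec.60.dylib" on macOS,
// and "libavcodec.so.60" on other platforms.
std::string ExampleAvcodecName() {
    return DynamicLibrary::DynamicLibrary::GetLibraryName("avcodec", 60);
}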
|
39
src/common/dynamic_library/dynamic_library.h
Normal file
|
@ -0,0 +1,39 @@
|
||||||
|
// Copyright 2023 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
#include <string>
|
||||||
|
#include "common/common_types.h"
|
||||||
|
|
||||||
|
namespace DynamicLibrary {
|
||||||
|
|
||||||
|
class DynamicLibrary {
|
||||||
|
public:
|
||||||
|
explicit DynamicLibrary(std::string_view name, int major = -1, int minor = -1);
|
||||||
|
~DynamicLibrary();
|
||||||
|
|
||||||
|
bool IsLoaded() {
|
||||||
|
return handle != nullptr;
|
||||||
|
}
|
||||||
|
|
||||||
|
std::string_view GetLoadError() {
|
||||||
|
return load_error;
|
||||||
|
}
|
||||||
|
|
||||||
|
template <typename T>
|
||||||
|
T GetSymbol(std::string_view name) {
|
||||||
|
return reinterpret_cast<T>(GetRawSymbol(name));
|
||||||
|
}
|
||||||
|
|
||||||
|
static std::string GetLibraryName(std::string_view name, int major = -1, int minor = -1);
|
||||||
|
|
||||||
|
private:
|
||||||
|
void* GetRawSymbol(std::string_view name);
|
||||||
|
|
||||||
|
void* handle;
|
||||||
|
std::string load_error;
|
||||||
|
};
|
||||||
|
|
||||||
|
} // namespace DynamicLibrary
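A minimal usage sketch of the wrapper declared above; the library name "example", its version, and the "example_init" symbol are hypothetical and only illustrate the intended call pattern:

#include "common/dynamic_library/dynamic_library.h"
#include "common/logging/log.h"

using example_init_func = int (*)();

// Loads a hypothetical libexample (e.g. "example-1.dll" / "libexample.so.1")
// and resolves a single hypothetical symbol from it.
bool TryLoadExample() {
    DynamicLibrary::DynamicLibrary library("example", 1);
    if (!library.IsLoaded()) {
        LOG_WARNING(Common, "Could not load libexample: {}", library.GetLoadError());
        return false;
    }
    const auto example_init = library.GetSymbol<example_init_func>("example_init");
    return example_init != nullptr && example_init() == 0;
}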
|
54
src/common/dynamic_library/fdk-aac.cpp
Normal file
|
@ -0,0 +1,54 @@
|
||||||
|
// Copyright 2023 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include "common/dynamic_library/fdk-aac.h"
|
||||||
|
#include "common/logging/log.h"
|
||||||
|
|
||||||
|
namespace DynamicLibrary::FdkAac {
|
||||||
|
|
||||||
|
aacDecoder_GetLibInfo_func aacDecoder_GetLibInfo;
|
||||||
|
aacDecoder_Open_func aacDecoder_Open;
|
||||||
|
aacDecoder_Close_func aacDecoder_Close;
|
||||||
|
aacDecoder_SetParam_func aacDecoder_SetParam;
|
||||||
|
aacDecoder_GetStreamInfo_func aacDecoder_GetStreamInfo;
|
||||||
|
aacDecoder_DecodeFrame_func aacDecoder_DecodeFrame;
|
||||||
|
aacDecoder_Fill_func aacDecoder_Fill;
|
||||||
|
|
||||||
|
static std::unique_ptr<DynamicLibrary> fdk_aac;
|
||||||
|
|
||||||
|
#define LOAD_SYMBOL(library, name) \
|
||||||
|
any_failed = any_failed || (name = library->GetSymbol<name##_func>(#name)) == nullptr
|
||||||
|
|
||||||
|
bool LoadFdkAac() {
|
||||||
|
if (fdk_aac) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
fdk_aac = std::make_unique<DynamicLibrary>("fdk-aac", 2);
|
||||||
|
if (!fdk_aac->IsLoaded()) {
|
||||||
|
LOG_WARNING(Common, "Could not dynamically load libfdk-aac: {}", fdk_aac->GetLoadError());
|
||||||
|
fdk_aac.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto any_failed = false;
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_GetLibInfo);
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_Open);
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_Close);
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_SetParam);
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_GetStreamInfo);
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_DecodeFrame);
|
||||||
|
LOAD_SYMBOL(fdk_aac, aacDecoder_Fill);
|
||||||
|
|
||||||
|
if (any_failed) {
|
||||||
|
LOG_WARNING(Common, "Could not find all required functions in libfdk-aac.");
|
||||||
|
fdk_aac.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(Common, "Successfully loaded libfdk-aac.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace DynamicLibrary::FdkAac
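A minimal caller-side sketch (not part of this change): every use of the aacDecoder_* pointers above must be preceded by a successful LoadFdkAac(); the transport type and layer count passed here are only illustrative:

#include "common/dynamic_library/fdk-aac.h"

bool ExampleOpenAndCloseDecoder() {
    if (!DynamicLibrary::FdkAac::LoadFdkAac()) {
        return false; // libfdk-aac is not installed; AAC decoding stays unavailable.
    }
    // Illustrative parameters: ADTS transport stream, one layer.
    HANDLE_AACDECODER decoder = DynamicLibrary::FdkAac::aacDecoder_Open(TT_MP4_ADTS, 1);
    if (decoder == nullptr) {
        return false;
    }
    DynamicLibrary::FdkAac::aacDecoder_Close(decoder);
    return true;
}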
|
37
src/common/dynamic_library/fdk-aac.h
Normal file
|
@ -0,0 +1,37 @@
|
||||||
|
// Copyright 2023 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
#include <fdk-aac/aacdecoder_lib.h>
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "common/common_types.h"
|
||||||
|
#include "common/dynamic_library/dynamic_library.h"
|
||||||
|
|
||||||
|
namespace DynamicLibrary::FdkAac {
|
||||||
|
|
||||||
|
typedef INT (*aacDecoder_GetLibInfo_func)(LIB_INFO* info);
|
||||||
|
typedef HANDLE_AACDECODER (*aacDecoder_Open_func)(TRANSPORT_TYPE transportFmt, UINT nrOfLayers);
|
||||||
|
typedef void (*aacDecoder_Close_func)(HANDLE_AACDECODER self);
|
||||||
|
typedef AAC_DECODER_ERROR (*aacDecoder_SetParam_func)(const HANDLE_AACDECODER self,
|
||||||
|
const AACDEC_PARAM param, const INT value);
|
||||||
|
typedef CStreamInfo* (*aacDecoder_GetStreamInfo_func)(HANDLE_AACDECODER self);
|
||||||
|
typedef AAC_DECODER_ERROR (*aacDecoder_DecodeFrame_func)(HANDLE_AACDECODER self, INT_PCM* pTimeData,
|
||||||
|
const INT timeDataSize, const UINT flags);
|
||||||
|
typedef AAC_DECODER_ERROR (*aacDecoder_Fill_func)(HANDLE_AACDECODER self, UCHAR* pBuffer[],
|
||||||
|
const UINT bufferSize[], UINT* bytesValid);
|
||||||
|
|
||||||
|
extern aacDecoder_GetLibInfo_func aacDecoder_GetLibInfo;
|
||||||
|
extern aacDecoder_Open_func aacDecoder_Open;
|
||||||
|
extern aacDecoder_Close_func aacDecoder_Close;
|
||||||
|
extern aacDecoder_SetParam_func aacDecoder_SetParam;
|
||||||
|
extern aacDecoder_GetStreamInfo_func aacDecoder_GetStreamInfo;
|
||||||
|
extern aacDecoder_DecodeFrame_func aacDecoder_DecodeFrame;
|
||||||
|
extern aacDecoder_Fill_func aacDecoder_Fill;
|
||||||
|
|
||||||
|
bool LoadFdkAac();
|
||||||
|
|
||||||
|
} // namespace DynamicLibrary::FdkAac
|
390
src/common/dynamic_library/ffmpeg.cpp
Normal file
|
@ -0,0 +1,390 @@
|
||||||
|
// Copyright 2023 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#include "common/dynamic_library/ffmpeg.h"
|
||||||
|
#include "common/logging/log.h"
|
||||||
|
|
||||||
|
namespace DynamicLibrary::FFmpeg {
|
||||||
|
|
||||||
|
// avutil
|
||||||
|
av_buffer_ref_func av_buffer_ref;
|
||||||
|
av_buffer_unref_func av_buffer_unref;
|
||||||
|
av_d2q_func av_d2q;
|
||||||
|
av_dict_count_func av_dict_count;
|
||||||
|
av_dict_get_func av_dict_get;
|
||||||
|
av_dict_get_string_func av_dict_get_string;
|
||||||
|
av_dict_set_func av_dict_set;
|
||||||
|
av_frame_alloc_func av_frame_alloc;
|
||||||
|
av_frame_free_func av_frame_free;
|
||||||
|
av_frame_unref_func av_frame_unref;
|
||||||
|
av_freep_func av_freep;
|
||||||
|
av_get_bytes_per_sample_func av_get_bytes_per_sample;
|
||||||
|
av_get_pix_fmt_func av_get_pix_fmt;
|
||||||
|
av_get_pix_fmt_name_func av_get_pix_fmt_name;
|
||||||
|
av_get_sample_fmt_name_func av_get_sample_fmt_name;
|
||||||
|
av_hwdevice_ctx_create_func av_hwdevice_ctx_create;
|
||||||
|
av_hwdevice_get_hwframe_constraints_func av_hwdevice_get_hwframe_constraints;
|
||||||
|
av_hwframe_constraints_free_func av_hwframe_constraints_free;
|
||||||
|
av_hwframe_ctx_alloc_func av_hwframe_ctx_alloc;
|
||||||
|
av_hwframe_ctx_init_func av_hwframe_ctx_init;
|
||||||
|
av_hwframe_get_buffer_func av_hwframe_get_buffer;
|
||||||
|
av_hwframe_transfer_data_func av_hwframe_transfer_data;
|
||||||
|
av_int_list_length_for_size_func av_int_list_length_for_size;
|
||||||
|
#if LIBAVCODEC_VERSION_MAJOR >= 59
|
||||||
|
av_opt_child_class_iterate_func av_opt_child_class_iterate;
|
||||||
|
#else
|
||||||
|
av_opt_child_class_next_func av_opt_child_class_next;
|
||||||
|
#endif
|
||||||
|
av_opt_next_func av_opt_next;
|
||||||
|
av_opt_set_bin_func av_opt_set_bin;
|
||||||
|
av_pix_fmt_desc_get_func av_pix_fmt_desc_get;
|
||||||
|
av_pix_fmt_desc_next_func av_pix_fmt_desc_next;
|
||||||
|
av_sample_fmt_is_planar_func av_sample_fmt_is_planar;
|
||||||
|
av_samples_alloc_array_and_samples_func av_samples_alloc_array_and_samples;
|
||||||
|
av_strdup_func av_strdup;
|
||||||
|
avutil_version_func avutil_version;
|
||||||
|
|
||||||
|
// avcodec
|
||||||
|
av_codec_is_encoder_func av_codec_is_encoder;
|
||||||
|
av_codec_iterate_func av_codec_iterate;
|
||||||
|
av_init_packet_func av_init_packet;
|
||||||
|
av_packet_alloc_func av_packet_alloc;
|
||||||
|
av_packet_free_func av_packet_free;
|
||||||
|
av_packet_rescale_ts_func av_packet_rescale_ts;
|
||||||
|
av_parser_close_func av_parser_close;
|
||||||
|
av_parser_init_func av_parser_init;
|
||||||
|
av_parser_parse2_func av_parser_parse2;
|
||||||
|
avcodec_alloc_context3_func avcodec_alloc_context3;
|
||||||
|
avcodec_descriptor_next_func avcodec_descriptor_next;
|
||||||
|
avcodec_find_decoder_func avcodec_find_decoder;
|
||||||
|
avcodec_find_encoder_by_name_func avcodec_find_encoder_by_name;
|
||||||
|
avcodec_free_context_func avcodec_free_context;
|
||||||
|
avcodec_get_class_func avcodec_get_class;
|
||||||
|
avcodec_get_hw_config_func avcodec_get_hw_config;
|
||||||
|
avcodec_open2_func avcodec_open2;
|
||||||
|
avcodec_parameters_from_context_func avcodec_parameters_from_context;
|
||||||
|
avcodec_receive_frame_func avcodec_receive_frame;
|
||||||
|
avcodec_receive_packet_func avcodec_receive_packet;
|
||||||
|
avcodec_send_frame_func avcodec_send_frame;
|
||||||
|
avcodec_send_packet_func avcodec_send_packet;
|
||||||
|
avcodec_version_func avcodec_version;
|
||||||
|
|
||||||
|
// avfilter
|
||||||
|
av_buffersink_get_frame_func av_buffersink_get_frame;
|
||||||
|
av_buffersrc_add_frame_func av_buffersrc_add_frame;
|
||||||
|
avfilter_get_by_name_func avfilter_get_by_name;
|
||||||
|
avfilter_graph_alloc_func avfilter_graph_alloc;
|
||||||
|
avfilter_graph_config_func avfilter_graph_config;
|
||||||
|
avfilter_graph_create_filter_func avfilter_graph_create_filter;
|
||||||
|
avfilter_graph_free_func avfilter_graph_free;
|
||||||
|
avfilter_graph_parse_ptr_func avfilter_graph_parse_ptr;
|
||||||
|
avfilter_inout_alloc_func avfilter_inout_alloc;
|
||||||
|
avfilter_inout_free_func avfilter_inout_free;
|
||||||
|
avfilter_version_func avfilter_version;
|
||||||
|
|
||||||
|
// avformat
|
||||||
|
av_guess_format_func av_guess_format;
|
||||||
|
av_interleaved_write_frame_func av_interleaved_write_frame;
|
||||||
|
av_muxer_iterate_func av_muxer_iterate;
|
||||||
|
av_write_trailer_func av_write_trailer;
|
||||||
|
avformat_alloc_output_context2_func avformat_alloc_output_context2;
|
||||||
|
avformat_free_context_func avformat_free_context;
|
||||||
|
avformat_get_class_func avformat_get_class;
|
||||||
|
avformat_network_init_func avformat_network_init;
|
||||||
|
avformat_new_stream_func avformat_new_stream;
|
||||||
|
avformat_query_codec_func avformat_query_codec;
|
||||||
|
avformat_write_header_func avformat_write_header;
|
||||||
|
avformat_version_func avformat_version;
|
||||||
|
avio_closep_func avio_closep;
|
||||||
|
avio_open_func avio_open;
|
||||||
|
|
||||||
|
// swresample
|
||||||
|
#if LIBSWRESAMPLE_VERSION_INT >= AV_VERSION_INT(4, 5, 100)
|
||||||
|
swr_alloc_set_opts2_func swr_alloc_set_opts2;
|
||||||
|
#else
|
||||||
|
swr_alloc_set_opts_func swr_alloc_set_opts;
|
||||||
|
#endif
|
||||||
|
swr_convert_func swr_convert;
|
||||||
|
swr_free_func swr_free;
|
||||||
|
swr_init_func swr_init;
|
||||||
|
swresample_version_func swresample_version;
|
||||||
|
|
||||||
|
static std::unique_ptr<DynamicLibrary> avutil;
|
||||||
|
static std::unique_ptr<DynamicLibrary> avcodec;
|
||||||
|
static std::unique_ptr<DynamicLibrary> avfilter;
|
||||||
|
static std::unique_ptr<DynamicLibrary> avformat;
|
||||||
|
static std::unique_ptr<DynamicLibrary> swresample;
|
||||||
|
|
||||||
|
#define LOAD_SYMBOL(library, name) \
|
||||||
|
any_failed = any_failed || (name = library->GetSymbol<name##_func>(#name)) == nullptr
|
||||||
|
|
||||||
|
static bool LoadAVUtil() {
|
||||||
|
if (avutil) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
avutil = std::make_unique<DynamicLibrary>("avutil", LIBAVUTIL_VERSION_MAJOR);
|
||||||
|
if (!avutil->IsLoaded()) {
|
||||||
|
LOG_WARNING(Common, "Could not dynamically load libavutil: {}", avutil->GetLoadError());
|
||||||
|
avutil.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto any_failed = false;
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avutil, avutil_version);
|
||||||
|
|
||||||
|
auto major_version = AV_VERSION_MAJOR(avutil_version());
|
||||||
|
if (major_version != LIBAVUTIL_VERSION_MAJOR) {
|
||||||
|
LOG_WARNING(Common, "libavutil version {} does not match supported version {}.",
|
||||||
|
major_version, LIBAVUTIL_VERSION_MAJOR);
|
||||||
|
avutil.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avutil, av_buffer_ref);
|
||||||
|
LOAD_SYMBOL(avutil, av_buffer_unref);
|
||||||
|
LOAD_SYMBOL(avutil, av_d2q);
|
||||||
|
LOAD_SYMBOL(avutil, av_dict_count);
|
||||||
|
LOAD_SYMBOL(avutil, av_dict_get);
|
||||||
|
LOAD_SYMBOL(avutil, av_dict_get_string);
|
||||||
|
LOAD_SYMBOL(avutil, av_dict_set);
|
||||||
|
LOAD_SYMBOL(avutil, av_frame_alloc);
|
||||||
|
LOAD_SYMBOL(avutil, av_frame_free);
|
||||||
|
LOAD_SYMBOL(avutil, av_frame_unref);
|
||||||
|
LOAD_SYMBOL(avutil, av_freep);
|
||||||
|
LOAD_SYMBOL(avutil, av_get_bytes_per_sample);
|
||||||
|
LOAD_SYMBOL(avutil, av_get_pix_fmt);
|
||||||
|
LOAD_SYMBOL(avutil, av_get_pix_fmt_name);
|
||||||
|
LOAD_SYMBOL(avutil, av_get_sample_fmt_name);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwdevice_ctx_create);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwdevice_get_hwframe_constraints);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwframe_constraints_free);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwframe_ctx_alloc);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwframe_ctx_init);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwframe_get_buffer);
|
||||||
|
LOAD_SYMBOL(avutil, av_hwframe_transfer_data);
|
||||||
|
LOAD_SYMBOL(avutil, av_int_list_length_for_size);
|
||||||
|
#if LIBAVCODEC_VERSION_MAJOR >= 59
|
||||||
|
LOAD_SYMBOL(avutil, av_opt_child_class_iterate);
|
||||||
|
#else
|
||||||
|
LOAD_SYMBOL(avutil, av_opt_child_class_next);
|
||||||
|
#endif
|
||||||
|
LOAD_SYMBOL(avutil, av_opt_next);
|
||||||
|
LOAD_SYMBOL(avutil, av_opt_set_bin);
|
||||||
|
LOAD_SYMBOL(avutil, av_pix_fmt_desc_get);
|
||||||
|
LOAD_SYMBOL(avutil, av_pix_fmt_desc_next);
|
||||||
|
LOAD_SYMBOL(avutil, av_sample_fmt_is_planar);
|
||||||
|
LOAD_SYMBOL(avutil, av_samples_alloc_array_and_samples);
|
||||||
|
LOAD_SYMBOL(avutil, av_strdup);
|
||||||
|
|
||||||
|
if (any_failed) {
|
||||||
|
LOG_WARNING(Common, "Could not find all required functions in libavutil.");
|
||||||
|
avutil.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(Common, "Successfully loaded libavutil.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool LoadAVCodec() {
|
||||||
|
if (avcodec) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
avcodec = std::make_unique<DynamicLibrary>("avcodec", LIBAVCODEC_VERSION_MAJOR);
|
||||||
|
if (!avcodec->IsLoaded()) {
|
||||||
|
LOG_WARNING(Common, "Could not dynamically load libavcodec: {}", avcodec->GetLoadError());
|
||||||
|
avcodec.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto any_failed = false;
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_version);
|
||||||
|
|
||||||
|
auto major_version = AV_VERSION_MAJOR(avcodec_version());
|
||||||
|
if (major_version != LIBAVCODEC_VERSION_MAJOR) {
|
||||||
|
LOG_WARNING(Common, "libavcodec version {} does not match supported version {}.",
|
||||||
|
major_version, LIBAVCODEC_VERSION_MAJOR);
|
||||||
|
avcodec.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avcodec, av_codec_is_encoder);
|
||||||
|
LOAD_SYMBOL(avcodec, av_codec_iterate);
|
||||||
|
LOAD_SYMBOL(avcodec, av_init_packet);
|
||||||
|
LOAD_SYMBOL(avcodec, av_packet_alloc);
|
||||||
|
LOAD_SYMBOL(avcodec, av_packet_free);
|
||||||
|
LOAD_SYMBOL(avcodec, av_packet_rescale_ts);
|
||||||
|
LOAD_SYMBOL(avcodec, av_parser_close);
|
||||||
|
LOAD_SYMBOL(avcodec, av_parser_init);
|
||||||
|
LOAD_SYMBOL(avcodec, av_parser_parse2);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_alloc_context3);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_descriptor_next);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_find_decoder);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_find_encoder_by_name);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_free_context);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_get_class);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_get_hw_config);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_open2);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_parameters_from_context);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_receive_frame);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_receive_packet);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_send_frame);
|
||||||
|
LOAD_SYMBOL(avcodec, avcodec_send_packet);
|
||||||
|
|
||||||
|
if (any_failed) {
|
||||||
|
LOG_WARNING(Common, "Could not find all required functions in libavcodec.");
|
||||||
|
avcodec.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(Common, "Successfully loaded libavcodec.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool LoadAVFilter() {
|
||||||
|
if (avfilter) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
avfilter = std::make_unique<DynamicLibrary>("avfilter", LIBAVFILTER_VERSION_MAJOR);
|
||||||
|
if (!avfilter->IsLoaded()) {
|
||||||
|
LOG_WARNING(Common, "Could not dynamically load libavfilter: {}", avfilter->GetLoadError());
|
||||||
|
avfilter.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto any_failed = false;
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_version);
|
||||||
|
|
||||||
|
auto major_version = AV_VERSION_MAJOR(avfilter_version());
|
||||||
|
if (major_version != LIBAVFILTER_VERSION_MAJOR) {
|
||||||
|
LOG_WARNING(Common, "libavfilter version {} does not match supported version {}.",
|
||||||
|
major_version, LIBAVFILTER_VERSION_MAJOR);
|
||||||
|
avfilter.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avfilter, av_buffersink_get_frame);
|
||||||
|
LOAD_SYMBOL(avfilter, av_buffersrc_add_frame);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_get_by_name);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_graph_alloc);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_graph_config);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_graph_create_filter);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_graph_free);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_graph_parse_ptr);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_inout_alloc);
|
||||||
|
LOAD_SYMBOL(avfilter, avfilter_inout_free);
|
||||||
|
|
||||||
|
if (any_failed) {
|
||||||
|
LOG_WARNING(Common, "Could not find all required functions in libavfilter.");
|
||||||
|
avfilter.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(Common, "Successfully loaded libavfilter.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool LoadAVFormat() {
|
||||||
|
if (avformat) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
avformat = std::make_unique<DynamicLibrary>("avformat", LIBAVFORMAT_VERSION_MAJOR);
|
||||||
|
if (!avformat->IsLoaded()) {
|
||||||
|
LOG_WARNING(Common, "Could not dynamically load libavformat: {}", avformat->GetLoadError());
|
||||||
|
avformat.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto any_failed = false;
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avformat, avformat_version);
|
||||||
|
|
||||||
|
auto major_version = AV_VERSION_MAJOR(avformat_version());
|
||||||
|
if (major_version != LIBAVFORMAT_VERSION_MAJOR) {
|
||||||
|
LOG_WARNING(Common, "libavformat version {} does not match supported version {}.",
|
||||||
|
major_version, LIBAVFORMAT_VERSION_MAJOR);
|
||||||
|
avformat.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOAD_SYMBOL(avformat, av_guess_format);
|
||||||
|
LOAD_SYMBOL(avformat, av_interleaved_write_frame);
|
||||||
|
LOAD_SYMBOL(avformat, av_muxer_iterate);
|
||||||
|
LOAD_SYMBOL(avformat, av_write_trailer);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_alloc_output_context2);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_free_context);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_get_class);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_network_init);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_new_stream);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_query_codec);
|
||||||
|
LOAD_SYMBOL(avformat, avformat_write_header);
|
||||||
|
LOAD_SYMBOL(avformat, avio_closep);
|
||||||
|
LOAD_SYMBOL(avformat, avio_open);
|
||||||
|
|
||||||
|
if (any_failed) {
|
||||||
|
LOG_WARNING(Common, "Could not find all required functions in libavformat.");
|
||||||
|
avformat.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(Common, "Successfully loaded libavformat.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
static bool LoadSWResample() {
|
||||||
|
if (swresample) {
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
swresample = std::make_unique<DynamicLibrary>("swresample", LIBSWRESAMPLE_VERSION_MAJOR);
|
||||||
|
if (!swresample->IsLoaded()) {
|
||||||
|
LOG_WARNING(Common, "Could not dynamically load libswresample: {}",
|
||||||
|
swresample->GetLoadError());
|
||||||
|
swresample.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
auto any_failed = false;
|
||||||
|
|
||||||
|
LOAD_SYMBOL(swresample, swresample_version);
|
||||||
|
|
||||||
|
auto major_version = AV_VERSION_MAJOR(swresample_version());
|
||||||
|
if (major_version != LIBSWRESAMPLE_VERSION_MAJOR) {
|
||||||
|
LOG_WARNING(Common, "libswresample version {} does not match supported version {}.",
|
||||||
|
major_version, LIBSWRESAMPLE_VERSION_MAJOR);
|
||||||
|
swresample.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
#if LIBSWRESAMPLE_VERSION_INT >= AV_VERSION_INT(4, 5, 100)
|
||||||
|
LOAD_SYMBOL(swresample, swr_alloc_set_opts2);
|
||||||
|
#else
|
||||||
|
LOAD_SYMBOL(swresample, swr_alloc_set_opts);
|
||||||
|
#endif
|
||||||
|
LOAD_SYMBOL(swresample, swr_convert);
|
||||||
|
LOAD_SYMBOL(swresample, swr_free);
|
||||||
|
LOAD_SYMBOL(swresample, swr_init);
|
||||||
|
|
||||||
|
if (any_failed) {
|
||||||
|
LOG_WARNING(Common, "Could not find all required functions in libswresample.");
|
||||||
|
swresample.reset();
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
LOG_INFO(Common, "Successfully loaded libswresample.");
|
||||||
|
return true;
|
||||||
|
}
|
||||||
|
|
||||||
|
bool LoadFFmpeg() {
|
||||||
|
return LoadAVUtil() && LoadAVCodec() && LoadAVFilter() && LoadAVFormat() && LoadSWResample();
|
||||||
|
}
|
||||||
|
|
||||||
|
} // namespace DynamicLibrary::FFmpeg
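A minimal caller-side sketch (not part of this change), mirroring how GMainWindow::OnDumpVideo() gates the dumper earlier in this diff: check LoadFFmpeg() before touching any of the function pointers defined above:

#include "common/dynamic_library/ffmpeg.h"

// Returns the runtime libavcodec version, or 0 when FFmpeg could not be loaded.
unsigned ExampleQueryAvcodecVersion() {
    if (!DynamicLibrary::FFmpeg::LoadFFmpeg()) {
        return 0;
    }
    return DynamicLibrary::FFmpeg::avcodec_version();
}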
|
236
src/common/dynamic_library/ffmpeg.h
Normal file
|
@ -0,0 +1,236 @@
|
||||||
|
// Copyright 2023 Citra Emulator Project
|
||||||
|
// Licensed under GPLv2 or any later version
|
||||||
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
|
#pragma once
|
||||||
|
|
||||||
|
extern "C" {
|
||||||
|
#include <libavcodec/avcodec.h>
|
||||||
|
#include <libavfilter/avfilter.h>
|
||||||
|
#include <libavformat/avformat.h>
|
||||||
|
#include <libavutil/avutil.h>
|
||||||
|
#include <libavutil/ffversion.h>
|
||||||
|
#include <libavutil/opt.h>
|
||||||
|
#include <libavutil/pixdesc.h>
|
||||||
|
#include <libswresample/swresample.h>
|
||||||
|
}
|
||||||
|
|
||||||
|
#include "common/common_types.h"
|
||||||
|
#include "common/dynamic_library/dynamic_library.h"
|
||||||
|
|
||||||
|
namespace DynamicLibrary::FFmpeg {
|
||||||
|
|
||||||
|
// avutil
|
||||||
|
typedef AVBufferRef* (*av_buffer_ref_func)(const AVBufferRef*);
|
||||||
|
typedef void (*av_buffer_unref_func)(AVBufferRef**);
|
||||||
|
typedef AVRational (*av_d2q_func)(double d, int max);
|
||||||
|
typedef int (*av_dict_count_func)(const AVDictionary*);
|
||||||
|
typedef AVDictionaryEntry* (*av_dict_get_func)(const AVDictionary*, const char*,
|
||||||
|
const AVDictionaryEntry*, int);
|
||||||
|
typedef int (*av_dict_get_string_func)(const AVDictionary*, char**, const char, const char);
|
||||||
|
typedef int (*av_dict_set_func)(AVDictionary**, const char*, const char*, int);
|
||||||
|
typedef AVFrame* (*av_frame_alloc_func)();
|
||||||
|
typedef void (*av_frame_free_func)(AVFrame**);
|
||||||
|
typedef void (*av_frame_unref_func)(AVFrame*);
|
||||||
|
typedef void (*av_freep_func)(void*);
|
||||||
|
typedef int (*av_get_bytes_per_sample_func)(AVSampleFormat);
|
||||||
|
typedef AVPixelFormat (*av_get_pix_fmt_func)(const char*);
|
||||||
|
typedef const char* (*av_get_pix_fmt_name_func)(AVPixelFormat);
|
||||||
|
typedef const char* (*av_get_sample_fmt_name_func)(AVSampleFormat);
|
||||||
|
typedef int (*av_hwdevice_ctx_create_func)(AVBufferRef**, AVHWDeviceType, const char*,
|
||||||
|
AVDictionary*, int);
|
||||||
|
typedef AVHWFramesConstraints* (*av_hwdevice_get_hwframe_constraints_func)(AVBufferRef*,
|
||||||
|
const void*);
|
||||||
|
typedef void (*av_hwframe_constraints_free_func)(AVHWFramesConstraints**);
|
||||||
|
typedef AVBufferRef* (*av_hwframe_ctx_alloc_func)(AVBufferRef*);
|
||||||
|
typedef int (*av_hwframe_ctx_init_func)(AVBufferRef*);
|
||||||
|
typedef int (*av_hwframe_get_buffer_func)(AVBufferRef*, AVFrame*, int);
|
||||||
|
typedef int (*av_hwframe_transfer_data_func)(AVFrame*, const AVFrame*, int);
|
||||||
|
typedef unsigned (*av_int_list_length_for_size_func)(unsigned, const void*, uint64_t);
|
||||||
|
#if LIBAVCODEC_VERSION_MAJOR >= 59
|
||||||
|
typedef const AVClass* (*av_opt_child_class_iterate_func)(const AVClass*, void**);
|
||||||
|
#else
|
||||||
|
typedef const AVClass* (*av_opt_child_class_next_func)(const AVClass*, const AVClass*);
|
||||||
|
#endif
|
||||||
|
typedef const AVOption* (*av_opt_next_func)(const void*, const AVOption*);
|
||||||
|
typedef int (*av_opt_set_bin_func)(void*, const char*, const uint8_t*, int, int);
|
||||||
|
typedef const AVPixFmtDescriptor* (*av_pix_fmt_desc_get_func)(AVPixelFormat);
|
||||||
|
typedef const AVPixFmtDescriptor* (*av_pix_fmt_desc_next_func)(const AVPixFmtDescriptor*);
|
||||||
|
typedef int (*av_sample_fmt_is_planar_func)(AVSampleFormat);
|
||||||
|
typedef int (*av_samples_alloc_array_and_samples_func)(uint8_t***, int*, int, int, AVSampleFormat,
|
||||||
|
int);
|
||||||
|
typedef char* (*av_strdup_func)(const char*);
|
||||||
|
typedef unsigned (*avutil_version_func)();
|
||||||
|
|
||||||
|
extern av_buffer_ref_func av_buffer_ref;
|
||||||
|
extern av_buffer_unref_func av_buffer_unref;
|
||||||
|
extern av_d2q_func av_d2q;
|
||||||
|
extern av_dict_count_func av_dict_count;
|
||||||
|
extern av_dict_get_func av_dict_get;
|
||||||
|
extern av_dict_get_string_func av_dict_get_string;
|
||||||
|
extern av_dict_set_func av_dict_set;
|
||||||
|
extern av_frame_alloc_func av_frame_alloc;
|
||||||
|
extern av_frame_free_func av_frame_free;
|
||||||
|
extern av_frame_unref_func av_frame_unref;
|
||||||
|
extern av_freep_func av_freep;
|
||||||
|
extern av_get_bytes_per_sample_func av_get_bytes_per_sample;
|
||||||
|
extern av_get_pix_fmt_func av_get_pix_fmt;
|
||||||
|
extern av_get_pix_fmt_name_func av_get_pix_fmt_name;
|
||||||
|
extern av_get_sample_fmt_name_func av_get_sample_fmt_name;
|
||||||
|
extern av_hwdevice_ctx_create_func av_hwdevice_ctx_create;
|
||||||
|
extern av_hwdevice_get_hwframe_constraints_func av_hwdevice_get_hwframe_constraints;
|
||||||
|
extern av_hwframe_constraints_free_func av_hwframe_constraints_free;
|
||||||
|
extern av_hwframe_ctx_alloc_func av_hwframe_ctx_alloc;
|
||||||
|
extern av_hwframe_ctx_init_func av_hwframe_ctx_init;
|
||||||
|
extern av_hwframe_get_buffer_func av_hwframe_get_buffer;
|
||||||
|
extern av_hwframe_transfer_data_func av_hwframe_transfer_data;
|
||||||
|
extern av_int_list_length_for_size_func av_int_list_length_for_size;
|
||||||
|
#if LIBAVCODEC_VERSION_MAJOR >= 59
|
||||||
|
extern av_opt_child_class_iterate_func av_opt_child_class_iterate;
|
||||||
|
#else
|
||||||
|
extern av_opt_child_class_next_func av_opt_child_class_next;
|
||||||
|
#endif
|
||||||
|
extern av_opt_next_func av_opt_next;
|
||||||
|
extern av_opt_set_bin_func av_opt_set_bin;
|
||||||
|
extern av_pix_fmt_desc_get_func av_pix_fmt_desc_get;
|
||||||
|
extern av_pix_fmt_desc_next_func av_pix_fmt_desc_next;
|
||||||
|
extern av_sample_fmt_is_planar_func av_sample_fmt_is_planar;
|
||||||
|
extern av_samples_alloc_array_and_samples_func av_samples_alloc_array_and_samples;
|
||||||
|
extern av_strdup_func av_strdup;
|
||||||
|
extern avutil_version_func avutil_version;
|
||||||
|
|
||||||
|
// avcodec
|
||||||
|
typedef int (*av_codec_is_encoder_func)(const AVCodec*);
|
||||||
|
typedef const AVCodec* (*av_codec_iterate_func)(void**);
|
||||||
|
typedef void (*av_init_packet_func)(AVPacket*);
|
||||||
|
typedef AVPacket* (*av_packet_alloc_func)();
|
||||||
|
typedef void (*av_packet_free_func)(AVPacket**);
|
||||||
|
typedef void (*av_packet_rescale_ts_func)(AVPacket*, AVRational, AVRational);
|
||||||
|
typedef void (*av_parser_close_func)(AVCodecParserContext*);
|
||||||
|
typedef AVCodecParserContext* (*av_parser_init_func)(int);
|
||||||
|
typedef int (*av_parser_parse2_func)(AVCodecParserContext*, AVCodecContext*, uint8_t**, int*,
|
||||||
|
const uint8_t*, int, int64_t, int64_t, int64_t);
|
||||||
|
typedef AVCodecContext* (*avcodec_alloc_context3_func)(const AVCodec*);
|
||||||
|
typedef const AVCodecDescriptor* (*avcodec_descriptor_next_func)(const AVCodecDescriptor*);
|
||||||
|
typedef AVCodec* (*avcodec_find_decoder_func)(AVCodecID);
|
||||||
|
typedef const AVCodec* (*avcodec_find_encoder_by_name_func)(const char*);
|
||||||
|
typedef void (*avcodec_free_context_func)(AVCodecContext**);
|
||||||
|
typedef const AVClass* (*avcodec_get_class_func)();
|
||||||
|
typedef const AVCodecHWConfig* (*avcodec_get_hw_config_func)(const AVCodec*, int);
|
||||||
|
typedef int (*avcodec_open2_func)(AVCodecContext*, const AVCodec*, AVDictionary**);
|
||||||
|
typedef int (*avcodec_parameters_from_context_func)(AVCodecParameters* par, const AVCodecContext*);
|
||||||
|
typedef int (*avcodec_receive_frame_func)(AVCodecContext*, AVFrame*);
|
||||||
|
typedef int (*avcodec_receive_packet_func)(AVCodecContext*, AVPacket*);
|
||||||
|
typedef int (*avcodec_send_frame_func)(AVCodecContext*, const AVFrame*);
|
||||||
|
typedef int (*avcodec_send_packet_func)(AVCodecContext*, const AVPacket*);
|
||||||
|
typedef unsigned (*avcodec_version_func)();
|
||||||
|
|
||||||
|
extern av_codec_is_encoder_func av_codec_is_encoder;
|
||||||
|
extern av_codec_iterate_func av_codec_iterate;
|
||||||
|
extern av_init_packet_func av_init_packet;
|
||||||
|
extern av_packet_alloc_func av_packet_alloc;
|
||||||
|
extern av_packet_free_func av_packet_free;
|
||||||
|
extern av_packet_rescale_ts_func av_packet_rescale_ts;
|
||||||
|
extern av_parser_close_func av_parser_close;
|
||||||
|
extern av_parser_init_func av_parser_init;
|
||||||
|
extern av_parser_parse2_func av_parser_parse2;
|
||||||
|
extern avcodec_alloc_context3_func avcodec_alloc_context3;
|
||||||
|
extern avcodec_descriptor_next_func avcodec_descriptor_next;
|
||||||
|
extern avcodec_find_decoder_func avcodec_find_decoder;
|
||||||
|
extern avcodec_find_encoder_by_name_func avcodec_find_encoder_by_name;
|
||||||
|
extern avcodec_free_context_func avcodec_free_context;
|
||||||
|
extern avcodec_get_class_func avcodec_get_class;
|
||||||
|
extern avcodec_get_hw_config_func avcodec_get_hw_config;
|
||||||
|
extern avcodec_open2_func avcodec_open2;
|
||||||
|
extern avcodec_parameters_from_context_func avcodec_parameters_from_context;
|
||||||
|
extern avcodec_receive_frame_func avcodec_receive_frame;
|
||||||
|
extern avcodec_receive_packet_func avcodec_receive_packet;
|
||||||
|
extern avcodec_send_frame_func avcodec_send_frame;
|
||||||
|
extern avcodec_send_packet_func avcodec_send_packet;
|
||||||
|
extern avcodec_version_func avcodec_version;
|
||||||
|
|
||||||
|
// avfilter
|
||||||
|
typedef int (*av_buffersink_get_frame_func)(AVFilterContext*, AVFrame*);
|
||||||
|
typedef int (*av_buffersrc_add_frame_func)(AVFilterContext*, AVFrame*);
|
||||||
|
typedef const AVFilter* (*avfilter_get_by_name_func)(const char*);
|
||||||
|
typedef AVFilterGraph* (*avfilter_graph_alloc_func)();
|
||||||
|
typedef int (*avfilter_graph_config_func)(AVFilterGraph*, void*);
|
||||||
|
typedef int (*avfilter_graph_create_filter_func)(AVFilterContext**, const AVFilter*, const char*,
|
||||||
|
const char*, void*, AVFilterGraph*);
|
||||||
|
typedef void (*avfilter_graph_free_func)(AVFilterGraph** graph);
|
||||||
|
typedef int (*avfilter_graph_parse_ptr_func)(AVFilterGraph*, const char*, AVFilterInOut**,
|
||||||
|
AVFilterInOut**, void*);
|
||||||
|
typedef AVFilterInOut* (*avfilter_inout_alloc_func)();
|
||||||
|
typedef void (*avfilter_inout_free_func)(AVFilterInOut**);
|
||||||
|
typedef unsigned (*avfilter_version_func)();
|
||||||
|
|
||||||
|
extern av_buffersink_get_frame_func av_buffersink_get_frame;
|
||||||
|
extern av_buffersrc_add_frame_func av_buffersrc_add_frame;
|
||||||
|
extern avfilter_get_by_name_func avfilter_get_by_name;
|
||||||
|
extern avfilter_graph_alloc_func avfilter_graph_alloc;
|
||||||
|
extern avfilter_graph_config_func avfilter_graph_config;
|
||||||
|
extern avfilter_graph_create_filter_func avfilter_graph_create_filter;
|
||||||
|
extern avfilter_graph_free_func avfilter_graph_free;
|
||||||
|
extern avfilter_graph_parse_ptr_func avfilter_graph_parse_ptr;
|
||||||
|
extern avfilter_inout_alloc_func avfilter_inout_alloc;
|
||||||
|
extern avfilter_inout_free_func avfilter_inout_free;
|
||||||
|
extern avfilter_version_func avfilter_version;
|
||||||
|
|
||||||
|
// avformat
|
||||||
|
typedef const AVOutputFormat* (*av_guess_format_func)(const char*, const char*, const char*);
|
||||||
|
typedef int (*av_interleaved_write_frame_func)(AVFormatContext*, AVPacket*);
|
||||||
|
typedef const AVOutputFormat* (*av_muxer_iterate_func)(void**);
|
||||||
|
typedef int (*av_write_trailer_func)(AVFormatContext*);
|
||||||
|
typedef int (*avformat_alloc_output_context2_func)(AVFormatContext**, const AVOutputFormat*,
|
||||||
|
const char*, const char*);
|
||||||
|
typedef void (*avformat_free_context_func)(AVFormatContext*);
|
||||||
|
typedef const AVClass* (*avformat_get_class_func)();
|
||||||
|
typedef int (*avformat_network_init_func)();
|
||||||
|
typedef AVStream* (*avformat_new_stream_func)(AVFormatContext*, const AVCodec*);
|
||||||
|
typedef int (*avformat_query_codec_func)(const AVOutputFormat*, AVCodecID, int);
|
||||||
|
typedef int (*avformat_write_header_func)(AVFormatContext*, AVDictionary**);
|
||||||
|
typedef unsigned (*avformat_version_func)();
|
||||||
|
typedef int (*avio_closep_func)(AVIOContext**);
|
||||||
|
typedef int (*avio_open_func)(AVIOContext**, const char*, int);
|
||||||
|
|
||||||
|
extern av_guess_format_func av_guess_format;
|
||||||
|
extern av_interleaved_write_frame_func av_interleaved_write_frame;
|
||||||
|
extern av_muxer_iterate_func av_muxer_iterate;
|
||||||
|
extern av_write_trailer_func av_write_trailer;
|
||||||
|
extern avformat_alloc_output_context2_func avformat_alloc_output_context2;
|
||||||
|
extern avformat_free_context_func avformat_free_context;
|
||||||
|
extern avformat_get_class_func avformat_get_class;
|
||||||
|
extern avformat_network_init_func avformat_network_init;
|
||||||
|
extern avformat_new_stream_func avformat_new_stream;
|
||||||
|
extern avformat_query_codec_func avformat_query_codec;
|
||||||
|
extern avformat_write_header_func avformat_write_header;
|
||||||
|
extern avformat_version_func avformat_version;
|
||||||
|
extern avio_closep_func avio_closep;
|
||||||
|
extern avio_open_func avio_open;
|
||||||
|
|
||||||
|
// swresample
|
||||||
|
#if LIBSWRESAMPLE_VERSION_INT >= AV_VERSION_INT(4, 5, 100)
|
||||||
|
typedef SwrContext* (*swr_alloc_set_opts2_func)(SwrContext**, AVChannelLayout*, AVSampleFormat, int,
|
||||||
|
AVChannelLayout*, AVSampleFormat, int, int, void*);
|
||||||
|
#else
|
||||||
|
typedef SwrContext* (*swr_alloc_set_opts_func)(SwrContext*, int64_t, AVSampleFormat, int, int64_t,
|
||||||
|
AVSampleFormat, int, int, void*);
|
||||||
|
#endif
|
||||||
|
typedef int (*swr_convert_func)(SwrContext*, uint8_t**, int, const uint8_t**, int);
|
||||||
|
typedef void (*swr_free_func)(SwrContext**);
|
||||||
|
typedef int (*swr_init_func)(SwrContext*);
|
||||||
|
typedef unsigned (*swresample_version_func)();
|
||||||
|
|
||||||
|
#if LIBSWRESAMPLE_VERSION_INT >= AV_VERSION_INT(4, 5, 100)
|
||||||
|
extern swr_alloc_set_opts2_func swr_alloc_set_opts2;
|
||||||
|
#else
|
||||||
|
extern swr_alloc_set_opts_func swr_alloc_set_opts;
|
||||||
|
#endif
|
||||||
|
extern swr_convert_func swr_convert;
|
||||||
|
extern swr_free_func swr_free;
|
||||||
|
extern swr_init_func swr_init;
|
||||||
|
extern swresample_version_func swresample_version;
|
||||||
|
|
||||||
|
bool LoadFFmpeg();
|
||||||
|
|
||||||
|
} // namespace DynamicLibrary::FFmpeg
|
|
@@ -38,6 +38,8 @@ add_library(citra_core STATIC
     core_timing.h
     dumping/backend.cpp
     dumping/backend.h
+    dumping/ffmpeg_backend.cpp
+    dumping/ffmpeg_backend.h
     file_sys/archive_backend.cpp
     file_sys/archive_backend.h
     file_sys/archive_extsavedata.cpp
@@ -468,13 +470,6 @@ add_library(citra_core STATIC
     tracer/recorder.h
 )
 
-if (ENABLE_FFMPEG_VIDEO_DUMPER)
-    target_sources(citra_core PRIVATE
-        dumping/ffmpeg_backend.cpp
-        dumping/ffmpeg_backend.h
-    )
-endif()
-
 create_target_directory_groups(citra_core)
 
 target_link_libraries(citra_core PUBLIC citra_common PRIVATE audio_core network video_core)
@@ -504,10 +499,6 @@ if ("x86_64" IN_LIST ARCHITECTURE OR "arm64" IN_LIST ARCHITECTURE)
     target_link_libraries(citra_core PRIVATE dynarmic)
 endif()
 
-if (ENABLE_FFMPEG_VIDEO_DUMPER)
-    target_link_libraries(citra_core PUBLIC FFmpeg::avcodec FFmpeg::avfilter FFmpeg::avformat FFmpeg::swresample FFmpeg::avutil)
-endif()
-
 if (CITRA_USE_PRECOMPILED_HEADERS)
     target_precompile_headers(citra_core PRIVATE precompiled_headers.h)
 endif()
@@ -11,6 +11,7 @@
 #include "audio_core/lle/lle.h"
 #include "common/arch.h"
 #include "common/logging/log.h"
+#include "common/settings.h"
 #include "common/texture.h"
 #include "core/arm/arm_interface.h"
 #include "core/arm/exclusive_monitor.h"
@@ -22,10 +23,7 @@
 #include "core/core.h"
 #include "core/core_timing.h"
 #include "core/dumping/backend.h"
-#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
 #include "core/dumping/ffmpeg_backend.h"
-#endif
-#include "common/settings.h"
 #include "core/frontend/image_interface.h"
 #include "core/gdbstub/gdbstub.h"
 #include "core/global.h"
@@ -423,12 +421,6 @@ System::ResultStatus System::Init(Frontend::EmuWindow& emu_window,
     Service::Init(*this);
     GDBStub::DeferStart();
 
-#ifdef ENABLE_FFMPEG_VIDEO_DUMPER
-    video_dumper = std::make_unique<VideoDumper::FFmpegBackend>();
-#else
-    video_dumper = std::make_unique<VideoDumper::NullBackend>();
-#endif
-
     if (!registered_image_interface) {
         registered_image_interface = std::make_shared<Frontend::ImageInterface>();
     }
@@ -500,12 +492,8 @@ const Cheats::CheatEngine& System::CheatEngine() const {
     return *cheat_engine;
 }
 
-VideoDumper::Backend& System::VideoDumper() {
-    return *video_dumper;
-}
-
-const VideoDumper::Backend& System::VideoDumper() const {
-    return *video_dumper;
+void System::RegisterVideoDumper(std::shared_ptr<VideoDumper::Backend> dumper) {
+    video_dumper = std::move(dumper);
 }
 
 VideoCore::CustomTexManager& System::CustomTexManager() {
@@ -258,11 +258,13 @@ public:
     /// Gets a const reference to the custom texture cache system
     [[nodiscard]] const VideoCore::CustomTexManager& CustomTexManager() const;
 
-    /// Gets a reference to the video dumper backend
-    [[nodiscard]] VideoDumper::Backend& VideoDumper();
-
-    /// Gets a const reference to the video dumper backend
-    [[nodiscard]] const VideoDumper::Backend& VideoDumper() const;
+    /// Video Dumper interface
+    void RegisterVideoDumper(std::shared_ptr<VideoDumper::Backend> video_dumper);
+
+    [[nodiscard]] std::shared_ptr<VideoDumper::Backend> GetVideoDumper() const {
+        return video_dumper;
+    }
 
     std::unique_ptr<PerfStats> perf_stats;
     FrameLimiter frame_limiter;
@@ -370,7 +372,7 @@ private:
     std::unique_ptr<Cheats::CheatEngine> cheat_engine;
 
     /// Video dumper backend
-    std::unique_ptr<VideoDumper::Backend> video_dumper;
+    std::shared_ptr<VideoDumper::Backend> video_dumper;
 
     /// Custom texture cache system
     std::unique_ptr<VideoCore::CustomTexManager> custom_tex_manager;
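The core no longer owns a concrete dumper: a frontend decides whether FFmpeg is available and hands a backend in through RegisterVideoDumper, while consumers fetch it (possibly null) with GetVideoDumper. The following is a rough usage sketch under the assumption that the FFmpeg libraries loaded successfully; the actual frontend call sites are not part of this hunk, and the function name StartRecording is hypothetical.

// Hypothetical frontend code (illustration only, not taken from this diff):
void StartRecording(Core::System& system, const std::string& path,
                    const Layout::FramebufferLayout& layout) {
    auto dumper = std::make_shared<VideoDumper::FFmpegBackend>();
    system.RegisterVideoDumper(dumper); // the core only stores the shared_ptr
    dumper->StartDumping(path, layout); // the caller drives the backend
}

// Consumers must now treat the dumper as optional:
if (auto dumper = system.GetVideoDumper(); dumper && dumper->IsDumping()) {
    // feed frames to it...
}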
@@ -15,24 +15,17 @@
 #include "video_core/renderer_base.h"
 #include "video_core/video_core.h"
 
-extern "C" {
-#include <libavfilter/buffersink.h>
-#include <libavfilter/buffersrc.h>
-#include <libavutil/hwcontext.h>
-#include <libavutil/pixdesc.h>
-}
+using namespace DynamicLibrary;
 
 namespace VideoDumper {
 
 void InitializeFFmpegLibraries() {
     static bool initialized = false;
-    if (initialized)
+    if (initialized) {
         return;
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 9, 100)
-    av_register_all();
-#endif
-    avformat_network_init();
+    }
+    FFmpeg::avformat_network_init();
     initialized = true;
 }
 
@@ -40,7 +33,7 @@ AVDictionary* ToAVDictionary(const std::string& serialized) {
     Common::ParamPackage param_package{serialized};
     AVDictionary* result = nullptr;
     for (const auto& [key, value] : param_package) {
-        av_dict_set(&result, key.c_str(), value.c_str(), 0);
+        FFmpeg::av_dict_set(&result, key.c_str(), value.c_str(), 0);
     }
     return result;
 }
@@ -67,29 +60,29 @@ void FFmpegStream::Flush() {
 }
 
 void FFmpegStream::WritePacket(AVPacket& packet) {
-    av_packet_rescale_ts(&packet, codec_context->time_base, stream->time_base);
+    FFmpeg::av_packet_rescale_ts(&packet, codec_context->time_base, stream->time_base);
     packet.stream_index = stream->index;
     {
         std::lock_guard lock{*format_context_mutex};
-        av_interleaved_write_frame(format_context, &packet);
+        FFmpeg::av_interleaved_write_frame(format_context, &packet);
     }
 }
 
 void FFmpegStream::SendFrame(AVFrame* frame) {
     // Initialize packet
     AVPacket packet;
-    av_init_packet(&packet);
+    FFmpeg::av_init_packet(&packet);
     packet.data = nullptr;
     packet.size = 0;
 
     // Encode frame
-    if (avcodec_send_frame(codec_context.get(), frame) < 0) {
+    if (FFmpeg::avcodec_send_frame(codec_context.get(), frame) < 0) {
         LOG_ERROR(Render, "Frame dropped: could not send frame");
         return;
     }
     int error = 1;
     while (error >= 0) {
-        error = avcodec_receive_packet(codec_context.get(), &packet);
+        error = FFmpeg::avcodec_receive_packet(codec_context.get(), &packet);
         if (error == AVERROR(EAGAIN) || error == AVERROR_EOF)
             return;
         if (error < 0) {
@@ -111,7 +104,7 @@ FFmpegVideoStream::~FFmpegVideoStream() {
 static AVPixelFormat GetPixelFormat(AVCodecContext* avctx, const AVPixelFormat* fmt) {
     // Choose a software pixel format if any, prefering those in the front of the list
     for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
-        const AVPixFmtDescriptor* desc = av_pix_fmt_desc_get(fmt[i]);
+        const AVPixFmtDescriptor* desc = FFmpeg::av_pix_fmt_desc_get(fmt[i]);
         if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL)) {
             return fmt[i];
         }
@@ -123,7 +116,7 @@ static AVPixelFormat GetPixelFormat(AVCodecContext* avctx, const AVPixelFormat*
     for (int i = 0; fmt[i] != AV_PIX_FMT_NONE; i++) {
         const AVCodecHWConfig* config;
         for (int j = 0;; j++) {
-            config = avcodec_get_hw_config(avctx->codec, j);
+            config = FFmpeg::avcodec_get_hw_config(avctx->codec, j);
             if (!config || config->pix_fmt == fmt[i]) {
                 break;
             }
@@ -144,18 +137,19 @@
 }
 
 bool FFmpegVideoStream::Init(FFmpegMuxer& muxer, const Layout::FramebufferLayout& layout_) {
-
     InitializeFFmpegLibraries();
 
-    if (!FFmpegStream::Init(muxer))
+    if (!FFmpegStream::Init(muxer)) {
        return false;
+    }
 
     layout = layout_;
     frame_count = 0;
 
     // Initialize video codec
-    const AVCodec* codec = avcodec_find_encoder_by_name(Settings::values.video_encoder.c_str());
-    codec_context.reset(avcodec_alloc_context3(codec));
+    const AVCodec* codec =
+        FFmpeg::avcodec_find_encoder_by_name(Settings::values.video_encoder.c_str());
+    codec_context.reset(FFmpeg::avcodec_alloc_context3(codec));
     if (!codec || !codec_context) {
         LOG_ERROR(Render, "Could not find video encoder or allocate video codec context");
         return false;
@@ -173,9 +167,9 @@ bool FFmpegVideoStream::Init(FFmpegMuxer& muxer, const Layout::FramebufferLayout
 
     // Get pixel format for codec
     auto options = ToAVDictionary(Settings::values.video_encoder_options);
-    auto pixel_format_opt = av_dict_get(options, "pixel_format", nullptr, 0);
+    auto pixel_format_opt = FFmpeg::av_dict_get(options, "pixel_format", nullptr, 0);
     if (pixel_format_opt) {
-        sw_pixel_format = av_get_pix_fmt(pixel_format_opt->value);
+        sw_pixel_format = FFmpeg::av_get_pix_fmt(pixel_format_opt->value);
     } else if (codec->pix_fmts) {
         sw_pixel_format = GetPixelFormat(codec_context.get(), codec->pix_fmts);
     } else {
@@ -192,23 +186,25 @@ bool FFmpegVideoStream::Init(FFmpegMuxer& muxer, const Layout::FramebufferLayout
         codec_context->pix_fmt = sw_pixel_format;
     }
 
-    if (format_context->oformat->flags & AVFMT_GLOBALHEADER)
+    if (format_context->oformat->flags & AVFMT_GLOBALHEADER) {
         codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+    }
 
-    if (avcodec_open2(codec_context.get(), codec, &options) < 0) {
+    if (FFmpeg::avcodec_open2(codec_context.get(), codec, &options) < 0) {
         LOG_ERROR(Render, "Could not open video codec");
         return false;
     }
 
-    if (av_dict_count(options) != 0) { // Successfully set options are removed from the dict
+    if (FFmpeg::av_dict_count(options) != 0) { // Successfully set options are removed from the dict
         char* buf = nullptr;
-        av_dict_get_string(options, &buf, ':', ';');
+        FFmpeg::av_dict_get_string(options, &buf, ':', ';');
         LOG_WARNING(Render, "Video encoder options not found: {}", buf);
     }
 
     // Create video stream
-    stream = avformat_new_stream(format_context, codec);
-    if (!stream || avcodec_parameters_from_context(stream->codecpar, codec_context.get()) < 0) {
+    stream = FFmpeg::avformat_new_stream(format_context, codec);
+    if (!stream ||
+        FFmpeg::avcodec_parameters_from_context(stream->codecpar, codec_context.get()) < 0) {
         LOG_ERROR(Render, "Could not create video stream");
         return false;
     }
@@ -216,12 +212,12 @@
     stream->time_base = codec_context->time_base;
 
     // Allocate frames
-    current_frame.reset(av_frame_alloc());
-    filtered_frame.reset(av_frame_alloc());
+    current_frame.reset(FFmpeg::av_frame_alloc());
+    filtered_frame.reset(FFmpeg::av_frame_alloc());
 
     if (requires_hw_frames) {
-        hw_frame.reset(av_frame_alloc());
-        if (av_hwframe_get_buffer(codec_context->hw_frames_ctx, hw_frame.get(), 0) < 0) {
+        hw_frame.reset(FFmpeg::av_frame_alloc());
+        if (FFmpeg::av_hwframe_get_buffer(codec_context->hw_frames_ctx, hw_frame.get(), 0) < 0) {
             LOG_ERROR(Render, "Could not allocate buffer for HW frame");
             return false;
         }
@@ -255,12 +251,12 @@ void FFmpegVideoStream::ProcessFrame(VideoFrame& frame) {
     current_frame->pts = frame_count++;
 
     // Filter the frame
-    if (av_buffersrc_add_frame(source_context, current_frame.get()) < 0) {
+    if (FFmpeg::av_buffersrc_add_frame(source_context, current_frame.get()) < 0) {
         LOG_ERROR(Render, "Video frame dropped: Could not add frame to filter graph");
         return;
     }
     while (true) {
-        const int error = av_buffersink_get_frame(sink_context, filtered_frame.get());
+        const int error = FFmpeg::av_buffersink_get_frame(sink_context, filtered_frame.get());
         if (error == AVERROR(EAGAIN) || error == AVERROR_EOF) {
             return;
         }
@@ -269,7 +265,7 @@
             return;
         } else {
             if (requires_hw_frames) {
-                if (av_hwframe_transfer_data(hw_frame.get(), filtered_frame.get(), 0) < 0) {
+                if (FFmpeg::av_hwframe_transfer_data(hw_frame.get(), filtered_frame.get(), 0) < 0) {
                     LOG_ERROR(Render, "Video frame dropped: Could not upload to HW frame");
                     return;
                 }
@@ -278,7 +274,7 @@
                 SendFrame(filtered_frame.get());
             }
 
-            av_frame_unref(filtered_frame.get());
+            FFmpeg::av_frame_unref(filtered_frame.get());
         }
     }
 }
@@ -287,7 +283,7 @@ bool FFmpegVideoStream::InitHWContext(const AVCodec* codec) {
     for (std::size_t i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i) {
         const AVCodecHWConfig* config;
         for (int j = 0;; ++j) {
-            config = avcodec_get_hw_config(codec, j);
+            config = FFmpeg::avcodec_get_hw_config(codec, j);
             if (!config || config->pix_fmt == codec->pix_fmts[i]) {
                 break;
             }
@@ -306,22 +302,22 @@
 
         // Create HW device context
         AVBufferRef* hw_device_context;
-        SCOPE_EXIT({ av_buffer_unref(&hw_device_context); });
+        SCOPE_EXIT({ FFmpeg::av_buffer_unref(&hw_device_context); });
 
         // TODO: Provide the argument here somehow.
         // This is necessary for some devices like CUDA where you must supply the GPU name.
         // This is not necessary for VAAPI, etc.
-        if (av_hwdevice_ctx_create(&hw_device_context, config->device_type, nullptr, nullptr, 0) <
-            0) {
+        if (FFmpeg::av_hwdevice_ctx_create(&hw_device_context, config->device_type, nullptr,
+                                           nullptr, 0) < 0) {
             LOG_ERROR(Render, "Failed to create HW device context");
             continue;
         }
-        codec_context->hw_device_ctx = av_buffer_ref(hw_device_context);
+        codec_context->hw_device_ctx = FFmpeg::av_buffer_ref(hw_device_context);
 
         // Get the SW format
         AVHWFramesConstraints* constraints =
-            av_hwdevice_get_hwframe_constraints(hw_device_context, nullptr);
-        SCOPE_EXIT({ av_hwframe_constraints_free(&constraints); });
+            FFmpeg::av_hwdevice_get_hwframe_constraints(hw_device_context, nullptr);
+        SCOPE_EXIT({ FFmpeg::av_hwframe_constraints_free(&constraints); });
 
         if (constraints) {
             sw_pixel_format = constraints->valid_sw_formats ? constraints->valid_sw_formats[0]
@@ -341,9 +337,9 @@
 
         // Create HW frames context
         AVBufferRef* hw_frames_context_ref;
-        SCOPE_EXIT({ av_buffer_unref(&hw_frames_context_ref); });
+        SCOPE_EXIT({ FFmpeg::av_buffer_unref(&hw_frames_context_ref); });
 
-        if (!(hw_frames_context_ref = av_hwframe_ctx_alloc(hw_device_context))) {
+        if (!(hw_frames_context_ref = FFmpeg::av_hwframe_ctx_alloc(hw_device_context))) {
             LOG_ERROR(Render, "Failed to create HW frames context");
             continue;
         }
@@ -356,12 +352,12 @@
         hw_frames_context->height = codec_context->height;
         hw_frames_context->initial_pool_size = 20; // value from FFmpeg's example
 
-        if (av_hwframe_ctx_init(hw_frames_context_ref) < 0) {
+        if (FFmpeg::av_hwframe_ctx_init(hw_frames_context_ref) < 0) {
             LOG_ERROR(Render, "Failed to initialize HW frames context");
             continue;
         }
 
-        codec_context->hw_frames_ctx = av_buffer_ref(hw_frames_context_ref);
+        codec_context->hw_frames_ctx = FFmpeg::av_buffer_ref(hw_frames_context_ref);
         return true;
     }
 
@@ -370,10 +366,10 @@
 }
 
 bool FFmpegVideoStream::InitFilters() {
-    filter_graph.reset(avfilter_graph_alloc());
+    filter_graph.reset(FFmpeg::avfilter_graph_alloc());
 
-    const AVFilter* source = avfilter_get_by_name("buffer");
-    const AVFilter* sink = avfilter_get_by_name("buffersink");
+    const AVFilter* source = FFmpeg::avfilter_get_by_name("buffer");
+    const AVFilter* sink = FFmpeg::avfilter_get_by_name("buffersink");
     if (!source || !sink) {
         LOG_ERROR(Render, "Could not find buffer source or sink");
         return false;
@@ -385,18 +381,23 @@
     const std::string in_args =
         fmt::format("video_size={}x{}:pix_fmt={}:time_base={}/{}:pixel_aspect=1", layout.width,
                     layout.height, pixel_format, src_time_base.num, src_time_base.den);
-    if (avfilter_graph_create_filter(&source_context, source, "in", in_args.c_str(), nullptr,
-                                     filter_graph.get()) < 0) {
+    if (FFmpeg::avfilter_graph_create_filter(&source_context, source, "in", in_args.c_str(),
+                                             nullptr, filter_graph.get()) < 0) {
         LOG_ERROR(Render, "Could not create buffer source");
         return false;
     }
 
     // Configure buffer sink
-    if (avfilter_graph_create_filter(&sink_context, sink, "out", nullptr, nullptr,
+    if (FFmpeg::avfilter_graph_create_filter(&sink_context, sink, "out", nullptr, nullptr,
                                      filter_graph.get()) < 0) {
         LOG_ERROR(Render, "Could not create buffer sink");
         return false;
     }
 
+    // Point av_opt_set_int_list to correct functions.
+#define av_int_list_length_for_size FFmpeg::av_int_list_length_for_size
+#define av_opt_set_bin FFmpeg::av_opt_set_bin
+
     const AVPixelFormat pix_fmts[] = {sw_pixel_format, AV_PIX_FMT_NONE};
     if (av_opt_set_int_list(sink_context, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE,
                             AV_OPT_SEARCH_CHILDREN) < 0) {
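The two #defines added above work because av_opt_set_int_list is not an exported function at all: in libavutil/opt.h it is a macro that expands to calls of av_int_list_length (itself defined in terms of av_int_list_length_for_size) and av_opt_set_bin. Redefining those two names therefore makes the existing macro resolve to the dynamically loaded pointers instead of symbols that are no longer linked. Roughly, the upstream macro has this shape (paraphrased from FFmpeg's public headers, not part of this diff, and the exact text varies by FFmpeg version):

#define av_opt_set_int_list(obj, name, val, term, flags)              \
    (av_int_list_length(val, term) > INT_MAX / sizeof(*(val))         \
         ? AVERROR(EINVAL)                                            \
         : av_opt_set_bin(obj, name, (const uint8_t*)(val),           \
                          av_int_list_length(val, term) * sizeof(*(val)), flags))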
@@ -406,30 +407,30 @@
 
     // Initialize filter graph
     // `outputs` as in outputs of the 'previous' graphs
-    AVFilterInOut* outputs = avfilter_inout_alloc();
-    outputs->name = av_strdup("in");
+    AVFilterInOut* outputs = FFmpeg::avfilter_inout_alloc();
+    outputs->name = FFmpeg::av_strdup("in");
     outputs->filter_ctx = source_context;
     outputs->pad_idx = 0;
     outputs->next = nullptr;
 
     // `inputs` as in inputs to the 'next' graphs
-    AVFilterInOut* inputs = avfilter_inout_alloc();
-    inputs->name = av_strdup("out");
+    AVFilterInOut* inputs = FFmpeg::avfilter_inout_alloc();
+    inputs->name = FFmpeg::av_strdup("out");
     inputs->filter_ctx = sink_context;
     inputs->pad_idx = 0;
     inputs->next = nullptr;
 
     SCOPE_EXIT({
-        avfilter_inout_free(&outputs);
-        avfilter_inout_free(&inputs);
+        FFmpeg::avfilter_inout_free(&outputs);
+        FFmpeg::avfilter_inout_free(&inputs);
     });
 
-    if (avfilter_graph_parse_ptr(filter_graph.get(), filter_graph_desc.data(), &inputs, &outputs,
-                                 nullptr) < 0) {
+    if (FFmpeg::avfilter_graph_parse_ptr(filter_graph.get(), filter_graph_desc.data(), &inputs,
+                                         &outputs, nullptr) < 0) {
         LOG_ERROR(Render, "Could not parse or create filter graph");
         return false;
     }
-    if (avfilter_graph_config(filter_graph.get(), nullptr) < 0) {
+    if (FFmpeg::avfilter_graph_config(filter_graph.get(), nullptr) < 0) {
         LOG_ERROR(Render, "Could not configure filter graph");
         return false;
     }
@@ -444,14 +445,16 @@ FFmpegAudioStream::~FFmpegAudioStream() {
 bool FFmpegAudioStream::Init(FFmpegMuxer& muxer) {
     InitializeFFmpegLibraries();
 
-    if (!FFmpegStream::Init(muxer))
+    if (!FFmpegStream::Init(muxer)) {
         return false;
+    }
 
     frame_count = 0;
 
     // Initialize audio codec
-    const AVCodec* codec = avcodec_find_encoder_by_name(Settings::values.audio_encoder.c_str());
-    codec_context.reset(avcodec_alloc_context3(codec));
+    const AVCodec* codec =
+        FFmpeg::avcodec_find_encoder_by_name(Settings::values.audio_encoder.c_str());
+    codec_context.reset(FFmpeg::avcodec_alloc_context3(codec));
     if (!codec || !codec_context) {
         LOG_ERROR(Render, "Could not find audio encoder or allocate audio codec context");
         return false;
@@ -482,20 +485,25 @@ bool FFmpegAudioStream::Init(FFmpegMuxer& muxer) {
     }
     codec_context->time_base.num = 1;
     codec_context->time_base.den = codec_context->sample_rate;
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
+    codec_context->ch_layout = AV_CHANNEL_LAYOUT_STEREO;
+#else
     codec_context->channel_layout = AV_CH_LAYOUT_STEREO;
     codec_context->channels = 2;
-    if (format_context->oformat->flags & AVFMT_GLOBALHEADER)
+#endif
+    if (format_context->oformat->flags & AVFMT_GLOBALHEADER) {
         codec_context->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
+    }
 
     AVDictionary* options = ToAVDictionary(Settings::values.audio_encoder_options);
-    if (avcodec_open2(codec_context.get(), codec, &options) < 0) {
+    if (FFmpeg::avcodec_open2(codec_context.get(), codec, &options) < 0) {
         LOG_ERROR(Render, "Could not open audio codec");
         return false;
     }
 
-    if (av_dict_count(options) != 0) { // Successfully set options are removed from the dict
+    if (FFmpeg::av_dict_count(options) != 0) { // Successfully set options are removed from the dict
         char* buf = nullptr;
-        av_dict_get_string(options, &buf, ':', ';');
+        FFmpeg::av_dict_get_string(options, &buf, ':', ';');
         LOG_WARNING(Render, "Audio encoder options not found: {}", buf);
     }
 
@@ -506,39 +514,49 @@
     }
 
     // Create audio stream
-    stream = avformat_new_stream(format_context, codec);
-    if (!stream || avcodec_parameters_from_context(stream->codecpar, codec_context.get()) < 0) {
+    stream = FFmpeg::avformat_new_stream(format_context, codec);
+    if (!stream ||
+        FFmpeg::avcodec_parameters_from_context(stream->codecpar, codec_context.get()) < 0) {
+
         LOG_ERROR(Render, "Could not create audio stream");
         return false;
     }
 
     // Allocate frame
-    audio_frame.reset(av_frame_alloc());
+    audio_frame.reset(FFmpeg::av_frame_alloc());
     audio_frame->format = codec_context->sample_fmt;
-    audio_frame->channel_layout = codec_context->channel_layout;
-    audio_frame->channels = codec_context->channels;
     audio_frame->sample_rate = codec_context->sample_rate;
 
-    // Allocate SWR context
-    auto* context =
-        swr_alloc_set_opts(nullptr, codec_context->channel_layout, codec_context->sample_fmt,
-                           codec_context->sample_rate, codec_context->channel_layout,
-                           AV_SAMPLE_FMT_S16P, AudioCore::native_sample_rate, 0, nullptr);
+#if LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(59, 24, 100)
+    auto num_channels = codec_context->ch_layout.nb_channels;
+    audio_frame->ch_layout = codec_context->ch_layout;
+    SwrContext* context = nullptr;
+    FFmpeg::swr_alloc_set_opts2(&context, &codec_context->ch_layout, codec_context->sample_fmt,
+                                codec_context->sample_rate, &codec_context->ch_layout,
+                                AV_SAMPLE_FMT_S16P, AudioCore::native_sample_rate, 0, nullptr);
+#else
+    auto num_channels = codec_context->channels;
+    audio_frame->channel_layout = codec_context->channel_layout;
+    audio_frame->channels = num_channels;
+    auto* context = FFmpeg::swr_alloc_set_opts(
+        nullptr, codec_context->channel_layout, codec_context->sample_fmt,
+        codec_context->sample_rate, codec_context->channel_layout, AV_SAMPLE_FMT_S16P,
+        AudioCore::native_sample_rate, 0, nullptr);
+#endif
 
     if (!context) {
         LOG_ERROR(Render, "Could not create SWR context");
         return false;
     }
     swr_context.reset(context);
-    if (swr_init(swr_context.get()) < 0) {
+    if (FFmpeg::swr_init(swr_context.get()) < 0) {
         LOG_ERROR(Render, "Could not init SWR context");
         return false;
     }
 
     // Allocate resampled data
-    int error =
-        av_samples_alloc_array_and_samples(&resampled_data, nullptr, codec_context->channels,
-                                           frame_size, codec_context->sample_fmt, 0);
+    int error = FFmpeg::av_samples_alloc_array_and_samples(
+        &resampled_data, nullptr, num_channels, frame_size, codec_context->sample_fmt, 0);
     if (error < 0) {
         LOG_ERROR(Render, "Could not allocate samples storage");
         return false;
@@ -554,9 +572,9 @@ void FFmpegAudioStream::Free() {
     swr_context.reset();
     // Free resampled data
     if (resampled_data) {
-        av_freep(&resampled_data[0]);
+        FFmpeg::av_freep(&resampled_data[0]);
     }
-    av_freep(&resampled_data);
+    FFmpeg::av_freep(&resampled_data);
 }
 
 void FFmpegAudioStream::ProcessFrame(const VariableAudioFrame& channel0,
@@ -564,20 +582,21 @@
     ASSERT_MSG(channel0.size() == channel1.size(),
                "Frames of the two channels must have the same number of samples");
 
-    const auto sample_size = av_get_bytes_per_sample(codec_context->sample_fmt);
+    const auto sample_size = FFmpeg::av_get_bytes_per_sample(codec_context->sample_fmt);
     std::array<const u8*, 2> src_data = {reinterpret_cast<const u8*>(channel0.data()),
                                          reinterpret_cast<const u8*>(channel1.data())};
 
     std::array<u8*, 2> dst_data;
-    if (av_sample_fmt_is_planar(codec_context->sample_fmt)) {
+    if (FFmpeg::av_sample_fmt_is_planar(codec_context->sample_fmt)) {
         dst_data = {resampled_data[0] + sample_size * offset,
                     resampled_data[1] + sample_size * offset};
     } else {
         dst_data = {resampled_data[0] + sample_size * offset * 2}; // 2 channels
     }
 
-    auto resampled_count = swr_convert(swr_context.get(), dst_data.data(), frame_size - offset,
-                                       src_data.data(), static_cast<int>(channel0.size()));
+    auto resampled_count =
+        FFmpeg::swr_convert(swr_context.get(), dst_data.data(), frame_size - offset,
+                            src_data.data(), static_cast<int>(channel0.size()));
     if (resampled_count < 0) {
         LOG_ERROR(Render, "Audio frame dropped: Could not resample data");
         return;
@@ -592,7 +611,7 @@
     // Prepare frame
     audio_frame->nb_samples = frame_size;
     audio_frame->data[0] = resampled_data[0];
-    if (av_sample_fmt_is_planar(codec_context->sample_fmt)) {
+    if (FFmpeg::av_sample_fmt_is_planar(codec_context->sample_fmt)) {
         audio_frame->data[1] = resampled_data[1];
     }
     audio_frame->pts = frame_count * frame_size;
@@ -601,7 +620,8 @@
     SendFrame(audio_frame.get());
 
     // swr_convert buffers input internally. Try to get more resampled data
-    resampled_count = swr_convert(swr_context.get(), resampled_data, frame_size, nullptr, 0);
+    resampled_count =
+        FFmpeg::swr_convert(swr_context.get(), resampled_data, frame_size, nullptr, 0);
     if (resampled_count < 0) {
         LOG_ERROR(Render, "Audio frame dropped: Could not resample data");
         return;
@@ -617,7 +637,7 @@ void FFmpegAudioStream::Flush() {
     // Send the last samples
     audio_frame->nb_samples = offset;
     audio_frame->data[0] = resampled_data[0];
-    if (av_sample_fmt_is_planar(codec_context->sample_fmt)) {
+    if (FFmpeg::av_sample_fmt_is_planar(codec_context->sample_fmt)) {
         audio_frame->data[1] = resampled_data[1];
     }
     audio_frame->pts = frame_count * frame_size;
@@ -632,7 +652,6 @@ FFmpegMuxer::~FFmpegMuxer() {
 }
 
 bool FFmpegMuxer::Init(const std::string& path, const Layout::FramebufferLayout& layout) {
-
     InitializeFFmpegLibraries();
 
     if (!FileUtil::CreateFullPath(path)) {
@@ -641,7 +660,7 @@ bool FFmpegMuxer::Init(const std::string& path, const Layout::FramebufferLayout&
 
     // Get output format
     const auto format = Settings::values.output_format;
-    auto* output_format = av_guess_format(format.c_str(), path.c_str(), nullptr);
+    auto* output_format = FFmpeg::av_guess_format(format.c_str(), path.c_str(), nullptr);
     if (!output_format) {
         LOG_ERROR(Render, "Could not get format {}", format);
         return false;
@@ -649,9 +668,8 @@
 
     // Initialize format context
     auto* format_context_raw = format_context.get();
-    if (avformat_alloc_output_context2(&format_context_raw, output_format, nullptr, path.c_str()) <
-        0) {
-
+    if (FFmpeg::avformat_alloc_output_context2(&format_context_raw, output_format, nullptr,
+                                               path.c_str()) < 0) {
         LOG_ERROR(Render, "Could not allocate output context");
         return false;
     }
@@ -664,15 +682,15 @@
 
     AVDictionary* options = ToAVDictionary(Settings::values.format_options);
     // Open video file
-    if (avio_open(&format_context->pb, path.c_str(), AVIO_FLAG_WRITE) < 0 ||
-        avformat_write_header(format_context.get(), &options)) {
+    if (FFmpeg::avio_open(&format_context->pb, path.c_str(), AVIO_FLAG_WRITE) < 0 ||
+        FFmpeg::avformat_write_header(format_context.get(), &options)) {
 
         LOG_ERROR(Render, "Could not open {}", path);
         return false;
     }
-    if (av_dict_count(options) != 0) { // Successfully set options are removed from the dict
+    if (FFmpeg::av_dict_count(options) != 0) { // Successfully set options are removed from the dict
         char* buf = nullptr;
-        av_dict_get_string(options, &buf, ':', ';');
+        FFmpeg::av_dict_get_string(options, &buf, ':', ';');
         LOG_WARNING(Render, "Format options not found: {}", buf);
     }
 
@@ -705,8 +723,8 @@ void FFmpegMuxer::FlushAudio() {
 
 void FFmpegMuxer::WriteTrailer() {
     std::lock_guard lock{format_context_mutex};
-    av_interleaved_write_frame(format_context.get(), nullptr);
-    av_write_trailer(format_context.get());
+    FFmpeg::av_interleaved_write_frame(format_context.get(), nullptr);
+    FFmpeg::av_write_trailer(format_context.get());
 }
 
 FFmpegBackend::FFmpegBackend() = default;
@@ -722,7 +740,6 @@ FFmpegBackend::~FFmpegBackend() {
 }
 
 bool FFmpegBackend::StartDumping(const std::string& path, const Layout::FramebufferLayout& layout) {
-
     InitializeFFmpegLibraries();
 
     if (!ffmpeg.Init(path, layout)) {
@@ -732,8 +749,9 @@
 
     video_layout = layout;
 
-    if (video_processing_thread.joinable())
+    if (video_processing_thread.joinable()) {
         video_processing_thread.join();
+    }
     video_processing_thread = std::thread([&] {
         event1.Set();
         while (true) {
@@ -756,8 +774,9 @@
         EndDumping();
     });
 
-    if (audio_processing_thread.joinable())
+    if (audio_processing_thread.joinable()) {
         audio_processing_thread.join();
+    }
     audio_processing_thread = std::thread([&] {
         VariableAudioFrame channel0, channel1;
         while (true) {
@@ -912,16 +931,17 @@ std::string FormatDefaultValue(const AVOption* option,
         return fmt::format("{}", option->default_val.dbl);
     }
     case AV_OPT_TYPE_RATIONAL: {
-        const auto q = av_d2q(option->default_val.dbl, std::numeric_limits<int>::max());
+        const auto q = FFmpeg::av_d2q(option->default_val.dbl, std::numeric_limits<int>::max());
         return fmt::format("{}/{}", q.num, q.den);
     }
     case AV_OPT_TYPE_PIXEL_FMT: {
-        const char* name = av_get_pix_fmt_name(static_cast<AVPixelFormat>(option->default_val.i64));
+        const char* name =
+            FFmpeg::av_get_pix_fmt_name(static_cast<AVPixelFormat>(option->default_val.i64));
         return ToStdString(name, "none");
     }
     case AV_OPT_TYPE_SAMPLE_FMT: {
         const char* name =
-            av_get_sample_fmt_name(static_cast<AVSampleFormat>(option->default_val.i64));
+            FFmpeg::av_get_sample_fmt_name(static_cast<AVSampleFormat>(option->default_val.i64));
         return ToStdString(name, "none");
     }
     case AV_OPT_TYPE_COLOR:
@@ -947,7 +967,7 @@ void GetOptionListSingle(std::vector<OptionInfo>& out, const AVClass* av_class)
     const AVOption* current = nullptr;
     std::unordered_map<std::string, std::vector<OptionInfo::NamedConstant>> named_constants_map;
     // First iteration: find and place all named constants
-    while ((current = av_opt_next(&av_class, current))) {
+    while ((current = FFmpeg::av_opt_next(&av_class, current))) {
         if (current->type != AV_OPT_TYPE_CONST || !current->unit) {
             continue;
         }
@@ -956,7 +976,7 @@
     }
     // Second iteration: find all options
     current = nullptr;
-    while ((current = av_opt_next(&av_class, current))) {
+    while ((current = FFmpeg::av_opt_next(&av_class, current))) {
         // Currently we cannot handle binary options
         if (current->type == AV_OPT_TYPE_CONST || current->type == AV_OPT_TYPE_BINARY) {
             continue;
@@ -985,9 +1005,9 @@ void GetOptionList(std::vector<OptionInfo>& out, const AVClass* av_class, bool s
     const AVClass* child_class = nullptr;
 #if LIBAVCODEC_VERSION_MAJOR >= 59
     void* iter = nullptr;
-    while ((child_class = av_opt_child_class_iterate(av_class, &iter))) {
+    while ((child_class = FFmpeg::av_opt_child_class_iterate(av_class, &iter))) {
 #else
-    while ((child_class = av_opt_child_class_next(av_class, child_class))) {
+    while ((child_class = FFmpeg::av_opt_child_class_next(av_class, child_class))) {
 #endif
         GetOptionListSingle(out, child_class);
     }
@@ -1005,13 +1025,9 @@ std::vector<EncoderInfo> ListEncoders(AVMediaType type) {
     std::vector<EncoderInfo> out;
 
     const AVCodec* current = nullptr;
-#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(58, 10, 100)
-    while ((current = av_codec_next(current))) {
-#else
     void* data = nullptr; // For libavcodec to save the iteration state
-    while ((current = av_codec_iterate(&data))) {
-#endif
-        if (!av_codec_is_encoder(current) || current->type != type) {
+    while ((current = FFmpeg::av_codec_iterate(&data))) {
+        if (!FFmpeg::av_codec_is_encoder(current) || current->type != type) {
             continue;
         }
         out.push_back({current->name, ToStdString(current->long_name), current->id,
@@ -1021,7 +1037,7 @@
 }
 
 std::vector<OptionInfo> GetEncoderGenericOptions() {
-    return GetOptionList(avcodec_get_class(), false);
+    return GetOptionList(FFmpeg::avcodec_get_class(), false);
 }
 
 std::vector<FormatInfo> ListFormats() {
@@ -1030,20 +1046,16 @@ std::vector<FormatInfo> ListFormats() {
     std::vector<FormatInfo> out;
 
     const AVOutputFormat* current = nullptr;
-#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(58, 9, 100)
-    while ((current = av_oformat_next(current))) {
-#else
     void* data = nullptr; // For libavformat to save the iteration state
-    while ((current = av_muxer_iterate(&data))) {
-#endif
+    while ((current = FFmpeg::av_muxer_iterate(&data))) {
         const auto extensions = Common::SplitString(ToStdString(current->extensions), ',');
 
         std::set<AVCodecID> supported_video_codecs;
         std::set<AVCodecID> supported_audio_codecs;
         // Go through all codecs
         const AVCodecDescriptor* codec = nullptr;
-        while ((codec = avcodec_descriptor_next(codec))) {
-            if (avformat_query_codec(current, codec->id, FF_COMPLIANCE_NORMAL) == 1) {
+        while ((codec = FFmpeg::avcodec_descriptor_next(codec))) {
+            if (FFmpeg::avformat_query_codec(current, codec->id, FF_COMPLIANCE_NORMAL) == 1) {
                 if (codec->type == AVMEDIA_TYPE_VIDEO) {
                     supported_video_codecs.emplace(codec->id);
                 } else if (codec->type == AVMEDIA_TYPE_AUDIO) {
@@ -1064,7 +1076,24 @@
     }
 
 std::vector<OptionInfo> GetFormatGenericOptions() {
-    return GetOptionList(avformat_get_class(), false);
+    return GetOptionList(FFmpeg::avformat_get_class(), false);
+}
+
+std::vector<std::string> GetPixelFormats() {
+    std::vector<std::string> out;
+    const AVPixFmtDescriptor* current = nullptr;
+    while ((current = FFmpeg::av_pix_fmt_desc_next(current))) {
+        out.emplace_back(current->name);
+    }
+    return out;
+}
+
+std::vector<std::string> GetSampleFormats() {
+    std::vector<std::string> out;
+    for (int current = AV_SAMPLE_FMT_U8; current < AV_SAMPLE_FMT_NB; current++) {
+        out.emplace_back(FFmpeg::av_get_sample_fmt_name(static_cast<AVSampleFormat>(current)));
    }
    return out;
 }
 
 } // namespace VideoDumper
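The two new enumeration helpers give frontends a way to discover what the loaded FFmpeg build actually supports. A hypothetical caller (illustration only, not taken from this diff) might use them to fill a recording-options dialog; the widget names below are assumptions, not identifiers from this commit.

// Sketch: populate Qt combo boxes with the formats reported by FFmpeg.
// `pixel_format_combo` and `sample_format_combo` are assumed QComboBox pointers.
for (const std::string& name : VideoDumper::GetPixelFormats()) {
    pixel_format_combo->addItem(QString::fromStdString(name));
}
for (const std::string& name : VideoDumper::GetSampleFormats()) {
    sample_format_combo->addItem(QString::fromStdString(name));
}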
@@ -13,24 +13,15 @@
 #include <thread>
 #include <vector>
 #include "common/common_types.h"
+#include "common/dynamic_library/ffmpeg.h"
 #include "common/thread.h"
 #include "common/threadsafe_queue.h"
 #include "core/dumping/backend.h"
 
-extern "C" {
-#include <libavcodec/avcodec.h>
-#include <libavfilter/avfilter.h>
-#include <libavformat/avformat.h>
-#include <libavutil/opt.h>
-#include <libswresample/swresample.h>
-}
-
 namespace VideoDumper {
 
 using VariableAudioFrame = std::vector<s16>;
 
-void InitFFmpegLibraries();
-
 class FFmpegMuxer;
 
 /**
@@ -51,13 +42,13 @@ protected:
 
     struct AVCodecContextDeleter {
        void operator()(AVCodecContext* codec_context) const {
-            avcodec_free_context(&codec_context);
+            DynamicLibrary::FFmpeg::avcodec_free_context(&codec_context);
        }
    };
 
     struct AVFrameDeleter {
         void operator()(AVFrame* frame) const {
-            av_frame_free(&frame);
+            DynamicLibrary::FFmpeg::av_frame_free(&frame);
         }
     };
 
@@ -103,7 +94,7 @@ private:
     // Filter related
     struct AVFilterGraphDeleter {
         void operator()(AVFilterGraph* filter_graph) const {
-            avfilter_graph_free(&filter_graph);
+            DynamicLibrary::FFmpeg::avfilter_graph_free(&filter_graph);
         }
     };
     std::unique_ptr<AVFilterGraph, AVFilterGraphDeleter> filter_graph{};
@@ -132,7 +123,7 @@ public:
 private:
     struct SwrContextDeleter {
         void operator()(SwrContext* swr_context) const {
-            swr_free(&swr_context);
+            DynamicLibrary::FFmpeg::swr_free(&swr_context);
         }
     };
 
@@ -165,8 +156,8 @@ public:
 private:
     struct AVFormatContextDeleter {
         void operator()(AVFormatContext* format_context) const {
-            avio_closep(&format_context->pb);
-            avformat_free_context(format_context);
+            DynamicLibrary::FFmpeg::avio_closep(&format_context->pb);
+            DynamicLibrary::FFmpeg::avformat_free_context(format_context);
         }
     };
 
@@ -253,5 +244,7 @@ std::vector<EncoderInfo> ListEncoders(AVMediaType type);
 std::vector<OptionInfo> GetEncoderGenericOptions();
 std::vector<FormatInfo> ListFormats();
 std::vector<OptionInfo> GetFormatGenericOptions();
+std::vector<std::string> GetPixelFormats();
+std::vector<std::string> GetSampleFormats();
 
 } // namespace VideoDumper
@ -3,32 +3,37 @@
|
||||||
// Refer to the license.txt file included.
|
// Refer to the license.txt file included.
|
||||||
|
|
||||||
#include <glad/glad.h>
|
#include <glad/glad.h>
|
||||||
|
|
||||||
|
#include <utility>
|
||||||
#include "core/frontend/emu_window.h"
|
#include "core/frontend/emu_window.h"
|
||||||
#include "video_core/renderer_opengl/frame_dumper_opengl.h"
|
#include "video_core/renderer_opengl/frame_dumper_opengl.h"
|
||||||
#include "video_core/renderer_opengl/renderer_opengl.h"
|
#include "video_core/renderer_opengl/renderer_opengl.h"
|
||||||
|
|
||||||
namespace OpenGL {
|
namespace OpenGL {
|
||||||
|
|
||||||
FrameDumperOpenGL::FrameDumperOpenGL(VideoDumper::Backend& video_dumper_,
|
FrameDumperOpenGL::FrameDumperOpenGL(Core::System& system_, Frontend::EmuWindow& emu_window)
|
||||||
Frontend::EmuWindow& emu_window)
|
: system(system_), context(emu_window.CreateSharedContext()) {}
|
||||||
: video_dumper(video_dumper_), context(emu_window.CreateSharedContext()) {}
|
|
||||||
|
|
||||||
FrameDumperOpenGL::~FrameDumperOpenGL() {
|
FrameDumperOpenGL::~FrameDumperOpenGL() {
|
||||||
if (present_thread.joinable())
|
if (present_thread.joinable()) {
|
||||||
present_thread.join();
|
present_thread.join();
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
bool FrameDumperOpenGL::IsDumping() const {
|
bool FrameDumperOpenGL::IsDumping() const {
|
||||||
return video_dumper.IsDumping();
|
auto video_dumper = system.GetVideoDumper();
|
||||||
|
return video_dumper && video_dumper->IsDumping();
|
||||||
}
|
}
|
||||||
|
|
||||||
Layout::FramebufferLayout FrameDumperOpenGL::GetLayout() const {
|
Layout::FramebufferLayout FrameDumperOpenGL::GetLayout() const {
|
||||||
return video_dumper.GetLayout();
|
auto video_dumper = system.GetVideoDumper();
|
||||||
|
return video_dumper ? video_dumper->GetLayout() : Layout::FramebufferLayout{};
|
||||||
}
|
}
|
||||||
|
|
||||||
void FrameDumperOpenGL::StartDumping() {
|
void FrameDumperOpenGL::StartDumping() {
|
||||||
if (present_thread.joinable())
|
if (present_thread.joinable()) {
|
||||||
present_thread.join();
|
present_thread.join();
|
||||||
|
}
|
||||||
|
|
||||||
present_thread = std::thread(&FrameDumperOpenGL::PresentLoop, this);
|
present_thread = std::thread(&FrameDumperOpenGL::PresentLoop, this);
|
||||||
}
|
}
|
||||||
|
@@ -62,13 +67,17 @@ void FrameDumperOpenGL::PresentLoop() {
         frame->present_fence = glFenceSync(GL_SYNC_GPU_COMMANDS_COMPLETE, 0);
         glFlush();
 
-        // Bind the previous PBO and read the pixels
-        glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[next_pbo].handle);
-        GLubyte* pixels = static_cast<GLubyte*>(glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY));
-        VideoDumper::VideoFrame frame_data{layout.width, layout.height, pixels};
-        video_dumper.AddVideoFrame(std::move(frame_data));
-        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
-        glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+        auto video_dumper = system.GetVideoDumper();
+        if (video_dumper) {
+            // Bind the previous PBO and read the pixels
+            glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[next_pbo].handle);
+            GLubyte* pixels =
+                static_cast<GLubyte*>(glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY));
+            VideoDumper::VideoFrame frame_data{layout.width, layout.height, pixels};
+            video_dumper->AddVideoFrame(std::move(frame_data));
+            glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
+            glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);
+        }
 
         current_pbo = (current_pbo + 1) % 2;
         next_pbo = (current_pbo + 1) % 2;
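PresentLoop keeps the readback asynchronous by ping-ponging between two pixel-buffer objects: the frame just rendered is packed into one PBO while the PBO filled on the previous iteration is mapped and handed to the dumper. A self-contained sketch of that pattern, with GL context setup, buffer allocation, and the dumper hand-off stubbed out (names and parameters here are illustrative):

// Double-buffered PBO readback: start an async transfer into pbos[current_pbo]
// and map pbos[next_pbo], which finished transferring a frame ago, so mapping
// rarely stalls the GPU. Sketch only; allocation and error handling omitted.
#include <glad/glad.h>
#include <cstddef>

void ReadbackFrame(const GLuint pbos[2], std::size_t& current_pbo, std::size_t& next_pbo,
                   GLsizei width, GLsizei height) {
    // Queue an asynchronous copy of the framebuffer into the current PBO.
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[current_pbo]);
    glReadPixels(0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE, nullptr);

    // Map the PBO written last frame and consume its pixels.
    glBindBuffer(GL_PIXEL_PACK_BUFFER, pbos[next_pbo]);
    const auto* pixels =
        static_cast<const GLubyte*>(glMapBuffer(GL_PIXEL_PACK_BUFFER, GL_READ_ONLY));
    if (pixels) {
        // ... pass `pixels` (width * height * 4 bytes) to the consumer here ...
        glUnmapBuffer(GL_PIXEL_PACK_BUFFER);
    }
    glBindBuffer(GL_PIXEL_PACK_BUFFER, 0);

    // Swap roles for the next iteration, mirroring the index update above.
    current_pbo = (current_pbo + 1) % 2;
    next_pbo = (current_pbo + 1) % 2;
}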
@@ -7,6 +7,7 @@
 #include <atomic>
 #include <memory>
 #include <thread>
+#include "core/core.h"
 #include "core/dumping/backend.h"
 #include "core/frontend/framebuffer_layout.h"
 #include "video_core/renderer_opengl/gl_resource_manager.h"
@@ -28,7 +29,7 @@ class RendererOpenGL;
  */
 class FrameDumperOpenGL {
 public:
-    explicit FrameDumperOpenGL(VideoDumper::Backend& video_dumper, Frontend::EmuWindow& emu_window);
+    explicit FrameDumperOpenGL(Core::System& system, Frontend::EmuWindow& emu_window);
     ~FrameDumperOpenGL();
 
     bool IsDumping() const;
@@ -43,7 +44,7 @@ private:
     void CleanupOpenGLObjects();
     void PresentLoop();
 
-    VideoDumper::Backend& video_dumper;
+    Core::System& system;
    std::unique_ptr<Frontend::GraphicsContext> context;
     std::thread present_thread;
     std::atomic_bool stop_requested{false};
@@ -306,7 +306,7 @@ static std::array<GLfloat, 3 * 2> MakeOrthographicMatrix(const float width, cons
 RendererOpenGL::RendererOpenGL(Core::System& system, Frontend::EmuWindow& window,
                                Frontend::EmuWindow* secondary_window)
     : VideoCore::RendererBase{system, window, secondary_window}, driver{system.TelemetrySession()},
-      frame_dumper{system.VideoDumper(), window} {
+      frame_dumper{system, window} {
     const bool has_debug_tool = driver.HasDebugTool();
     window.mailbox = std::make_unique<OGLTextureMailbox>(has_debug_tool);
     if (secondary_window) {