feat/cef audio v2 #1590

Draft · wants to merge 7 commits into master

2 changes: 2 additions & 0 deletions src/modules/ffmpeg/CMakeLists.txt
@@ -5,6 +5,7 @@ set(SOURCES
producer/av_producer.cpp
producer/av_input.cpp
util/av_util.cpp
util/audio_resampler.cpp
producer/ffmpeg_producer.cpp
consumer/ffmpeg_consumer.cpp

@@ -15,6 +16,7 @@ set(HEADERS
producer/av_producer.h
producer/av_input.h
util/av_util.h
util/audio_resampler.h
producer/ffmpeg_producer.h
consumer/ffmpeg_consumer.h

41 changes: 41 additions & 0 deletions src/modules/ffmpeg/util/audio_resampler.cpp
@@ -0,0 +1,41 @@
#include "audio_resampler.h"
#include "av_assert.h"

extern "C" {
#include <libavutil/samplefmt.h>
#include <libswresample/swresample.h>
}

namespace caspar::ffmpeg {

AudioResampler::AudioResampler(int sample_rate, AVSampleFormat in_sample_fmt)
{
AVChannelLayout channel_layout = AV_CHANNEL_LAYOUT_7POINT1;
AVChannelLayout channel_layout_out = AV_CHANNEL_LAYOUT_HEXADECAGONAL;

SwrContext* raw_ctx = nullptr;
FF(swr_alloc_set_opts2(&raw_ctx,
&channel_layout_out,
AV_SAMPLE_FMT_S32,
sample_rate,
&channel_layout,
in_sample_fmt,
sample_rate,
0,
nullptr));

ctx = std::shared_ptr<SwrContext>(raw_ctx, [](SwrContext* ptr) { swr_free(&ptr); });

FF_RET(swr_init(ctx.get()), "swr_init");
}

caspar::array<int32_t> AudioResampler::convert(int frames, const void** src)
{
auto result = caspar::array<int32_t>(frames * 16 * sizeof(int32_t));
auto ptr = result.data();
swr_convert(ctx.get(), (uint8_t**)&ptr, frames, reinterpret_cast<const uint8_t**>(src), frames);

return result;
}

}; // namespace caspar::ffmpeg
27 changes: 27 additions & 0 deletions src/modules/ffmpeg/util/audio_resampler.h
@@ -0,0 +1,27 @@
#pragma once

#include <common/array.h>
#include <memory>

extern "C" {
#include <libavutil/samplefmt.h>
}

struct SwrContext;

namespace caspar::ffmpeg {

class AudioResampler
{
std::shared_ptr<SwrContext> ctx;

public:
AudioResampler(int sample_rate, AVSampleFormat in_sample_fmt);

AudioResampler(const AudioResampler&) = delete;
AudioResampler& operator=(const AudioResampler&) = delete;

caspar::array<int32_t> convert(int frames, const void** src);
};

}; // namespace caspar::ffmpeg
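
A minimal usage sketch of the new class (not part of the diff), assuming the 7.1 planar-float input that html_producer.cpp below feeds it; the helper name to_packed_s32 is hypothetical, and in the producer the resampler is a long-lived member rather than a local:

#include <ffmpeg/util/audio_resampler.h>

// `data` holds 8 planar float channel pointers (CEF_CHANNEL_LAYOUT_7_1) with
// `samples` samples per channel; convert() returns the same number of frames
// as 16-channel packed 32-bit PCM, matching the AV_CHANNEL_LAYOUT_HEXADECAGONAL
// / AV_SAMPLE_FMT_S32 output configured in the constructor above.
caspar::array<int32_t> to_packed_s32(int sample_rate, const float** data, int samples)
{
    caspar::ffmpeg::AudioResampler resampler(sample_rate, AV_SAMPLE_FMT_FLTP);
    return resampler.convert(samples, reinterpret_cast<const void**>(data));
}
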
4 changes: 3 additions & 1 deletion src/modules/html/CMakeLists.txt
@@ -25,7 +25,9 @@ target_include_directories(html PRIVATE
..
../..
${CEF_INCLUDE_PATH}
)
${FFMPEG_INCLUDE_PATH}
)
target_link_libraries(html ffmpeg)

set_target_properties(html PROPERTIES FOLDER modules)
source_group(sources\\producer producer/*)
154 changes: 126 additions & 28 deletions src/modules/html/producer/html_producer.cpp
@@ -60,13 +60,53 @@
#include <queue>
#include <utility>

#include <ffmpeg/util/audio_resampler.h>

#include "../html.h"

namespace caspar { namespace html {

inline std::int_least64_t now()
{
return std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now().time_since_epoch())
.count();
}

struct presentation_frame
{
std::int_least64_t timestamp;
core::draw_frame frame;

explicit presentation_frame(core::draw_frame frame = {}, std::int_least64_t ts = now()) noexcept
: timestamp(ts)
, frame(std::move(frame))
{
}

presentation_frame(presentation_frame&& other) noexcept
: timestamp(other.timestamp)
, frame(std::move(other.frame))
{
}

presentation_frame(const presentation_frame&) = delete;
presentation_frame& operator=(const presentation_frame&) = delete;

presentation_frame& operator=(presentation_frame&& rhs) noexcept
{
timestamp = rhs.timestamp;
frame = std::move(rhs.frame);
return *this;
}

~presentation_frame() {}
};

class html_client
: public CefClient
, public CefRenderHandler
, public CefAudioHandler
, public CefLifeSpanHandler
, public CefLoadHandler
, public CefDisplayHandler
@@ -80,16 +120,21 @@ class html_client
caspar::timer paint_timer_;
caspar::timer test_timer_;

spl::shared_ptr<core::frame_factory> frame_factory_;
core::video_format_desc format_desc_;
bool gpu_enabled_;
tbb::concurrent_queue<std::wstring> javascript_before_load_;
std::atomic<bool> loaded_;
std::queue<std::pair<std::int_least64_t, core::draw_frame>> frames_;
mutable std::mutex frames_mutex_;
const size_t frames_max_size_ = 4;
std::atomic<bool> closing_;

spl::shared_ptr<core::frame_factory> frame_factory_;
core::video_format_desc format_desc_;
bool gpu_enabled_;
tbb::concurrent_queue<std::wstring> javascript_before_load_;
std::atomic<bool> loaded_;
std::queue<presentation_frame> frames_;
std::queue<presentation_frame> audio_frames_;
mutable std::mutex frames_mutex_;
mutable std::mutex audio_frames_mutex_;
const size_t frames_max_size_ = 4;
std::atomic<bool> closing_;

std::unique_ptr<ffmpeg::AudioResampler> audioResampler_;

core::draw_frame last_video_frame_;
core::draw_frame last_frame_;
std::int_least64_t last_frame_time_;

@@ -146,8 +191,21 @@ class html_client

bool try_pop(const core::video_field field)
{
bool result = false;
std::lock_guard<std::mutex> lock(frames_mutex_);

core::draw_frame audio_frame;
uint64_t audio_frame_timestamp = 0;

{
std::lock_guard<std::mutex> audio_lock(audio_frames_mutex_);
if (!audio_frames_.empty()) {
audio_frame_timestamp = audio_frames_.front().timestamp;
audio_frame = core::draw_frame(std::move(audio_frames_.front().frame));
audio_frames_.pop();
}
}

if (!frames_.empty()) {
/*
* CEF in gpu-enabled mode only sends frames when something changes, and interlaced channels
@@ -167,35 +225,43 @@

// Check if the sole buffered frame is too young to have a partner field generated (with a tolerance)
auto time_per_frame = (1000 * 1.5) / format_desc_.fps;
auto front_frame_is_too_young = (now_time - frames_.front().first) < time_per_frame;
auto front_frame_is_too_young = (now_time - frames_.front().timestamp) < time_per_frame;

if (follows_gap_in_frames && front_frame_is_too_young) {
return false;
}
}

last_frame_time_ = frames_.front().first;
last_frame_ = std::move(frames_.front().second);
last_frame_time_ = frames_.front().timestamp;
last_video_frame_ = std::move(frames_.front().frame);
last_frame_ = last_video_frame_;
frames_.pop();

graph_->set_value("buffered-frames", (double)frames_.size() / frames_max_size_);

return true;
result = true;
}

return false;
if (audio_frame) {
last_frame_time_ = audio_frame_timestamp;
last_frame_ = core::draw_frame::over(last_video_frame_, audio_frame);
result = true;
}

return result;
}

core::draw_frame receive(const core::video_field field)
{
if (!try_pop(field)) {
graph_->set_tag(diagnostics::tag_severity::SILENT, "late-frame");
return core::draw_frame::still(last_frame_);
} else {
return last_frame_;
}

return last_frame_;
}

core::draw_frame last_frame() const { return last_frame_; }
core::draw_frame last_frame() const { return core::draw_frame::still(last_frame_); }

bool is_ready() const
{
@@ -245,13 +311,6 @@
}

private:
std::int_least64_t now()
{
return std::chrono::duration_cast<std::chrono::milliseconds>(
std::chrono::high_resolution_clock::now().time_since_epoch())
.count();
}

void GetViewRect(CefRefPtr<CefBrowser> browser, CefRect& rect) override
{
CASPAR_ASSERT(CefCurrentlyOn(TID_UI));
@@ -302,8 +361,10 @@
{
std::lock_guard<std::mutex> lock(frames_mutex_);

frames_.push(std::make_pair(now(), core::draw_frame(std::move(frame))));
while (frames_.size() > 4) {
core::draw_frame new_frame = core::draw_frame(std::move(frame));

frames_.push(presentation_frame(std::move(new_frame)));
while (frames_.size() > frames_max_size_) {
frames_.pop();
graph_->set_tag(diagnostics::tag_severity::WARNING, "dropped-frame");
}
@@ -353,6 +414,8 @@

CefRefPtr<CefRenderHandler> GetRenderHandler() override { return this; }

CefRefPtr<CefAudioHandler> GetAudioHandler() override { return this; }

CefRefPtr<CefLifeSpanHandler> GetLifeSpanHandler() override { return this; }

CefRefPtr<CefLoadHandler> GetLoadHandler() override { return this; }
@@ -378,7 +441,7 @@

{
std::lock_guard<std::mutex> lock(frames_mutex_);
frames_.push(std::make_pair(now(), core::draw_frame::empty()));
frames_.push(presentation_frame());
}

{
@@ -399,6 +462,41 @@
return false;
}

bool GetAudioParameters(CefRefPtr<CefBrowser> browser, CefAudioParameters& params) override
{
params.channel_layout = CEF_CHANNEL_LAYOUT_7_1;
params.sample_rate = format_desc_.audio_sample_rate;
params.frames_per_buffer = format_desc_.audio_cadence[0];
return format_desc_.audio_cadence.size() == 1; // TODO - handle 59.94
}
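// Note on the TODO above: for 59.94 fps formats a 48 kHz channel has no single
// frames-per-buffer value, since 48000 * 1001 / 60000 = 800.8 samples per frame
// and audio_cadence cycles through 801/800 entries. Returning false here
// presumably cancels CEF audio capture for those formats until that case is
// handled.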

void OnAudioStreamStarted(CefRefPtr<CefBrowser> browser, const CefAudioParameters& params, int channels) override
{
audioResampler_ = std::make_unique<ffmpeg::AudioResampler>(params.sample_rate, AV_SAMPLE_FMT_FLTP);
}
void OnAudioStreamPacket(CefRefPtr<CefBrowser> browser, const float** data, int samples, int64_t pts) override
{
if (!audioResampler_)
return;

auto audio = audioResampler_->convert(samples, reinterpret_cast<const void**>(data));
auto audio_frame = core::mutable_frame(this, {}, std::move(audio), core::pixel_format_desc());

{
std::lock_guard<std::mutex> lock(audio_frames_mutex_);
while (audio_frames_.size() >= frames_max_size_) {
audio_frames_.pop();
}
audio_frames_.push(presentation_frame(core::draw_frame(std::move(audio_frame))));
}
}
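// The mutable_frame built above has no image planes (empty pixel_format_desc)
// and only carries the resampled audio; try_pop() layers it over
// last_video_frame_ via draw_frame::over, so the channel keeps presenting the
// most recent CEF paint while new audio arrives.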
void OnAudioStreamStopped(CefRefPtr<CefBrowser> browser) override { audioResampler_ = nullptr; }
void OnAudioStreamError(CefRefPtr<CefBrowser> browser, const CefString& message) override
{
CASPAR_LOG(info) << "[html_producer] OnAudioStreamError: \"" << message.ToString() << "\"";
audioResampler_ = nullptr;
}

void do_execute_javascript(const std::wstring& javascript)
{
html::begin_invoke([=] {