This source file includes the following definitions.
- gf_audio_input_fetch_frame
- gf_audio_input_release_frame
- gf_audio_input_get_speed
- gf_audio_input_get_volume
- gf_audio_input_is_muted
- gf_audio_input_get_config
- gf_sc_audio_setup
- gf_sc_audio_predestroy
- gf_sc_audio_open
- gf_sc_audio_stop
- gf_sc_audio_restart
- gf_sc_audio_check_url
- gf_sc_audio_register
- gf_sc_audio_unregister
- gf_af_fetch_frame
- gf_af_release_frame
- gf_af_get_speed
- gf_af_get_channel_volume
- gf_af_is_muted
- gf_af_get_config
- gf_af_new
- gf_af_del
- gf_af_reset
#include <gpac/internal/compositor_dev.h>
#define ENABLE_EARLY_FRAME_DETECTION
#define MAX_RESYNC_TIME 1000
#define MIN_DRIFT_ADJUST 75
/*audio filter item: wraps a source audio interface and runs its frames through
an audio filter chain, exposing the filtered output as a new audio interface*/
struct __audiofilteritem
{
/*exposed (filtered) audio interface, registered with the mixer/renderer*/
GF_AudioInterface input;
/*unfiltered source audio interface*/
GF_AudioInterface *src;
/*output channel count and channel configuration after filtering*/
u32 out_chan, out_ch_cfg;
/*consumption state of the filtered buffer: bytes already handed out vs bytes produced*/
u32 nb_used, nb_filled;
GF_AudioFilterChain filter_chain;
};
/*creates an audio filter item chaining filter_name on top of src - returns NULL on failure*/
GF_AudioFilterItem *gf_af_new(GF_Compositor *compositor, GF_AudioInterface *src, char *filter_name);
/*destroys an audio filter item*/
void gf_af_del(GF_AudioFilterItem *af);
/*resets the filter chain state and the fill/consumption counters*/
void gf_af_reset(GF_AudioFilterItem *af);
/*fetches a decoded audio frame from the underlying media object and resynchronizes it
against the object clock.
callback: the GF_AudioInput
size: set to the number of bytes available in the returned frame (0 on NULL return)
audio_delay_ms: output latency of the audio renderer, added to the object time
returns the frame data, or NULL when no data is available or the frame was dropped*/
static char *gf_audio_input_fetch_frame(void *callback, u32 *size, u32 audio_delay_ms)
{
char *frame;
u32 obj_time, ts;
s32 drift;
Fixed speed;
Bool done;
GF_AudioInput *ai = (GF_AudioInput *) callback;
if (!ai->stream) return NULL;
/*remember EOS state to detect a transition below*/
done = ai->stream_finished;
frame = gf_mo_fetch_data(ai->stream, ai->compositor->audio_renderer->step_mode ? GF_MO_FETCH_PAUSED : GF_MO_FETCH, 0, &ai->stream_finished, &ts, size, NULL, NULL, NULL);
/*EOS state changed: request a scene redraw so the compositor can react*/
if (done != ai->stream_finished) {
gf_sc_invalidate(ai->compositor, NULL);
}
if (!frame) {
if (!ai->stream_finished) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUDIO, ("[Audio Input] No data in audio object\n"));
}
/*no data: clear any clock adjustment previously applied*/
gf_mo_adjust_clock(ai->stream, 0);
*size = 0;
return NULL;
}
ai->need_release = GF_TRUE;
/*step mode: no clock resync, deliver the frame as is*/
if (ai->compositor->audio_renderer->step_mode) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[Audio Input] audio frame CTS %u %d bytes fetched\n", ts, *size));
return frame;
}
speed = gf_mo_get_current_speed(ai->stream);
/*compare frame CTS against object time shifted by the renderer output delay*/
gf_mo_get_object_time(ai->stream, &obj_time);
obj_time += audio_delay_ms;
if (ai->compositor->bench_mode) {
drift = 0;
} else {
drift = (s32)obj_time;
drift -= (s32)ts;
}
#ifdef ENABLE_EARLY_FRAME_DETECTION
/*negative drift: frame is ahead of the clock - push it back and return nothing*/
if (drift < 0) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUDIO, ("[Audio Input] audio too early of %d (CTS %u at OTB %u with audio delay %d ms)\n", drift + audio_delay_ms, ts, obj_time, audio_delay_ms));
ai->need_release = GF_FALSE;
gf_mo_release_data(ai->stream, 0, -1);
*size = 0;
return NULL;
}
#endif
if (audio_delay_ms) {
/*scale the resync threshold by the playback speed (absolute value)*/
s32 resync_delay = speed > 0 ? FIX2INT(speed * MAX_RESYNC_TIME) : FIX2INT(-speed * MAX_RESYNC_TIME);
/*frame too late: drop it and recurse to fetch the next one*/
if (drift>resync_delay) {
GF_LOG(GF_LOG_INFO, GF_LOG_AUDIO, ("[Audio Input] Audio data too late obj time %d - CTS %d - drift %d ms - resync forced\n", obj_time - audio_delay_ms, ts, drift));
gf_mo_release_data(ai->stream, *size, 2);
ai->need_release = GF_FALSE;
return gf_audio_input_fetch_frame(callback, size, audio_delay_ms);
}
/*only adjust the clock when the drift change exceeds the adjustment threshold*/
resync_delay = gf_mo_get_clock_drift(ai->stream) - drift;
if (resync_delay < 0) resync_delay = -resync_delay;
if (resync_delay > MIN_DRIFT_ADJUST) {
GF_LOG(GF_LOG_DEBUG, GF_LOG_AUDIO, ("[Audio Input] Audio clock: delay %d - obj time %d - audio delay %d - CTS %d - adjust drift %d\n", audio_delay_ms, obj_time, audio_delay_ms, ts, drift));
gf_mo_adjust_clock(ai->stream, drift);
}
}
return frame;
}
/*releases nb_bytes from the currently fetched media-object frame and clears the pending-release flag*/
static void gf_audio_input_release_frame(void *callback, u32 nb_bytes)
{
	GF_AudioInput *snd_input = (GF_AudioInput *) callback;
	if (snd_input->stream) {
		gf_mo_release_data(snd_input->stream, nb_bytes, 1);
		snd_input->need_release = GF_FALSE;
	}
}
/*returns the current playback speed of the underlying media object*/
static Fixed gf_audio_input_get_speed(void *callback)
{
	GF_AudioInput *snd_input = (GF_AudioInput *) callback;
	return gf_mo_get_current_speed(snd_input->stream);
}
/*fills vol with per-channel volumes; returns GF_TRUE when volumes differ from unit gain*/
static Bool gf_audio_input_get_volume(void *callback, Fixed *vol)
{
	u32 i;
	GF_AudioInput *snd_input = (GF_AudioInput *) callback;
	/*defer to the sound node when it provides per-channel volumes*/
	if (snd_input->snd && snd_input->snd->GetChannelVolume)
		return snd_input->snd->GetChannelVolume(snd_input->snd->owner, vol);

	/*otherwise apply the same intensity to all 6 channel slots*/
	for (i=0; i<6; i++)
		vol[i] = snd_input->intensity;
	return (snd_input->intensity==FIX_ONE) ? GF_FALSE : GF_TRUE;
}
/*an input with no stream, explicitly muted, or whose media object is muted, is muted*/
static Bool gf_audio_input_is_muted(void *callback)
{
	GF_AudioInput *snd_input = (GF_AudioInput *) callback;
	if (!snd_input->stream || snd_input->is_muted)
		return GF_TRUE;
	return gf_mo_is_muted(snd_input->stream);
}
/*retrieves the audio configuration of the underlying media object.
for_recf: when GF_TRUE, validates the full configuration and updates the GF_MO_IS_INIT flag
returns GF_TRUE when a (valid) configuration is available*/
static Bool gf_audio_input_get_config(GF_AudioInterface *aifc, Bool for_recf)
{
	GF_AudioInput *ai = (GF_AudioInput *) aifc->callback;
	if (!ai->stream) return GF_FALSE;
	/*already configured and initialized: nothing to do*/
	if (aifc->samplerate && (gf_mo_get_flags(ai->stream) & GF_MO_IS_INIT)) return GF_TRUE;
	gf_mo_get_audio_info(ai->stream, &aifc->samplerate, &aifc->bps , &aifc->chan, &aifc->ch_cfg);

	if (!for_recf)
		return aifc->samplerate ? GF_TRUE : GF_FALSE;

	/*all fields must be set - use explicit tests rather than the previous product
	(samplerate*chan*bps), whose u32 multiplication could wrap to 0 for nonzero fields.
	Mono and stereo streams do not require an explicit channel configuration*/
	if (aifc->samplerate && aifc->chan && aifc->bps && ((aifc->chan<=2) || aifc->ch_cfg)) {
		gf_mo_set_flag(ai->stream, GF_MO_IS_INIT, GF_TRUE);
		return GF_TRUE;
	}
	gf_mo_set_flag(ai->stream, GF_MO_IS_INIT, GF_FALSE);
	return GF_FALSE;
}
GF_EXPORT
void gf_sc_audio_setup(GF_AudioInput *ai, GF_Compositor *compositor, GF_Node *node)
{
	/*initializes an audio input: clears all state and wires the audio interface callbacks*/
	memset(ai, 0, sizeof(GF_AudioInput));
	ai->owner = node;
	ai->compositor = compositor;
	ai->stream = NULL;

	ai->input_ifce.callback = ai;
	ai->input_ifce.FetchFrame = gf_audio_input_fetch_frame;
	ai->input_ifce.ReleaseFrame = gf_audio_input_release_frame;
	ai->input_ifce.GetConfig = gf_audio_input_get_config;
	ai->input_ifce.GetChannelVolume = gf_audio_input_get_volume;
	ai->input_ifce.GetSpeed = gf_audio_input_get_speed;
	ai->input_ifce.IsMuted = gf_audio_input_is_muted;

	/*unit gain, nominal playback speed*/
	ai->intensity = FIX_ONE;
	ai->speed = FIX_ONE;
}
/*destructor helper: stops playback, unregisters from mixer/parent and destroys any filter chain*/
void gf_sc_audio_predestroy(GF_AudioInput *ai)
{
	gf_sc_audio_stop(ai);
	gf_sc_audio_unregister(ai);
	if (ai->filter) {
		gf_af_del(ai->filter);
		/*gf_sc_audio_stop only clears the filter when the input was open - NULL it here
		too so no dangling pointer survives predestroy*/
		ai->filter = NULL;
	}
}
GF_EXPORT
GF_Err gf_sc_audio_open(GF_AudioInput *ai, MFURL *url, Double clipBegin, Double clipEnd, Bool lock_timeline)
{
	/*opens the audio input on the given URL set and starts playback.
	returns GF_BAD_PARAM if already open, GF_NOT_SUPPORTED if no media object can be attached*/
	u32 i;
	if (ai->is_open) return GF_BAD_PARAM;

	/*get media object for this URL*/
	ai->stream = gf_mo_register(ai->owner, url, lock_timeline, GF_FALSE);
	if (!ai->stream) return GF_NOT_SUPPORTED;

	/*request play on the stream*/
	gf_mo_play(ai->stream, clipBegin, clipEnd, GF_FALSE);
	ai->stream_finished = GF_FALSE;
	/*use GF_TRUE rather than literal 1, consistent with the other Bool flags in this file*/
	ai->is_open = GF_TRUE;
	gf_mo_set_flag(ai->stream, GF_MO_IS_INIT, GF_FALSE);

	/*drop any previous filter chain, then look for a "#filter=" fragment in the URL set*/
	if (ai->filter) gf_af_del(ai->filter);
	ai->filter = NULL;
	for (i=0; i<url->count; i++) {
		if (url->vals[i].url && !strnicmp(url->vals[i].url, "#filter=", 8)) {
			ai->filter = gf_af_new(ai->compositor, &ai->input_ifce, url->vals[i].url+8);
			if (ai->filter)
				break;
		}
	}
	return GF_OK;
}
GF_EXPORT
void gf_sc_audio_stop(GF_AudioInput *ai)
{
if (!ai->is_open) return;
/*lock the mixer so the audio thread cannot fetch from this input while it is torn down*/
gf_mixer_lock(ai->compositor->audio_renderer->mixer, GF_TRUE);
/*no frame shall be pending release at this point*/
assert(!ai->need_release);
gf_mo_stop(ai->stream);
ai->is_open = 0;
gf_mo_unregister(ai->owner, ai->stream);
ai->stream = NULL;
/*destroy any associated filter chain*/
if (ai->filter) gf_af_del(ai->filter);
ai->filter = NULL;
gf_mixer_lock(ai->compositor->audio_renderer->mixer, GF_FALSE);
}
GF_EXPORT
void gf_sc_audio_restart(GF_AudioInput *ai)
{
	/*restarts playback of an open audio input, flushing any pending frame and filter state*/
	if (!ai->is_open) return;
	if (ai->need_release) {
		/*drop the frame currently held from the media object*/
		gf_mo_release_data(ai->stream, 0xFFFFFFFF, 2);
	}
	ai->need_release = GF_FALSE;
	ai->stream_finished = GF_FALSE;
	if (ai->filter) gf_af_reset(ai->filter);
	gf_mo_restart(ai->stream);
}
GF_EXPORT
Bool gf_sc_audio_check_url(GF_AudioInput *ai, MFURL *url)
{
	/*returns GF_TRUE if the given URL set differs from the currently attached stream.
	With no stream attached, any non-empty URL set counts as a change - normalize to a
	proper Bool instead of returning the raw count*/
	if (!ai->stream) return url->count ? GF_TRUE : GF_FALSE;
	return gf_mo_url_changed(ai->stream, url);
}
GF_EXPORT
void gf_sc_audio_register(GF_AudioInput *ai, GF_TraverseState *tr_state)
{
GF_AudioInterface *aifce;
/*refuse registration until every interface callback is wired*/
if (!ai->input_ifce.FetchFrame
|| !ai->input_ifce.GetChannelVolume
|| !ai->input_ifce.GetConfig
|| !ai->input_ifce.GetSpeed
|| !ai->input_ifce.IsMuted
|| !ai->input_ifce.ReleaseFrame
) return;
/*when a filter chain is set, register its output interface rather than the raw input*/
aifce = &ai->input_ifce;
if (ai->filter) aifce = &ai->filter->input;
if (tr_state->audio_parent) {
/*parent registration case: move from renderer to the audio parent if needed*/
if (ai->register_with_parent) return;
if (ai->register_with_renderer) {
gf_sc_ar_remove_src(ai->compositor->audio_renderer, aifce);
ai->register_with_renderer = GF_FALSE;
}
tr_state->audio_parent->add_source(tr_state->audio_parent, ai);
ai->register_with_parent = GF_TRUE;
ai->snd = tr_state->sound_holder;
} else if (!ai->register_with_renderer) {
/*renderer registration case: leave the parent (redraw so it updates) and attach to the mixer*/
if (ai->register_with_parent) {
ai->register_with_parent = GF_FALSE;
gf_sc_invalidate(ai->compositor, NULL);
}
gf_sc_ar_add_src(ai->compositor->audio_renderer, aifce);
ai->register_with_renderer = GF_TRUE;
ai->snd = tr_state->sound_holder;
}
}
GF_EXPORT
void gf_sc_audio_unregister(GF_AudioInput *ai)
{
	/*the registered interface is the filter output when a filter chain is set*/
	GF_AudioInterface *aifce = ai->filter ? &ai->filter->input : &ai->input_ifce;

	if (!ai->register_with_renderer) {
		/*not attached to the mixer: force a scene redraw instead*/
		gf_sc_invalidate(ai->compositor, NULL);
		return;
	}
	ai->register_with_renderer = GF_FALSE;
	gf_sc_ar_remove_src(ai->compositor->audio_renderer, aifce);
}
/*fetches filtered audio data: pulls source frames through the filter chain until the
output buffer has data, then hands out the unconsumed part of that buffer.
Returns a pointer into the filter-chain buffer, or NULL when the source has no data*/
static char *gf_af_fetch_frame(void *callback, u32 *size, u32 audio_delay_ms)
{
GF_AudioFilterItem *af = (GF_AudioFilterItem *)callback;
*size = 0;
/*only refill once the previous filtered buffer is fully consumed*/
if (!af->nb_used) {
while (!af->nb_filled) {
u32 nb_bytes;
/*account for the filter chain's own latency in the delay passed to the source*/
char *data = af->src->FetchFrame(af->src->callback, &nb_bytes, audio_delay_ms + af->filter_chain.delay_ms);
if (!data || !nb_bytes)
return NULL;
/*feed the chain at most one minimal processing block at a time*/
if (nb_bytes > af->filter_chain.min_block_size) nb_bytes = af->filter_chain.min_block_size;
memcpy(af->filter_chain.tmp_block1, data, nb_bytes);
/*release only what was copied; the source keeps the rest for the next fetch*/
af->src->ReleaseFrame(af->src->callback, nb_bytes);
/*the chain may buffer internally and produce 0 bytes - loop until output is available*/
af->nb_filled = gf_afc_process(&af->filter_chain, nb_bytes);
}
}
*size = af->nb_filled - af->nb_used;
return af->filter_chain.tmp_block1 + af->nb_used;
}
/*marks nb_bytes of the filtered buffer as consumed, recycling the buffer once fully read*/
static void gf_af_release_frame(void *callback, u32 nb_bytes)
{
	GF_AudioFilterItem *item = (GF_AudioFilterItem *)callback;
	item->nb_used += nb_bytes;
	/*entire buffer consumed: reset fill state for the next fetch*/
	if (item->nb_used == item->nb_filled)
		item->nb_used = item->nb_filled = 0;
}
/*forwards the speed query to the source interface*/
static Fixed gf_af_get_speed(void *callback)
{
	GF_AudioFilterItem *item = (GF_AudioFilterItem *)callback;
	return item->src->GetSpeed(item->src->callback);
}
/*forwards the channel-volume query to the source interface*/
static Bool gf_af_get_channel_volume(void *callback, Fixed *vol)
{
	GF_AudioFilterItem *item = (GF_AudioFilterItem *)callback;
	return item->src->GetChannelVolume(item->src->callback, vol);
}
/*forwards the mute query to the source interface*/
static Bool gf_af_is_muted(void *callback)
{
	GF_AudioFilterItem *item = (GF_AudioFilterItem *)callback;
	return item->src->IsMuted(item->src->callback);
}
/*queries the source configuration and, on reconfiguration, (re)configures the filter chain.
The chain setup may remap channel count / channel configuration, which is written back
into the exposed interface. Returns GF_FALSE on source or chain-setup failure*/
static Bool gf_af_get_config(GF_AudioInterface *ai, Bool for_reconf)
{
GF_AudioFilterItem *af = (GF_AudioFilterItem *)ai->callback;
Bool res = af->src->GetConfig(af->src, for_reconf);
if (!res) return GF_FALSE;
if (!for_reconf) return GF_TRUE;
/*mirror the source format, then let the chain override the output channel layout*/
af->input.bps = af->src->bps;
af->input.samplerate = af->src->samplerate;
af->input.ch_cfg = af->src->ch_cfg;
af->input.chan = af->src->chan;
if (gf_afc_setup(&af->filter_chain, af->input.bps, af->input.samplerate, af->src->chan, af->src->ch_cfg, &af->input.chan, &af->input.ch_cfg)!=GF_OK) {
GF_LOG(GF_LOG_ERROR, GF_LOG_AUDIO, ("[Audio Input] Failed to configure audio filter chain\n"));
return GF_FALSE;
}
return GF_TRUE;
}
/*creates an audio filter item chaining filter_name on top of src - returns NULL on failure*/
GF_AudioFilterItem *gf_af_new(GF_Compositor *compositor, GF_AudioInterface *src, char *filter_name)
{
	GF_AudioFilterItem *item;
	if (!src || !filter_name) return NULL;

	GF_SAFEALLOC(item, GF_AudioFilterItem);
	if (!item) return NULL;

	item->src = src;
	/*expose the filtered stream through our own audio interface*/
	item->input.callback = item;
	item->input.FetchFrame = gf_af_fetch_frame;
	item->input.ReleaseFrame = gf_af_release_frame;
	item->input.GetSpeed = gf_af_get_speed;
	item->input.GetChannelVolume = gf_af_get_channel_volume;
	item->input.IsMuted = gf_af_is_muted;
	item->input.GetConfig = gf_af_get_config;

	gf_afc_load(&item->filter_chain, compositor->user, filter_name);
	return item;
}
/*destroys an audio filter item - NULL-safe, like free()*/
void gf_af_del(GF_AudioFilterItem *af)
{
	if (!af) return;
	gf_afc_unload(&af->filter_chain);
	gf_free(af);
}
/*resets the filter chain state and clears the buffer fill/consumption counters*/
void gf_af_reset(GF_AudioFilterItem *af)
{
	gf_afc_reset(&af->filter_chain);
	af->nb_used = 0;
	af->nb_filled = 0;
}