* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
#include <stdio.h>
#include <stdlib.h>
-#include <pulse/pulseaudio.h>
-#include <pulse/glib-mainloop.h>
+#include <gst/interfaces/mixer.h>
#include <libempathy/empathy-utils.h>
#include <libempathy-gtk/empathy-call-utils.h>
#include "empathy-audio-src.h"
-#include "src-marshal.h"
+#include "empathy-mic-monitor.h"
#define DEBUG_FLAG EMPATHY_DEBUG_VOIP
#include <libempathy/empathy-debug.h>
{
PEAK_LEVEL_CHANGED,
RMS_LEVEL_CHANGED,
- MICROPHONE_ADDED,
- MICROPHONE_REMOVED,
LAST_SIGNAL
};
enum {
PROP_VOLUME = 1,
+ PROP_MUTE,
PROP_RMS_LEVEL,
PROP_PEAK_LEVEL,
PROP_MICROPHONE,
};
/* private structure */
-typedef struct _EmpathyGstAudioSrcPrivate EmpathyGstAudioSrcPrivate;
-
struct _EmpathyGstAudioSrcPrivate
{
gboolean dispose_has_run;
GstElement *src;
- GstElement *volume;
GstElement *level;
- pa_glib_mainloop *loop;
- pa_context *context;
- GQueue *operations;
+ EmpathyMicMonitor *mic_monitor;
/* 0 if not known yet */
guint source_output_idx;
gdouble peak_level;
gdouble rms_level;
+ gdouble volume;
+ gboolean mute;
+ /* the mixer track on src we follow and adjust */
+ GstMixerTrack *track;
+
GMutex *lock;
- guint idle_id;
+ guint level_idle_id;
+ guint volume_idle_id;
};
#define EMPATHY_GST_AUDIO_SRC_GET_PRIVATE(o) \
(G_TYPE_INSTANCE_GET_PRIVATE ((o), EMPATHY_TYPE_GST_AUDIO_SRC, \
EmpathyGstAudioSrcPrivate))
-static gboolean
-empathy_audio_src_supports_changing_mic (EmpathyGstAudioSrc *self)
+/* GStreamer does not define a maximum number of channels, so just pick 32,
+ * which is the same as the PulseAudio maximum */
+#define MAX_MIC_CHANNELS 32
+
+static void
+empathy_audio_set_hw_mute (EmpathyGstAudioSrc *self, gboolean mute)
{
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
- GObjectClass *object_class;
+ g_mutex_lock (self->priv->lock);
+ /* If there is no mixer available ignore the setting */
+ if (self->priv->track == NULL)
+ goto out;
- object_class = G_OBJECT_GET_CLASS (priv->src);
+ gst_mixer_set_mute (GST_MIXER (self->priv->src), self->priv->track, mute);
- return (g_object_class_find_property (object_class,
- "source-output-index") != NULL);
+out:
+ g_mutex_unlock (self->priv->lock);
+ self->priv->mute = mute;
}
-typedef void (*OperationFunc) (EmpathyGstAudioSrc *, GSimpleAsyncResult *);
-
-typedef struct
+static gboolean
+empathy_audio_src_get_hw_mute (EmpathyGstAudioSrc *self)
{
- OperationFunc func;
- GSimpleAsyncResult *result;
-} Operation;
+ gboolean result = self->priv->mute;
-static Operation *
-operation_new (OperationFunc func,
- GSimpleAsyncResult *result)
-{
- Operation *o = g_slice_new0 (Operation);
+ g_mutex_lock (self->priv->lock);
+ if (self->priv->track == NULL)
+ goto out;
- o->func = func;
- o->result = result;
+ result = GST_MIXER_TRACK_HAS_FLAG (self->priv->track, GST_MIXER_TRACK_MUTE);
+out:
+ g_mutex_unlock (self->priv->lock);
- return o;
+ return result;
}
static void
-operation_free (Operation *o,
- gboolean cancelled)
+empathy_audio_src_set_hw_volume (EmpathyGstAudioSrc *self,
+ gdouble volume)
{
- if (cancelled)
- {
- g_simple_async_result_set_error (o->result,
- G_IO_ERROR, G_IO_ERROR_CANCELLED,
- "The audio source was disposed");
- g_simple_async_result_complete (o->result);
- g_object_unref (o->result);
- }
+ gint volumes[MAX_MIC_CHANNELS];
+ int i;
- g_slice_free (Operation, o);
-}
+ g_mutex_lock (self->priv->lock);
+ /* If there is no mixer available ignore the setting */
+ if (self->priv->track == NULL)
+ goto out;
-static void
-operation_get_microphones_free (gpointer data)
-{
- GQueue *queue = data;
- GList *l;
+ for (i = 0; i < MAX_MIC_CHANNELS; i++)
+ volumes[i] = self->priv->track->max_volume * volume;
- for (l = queue->head; l != NULL; l = l->next)
- {
- EmpathyAudioSrcMicrophone *mic = l->data;
+ gst_mixer_set_volume (GST_MIXER (self->priv->src),
+ self->priv->track, volumes);
- g_free (mic->name);
- g_free (mic->description);
- g_slice_free (EmpathyAudioSrcMicrophone, mic);
- }
+out:
+ g_mutex_unlock (self->priv->lock);
- g_queue_free (queue);
+ self->priv->volume = volume;
}
-static void
-operation_get_microphones_cb (pa_context *context,
- const pa_source_info *info,
- int eol,
- void *userdata)
+static gdouble
+empathy_audio_src_get_hw_volume (EmpathyGstAudioSrc *self)
{
- GSimpleAsyncResult *result = userdata;
- EmpathyAudioSrcMicrophone *mic;
- GQueue *queue;
+ gint volumes[MAX_MIC_CHANNELS];
+ gdouble result = self->priv->volume;
- if (eol)
- {
- g_simple_async_result_complete (result);
- g_object_unref (result);
- return;
- }
+ g_mutex_lock (self->priv->lock);
+ if (self->priv->track == NULL)
+ goto out;
+
+ gst_mixer_get_volume (GST_MIXER (self->priv->src),
+ self->priv->track, volumes);
+ result = volumes[0]/(gdouble)self->priv->track->max_volume;
- mic = g_slice_new0 (EmpathyAudioSrcMicrophone);
- mic->index = info->index;
- mic->name = g_strdup (info->name);
- mic->description = g_strdup (info->description);
- mic->is_monitor = (info->monitor_of_sink != PA_INVALID_INDEX);
+out:
+ g_mutex_unlock (self->priv->lock);
- /* add it to the queue */
- queue = g_simple_async_result_get_op_res_gpointer (result);
- g_queue_push_tail (queue, mic);
+ return result;
}
-static void
-operation_get_microphones (EmpathyGstAudioSrc *self,
- GSimpleAsyncResult *result)
+
+gboolean
+empathy_audio_src_supports_changing_mic (EmpathyGstAudioSrc *self)
{
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
+ GObjectClass *object_class;
- g_assert_cmpuint (pa_context_get_state (priv->context), ==, PA_CONTEXT_READY);
-
- g_simple_async_result_set_op_res_gpointer (result, g_queue_new (),
- operation_get_microphones_free);
+ object_class = G_OBJECT_GET_CLASS (priv->src);
- pa_context_get_source_info_list (priv->context,
- operation_get_microphones_cb, result);
+ return (g_object_class_find_property (object_class,
+ "source-output-index") != NULL);
}
-static void
-operation_change_microphone_cb (pa_context *context,
- int success,
- void *userdata)
+static guint
+empathy_audio_src_get_mic_index (EmpathyGstAudioSrc *self)
{
- GSimpleAsyncResult *result = userdata;
+ EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
+ guint audio_src_idx = PA_INVALID_INDEX;
- if (!success)
- {
- g_simple_async_result_set_error (result, G_IO_ERROR, G_IO_ERROR_FAILED,
- "Failed to change microphone. Reason unknown.");
- }
+ if (empathy_audio_src_supports_changing_mic (self))
+ g_object_get (priv->src,
+ "source-output-index", &audio_src_idx,
+ NULL);
- g_simple_async_result_complete (result);
- g_object_unref (result);
+ return audio_src_idx;
}
static void
-operation_change_microphone (EmpathyGstAudioSrc *self,
- GSimpleAsyncResult *result)
+empathy_audio_src_microphone_changed_cb (EmpathyMicMonitor *monitor,
+ guint source_output_idx,
+ guint source_idx,
+ gpointer user_data)
{
+ EmpathyGstAudioSrc *self = user_data;
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
- guint source_output_idx, microphone;
+ guint audio_src_idx;
- g_object_get (priv->src, "source-output-index", &source_output_idx, NULL);
+ audio_src_idx = empathy_audio_src_get_mic_index (self);
- g_assert_cmpuint (pa_context_get_state (priv->context), ==, PA_CONTEXT_READY);
- g_assert_cmpuint (source_output_idx, !=, PA_INVALID_INDEX);
+ if (source_output_idx == PA_INVALID_INDEX
+ || source_output_idx != audio_src_idx)
+ return;
- microphone = GPOINTER_TO_UINT (
- g_simple_async_result_get_op_res_gpointer (result));
+ if (priv->source_idx == source_idx)
+ return;
- pa_context_move_source_output_by_index (priv->context, source_output_idx, microphone,
- operation_change_microphone_cb, result);
+ priv->source_idx = source_idx;
+ g_object_notify (G_OBJECT (self), "microphone");
}
static void
-operations_run (EmpathyGstAudioSrc *self)
+empathy_audio_src_get_current_mic_cb (GObject *source_object,
+ GAsyncResult *result,
+ gpointer user_data)
{
+ EmpathyMicMonitor *monitor = EMPATHY_MIC_MONITOR (source_object);
+ EmpathyGstAudioSrc *self = user_data;
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
- pa_context_state_t state = pa_context_get_state (priv->context);
- GList *l;
+ guint source_idx;
+ GError *error = NULL;
- if (state != PA_CONTEXT_READY)
- return;
+ source_idx = empathy_mic_monitor_get_current_mic_finish (monitor, result, &error);
- for (l = priv->operations->head; l != NULL; l = l->next)
+ if (error != NULL)
{
- Operation *o = l->data;
-
- o->func (self, o->result);
-
- operation_free (o, FALSE);
+ DEBUG ("Failed to get current mic: %s", error->message);
+ g_clear_error (&error);
+ return;
}
- g_queue_clear (priv->operations);
+ if (priv->source_idx == source_idx)
+ return;
+
+ priv->source_idx = source_idx;
+ g_object_notify (G_OBJECT (self), "microphone");
}
static void
-empathy_audio_src_source_output_info_cb (pa_context *context,
- const pa_source_output_info *info,
- int eol,
- void *userdata)
+empathy_audio_src_source_output_index_notify (GObject *object,
+ GParamSpec *pspec,
+ EmpathyGstAudioSrc *self)
{
- EmpathyGstAudioSrc *self = userdata;
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
+ guint source_output_idx;
- if (eol)
- return;
-
- /* There should only be one call here. */
+ source_output_idx = empathy_audio_src_get_mic_index (self);
- if (priv->source_idx == info->source)
+ if (source_output_idx == PA_INVALID_INDEX)
return;
- priv->source_idx = info->source;
- g_object_notify (G_OBJECT (self), "microphone");
-}
-
-static void
-empathy_audio_src_source_info_cb (pa_context *context,
- const pa_source_info *info,
- int eol,
- void *userdata)
-{
- EmpathyGstAudioSrc *self = userdata;
- gboolean is_monitor;
-
- if (eol)
+ if (priv->source_output_idx == source_output_idx)
return;
- is_monitor = (info->monitor_of_sink != PA_INVALID_INDEX);
+ /* It's actually changed. */
+ priv->source_output_idx = source_output_idx;
- g_signal_emit (self, signals[MICROPHONE_ADDED], 0,
- info->index, info->name, info->description, is_monitor);
+ empathy_mic_monitor_get_current_mic_async (priv->mic_monitor,
+ source_output_idx, empathy_audio_src_get_current_mic_cb, self);
}
-static void
-empathy_audio_src_pa_event_cb (pa_context *context,
- pa_subscription_event_type_t type,
- uint32_t idx,
- void *userdata)
+static GstMixerTrack *
+empathy_audio_src_get_track (GstElement *src)
{
- EmpathyGstAudioSrc *self = userdata;
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
+ const GList *t;
+ GstMixerTrack *track = NULL;
- if ((type & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SOURCE_OUTPUT
- && (type & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE
- && idx == priv->source_output_idx)
+ if (!gst_element_implements_interface (src, GST_TYPE_MIXER))
{
- /* Microphone in the source output has changed */
- pa_context_get_source_output_info (context, idx,
- empathy_audio_src_source_output_info_cb, self);
+ g_warning ("No mixer interface implementation, can't control volume");
+ return NULL;
}
- else if ((type & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SOURCE
- && (type & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_REMOVE)
+
+ for (t = gst_mixer_list_tracks (GST_MIXER (src));
+ t != NULL; t = g_list_next (t))
{
- /* A mic has been removed */
- g_signal_emit (self, signals[MICROPHONE_REMOVED], 0, idx);
+ GstMixerTrack *tr = t->data;
+ if (!tp_strdiff (tr->label, "Master"))
+ {
+ track = tr;
+ break;
+ }
}
- else if ((type & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SOURCE
- && (type & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_NEW)
+
+ if (track == NULL)
{
- /* A mic has been plugged in */
- pa_context_get_source_info_by_index (context, idx,
- empathy_audio_src_source_info_cb, self);
+ g_warning ("No suitable track found");
}
-}
-
-static void
-empathy_audio_src_pa_subscribe_cb (pa_context *context,
- int success,
- void *userdata)
-{
- if (!success)
- DEBUG ("Failed to subscribe to PulseAudio events");
-}
-
-static void
-empathy_audio_src_pa_state_change_cb (pa_context *context,
- void *userdata)
-{
- EmpathyGstAudioSrc *self = userdata;
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
- pa_context_state_t state = pa_context_get_state (priv->context);
-
- if (state == PA_CONTEXT_READY)
+ else if (track->num_channels > MAX_MIC_CHANNELS)
{
- /* Listen to pulseaudio events so we know when sources are
- * added and when the microphone is changed. */
- pa_context_set_subscribe_callback (priv->context,
- empathy_audio_src_pa_event_cb, self);
- pa_context_subscribe (priv->context,
- PA_SUBSCRIPTION_MASK_SOURCE | PA_SUBSCRIPTION_MASK_SOURCE_OUTPUT,
- empathy_audio_src_pa_subscribe_cb, NULL);
-
- operations_run (self);
+ g_warning ("Microphones with more then %d channels not supported ",
+ MAX_MIC_CHANNELS);
+ track = NULL;
}
-}
-
-static void
-empathy_audio_src_source_output_index_notify (GObject *object,
- GParamSpec *pspec,
- EmpathyGstAudioSrc *self)
-{
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
- guint source_output_idx = PA_INVALID_INDEX;
-
- g_object_get (priv->src, "source-output-index", &source_output_idx, NULL);
-
- if (source_output_idx == PA_INVALID_INDEX)
- return;
-
- if (priv->source_output_idx == source_output_idx)
- return;
-
- /* It's actually changed. */
- priv->source_output_idx = source_output_idx;
- pa_context_get_source_output_info (priv->context, source_output_idx,
- empathy_audio_src_source_output_info_cb, self);
+ return track;
}
static GstElement *
if (src == NULL)
return NULL;
- empathy_call_set_stream_properties (src);
+ empathy_call_set_stream_properties (src, TRUE);
+
+ /* Set latency (buffering on the PulseAudio side) of 20ms */
+ g_object_set (src, "buffer-time", (gint64) 20000, NULL);
return src;
}
{
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (obj);
GstPad *ghost, *src;
+ GstElement *capsfilter;
+ GstCaps *caps;
+ obj->priv = priv;
priv->peak_level = -G_MAXDOUBLE;
priv->lock = g_mutex_new ();
+ priv->volume = 1.0;
priv->src = create_src ();
if (priv->src == NULL)
gst_bin_add (GST_BIN (obj), priv->src);
- priv->volume = gst_element_factory_make ("volume", NULL);
- g_object_ref (priv->volume);
-
- gst_bin_add (GST_BIN (obj), priv->volume);
- gst_element_link (priv->src, priv->volume);
+ /* Explicitly state what format we want from pulsesrc. This pushes resampling
+ * and format conversion as early as possible, lowering the amount of data
+ * transferred and thus improving performance. When moving to GStreamer
+ * 0.11/1.0, this should change so that we actually request what the encoder
+ * wants downstream. */
+ caps = gst_caps_new_simple ("audio/x-raw-int",
+ "channels", G_TYPE_INT, 1,
+ "width", G_TYPE_INT, 16,
+ "depth", G_TYPE_INT, 16,
+ "rate", G_TYPE_INT, 32000,
+ NULL);
+ capsfilter = gst_element_factory_make ("capsfilter", NULL);
+ g_object_set (G_OBJECT (capsfilter), "caps", caps, NULL);
+ gst_bin_add (GST_BIN (obj), capsfilter);
+ gst_element_link (priv->src, capsfilter);
priv->level = gst_element_factory_make ("level", NULL);
gst_bin_add (GST_BIN (obj), priv->level);
- gst_element_link (priv->volume, priv->level);
+ gst_element_link (capsfilter, priv->level);
src = gst_element_get_static_pad (priv->level, "src");
gst_object_unref (G_OBJECT (src));
- /* PulseAudio stuff: We need to create a dummy pa_glib_mainloop* so
- * Pulse can use the mainloop that GTK has created for us. */
- priv->loop = pa_glib_mainloop_new (NULL);
- priv->context = pa_context_new (pa_glib_mainloop_get_api (priv->loop),
- "EmpathyAudioSrc");
-
/* Listen to changes to GstPulseSrc:source-output-index so we know when
* it's no longer PA_INVALID_INDEX (starting for the first time) or if it
* changes (READY->NULL->READY...) */
G_CALLBACK (empathy_audio_src_source_output_index_notify),
obj);
- /* Finally listen for state changes so we know when we've
- * connected. */
- pa_context_set_state_callback (priv->context,
- empathy_audio_src_pa_state_change_cb, obj);
- pa_context_connect (priv->context, NULL, 0, NULL);
+ priv->mic_monitor = empathy_mic_monitor_new ();
+ g_signal_connect (priv->mic_monitor, "microphone-changed",
+ G_CALLBACK (empathy_audio_src_microphone_changed_cb), obj);
- priv->operations = g_queue_new ();
+ priv->source_idx = PA_INVALID_INDEX;
}
static void empathy_audio_src_dispose (GObject *object);
switch (property_id)
{
case PROP_VOLUME:
- empathy_audio_src_set_volume (EMPATHY_GST_AUDIO_SRC (object),
+ empathy_audio_src_set_hw_volume (EMPATHY_GST_AUDIO_SRC (object),
g_value_get_double (value));
break;
+ case PROP_MUTE:
+ empathy_audio_set_hw_mute (EMPATHY_GST_AUDIO_SRC (object),
+ g_value_get_boolean (value));
+ break;
default:
G_OBJECT_WARN_INVALID_PROPERTY_ID (object, property_id, pspec);
}
switch (property_id)
{
case PROP_VOLUME:
- g_value_set_double (value,
- empathy_audio_src_get_volume (self));
+ g_value_set_double (value, priv->volume);
+ break;
+ case PROP_MUTE:
+ g_value_set_boolean (value, priv->mute);
break;
case PROP_PEAK_LEVEL:
g_mutex_lock (priv->lock);
G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);
g_object_class_install_property (object_class, PROP_VOLUME, param_spec);
+ param_spec = g_param_spec_boolean ("mute", "Mute", "mute contol",
+ FALSE,
+ G_PARAM_READWRITE | G_PARAM_STATIC_STRINGS);
+ g_object_class_install_property (object_class, PROP_MUTE, param_spec);
+
param_spec = g_param_spec_double ("peak-level", "peak level", "peak level",
-G_MAXDOUBLE, G_MAXDOUBLE, 0,
G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
G_SIGNAL_RUN_LAST,
0,
NULL, NULL,
- g_cclosure_marshal_VOID__DOUBLE,
+ g_cclosure_marshal_generic,
G_TYPE_NONE, 1, G_TYPE_DOUBLE);
param_spec = g_param_spec_double ("rms-level", "RMS level", "RMS level",
G_PARAM_READABLE | G_PARAM_STATIC_STRINGS);
g_object_class_install_property (object_class, PROP_RMS_LEVEL, param_spec);
-
signals[RMS_LEVEL_CHANGED] = g_signal_new ("rms-level-changed",
G_TYPE_FROM_CLASS (empathy_audio_src_class),
G_SIGNAL_RUN_LAST,
0,
NULL, NULL,
- g_cclosure_marshal_VOID__DOUBLE,
+ g_cclosure_marshal_generic,
G_TYPE_NONE, 1, G_TYPE_DOUBLE);
-
- signals[MICROPHONE_ADDED] = g_signal_new ("microphone-added",
- G_TYPE_FROM_CLASS (empathy_audio_src_class),
- G_SIGNAL_RUN_LAST,
- 0,
- NULL, NULL,
- _src_marshal_VOID__UINT_STRING_STRING_BOOLEAN,
- G_TYPE_NONE, 4, G_TYPE_UINT, G_TYPE_STRING, G_TYPE_STRING, G_TYPE_BOOLEAN);
-
- signals[MICROPHONE_REMOVED] = g_signal_new ("microphone-removed",
- G_TYPE_FROM_CLASS (empathy_audio_src_class),
- G_SIGNAL_RUN_LAST,
- 0,
- NULL, NULL,
- g_cclosure_marshal_VOID__UINT,
- G_TYPE_NONE, 1, G_TYPE_UINT);
}
void
priv->dispose_has_run = TRUE;
- if (priv->idle_id != 0)
- g_source_remove (priv->idle_id);
-
- priv->idle_id = 0;
+ if (priv->level_idle_id != 0)
+ g_source_remove (priv->level_idle_id);
+ priv->level_idle_id = 0;
- if (priv->context != NULL)
- pa_context_unref (priv->context);
- priv->context = NULL;
+ if (priv->volume_idle_id != 0)
+ g_source_remove (priv->volume_idle_id);
+ priv->volume_idle_id = 0;
- if (priv->loop != NULL)
- pa_glib_mainloop_free (priv->loop);
- priv->loop = NULL;
+ tp_clear_object (&priv->mic_monitor);
/* release any references held by the object here */
/* free any data held directly by the object here */
g_mutex_free (priv->lock);
- g_queue_foreach (priv->operations, (GFunc) operation_free,
- GUINT_TO_POINTER (TRUE));
- g_queue_free (priv->operations);
-
G_OBJECT_CLASS (empathy_audio_src_parent_class)->finalize (object);
}
g_signal_emit (self, signals[PEAK_LEVEL_CHANGED], 0, priv->peak_level);
g_signal_emit (self, signals[RMS_LEVEL_CHANGED], 0, priv->rms_level);
- priv->idle_id = 0;
+ priv->level_idle_id = 0;
g_mutex_unlock (priv->lock);
return FALSE;
}
+static gboolean
+empathy_audio_src_volume_changed (gpointer user_data)
+{
+ EmpathyGstAudioSrc *self = EMPATHY_GST_AUDIO_SRC (user_data);
+ EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
+ gdouble volume;
+ gboolean mute;
+
+ g_mutex_lock (priv->lock);
+ priv->volume_idle_id = 0;
+ g_mutex_unlock (priv->lock);
+
+ volume = empathy_audio_src_get_hw_volume (self);
+
+ if (volume != priv->volume)
+ {
+ priv->volume = volume;
+ g_object_notify (G_OBJECT (self), "volume");
+ }
+
+ mute = empathy_audio_src_get_hw_mute (self);
+ if (mute != priv->mute)
+ {
+ priv->mute = mute;
+ g_object_notify (G_OBJECT (self), "mute");
+ }
+
+ return FALSE;
+}
+
static void
empathy_audio_src_handle_message (GstBin *bin, GstMessage *message)
{
EmpathyGstAudioSrc *self = EMPATHY_GST_AUDIO_SRC (bin);
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (self);
- if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ELEMENT &&
- GST_MESSAGE_SRC (message) == GST_OBJECT (priv->level))
+ if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ELEMENT &&
+ GST_MESSAGE_SRC (message) == GST_OBJECT (priv->level))
{
const GstStructure *s;
const gchar *name;
priv->peak_level = peak;
priv->rms_level = rms;
- if (priv->idle_id == 0)
- priv->idle_id = g_idle_add (empathy_audio_src_levels_updated, self);
+ if (priv->level_idle_id == 0)
+ priv->level_idle_id = g_idle_add (
+ empathy_audio_src_levels_updated, self);
g_mutex_unlock (priv->lock);
}
+ else if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_ELEMENT &&
+ GST_MESSAGE_SRC (message) == GST_OBJECT (priv->src))
+ {
+ GstMixerTrack *track = NULL;
+
+ /* Listen for mute or volume changes on the src element */
+ if (gst_mixer_message_get_type (message) ==
+ GST_MIXER_MESSAGE_VOLUME_CHANGED)
+ gst_mixer_message_parse_volume_changed (message, &track,
+ NULL, NULL);
+
+ if (gst_mixer_message_get_type (message) ==
+ GST_MIXER_MESSAGE_MUTE_TOGGLED)
+ gst_mixer_message_parse_mute_toggled (message, &track, NULL);
+
+ g_mutex_lock (priv->lock);
+
+ if (track != NULL && track == priv->track && priv->volume_idle_id == 0)
+ priv->volume_idle_id = g_idle_add (
+ empathy_audio_src_volume_changed, self);
+
+ g_mutex_unlock (priv->lock);
+ }
+ else if (GST_MESSAGE_TYPE (message) == GST_MESSAGE_STATE_CHANGED &&
+ GST_MESSAGE_SRC (message) == GST_OBJECT (priv->src))
+ {
+ GstState old, new;
+
+ gst_message_parse_state_changed (message, &old, &new, NULL);
+
+ /* GstMixer is only available in state >= READY, so only start
+ * controlling the source element when going to ready state and stop
+ * doing so when going below ready. Furthermore, once we have a mixer,
+ * read the current volume level from it and drop the settings made by
+ * Empathy. We want to pick up the level PulseAudio saved */
+ if (old == GST_STATE_NULL && new == GST_STATE_READY)
+ {
+ g_mutex_lock (priv->lock);
+ priv->track = empathy_audio_src_get_track (priv->src);
+ if (priv->track != NULL)
+ priv->volume_idle_id = g_idle_add (
+ empathy_audio_src_volume_changed, self);
+ g_mutex_unlock (priv->lock);
+ }
+ else if (old == GST_STATE_READY && new == GST_STATE_NULL)
+ {
+ g_mutex_lock (priv->lock);
+ priv->track = NULL;
+ g_mutex_unlock (priv->lock);
+ }
+ }
out:
GST_BIN_CLASS (empathy_audio_src_parent_class)->handle_message (bin,
}
void
-empathy_audio_src_set_volume (EmpathyGstAudioSrc *src, gdouble volume)
+empathy_audio_src_set_echo_cancel (EmpathyGstAudioSrc *src,
+ gboolean enable)
{
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (src);
- GParamSpec *pspec;
- GParamSpecDouble *pspec_double;
-
- pspec = g_object_class_find_property (G_OBJECT_GET_CLASS (priv->volume),
- "volume");
-
- g_assert (pspec != NULL);
-
- pspec_double = G_PARAM_SPEC_DOUBLE (pspec);
-
- volume = CLAMP (volume, pspec_double->minimum, pspec_double->maximum);
+ DEBUG ("Src echo cancellation setting: %s", enable ? "on" : "off");
+ empathy_call_set_stream_properties (src->priv->src, enable);
+}
- g_object_set (G_OBJECT (priv->volume), "volume", volume, NULL);
+void
+empathy_audio_src_set_volume (EmpathyGstAudioSrc *src, gdouble volume)
+{
+ g_object_set (src, "volume", volume, NULL);
}
gdouble
empathy_audio_src_get_volume (EmpathyGstAudioSrc *src)
{
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (src);
- gdouble volume;
-
- g_object_get (G_OBJECT (priv->volume), "volume", &volume, NULL);
-
- return volume;
+ return src->priv->volume;
}
-void
-empathy_audio_src_get_microphones_async (EmpathyGstAudioSrc *src,
- GAsyncReadyCallback callback,
- gpointer user_data)
+guint
+empathy_audio_src_get_microphone (EmpathyGstAudioSrc *src)
{
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (src);
- Operation *operation;
- GSimpleAsyncResult *simple;
-
- simple = g_simple_async_result_new (G_OBJECT (src), callback, user_data,
- empathy_audio_src_get_microphones_async);
-
- /* If we can't change mic let's not pretend we can by returning the
- * list of available mics. */
- if (!empathy_audio_src_supports_changing_mic (src))
- {
- g_simple_async_result_set_error (simple, G_IO_ERROR, G_IO_ERROR_FAILED,
- "pulsesrc is not new enough to support changing microphone");
- g_simple_async_result_complete_in_idle (simple);
- g_object_unref (simple);
- return;
- }
- operation = operation_new (operation_get_microphones, simple);
- g_queue_push_tail (priv->operations, operation);
-
- /* gogogogo */
- operations_run (src);
+ return priv->source_idx;
}
-const GList *
-empathy_audio_src_get_microphones_finish (EmpathyGstAudioSrc *src,
+static void
+empathy_audio_src_change_microphone_cb (GObject *source_object,
GAsyncResult *result,
- GError **error)
+ gpointer user_data)
{
- GSimpleAsyncResult *simple = G_SIMPLE_ASYNC_RESULT (result);
- GQueue *queue;
-
- if (g_simple_async_result_propagate_error (simple, error))
- return NULL;
+ EmpathyMicMonitor *monitor = EMPATHY_MIC_MONITOR (source_object);
+ GSimpleAsyncResult *simple = user_data;
+ GError *error = NULL;
- g_return_val_if_fail (g_simple_async_result_is_valid (result,
- G_OBJECT (src), empathy_audio_src_get_microphones_async),
- NULL);
-
- queue = g_simple_async_result_get_op_res_gpointer (simple);
- return queue->head;
-}
-
-guint
-empathy_audio_src_get_microphone (EmpathyGstAudioSrc *src)
-{
- EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (src);
+ if (!empathy_mic_monitor_change_microphone_finish (monitor,
+ result, &error))
+ {
+ g_simple_async_result_take_error (simple, error);
+ }
- return priv->source_idx;
+ g_simple_async_result_complete (simple);
+ g_object_unref (simple);
}
void
EmpathyGstAudioSrcPrivate *priv = EMPATHY_GST_AUDIO_SRC_GET_PRIVATE (src);
guint source_output_idx;
GSimpleAsyncResult *simple;
- Operation *operation;
simple = g_simple_async_result_new (G_OBJECT (src), callback, user_data,
empathy_audio_src_change_microphone_async);
return;
}
- g_object_get (priv->src, "source-output-index", &source_output_idx, NULL);
+ source_output_idx = empathy_audio_src_get_mic_index (src);
if (source_output_idx == PA_INVALID_INDEX)
{
return;
}
- g_simple_async_result_set_op_res_gpointer (simple,
- GUINT_TO_POINTER (microphone), NULL);
-
- operation = operation_new (operation_change_microphone, simple);
- g_queue_push_tail (priv->operations, operation);
-
- /* gogogogo */
- operations_run (src);
+ empathy_mic_monitor_change_microphone_async (priv->mic_monitor,
+ source_output_idx, microphone, empathy_audio_src_change_microphone_cb,
+ simple);
}
gboolean