* Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
#include <stdio.h>
#include <stdlib.h>
#include <libempathy/empathy-utils.h>
#include <libempathy-gtk/empathy-avatar-image.h>
+#include <libempathy-gtk/empathy-dialpad-widget.h>
#include <libempathy-gtk/empathy-ui-utils.h>
#include <libempathy-gtk/empathy-sound-manager.h>
#include <libempathy-gtk/empathy-geometry.h>
GtkWidget *audio_call_button;
GtkWidget *video_call_button;
GtkWidget *mic_button;
+ GtkWidget *volume_button;
GtkWidget *camera_button;
GtkWidget *dialpad_button;
GtkWidget *toolbar;
gulong video_output_motion_handler_id;
guint bus_message_source_id;
- gdouble volume;
-
/* String that contains the queued tones to send after the current ones
are sent */
GString *tones;
GstElement *video_output_sink;
GstElement *audio_input;
GstElement *audio_output;
+ gboolean audio_output_added;
GstElement *pipeline;
GstElement *video_tee;
static void empathy_call_window_set_send_video (EmpathyCallWindow *window,
CameraState state);
-static void empathy_call_window_mic_toggled_cb (
- GtkToggleToolButton *toggle, EmpathyCallWindow *window);
-
static void empathy_call_window_hangup_cb (gpointer object,
EmpathyCallWindow *window);
static gboolean empathy_call_window_bus_message (GstBus *bus,
GstMessage *message, gpointer user_data);
-static void
-empathy_call_window_volume_changed_cb (GtkScaleButton *button,
- gdouble value, EmpathyCallWindow *window);
-
static void
empathy_call_window_show_hangup_button (EmpathyCallWindow *self,
gboolean show)
}
+/* "start-tone" handler for the EmpathyDialpadWidget: queue the DTMF tone
+ * for @event and emit it once any currently-playing tones have finished. */
static void
-dtmf_button_pressed_cb (GtkButton *button,
+dtmf_start_tone_cb (EmpathyDialpadWidget *dialpad,
+    TpDTMFEvent event,
    EmpathyCallWindow *self)
{
  EmpathyCallWindowPriv *priv = GET_PRIV (self);
-  GQuark button_quark;
-  TpDTMFEvent event;
-
-  button_quark = g_quark_from_static_string (EMPATHY_DTMF_BUTTON_ID);
-  event = GPOINTER_TO_UINT (g_object_get_qdata (G_OBJECT (button),
-      button_quark));
+  /* Append to the queue of pending tones; emission is serialized so tones
+   * are sent one after another. */
  g_string_append_c (priv->tones, tp_dtmf_event_to_char (event));
  empathy_call_window_maybe_emit_tones (self);
}
-/* empathy_create_dtmf_dialpad() requires a callback, even if empty */
-static void
-dtmf_button_released_cb (GtkButton *button,
- EmpathyCallWindow *self)
-{
-}
-
-static void
-empathy_call_window_mic_volume_changed (EmpathyCallWindow *self)
-{
- EmpathyCallWindowPriv *priv = GET_PRIV (self);
- gdouble volume;
-
- volume = g_settings_get_double (priv->settings,
- EMPATHY_PREFS_CALL_SOUND_VOLUME) / 100.0;
-
- /* Don't store the volume because of muting */
- if (volume > 0 || gtk_toggle_tool_button_get_active (
- GTK_TOGGLE_TOOL_BUTTON (priv->mic_button)))
- priv->volume = volume;
-
- /* Ensure that the toggle button is active if the volume is > 0 and inactive
- * if it's smaller than 0 */
- if ((volume > 0) != gtk_toggle_tool_button_get_active (
- GTK_TOGGLE_TOOL_BUTTON (priv->mic_button)))
- gtk_toggle_tool_button_set_active (
- GTK_TOGGLE_TOOL_BUTTON (priv->mic_button), volume > 0);
-
- empathy_audio_src_set_volume (EMPATHY_GST_AUDIO_SRC (priv->audio_input),
- volume);
-}
-
-static void
-empathy_call_window_prefs_volume_changed_cb (GSettings *settings,
- gchar *key,
- EmpathyCallWindow *self)
-{
- empathy_call_window_mic_volume_changed (self);
-}
-
static void
empathy_call_window_raise_actors (EmpathyCallWindow *self)
{
clutter_texture_set_keep_aspect_ratio (CLUTTER_TEXTURE (priv->video_output),
TRUE);
- priv->video_output_sink = clutter_gst_video_sink_new (
- CLUTTER_TEXTURE (priv->video_output));
+ priv->video_output_sink = gst_element_factory_make ("cluttersink", NULL);
+ if (priv->video_output_sink == NULL)
+ g_error ("Missing cluttersink");
+ else
+ g_object_set (priv->video_output_sink, "texture", priv->video_output, NULL);
clutter_container_add_actor (CLUTTER_CONTAINER (priv->video_box),
priv->video_output);
gst_object_sink (priv->video_input);
}
+/* GBinding transform func: convert a Telepathy AudioControl volume
+ * (gint, 0-255, with -1 meaning "unknown") into a GStreamer element
+ * volume (gdouble, 0.0-1.0).  Returns FALSE to skip the binding update
+ * when the source volume is unknown. */
+static gboolean
+audio_control_volume_to_element (GBinding *binding,
+    const GValue *source_value,
+    GValue *target_value,
+    gpointer user_data)
+{
+  /* AudioControl volume is 0-255, with -1 for unknown */
+  gint hv;
+
+  hv = g_value_get_int (source_value);
+  if (hv < 0)
+    return FALSE;
+
+  hv = MIN (hv, 255);
+  g_value_set_double (target_value, hv/255.0);
+
+  return TRUE;
+}
+
+/* GBinding transform func: convert a GStreamer element volume (gdouble,
+ * clamped here to 0.0-1.0) into a Telepathy AudioControl volume
+ * (gint, 0-255).  Always succeeds. */
+static gboolean
+element_volume_to_audio_control (GBinding *binding,
+    const GValue *source_value,
+    GValue *target_value,
+    gpointer user_data)
+{
+  gdouble ev;
+
+  ev = g_value_get_double (source_value);
+  ev = CLAMP (ev, 0.0, 1.0);
+
+  g_value_set_int (target_value, ev * 255);
+  return TRUE;
+}
+
static void
create_audio_input (EmpathyCallWindow *self)
{
priv->audio_input = empathy_audio_src_new ();
gst_object_ref (priv->audio_input);
gst_object_sink (priv->audio_input);
+
+ g_object_bind_property (priv->mic_button, "active",
+ priv->audio_input, "mute",
+ G_BINDING_BIDIRECTIONAL |
+ G_BINDING_INVERT_BOOLEAN | G_BINDING_SYNC_CREATE);
}
static void
preview = empathy_rounded_texture_new ();
clutter_actor_set_size (preview,
SELF_VIDEO_SECTION_WIDTH, SELF_VIDEO_SECTION_HEIGHT);
- priv->video_preview_sink = clutter_gst_video_sink_new (
- CLUTTER_TEXTURE (preview));
+
+ priv->video_preview_sink = gst_element_factory_make ("cluttersink", NULL);
+ if (priv->video_preview_sink == NULL)
+ g_error ("Missing cluttersink");
+ else
+ g_object_set (priv->video_preview_sink, "texture", preview, NULL);
/* Add a little offset to the video preview */
layout = clutter_bin_layout_new (CLUTTER_BIN_ALIGNMENT_CENTER,
g_object_set (priv->video_preview_sink,
"sync", FALSE,
- "async", TRUE,
+ "async", FALSE,
NULL);
/* Translators: this is an "Info" label. It should be as short
preview = priv->video_preview_sink;
gst_element_set_state (preview, state);
- gst_element_set_state (priv->video_input, state);
gst_element_set_state (priv->video_tee, state);
+ gst_element_set_state (priv->video_input, state);
}
static void
{
EmpathyCallWindowPriv *priv = GET_PRIV (self);
+ if (priv->video_preview == NULL)
+ {
+ create_video_preview (self);
+ add_video_preview_to_pipeline (self);
+ }
+
if (display)
{
/* Display the video preview */
DEBUG ("Disable camera");
- display_video_preview (self, FALSE);
-
- if (priv->camera_state == CAMERA_STATE_ON)
- empathy_call_window_set_send_video (self, CAMERA_STATE_OFF);
+ empathy_call_window_set_send_video (self, CAMERA_STATE_OFF);
priv->camera_state = CAMERA_STATE_OFF;
}
"audiocall", &priv->audio_call_button,
"videocall", &priv->video_call_button,
"microphone", &priv->mic_button,
+ "volume", &priv->volume_button,
"camera", &priv->camera_button,
"hangup", &priv->hangup_button,
"dialpad", &priv->dialpad_button,
"hangup", "clicked", empathy_call_window_hangup_cb,
"audiocall", "clicked", empathy_call_window_audio_call_cb,
"videocall", "clicked", empathy_call_window_video_call_cb,
- "volume", "value-changed", empathy_call_window_volume_changed_cb,
- "microphone", "toggled", empathy_call_window_mic_toggled_cb,
"camera", "toggled", empathy_call_window_camera_toggled_cb,
"dialpad", "toggled", empathy_call_window_dialpad_cb,
"menufullscreen", "activate", empathy_call_window_fullscreen_cb,
gtk_container_add (GTK_CONTAINER (self), top_vbox);
- priv->content_hbox = gtk_hbox_new (FALSE, CONTENT_HBOX_SPACING);
+ priv->content_hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL,
+ CONTENT_HBOX_SPACING);
gtk_container_set_border_width (GTK_CONTAINER (priv->content_hbox),
CONTENT_HBOX_BORDER_WIDTH);
gtk_box_pack_start (GTK_BOX (priv->pane), priv->content_hbox,
priv->video_container = gtk_clutter_embed_new ();
+ gtk_widget_set_size_request (priv->video_container,
+ EMPATHY_VIDEO_WIDGET_DEFAULT_WIDTH, EMPATHY_VIDEO_WIDGET_DEFAULT_HEIGHT);
+
/* Set the background color to that of the rest of the window */
context = gtk_widget_get_style_context (priv->content_hbox);
gtk_style_context_get_background_color (context,
/* The call will be started as soon the pipeline is playing */
priv->start_call_when_playing = TRUE;
- priv->dtmf_panel = empathy_create_dtmf_dialpad (G_OBJECT (self),
- G_CALLBACK (dtmf_button_pressed_cb),
- G_CALLBACK (dtmf_button_released_cb));
+ priv->dtmf_panel = empathy_dialpad_widget_new ();
+ g_signal_connect (priv->dtmf_panel, "start-tone",
+ G_CALLBACK (dtmf_start_tone_cb), self);
priv->tones = g_string_new ("");
empathy_call_window_show_hangup_button (self, TRUE);
- /* Retrieve initial volume */
- priv->volume = g_settings_get_double (priv->settings,
- EMPATHY_PREFS_CALL_SOUND_VOLUME) / 100.0;
-
- g_signal_connect (priv->settings, "changed::"EMPATHY_PREFS_CALL_SOUND_VOLUME,
- G_CALLBACK (empathy_call_window_prefs_volume_changed_cb), self);
-
empathy_geometry_bind (GTK_WINDOW (self), "call-window");
/* These signals are used to track the window position and save it
* when the window is destroyed. We need to do this as we don't want
if (priv->audio_output != NULL)
g_object_unref (priv->audio_output);
priv->audio_output = NULL;
+ priv->audio_output_added = FALSE;
if (priv->video_tee != NULL)
g_object_unref (priv->video_tee);
}
+/* Returns TRUE when @content belongs to a raw (unpacketized) Farstream
+ * conference, detected by the "fsrawconf" prefix of the conference
+ * element's object name.  Used to decide whether echo cancellation and
+ * other audio processing should be disabled. */
static gboolean
-empathy_call_window_sink_removed_cb (EmpathyCallHandler *handler,
-    GstPad *sink,
-    FsMediaType media_type,
+empathy_call_window_content_is_raw (TfContent *content)
+{
+  FsConference *conference;
+  gboolean israw;
+
+  g_object_get (content, "fs-conference", &conference, NULL);
+  g_assert (conference != NULL);
+
+  /* FIXME: Ugly hack, update when moving a packetization property into
+   * farstream */
+  israw = g_str_has_prefix (GST_OBJECT_NAME (conference), "fsrawconf");
+  gst_object_unref (conference);
+
+  return israw;
+}
+
+static gboolean
+empathy_call_window_content_removed_cb (EmpathyCallHandler *handler,
+ TfContent *content,
EmpathyCallWindow *self)
{
EmpathyCallWindowPriv *priv = GET_PRIV (self);
+ FsMediaType media_type;
DEBUG ("removing content");
+ g_object_get (content, "media-type", &media_type, NULL);
+
/*
* This assumes that there is only one video stream per channel...
*/
gst_bin_remove (GST_BIN (priv->pipeline), output);
gst_bin_remove (GST_BIN (priv->pipeline), priv->funnel);
priv->funnel = NULL;
- return TRUE;
}
}
else if (media_type == FS_MEDIA_TYPE_AUDIO)
{
gst_element_set_state (priv->audio_output, GST_STATE_NULL);
- gst_bin_remove (GST_BIN (priv->pipeline), priv->audio_output);
+ if (priv->audio_output_added)
+ gst_bin_remove (GST_BIN (priv->pipeline), priv->audio_output);
priv->audio_output = NULL;
- return TRUE;
+ priv->audio_output_added = FALSE;
}
}
+ else
+ {
+ g_assert_not_reached ();
+ }
- return FALSE;
+ return TRUE;
+}
+
+/* Handler for EmpathyCallHandler's "framerate-changed" signal: forward
+ * the negotiated framerate to the local video source, if one exists. */
+static void
+empathy_call_window_framerate_changed_cb (EmpathyCallHandler *handler,
+    guint framerate,
+    EmpathyCallWindow *self)
+{
+  EmpathyCallWindowPriv *priv = GET_PRIV (self);
+
+  DEBUG ("Framerate changed to %u", framerate);
+
+  if (priv->video_input != NULL)
+    empathy_video_src_set_framerate (priv->video_input, framerate);
+}
+
+/* Handler for EmpathyCallHandler's "resolution-changed" signal: forward
+ * the negotiated width/height to the local video source, if one exists. */
+static void
+empathy_call_window_resolution_changed_cb (EmpathyCallHandler *handler,
+    guint width,
+    guint height,
+    EmpathyCallWindow *self)
+{
+  EmpathyCallWindowPriv *priv = GET_PRIV (self);
+
+  DEBUG ("Resolution changed to %ux%u", width, height);
+
+  if (priv->video_input != NULL)
+    {
+      empathy_video_src_set_resolution (priv->video_input, width, height);
+    }
}
/* Called with global lock held */
/* Called with global lock held */
static GstPad *
-empathy_call_window_get_audio_sink_pad (EmpathyCallWindow *self)
+empathy_call_window_get_audio_sink_pad (EmpathyCallWindow *self,
+ TfContent *content)
{
EmpathyCallWindowPriv *priv = GET_PRIV (self);
GstPad *pad;
GstPadTemplate *template;
- if (priv->audio_output == NULL)
+ if (!priv->audio_output_added)
{
- priv->audio_output = empathy_audio_sink_new ();
- g_object_ref_sink (priv->audio_output);
-
if (!gst_bin_add (GST_BIN (priv->pipeline), priv->audio_output))
{
g_warning ("Could not add audio sink to pipeline");
return TRUE;
}
-#if 0
+/* Extra response id for the error info bar's "Top Up" button, used in
+ * addition to the stock GTK_RESPONSE_CLOSE. */
+enum
+{
+  EMP_RESPONSE_BALANCE
+};
+
+/* "response" callback for info bars created by display_error(): close
+ * destroys the bar; EMP_RESPONSE_BALANCE opens the top-up URI stored as
+ * object data on the info bar.  Other response ids (e.g.
+ * GTK_RESPONSE_DELETE_EVENT) are deliberately ignored. */
+static void
+on_error_infobar_response_cb (GtkInfoBar *info_bar,
+    gint response_id,
+    gpointer user_data)
+{
+  switch (response_id)
+    {
+      case GTK_RESPONSE_CLOSE:
+        gtk_widget_destroy (GTK_WIDGET (info_bar));
+        break;
+      case EMP_RESPONSE_BALANCE:
+        empathy_url_show (GTK_WIDGET (info_bar),
+          g_object_get_data (G_OBJECT (info_bar), "uri"));
+        break;
+    }
+}
+
static void
display_error (EmpathyCallWindow *self,
- TpyCallChannel *call,
const gchar *img,
const gchar *title,
const gchar *desc,
- const gchar *details)
+ const gchar *details,
+ const gchar *button_text,
+ const gchar *uri,
+ gint button_response)
{
EmpathyCallWindowPriv *priv = GET_PRIV (self);
GtkWidget *info_bar;
gchar *txt;
/* Create info bar */
- info_bar = gtk_info_bar_new_with_buttons (GTK_STOCK_CLOSE, GTK_RESPONSE_CLOSE,
- NULL);
+ info_bar = gtk_info_bar_new ();
+
+ if (button_text != NULL)
+ {
+ gtk_info_bar_add_button (GTK_INFO_BAR (info_bar),
+ button_text, button_response);
+ g_object_set_data_full (G_OBJECT (info_bar),
+ "uri", g_strdup (uri), g_free);
+ }
+
+ gtk_info_bar_add_button (GTK_INFO_BAR (info_bar),
+ GTK_STOCK_CLOSE, GTK_RESPONSE_CLOSE);
gtk_info_bar_set_message_type (GTK_INFO_BAR (info_bar), GTK_MESSAGE_WARNING);
content_area = gtk_info_bar_get_content_area (GTK_INFO_BAR (info_bar));
/* hbox containing the image and the messages vbox */
- hbox = gtk_hbox_new (FALSE, 3);
+ hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 3);
gtk_container_add (GTK_CONTAINER (content_area), hbox);
/* Add image */
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 0);
/* vbox containing the main message and the details expander */
- vbox = gtk_vbox_new (FALSE, 3);
+ vbox = gtk_box_new (GTK_ORIENTATION_VERTICAL, 3);
gtk_box_pack_start (GTK_BOX (hbox), vbox, TRUE, TRUE, 0);
/* Add text */
}
g_signal_connect (info_bar, "response",
- G_CALLBACK (gtk_widget_destroy), NULL);
+ G_CALLBACK (on_error_infobar_response_cb), NULL);
gtk_box_pack_start (GTK_BOX (priv->errors_vbox), info_bar,
FALSE, FALSE, CONTENT_HBOX_CHILDREN_PACKING_PADDING);
gtk_widget_show_all (info_bar);
}
+#if 0
static gchar *
media_stream_error_to_txt (EmpathyCallWindow *self,
TpyCallChannel *call,
}
#endif
+/* Show an in-window error telling the user they have insufficient credit
+ * for the call, including their current balance (if known) and a
+ * "Top Up" button pointing at the connection's balance URI. */
+static void
+show_balance_error (EmpathyCallWindow *self)
+{
+  TpChannel *call;
+  TpConnection *conn;
+  gchar *balance, *tmp;
+  const gchar *uri, *currency;
+  gint amount;
+  guint scale;
+
+  g_object_get (self->priv->handler,
+      "call-channel", &call,
+      NULL);
+
+  /* The connection (and the strings borrowed from it below, such as the
+   * balance URI) is only guaranteed to stay alive while we hold a
+   * reference to the channel, so keep the channel ref until the end. */
+  conn = tp_channel_borrow_connection (call);
+
+  uri = tp_connection_get_balance_uri (conn);
+
+  if (!tp_connection_get_balance (conn, &amount, &scale, &currency))
+    {
+      /* unknown balance */
+      balance = g_strdup ("(--)");
+    }
+  else
+    {
+      char *money = empathy_format_currency (amount, scale, currency);
+
+      balance = g_strdup_printf ("%s %s",
+          currency, money);
+      g_free (money);
+    }
+
+  tmp = g_strdup_printf (_("Your current balance is %s."), balance);
+
+  display_error (self,
+      NULL,
+      _("Sorry, you don’t have enough credit for that call."),
+      tmp, NULL,
+      _("Top Up"),
+      uri,
+      EMP_RESPONSE_BALANCE);
+
+  g_free (tmp);
+  g_free (balance);
+  g_object_unref (call);
+}
+
static void
empathy_call_window_state_changed_cb (EmpathyCallHandler *handler,
TpyCallState state,
+ gchar *reason,
EmpathyCallWindow *self)
{
EmpathyCallWindowPriv *priv = GET_PRIV (self);
TpyCallChannel *call;
gboolean can_send_video;
+ if (state == TPY_CALL_STATE_ENDED &&
+ !tp_strdiff (reason, TP_ERROR_STR_INSUFFICIENT_BALANCE))
+ {
+ show_balance_error (self);
+ return;
+ }
+
if (state != TPY_CALL_STATE_ACCEPTED)
return;
/* Called from the streaming thread */
static gboolean
empathy_call_window_src_added_cb (EmpathyCallHandler *handler,
- GstPad *src, guint media_type, gpointer user_data)
+ TfContent *content, GstPad *src, gpointer user_data)
{
EmpathyCallWindow *self = EMPATHY_CALL_WINDOW (user_data);
EmpathyCallWindowPriv *priv = GET_PRIV (self);
gboolean retval = FALSE;
+ guint media_type;
GstPad *pad;
g_mutex_lock (priv->lock);
+ g_object_get (content, "media-type", &media_type, NULL);
+
switch (media_type)
{
case TP_MEDIA_STREAM_TYPE_AUDIO:
- pad = empathy_call_window_get_audio_sink_pad (self);
+ pad = empathy_call_window_get_audio_sink_pad (self, content);
break;
case TP_MEDIA_STREAM_TYPE_VIDEO:
g_idle_add (empathy_call_window_show_video_output_cb, self);
G_CALLBACK (empathy_call_window_video_probe_cb), self);
if (priv->got_video_src > 0)
g_source_remove (priv->got_video_src);
- priv->got_video_src = g_timeout_add_seconds (5,
+ priv->got_video_src = g_timeout_add_seconds (1,
empathy_call_window_check_video_cb, self);
break;
default:
return TRUE;
}
+/* Create and configure priv->audio_output for the given audio @content.
+ * The sink is only prepared here, not yet added to the pipeline (see
+ * priv->audio_output_added); its volume is bound bidirectionally to the
+ * volume button, and synchronized with the content's AudioControl
+ * requested/reported output-volume properties. */
+static void
+empathy_call_window_prepare_audio_output (EmpathyCallWindow *self,
+    TfContent *content)
+{
+  EmpathyCallWindowPriv *priv = self->priv;
+
+  g_assert (priv->audio_output_added == FALSE);
+  /* audio_output is a pointer; compare against NULL, not FALSE */
+  g_assert (priv->audio_output == NULL);
+
+  priv->audio_output = empathy_audio_sink_new ();
+  g_object_ref_sink (priv->audio_output);
+
+  /* volume button to output volume linking */
+  g_object_bind_property (priv->audio_output, "volume",
+      priv->volume_button, "value",
+      G_BINDING_BIDIRECTIONAL | G_BINDING_SYNC_CREATE);
+
+  /* Remote volume requests (0-255, -1 unknown) drive the element volume */
+  g_object_bind_property_full (content, "requested-output-volume",
+      priv->audio_output, "volume",
+      G_BINDING_DEFAULT,
+      audio_control_volume_to_element,
+      element_volume_to_audio_control,
+      NULL, NULL);
+
+  /* Link volumes together, sync the current audio output volume property
+   * back to farstream first */
+  g_object_bind_property_full (priv->audio_output, "volume",
+      content, "reported-output-volume",
+      G_BINDING_SYNC_CREATE,
+      element_volume_to_audio_control,
+      audio_control_volume_to_element,
+      NULL, NULL);
+
+  /* For raw audio conferences assume that the producer of the raw data
+   * has already processed it, so turn off any echo cancellation and any
+   * other audio improvements that come with it */
+  empathy_audio_sink_set_echo_cancel (
+    EMPATHY_GST_AUDIO_SINK (priv->audio_output),
+    !empathy_call_window_content_is_raw (content));
+}
+
+
static gboolean
-empathy_call_window_sink_added_cb (EmpathyCallHandler *handler,
- GstPad *sink, FsMediaType media_type, gpointer user_data)
+empathy_call_window_content_added_cb (EmpathyCallHandler *handler,
+ TfContent *content, gpointer user_data)
{
EmpathyCallWindow *self = EMPATHY_CALL_WINDOW (user_data);
EmpathyCallWindowPriv *priv = GET_PRIV (self);
- GstPad *pad;
+ GstPad *sink, *pad;
+ FsMediaType media_type;
gboolean retval = FALSE;
+ g_object_get (content, "media-type", &media_type, "sink-pad", &sink, NULL);
+ g_assert (sink != NULL);
+
switch (media_type)
{
case FS_MEDIA_TYPE_AUDIO:
+
+ /* For raw audio conferences assume that the receiver of the raw data
+ * wants it unprocessed, so turn off any echo cancellation and any
+ * other audio improvements that come with it */
+ empathy_audio_src_set_echo_cancel (
+ EMPATHY_GST_AUDIO_SRC (priv->audio_input),
+ !empathy_call_window_content_is_raw (content));
+
+ /* Link volumes together, sync the current audio input volume property
+ * back to farstream first */
+ g_object_bind_property_full (content, "requested-input-volume",
+ priv->audio_input, "volume",
+ G_BINDING_DEFAULT,
+ audio_control_volume_to_element,
+ element_volume_to_audio_control,
+ NULL, NULL);
+
+ g_object_bind_property_full (priv->audio_input, "volume",
+ content, "reported-input-volume",
+ G_BINDING_SYNC_CREATE,
+ element_volume_to_audio_control,
+ audio_control_volume_to_element,
+ NULL, NULL);
+
if (!gst_bin_add (GST_BIN (priv->pipeline), priv->audio_input))
{
g_warning ("Could not add audio source to pipeline");
break;
}
+ /* Prepare our audio output, not added yet though */
+ empathy_call_window_prepare_audio_output (self, content);
+
retval = TRUE;
break;
case FS_MEDIA_TYPE_VIDEO:
g_assert_not_reached ();
}
+ gst_object_unref (sink);
return retval;
}
TpySendingState s;
g_object_get (priv->handler, "call-channel", &call, NULL);
- s = tpy_call_channel_get_video_state (call);
+ /* If the call channel isn't set yet we're requesting it, if we're
+ * requesting it with initial video it should be PENDING_SEND when we get
+ * it */
+ if (call == NULL)
+ s = TPY_SENDING_STATE_PENDING_SEND;
+ else
+ s = tpy_call_channel_get_video_state (call);
if (s == TPY_SENDING_STATE_PENDING_SEND ||
s == TPY_SENDING_STATE_SENDING)
}
}
- g_object_unref (call);
+ if (call != NULL)
+ g_object_unref (call);
}
}
G_CALLBACK (empathy_call_window_channel_closed_cb), self);
g_signal_connect (priv->handler, "src-pad-added",
G_CALLBACK (empathy_call_window_src_added_cb), self);
- g_signal_connect (priv->handler, "sink-pad-added",
- G_CALLBACK (empathy_call_window_sink_added_cb), self);
- g_signal_connect (priv->handler, "sink-pad-removed",
- G_CALLBACK (empathy_call_window_sink_removed_cb), self);
+ g_signal_connect (priv->handler, "content-added",
+ G_CALLBACK (empathy_call_window_content_added_cb), self);
+ g_signal_connect (priv->handler, "content-removed",
+ G_CALLBACK (empathy_call_window_content_removed_cb), self);
/* We connect to ::call-channel unconditionally since we'll
* get new channels if we hangup and redial or if we reuse the
g_signal_connect (priv->handler, "notify::call-channel",
G_CALLBACK (call_handler_notify_call_cb), self);
+ g_signal_connect (priv->handler, "framerate-changed",
+ G_CALLBACK (empathy_call_window_framerate_changed_cb), self);
+ g_signal_connect (priv->handler, "resolution-changed",
+ G_CALLBACK (empathy_call_window_resolution_changed_cb), self);
+
g_object_get (priv->handler, "call-channel", &call, NULL);
if (call != NULL)
{
g_object_unref (call);
}
-static void
-empathy_call_window_mic_toggled_cb (GtkToggleToolButton *toggle,
- EmpathyCallWindow *self)
-{
- EmpathyCallWindowPriv *priv = GET_PRIV (self);
- gboolean active;
-
- active = (gtk_toggle_tool_button_get_active (toggle));
-
- /* We don't want the settings callback to react to this change to avoid
- * a loop. */
- g_signal_handlers_block_by_func (priv->settings,
- empathy_call_window_prefs_volume_changed_cb, self);
-
- if (active)
- {
- g_settings_set_double (priv->settings, EMPATHY_PREFS_CALL_SOUND_VOLUME,
- priv->volume * 100);
- }
- else
- {
- /* TODO, Instead of setting the input volume to 0 we should probably
- * stop sending but this would cause the audio call to drop if both
- * sides mute at the same time on certain CMs AFAIK. Need to revisit this
- * in the future. GNOME #574574
- */
- g_settings_set_double (priv->settings, EMPATHY_PREFS_CALL_SOUND_VOLUME,
- 0);
- }
-
- g_signal_handlers_unblock_by_func (priv->settings,
- empathy_call_window_prefs_volume_changed_cb, self);
-}
-
static void
empathy_call_window_hangup_cb (gpointer object,
EmpathyCallWindow *self)
(GtkCallback) gtk_widget_destroy, NULL);
create_video_output_widget (window);
-
- /* While the call was disconnected, the input volume might have changed.
- * However, since the audio_input source was destroyed, its volume has not
- * been updated during that time. That's why we manually update it here */
- empathy_call_window_mic_volume_changed (window);
-
priv->outgoing = TRUE;
empathy_call_window_set_state_connecting (window);
gtk_label_set_label (GTK_LABEL (self->priv->status_label), message);
}
-static void
-empathy_call_window_volume_changed_cb (GtkScaleButton *button,
- gdouble value, EmpathyCallWindow *window)
-{
- EmpathyCallWindowPriv *priv = GET_PRIV (window);
-
- if (priv->audio_output == NULL)
- return;
-
- empathy_audio_sink_set_volume (EMPATHY_GST_AUDIO_SINK (priv->audio_output),
- value);
-}
-
GtkUIManager *
empathy_call_window_get_ui_manager (EmpathyCallWindow *window)
{
{
return EMPATHY_GST_VIDEO_SRC (self->priv->video_input);
}
+
+/* Switch the local video source to a different webcam @device.
+ * If the camera is currently active -- approximated here by the existence
+ * of the video preview; NOTE(review): confirm this is a reliable proxy --
+ * it is stopped before the device change and restarted afterwards. */
+void
+empathy_call_window_change_webcam (EmpathyCallWindow *self,
+    const gchar *device)
+{
+  EmpathyGstVideoSrc *video;
+  gboolean running;
+
+  /* Restart the camera only if it's already running */
+  running = (self->priv->video_preview != NULL);
+  video = empathy_call_window_get_video_src (self);
+
+  if (running)
+    empathy_call_window_play_camera (self, FALSE);
+
+  empathy_video_src_change_device (video, device);
+
+  if (running)
+    empathy_call_window_play_camera (self, TRUE);
+}