GstElement *video_output_sink;
GstElement *audio_input;
GstElement *audio_output;
+ gboolean audio_output_added;
GstElement *pipeline;
GstElement *video_tee;
gtk_container_add (GTK_CONTAINER (self), top_vbox);
- priv->content_hbox = gtk_hbox_new (FALSE, CONTENT_HBOX_SPACING);
+ priv->content_hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL,
+ CONTENT_HBOX_SPACING);
gtk_container_set_border_width (GTK_CONTAINER (priv->content_hbox),
CONTENT_HBOX_BORDER_WIDTH);
gtk_box_pack_start (GTK_BOX (priv->pane), priv->content_hbox,
priv->video_container = gtk_clutter_embed_new ();
+ gtk_widget_set_size_request (priv->video_container,
+ EMPATHY_VIDEO_WIDGET_DEFAULT_WIDTH, EMPATHY_VIDEO_WIDGET_DEFAULT_HEIGHT);
+
/* Set the background color to that of the rest of the window */
context = gtk_widget_get_style_context (priv->content_hbox);
gtk_style_context_get_background_color (context,
if (priv->audio_output != NULL)
g_object_unref (priv->audio_output);
priv->audio_output = NULL;
+ priv->audio_output_added = FALSE;
if (priv->video_tee != NULL)
g_object_unref (priv->video_tee);
{
gst_element_set_state (priv->audio_output, GST_STATE_NULL);
- gst_bin_remove (GST_BIN (priv->pipeline), priv->audio_output);
+ if (priv->audio_output_added)
+ gst_bin_remove (GST_BIN (priv->pipeline), priv->audio_output);
priv->audio_output = NULL;
+ priv->audio_output_added = FALSE;
}
}
else
GstPad *pad;
GstPadTemplate *template;
- if (priv->audio_output == NULL)
+ if (!priv->audio_output_added)
{
- priv->audio_output = empathy_audio_sink_new ();
- g_object_ref_sink (priv->audio_output);
-
- /* volume button to output volume linking */
- g_object_bind_property (priv->audio_output, "volume",
- priv->volume_button, "value",
- G_BINDING_BIDIRECTIONAL | G_BINDING_SYNC_CREATE);
-
- g_object_bind_property_full (content, "requested-output-volume",
- priv->audio_output, "volume",
- G_BINDING_DEFAULT,
- audio_control_volume_to_element,
- element_volume_to_audio_control,
- NULL, NULL);
-
- /* Link volumes together, sync the current audio input volume property
- * back to farstream first */
- g_object_bind_property_full (priv->audio_output, "volume",
- content, "reported-output-volume",
- G_BINDING_SYNC_CREATE,
- element_volume_to_audio_control,
- audio_control_volume_to_element,
- NULL, NULL);
-
if (!gst_bin_add (GST_BIN (priv->pipeline), priv->audio_output))
{
g_warning ("Could not add audio sink to pipeline");
}
}
- /* For raw audio conferences assume that the producer of the raw data
- * has already processed it, so turn off any echo cancellation and any
- * other audio improvements that come with it */
- empathy_audio_sink_set_echo_cancel (
- EMPATHY_GST_AUDIO_SINK (priv->audio_output),
- !empathy_call_window_content_is_raw (content));
-
template = gst_element_class_get_pad_template (
GST_ELEMENT_GET_CLASS (priv->audio_output), "sink%d");
content_area = gtk_info_bar_get_content_area (GTK_INFO_BAR (info_bar));
/* hbox containing the image and the messages vbox */
- hbox = gtk_hbox_new (FALSE, 3);
+ hbox = gtk_box_new (GTK_ORIENTATION_HORIZONTAL, 3);
gtk_container_add (GTK_CONTAINER (content_area), hbox);
/* Add image */
gtk_box_pack_start (GTK_BOX (hbox), image, FALSE, FALSE, 0);
/* vbox containing the main message and the details expander */
- vbox = gtk_vbox_new (FALSE, 3);
+ vbox = gtk_box_new (GTK_ORIENTATION_VERTICAL, 3);
gtk_box_pack_start (GTK_BOX (hbox), vbox, TRUE, TRUE, 0);
/* Add text */
return TRUE;
}
+/* Create and configure the audio output sink for @content.
+ *
+ * Binds the sink's "volume" bidirectionally to the UI volume button, and
+ * links it with the content's requested/reported output volume (syncing
+ * the element's current volume back to farstream first). Echo
+ * cancellation is disabled for raw-audio conferences, whose producer is
+ * assumed to have already processed the stream.
+ *
+ * The sink is deliberately NOT added to the pipeline here; callers add it
+ * later and record that in priv->audio_output_added. */
+static void
+empathy_call_window_prepare_audio_output (EmpathyCallWindow *self,
+    TfContent *content)
+{
+  EmpathyCallWindowPriv *priv = self->priv;
+
+  /* Must not be called twice without a teardown in between. */
+  g_assert (priv->audio_output_added == FALSE);
+  g_assert (priv->audio_output == NULL);
+
+  priv->audio_output = empathy_audio_sink_new ();
+  /* Take ownership of the floating reference so the element survives
+   * removal from the pipeline. */
+  g_object_ref_sink (priv->audio_output);
+
+  /* volume button to output volume linking */
+  g_object_bind_property (priv->audio_output, "volume",
+    priv->volume_button, "value",
+    G_BINDING_BIDIRECTIONAL | G_BINDING_SYNC_CREATE);
+
+  g_object_bind_property_full (content, "requested-output-volume",
+    priv->audio_output, "volume",
+    G_BINDING_DEFAULT,
+    audio_control_volume_to_element,
+    element_volume_to_audio_control,
+    NULL, NULL);
+
+  /* Link volumes together, sync the current audio input volume property
+   * back to farstream first */
+  g_object_bind_property_full (priv->audio_output, "volume",
+    content, "reported-output-volume",
+    G_BINDING_SYNC_CREATE,
+    element_volume_to_audio_control,
+    audio_control_volume_to_element,
+    NULL, NULL);
+
+  /* For raw audio conferences assume that the producer of the raw data
+   * has already processed it, so turn off any echo cancellation and any
+   * other audio improvements that come with it */
+  empathy_audio_sink_set_echo_cancel (
+    EMPATHY_GST_AUDIO_SINK (priv->audio_output),
+    !empathy_call_window_content_is_raw (content));
+}
+
+
static gboolean
empathy_call_window_content_added_cb (EmpathyCallHandler *handler,
TfContent *content, gpointer user_data)
break;
}
+ /* Prepare our audio output, not added yet though */
+ empathy_call_window_prepare_audio_output (self, content);
+
retval = TRUE;
break;
case FS_MEDIA_TYPE_VIDEO:
{
return EMPATHY_GST_VIDEO_SRC (self->priv->video_input);
}
+
+
+/* Switch the video capture source to @device.
+ *
+ * If the local camera preview is currently running (detected via
+ * priv->video_preview being non-NULL), the camera is stopped before the
+ * device change and restarted afterwards; otherwise the device is simply
+ * swapped while idle. The stop/change/restart ordering must be preserved
+ * so the source element is not reconfigured while active. */
+void
+empathy_call_window_change_webcam (EmpathyCallWindow *self,
+ const gchar *device)
+{
+ EmpathyGstVideoSrc *video;
+ gboolean running;
+
+ /* Restart the camera only if it's already running */
+ running = (self->priv->video_preview != NULL);
+ video = empathy_call_window_get_video_src (self);
+
+ if (running)
+ empathy_call_window_play_camera (self, FALSE);
+
+ empathy_video_src_change_device (video, device);
+
+ if (running)
+ empathy_call_window_play_camera (self, TRUE);
+}