break
self.label_name_event_box.modify_bg(gtk.STATE_NORMAL, self.color_tuple[1])
self.vbox.pack_start(self.label_name_event_box, True)
+ self.mute = gtk.ToggleButton()
+ self.mute.set_label("M")
+ self.mute.set_active(self.channel.mute)
+ self.mute.connect("toggled", self.on_mute_toggled)
+ self.vbox.pack_start(self.mute, False)
+
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_IN)
frame.add(self.abspeak);
if event.button == 1:
self.on_channel_properties()
+    def on_mute_toggled(self, button):
+        # "M" button handler: mirror the button's active state onto the
+        # channel's out_mute flag, then refresh the main-mix monitor so
+        # meters/monitoring reflect the change immediately.
+        self.channel.out_mute = self.mute.get_active()
+        self.app.update_monitor(self.app.main_mix)
+
def unrealize(self):
# remove control groups from input channels
for input_channel in self.app.channels:
self.label_name.set_text(self.channel_name)
self.label_name.set_size_request(0, -1)
self.vbox.pack_start(self.label_name, False)
+ self.mute = gtk.ToggleButton()
+ self.mute.set_label("M")
+ self.mute.set_active(self.channel.mute)
+ self.mute.connect("toggled", self.on_mute_toggled)
+ self.vbox.pack_start(self.mute, False)
frame = gtk.Frame()
frame.set_shadow_type(gtk.SHADOW_IN)
frame.add(self.abspeak);
self._init_muted_channels = None
self._init_solo_channels = None
+    def on_mute_toggled(self, button):
+        # "M" button handler: mirror the button's active state onto the
+        # channel's out_mute flag, then refresh the main-mix monitor so
+        # meters/monitoring reflect the change immediately.
+        self.channel.out_mute = self.mute.get_active()
+        self.app.update_monitor(self.app.main_mix)
+
def unrealize(self):
Channel.unrealize(self)
self.channel = False
struct jack_mixer * mixer_ptr;
char * name;
bool stereo;
+ bool out_mute;
float volume_transition_seconds;
unsigned int num_volume_transition_steps;
float volume;
float peak_left;
float peak_right;
+ jack_default_audio_sample_t * tmp_mixed_frames_left;
+ jack_default_audio_sample_t * tmp_mixed_frames_right;
jack_default_audio_sample_t * frames_left;
jack_default_audio_sample_t * frames_right;
jack_default_audio_sample_t * prefader_frames_left;
output_channel_set_muted(channel_ptr->mixer_ptr->main_mix_channel, channel, false);
}
+void
+channel_out_mute(
+  jack_mixer_channel_t channel)
+{
+  /* Set the channel's output-mute flag; the process callback reads it to
+   * decide whether mixed frames are copied to the output buffers.
+   * NOTE(review): presumably channel_ptr is the project's macro casting
+   * `channel` to struct channel * — confirm against jack_mixer.c. */
+  channel_ptr->out_mute = true;
+}
+
+void
+channel_out_unmute(
+  jack_mixer_channel_t channel)
+{
+  /* Clear the channel's output-mute flag (counterpart of
+   * channel_out_mute). */
+  channel_ptr->out_mute = false;
+}
+
void
channel_solo(
jack_mixer_channel_t channel)
return false;
}
+bool
+channel_is_out_muted(
+  jack_mixer_channel_t channel)
+{
+  /* Query the channel's output-mute flag. */
+  return channel_ptr->out_mute;
+}
+
bool
channel_is_soloed(
jack_mixer_channel_t channel)
for (i = start; i < end; i++)
{
- mix_channel->left_buffer_ptr[i] = 0.0;
+ mix_channel->left_buffer_ptr[i] = mix_channel->tmp_mixed_frames_left[i] = 0.0;
if (mix_channel->stereo)
- mix_channel->right_buffer_ptr[i] = 0.0;
+ mix_channel->right_buffer_ptr[i] = mix_channel->tmp_mixed_frames_right[i] = 0.0;
}
-
for (node_ptr = channels_list; node_ptr; node_ptr = g_slist_next(node_ptr))
{
channel_ptr = node_ptr->data;
}
if (frame_left == NAN)
break;
- mix_channel->left_buffer_ptr[i] += frame_left;
+ mix_channel->tmp_mixed_frames_left[i] += frame_left;
if (mix_channel->stereo)
{
if (frame_right == NAN)
break;
- mix_channel->right_buffer_ptr[i] += frame_right;
+ mix_channel->tmp_mixed_frames_right[i] += frame_right;
}
}
vol_l = vol * (1 - bal);
vol_r = vol * (1 + bal);
}
- mix_channel->left_buffer_ptr[i] *= vol_l;
- mix_channel->right_buffer_ptr[i] *= vol_r;
+ mix_channel->tmp_mixed_frames_left[i] *= vol_l;
+ mix_channel->tmp_mixed_frames_right[i] *= vol_r;
}
- frame_left = fabsf(mix_channel->left_buffer_ptr[i]);
+ frame_left = fabsf(mix_channel->tmp_mixed_frames_left[i]);
if (mix_channel->peak_left < frame_left)
{
mix_channel->peak_left = frame_left;
if (mix_channel->stereo)
{
- frame_right = fabsf(mix_channel->right_buffer_ptr[i]);
+ frame_right = fabsf(mix_channel->tmp_mixed_frames_right[i]);
if (mix_channel->peak_right < frame_right)
{
mix_channel->peak_right = frame_right;
mix_channel->balance = mix_channel->balance_new;
mix_channel->balance_idx = 0;
}
+
+ if (!mix_channel->out_mute) {
+ mix_channel->left_buffer_ptr[i] = mix_channel->tmp_mixed_frames_left[i];
+ mix_channel->right_buffer_ptr[i] = mix_channel->tmp_mixed_frames_right[i];
+ }
}
}
}
channel_ptr->stereo = stereo;
+ channel_ptr->out_mute = false;
channel_ptr->volume_transition_seconds = VOLUME_TRANSITION_SECONDS;
channel_ptr->num_volume_transition_steps =
channel_ptr->peak_right = 0.0;
channel_ptr->peak_frames = 0;
+ channel_ptr->tmp_mixed_frames_left = calloc(MAX_BLOCK_SIZE, sizeof(jack_default_audio_sample_t));
+ channel_ptr->tmp_mixed_frames_right = calloc(MAX_BLOCK_SIZE, sizeof(jack_default_audio_sample_t));
channel_ptr->frames_left = calloc(MAX_BLOCK_SIZE, sizeof(jack_default_audio_sample_t));
channel_ptr->frames_right = calloc(MAX_BLOCK_SIZE, sizeof(jack_default_audio_sample_t));
channel_ptr->prefader_frames_left = calloc(MAX_BLOCK_SIZE, sizeof(jack_default_audio_sample_t));
return result;
}
+static PyObject*
+Channel_get_out_mute(ChannelObject *self, void *closure)
+{
+  /* Getter for the `out_mute` attribute: expose the channel's
+   * output-mute state as a Python bool.  The bool singletons are
+   * shared objects, so a new reference must be taken before return. */
+  PyObject *state = channel_is_out_muted(self->channel) ? Py_True : Py_False;
+
+  Py_INCREF(state);
+  return state;
+}
+
static int
Channel_set_mute(ChannelObject *self, PyObject *value, void *closure)
{
return 0;
}
+static int
+Channel_set_out_mute(ChannelObject *self, PyObject *value, void *closure)
+{
+  /* Setter for the `out_mute` attribute.
+   * Returns 0 on success, -1 (with an exception set) on error. */
+  int is_true;
+
+  /* value == NULL means attribute deletion; previously this silently
+   * unmuted the channel instead of reporting an error. */
+  if (value == NULL) {
+    PyErr_SetString(PyExc_TypeError, "cannot delete out_mute attribute");
+    return -1;
+  }
+  /* Accept any truthy value, not just the Py_True singleton, so
+   * `channel.out_mute = 1` behaves the same as `= True`. */
+  is_true = PyObject_IsTrue(value);
+  if (is_true < 0)
+    return -1;
+  if (is_true)
+    channel_out_mute(self->channel);
+  else
+    channel_out_unmute(self->channel);
+  return 0;
+}
+
static PyObject*
Channel_get_solo(ChannelObject *self, void *closure)
{
{"mute",
(getter)Channel_get_mute, (setter)Channel_set_mute,
"mute", NULL},
- {"solo",
+ {"out_mute",
+ (getter)Channel_get_out_mute, (setter)Channel_set_out_mute,
+ "out_mute", NULL},
+ {"solo",
(getter)Channel_get_solo, (setter)Channel_set_solo,
"solo", NULL},
{"meter",