Signal processing: Some refactoring.

dev-brahim
Pierre-Antoine 2025-03-19 15:31:06 +01:00
parent ad865e2829
commit 8a39bba7b1
1 changed file with 191 additions and 245 deletions

View File

@ -20,6 +20,7 @@
# -*- coding: utf-8 -*-
import os
import logging
from cProfile import label
@ -54,6 +55,7 @@ import Translation.constant_string as cs
from Model.acoustic_data_loader import AcousticDataLoader
import settings as stg
from tools import trace
_translate = QCoreApplication.translate
@ -69,15 +71,8 @@ class SignalProcessingTab(QWidget):
def __init__(self, widget_tab):
super().__init__()
self.path_icon = "./icons/"
self.icon_folder = QIcon(self.path_icon + "folder.png")
self.icon_triangle_left = QIcon(self.path_icon + "triangle_left.png")
self.icon_triangle_right = QIcon(self.path_icon + "triangle_right.png")
self.icon_triangle_left_to_begin = QIcon(self.path_icon + "triangle_left_to_begin.png")
self.icon_triangle_right_to_end = QIcon(self.path_icon + "triangle_right_to_end.png")
self.icon_update = QIcon(self.path_icon + "update.png")
self.icon_clear = QIcon(self.path_icon + "clear.png")
self.icon_apply = QIcon(self.path_icon + "circle_green_arrow_right.png")
self._setup_icons()
### --- General layout of widgets ---
@ -493,6 +488,26 @@ class SignalProcessingTab(QWidget):
self.slider.valueChanged.connect(self.update_lineEdit_by_moving_slider)
self.slider.valueChanged.connect(self.update_plot_pre_processed_profile)
def _setup_icons(self):
    """Load the QIcon objects used by this tab's buttons from the icons/ directory."""
    icon_dir = "icons"

    def _load(filename):
        # Build a platform-independent path into the icon directory
        # and wrap it in a QIcon in one step.
        return QIcon(os.path.join(icon_dir, filename))

    self.icon_folder = _load("folder.png")
    self.icon_triangle_left = _load("triangle_left.png")
    self.icon_triangle_right = _load("triangle_right.png")
    self.icon_triangle_left_to_begin = _load("triangle_left_to_begin.png")
    self.icon_triangle_right_to_end = _load("triangle_right_to_end.png")
    self.icon_update = _load("update.png")
    self.icon_clear = _load("clear.png")
    self.icon_apply = _load("circle_green_arrow_right.png")
# --------------------------------------------------------------------------------------------------------------
# --------------------------------------------------------------------------------------------------------------
# +++++++++ FUNCTION +++++++++
@ -516,6 +531,7 @@ class SignalProcessingTab(QWidget):
data_id = self.combobox_acoustic_data_choice.currentIndex()
self.combobox_acoustic_data_choice.blockSignals(True)
self.combobox_freq_noise_from_profile_tail.blockSignals(True)
logger.debug("Update the Signal preprocessing tab...")
@ -541,6 +557,7 @@ class SignalProcessingTab(QWidget):
logger.debug("Update the Signal preprocessing tab... Done")
self.combobox_freq_noise_from_profile_tail.blockSignals(False)
self.combobox_acoustic_data_choice.blockSignals(False)
def activate_list_of_pre_processed_data(self):
@ -607,71 +624,77 @@ class SignalProcessingTab(QWidget):
self.animation_groupbox_option_profile_tail.start()
# ------------------------------------------------------
def compute_average_profile_tail(self):
@trace
def compute_average_profile_tail(self, *args):
data_id = self.combobox_acoustic_data_choice.currentIndex()
freq_noise_id = self.combobox_freq_noise_from_profile_tail.currentIndex()
if (float(self.lineEdit_val1.text()) == 0) and (float(self.lineEdit_val2.text()) == 0):
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if ((float(self.lineEdit_val1.text()) == 0)
and (float(self.lineEdit_val2.text()) == 0)):
if stg.time_cross_section[data_id].shape != (0,):
self.lineEdit_val1.setText(str(
'%.3f' % np.nanmin(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()])))
'%.3f' % np.nanmin(
stg.time_cross_section[data_id][
freq_noise_id
]
)
))
self.lineEdit_val2.setText(str(
'%.3f' % np.nanmax(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()])))
elif stg.time[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
'%.3f' % np.nanmax(
stg.time_cross_section[data_id][
freq_noise_id
]
)
))
elif stg.time[data_id].shape != (0,):
self.lineEdit_val1.setText(str(
'%.3f' % np.nanmin(stg.time[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()])))
'%.3f' % np.nanmin(stg.time[data_id][
freq_noise_id])))
self.lineEdit_val2.setText(str(
'%.3f' % np.nanmax(stg.time[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()])))
'%.3f' % np.nanmax(stg.time[data_id][
freq_noise_id])))
# --- Find index of line edit value ---
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.time_cross_section[data_id].shape != (0,):
time_data = stg.time_cross_section
elif stg.time[data_id].shape != (0,):
time_data = stg.time
val1 = np.where(np.abs(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val1.text().replace(",", ".")))
==
np.nanmin(np.abs(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val1.text().replace(",", "."))))
)[0][0]
val2 = np.where(np.abs(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val2.text().replace(",", ".")))
==
np.nanmin(np.abs(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val2.text().replace(",", "."))))
)[0][0]
val1 = np.where(
np.abs(
time_data[data_id][freq_noise_id]
- float(self.lineEdit_val1.text().replace(",", "."))
) == np.nanmin(
np.abs(
time_data[data_id][freq_noise_id]
- float(self.lineEdit_val1.text().replace(",", "."))
)
)
)[0][0]
val2 = np.where(
np.abs(
time_data[data_id][freq_noise_id]
- float(self.lineEdit_val2.text().replace(",", "."))
) == np.nanmin(
np.abs(
time_data[data_id][freq_noise_id]
- float(self.lineEdit_val2.text().replace(",", "."))
)
)
)[0][0]
elif stg.time[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
stg.BS_mean[data_id] = (
np.nanmean(
stg.BS_raw_data[data_id][:, :, val1:val2],
axis=2
)
)
val1 = np.where(np.abs(stg.time[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val1.text().replace(",", ".")))
==
np.nanmin(np.abs(stg.time[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val1.text().replace(",", "."))))
)[0][0]
val2 = np.where(np.abs(stg.time[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val2.text().replace(",", ".")))
==
np.nanmin(np.abs(stg.time[self.combobox_acoustic_data_choice.currentIndex()][
self.combobox_freq_noise_from_profile_tail.currentIndex()] -
float(self.lineEdit_val2.text().replace(",", "."))))
)[0][0]
stg.BS_mean[self.combobox_acoustic_data_choice.currentIndex()] = (
np.nanmean(stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()][:, :, val1:val2], axis=2))
if stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.BS_noise_raw_data[data_id].shape != (0,):
self.compute_noise_from_profile_tail_value()
def plot_averaged_profile_tail(self):
@trace
def plot_averaged_profile_tail(self, *args):
# --- Plot averaged signal ---
@ -1043,7 +1066,8 @@ class SignalProcessingTab(QWidget):
# self.activate_list_of_pre_processed_data()
def plot_noise(self):
@trace
def plot_noise(self, *args):
self.horizontalLayout_groupbox_plot_noise_data.removeWidget(self.canvas_noise)
self.fig_noise, self.axis_noise = plt.subplots(nrows=1, ncols=1, layout="constrained")
@ -1074,7 +1098,8 @@ class SignalProcessingTab(QWidget):
self.axis_noise.tick_params(axis='both', which='minor', labelsize=10)
def plot_transect_with_SNR_data(self):
@trace
def plot_transect_with_SNR_data(self, *args):
# --- Condition if table is not filled ---
# if not self.lineEdit_noise_file.text():
if len(stg.BS_noise_raw_data) == 0:
@ -1330,218 +1355,138 @@ class SignalProcessingTab(QWidget):
self.compute_averaged_BS_data()
self.update_plot_pre_processed_profile()
def plot_pre_processed_BS_signal(self):
@trace
def plot_pre_processed_BS_signal(self, *args):
data_id = self.combobox_acoustic_data_choice.currentIndex()
self.lineEdit_horizontal_average.setText(str(stg.Nb_cells_to_average_BS_signal[self.combobox_acoustic_data_choice.currentIndex()]))
self.lineEdit_horizontal_average.setText(
str(stg.Nb_cells_to_average_BS_signal[data_id])
)
if ((self.combobox_acoustic_data_choice.currentIndex() != -1)
and (stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,))):
if ((data_id != -1)
and (stg.BS_noise_raw_data[data_id].shape != (0,))):
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.removeWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.removeWidget(self.scroll_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.removeWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.removeWidget(self.scroll_BS)
self.fig_BS, self.axis_BS = plt.subplots(nrows=stg.freq[self.combobox_acoustic_data_choice.currentIndex()].shape[0], ncols=1, sharex=True, sharey=False, layout="constrained")
self.fig_BS, self.axis_BS = plt.subplots(
nrows=stg.freq[data_id].shape[0], ncols=1,
sharex=True, sharey=False, layout="constrained"
)
self.canvas_BS = FigureCanvas(self.fig_BS)
self.toolbar_BS = NavigationToolBar(self.canvas_BS, self)
self.scroll_BS.setWidget(self.canvas_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.addWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.addWidget(self.scroll_BS)
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x_time = stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()]
y_depth = stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()]
elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x_time = stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()]
y_depth = stg.depth[self.combobox_acoustic_data_choice.currentIndex()]
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.addWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.addWidget(self.scroll_BS)
if stg.time_cross_section[data_id].shape != (0,):
if stg.depth_cross_section[data_id].shape != (0,):
x_time = stg.time_cross_section[data_id]
y_depth = stg.depth_cross_section[data_id]
elif stg.depth[data_id].shape != (0,):
x_time = stg.time_cross_section[data_id]
y_depth = stg.depth[data_id]
else:
if stg.depth_cross_section[data_id].shape != (0,):
x_time = stg.time[data_id]
y_depth = stg.depth_cross_section[data_id]
elif stg.depth[data_id].shape != (0,):
x_time = stg.time[data_id]
y_depth = stg.depth[data_id]
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
for f, _ in enumerate(stg.freq[data_id]):
bed = False
x_time = stg.time[self.combobox_acoustic_data_choice.currentIndex()]
y_depth = stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()]
if stg.BS_stream_bed_pre_process_average[data_id].shape != (0,):
BS_data = stg.BS_stream_bed_pre_process_average
bed = True
elif stg.BS_cross_section_pre_process_average[data_id].shape != (0,):
BS_data = stg.BS_cross_section_pre_process_average
elif stg.BS_raw_data_pre_process_average[data_id].shape != (0,):
BS_data = stg.BS_raw_data_pre_process_average
elif stg.BS_stream_bed_pre_process_SNR[data_id].shape != (0,):
BS_data = stg.BS_stream_bed_pre_process_SNR
bed = True
elif stg.BS_cross_section_pre_process_SNR[data_id].shape != (0,):
BS_data = stg.BS_cross_section_pre_process_SNR
elif stg.BS_raw_data_pre_process_SNR[data_id].shape != (0,):
BS_data = stg.BS_raw_data_pre_process_SNR
elif stg.BS_stream_bed[data_id].shape != (0,):
BS_data = stg.BS_stream_bed
bed = True
elif stg.BS_cross_section[data_id].shape != (0,):
BS_data = stg.BS_cross_section
elif stg.BS_raw_data[data_id].shape != (0,):
BS_data = stg.BS_raw_data
elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
BS_data[data_id][f, :, :]
)
val_max = np.nanmax(
BS_data[data_id][f, :, :]
)
x_time = stg.time[self.combobox_acoustic_data_choice.currentIndex()]
y_depth = stg.depth[self.combobox_acoustic_data_choice.currentIndex()]
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(
x_time[f, :], -y_depth[f, :],
BS_data[data_id][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max)
)
for f, _ in enumerate(stg.freq[self.combobox_acoustic_data_choice.currentIndex()]):
if bed:
self.axis_BS[f].plot(
x_time[f, :], -stg.depth_bottom[data_id],
color='black', linewidth=1, linestyle="solid"
)
if stg.BS_stream_bed_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(stg.BS_stream_bed_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(stg.BS_stream_bed_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_stream_bed_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
self.axis_BS[f].plot(x_time[f, :], -stg.depth_bottom[self.combobox_acoustic_data_choice.currentIndex()],
color='black', linewidth=1, linestyle="solid")
elif stg.BS_cross_section_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_cross_section_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_cross_section_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(
x_time[f, :], -y_depth[f, :],
stg.BS_cross_section_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
elif stg.BS_raw_data_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_raw_data_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_raw_data_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_raw_data_pre_process_average[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
elif stg.BS_stream_bed_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(stg.BS_stream_bed_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(stg.BS_stream_bed_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_stream_bed_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
self.axis_BS[f].plot(x_time[f, :],
-stg.depth_bottom[self.combobox_acoustic_data_choice.currentIndex()],
color='black', linewidth=1, linestyle="solid")
elif stg.BS_cross_section_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_cross_section_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_cross_section_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(
x_time[f, :], -y_depth[f, :],
stg.BS_cross_section_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
elif stg.BS_raw_data_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_raw_data_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_raw_data_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_raw_data_pre_process_SNR[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
elif stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_stream_bed[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
elif stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_cross_section[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
elif stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
val_min = np.nanmin(
stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(
stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(x_time[f, :], -y_depth[f, :],
stg.BS_raw_data[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
self.axis_BS[f].text(1, .70, stg.freq_text[self.combobox_acoustic_data_choice.currentIndex()][f],
fontsize=14, fontweight='bold', fontname="DejaVu Sans", c="black", alpha=0.5,
horizontalalignment='right', verticalalignment='bottom',
transform=self.axis_BS[f].transAxes)
self.axis_BS[f].text(
1, .70, stg.freq_text[data_id][f],
fontsize=14, fontweight='bold', fontname="DejaVu Sans",
c="black", alpha=0.5,
horizontalalignment='right',
verticalalignment='bottom',
transform=self.axis_BS[f].transAxes
)
# --- Display red line on acoustic recording for profile position ---
freq_id = self.combobox_frequency_profile.currentIndex()
self.red_line_return, = self.axis_BS[self.combobox_frequency_profile.currentIndex()].plot(
x_time[self.combobox_frequency_profile.currentIndex(), self.slider.value() - 1] *
self.red_line_return, = self.axis_BS[freq_id].plot(
x_time[freq_id, self.slider.value() - 1] *
np.ones(y_depth.shape[1]),
-y_depth[self.combobox_frequency_profile.currentIndex(), :],
linestyle="solid", linewidth=2, color="red")
-y_depth[freq_id, :],
linestyle="solid", linewidth=2, color="red"
)
self.fig_BS.supxlabel('Time (sec)', fontsize=10)
self.fig_BS.supylabel('Depth (m)', fontsize=10)
cbar = self.fig_BS.colorbar(pcm, ax=self.axis_BS[:], shrink=1, location='right')
cbar.set_label(label='Acoustic backscatter signal (V)', rotation=270, labelpad=10)
cbar = self.fig_BS.colorbar(pcm, ax=self.axis_BS[:],
shrink=1, location='right')
cbar.set_label(label='Acoustic backscatter signal (V)',
rotation=270, labelpad=10)
self.fig_BS.canvas.draw_idle()
else:
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.removeWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.removeWidget(self.scroll_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.removeWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.removeWidget(self.scroll_BS)
self.canvas_BS = FigureCanvas()
self.toolbar_BS = NavigationToolBar(self.canvas_BS, self)
self.scroll_BS.setWidget(self.canvas_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.addWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field.addWidget(self.scroll_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.addWidget(self.toolbar_BS)
self.verticalLayout_groupbox_plot_pre_processed_data_2D_field\
.addWidget(self.scroll_BS)
def update_label_cells_sec(self):
@ -1676,7 +1621,8 @@ class SignalProcessingTab(QWidget):
self.plot_pre_processed_BS_signal()
self.update_plot_pre_processed_profile()
def plot_pre_processed_profile(self):
@trace
def plot_pre_processed_profile(self, *args):
if ((self.combobox_acoustic_data_choice.currentIndex() != -1) and
(stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,))):