Signal processing: Fix #42.

dev-brahim
Pierre-Antoine 2025-03-25 10:03:41 +01:00
parent 74137405fc
commit 5f7c81f866
1 changed file with 205 additions and 269 deletions


@@ -515,7 +515,7 @@ class SignalProcessingTab(QWidget):
self.combobox_acoustic_data_choice.blockSignals(True)
self.full_update_fill_text()
self.update_SignalPreprocessingTab(recompute=True)
self.update_SignalPreprocessingTab()
self.combobox_acoustic_data_choice.blockSignals(False)
self.blockSignals(False)
@@ -535,7 +535,7 @@ class SignalProcessingTab(QWidget):
str(stg.Nb_cells_to_average_BS_signal[data_id])
)
def update_SignalPreprocessingTab(self, recompute=True):
def update_SignalPreprocessingTab(self):
""" The tab is updated in two cases :
- the user remove a file (in the list widget) in the first tab (Acoustic data), so that the combobox
@@ -573,9 +573,7 @@ class SignalProcessingTab(QWidget):
stg.freq_text[data_id]
)
if recompute:
self.recompute()
self.recompute()
self.replot()
logger.debug("Update the Signal preprocessing tab... Done")
@@ -583,6 +581,33 @@ class SignalProcessingTab(QWidget):
self.combobox_freq_noise_from_profile_tail.blockSignals(False)
self.combobox_acoustic_data_choice.blockSignals(False)
def _is_correct_shape(self, data):
data_id = self.combobox_acoustic_data_choice.currentIndex()
if stg.time_cross_section[data_id].shape != (0,):
x_time = stg.time_cross_section[data_id]
else:
x_time = stg.time[data_id]
if stg.depth_cross_section[data_id].shape != (0,):
y_depth = stg.depth_cross_section[data_id]
else:
y_depth = stg.depth[data_id]
time_shape, = x_time[data_id].shape
depth_shape, = y_depth[data_id].shape
logger.debug(f"_is_correct_shape: time shape: {time_shape}")
logger.debug(f"_is_correct_shape: depth shape: {depth_shape}")
logger.debug(f"_is_correct_shape: data shape: {data[data_id].shape}")
if data[data_id].shape == (0,):
return False
_, y, z = data[data_id].shape
return (y == depth_shape and z == time_shape)
def recompute(self):
data_id = self.combobox_acoustic_data_choice.currentIndex()
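For readers skimming the diff, here is a minimal standalone sketch of the shape test the new _is_correct_shape helper performs, written against plain NumPy arrays instead of the stg globals (the names data, time_axis and depth_axis are illustrative, not from the project):

import numpy as np

def is_correct_shape(data, time_axis, depth_axis):
    # Empty placeholders are stored as np.array([]), whose shape is (0,).
    if data.shape == (0,):
        return False
    # Acoustic blocks are laid out as (frequency, depth, time).
    _, n_depth, n_time = data.shape
    return n_depth == depth_axis.shape[0] and n_time == time_axis.shape[0]

# 2 frequencies, 50 depth cells, 200 time samples
print(is_correct_shape(np.zeros((2, 50, 200)), np.arange(200), np.arange(50)))  # True
print(is_correct_shape(np.array([]), np.arange(200), np.arange(50)))            # False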
@@ -835,28 +860,29 @@ class SignalProcessingTab(QWidget):
if len(stg.filename_BS_raw_data) == 0:
pass
else:
data_id = self.combobox_acoustic_data_choice.currentIndex()
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.SNR_filter_value[self.combobox_acoustic_data_choice.currentIndex()] = 0
stg.BS_noise_raw_data[data_id] = np.array([])
stg.BS_noise_averaged_data[data_id] = np.array([])
stg.SNR_raw_data[data_id] = np.array([])
stg.SNR_cross_section[data_id] = np.array([])
stg.SNR_stream_bed[data_id] = np.array([])
stg.time_noise[data_id] = np.array([])
stg.SNR_filter_value[data_id] = 0
stg.BS_raw_data_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_raw_data_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_raw_data_pre_process_SNR[data_id] = np.array([])
stg.BS_raw_data_pre_process_average[data_id] = np.array([])
stg.BS_cross_section_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_cross_section_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_cross_section_pre_process_SNR[data_id] = np.array([])
stg.BS_cross_section_pre_process_average[data_id] = np.array([])
stg.BS_stream_bed_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_stream_bed_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
print("stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()]", stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()])
if stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] == 0:
stg.BS_stream_bed_pre_process_SNR[data_id] = np.array([])
stg.BS_stream_bed_pre_process_average[data_id] = np.array([])
print("stg.noise_method[data_id]", stg.noise_method[data_id])
if stg.noise_method[data_id] == 0:
self.lineEdit_noise_file.clear()
elif stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] == 1:
elif stg.noise_method[data_id] == 1:
self.lineEdit_val1.clear()
self.lineEdit_val1.setText("0.00")
@@ -973,136 +999,140 @@ class SignalProcessingTab(QWidget):
def load_noise_data_and_compute_SNR(self):
data_id = self.combobox_acoustic_data_choice.currentIndex()
stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] = 0
stg.noise_method[data_id] = 0
noise_data = AcousticDataLoader(stg.path_BS_noise_data[self.combobox_acoustic_data_choice.currentIndex()] +
noise_data = AcousticDataLoader(stg.path_BS_noise_data[data_id] +
"/" +
stg.filename_BS_noise_data[self.combobox_acoustic_data_choice.currentIndex()])
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = noise_data._BS_raw_data
stg.filename_BS_noise_data[data_id])
stg.BS_noise_raw_data[data_id] = noise_data._BS_raw_data
stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = noise_data._time
stg.depth_noise[self.combobox_acoustic_data_choice.currentIndex()] = noise_data._r
stg.time_noise[data_id] = noise_data._time
stg.depth_noise[data_id] = noise_data._r
if stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.BS_stream_bed[data_id].shape != (0,):
noise = np.zeros(stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape)
noise = np.zeros(stg.BS_stream_bed[data_id].shape)
for f, _ in enumerate(noise_data._freq):
noise[f, :, :] = np.mean(
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], axis=(0, 1))
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = noise
stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] = (
np.divide((stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] -
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
stg.BS_noise_raw_data[data_id][f, :, :], axis=(0, 1))
stg.BS_noise_averaged_data[data_id] = noise
stg.SNR_stream_bed[data_id] = (
np.divide((stg.BS_stream_bed[data_id] -
stg.BS_noise_averaged_data[data_id]) ** 2,
stg.BS_noise_averaged_data[data_id] ** 2))
elif stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
elif stg.BS_cross_section[data_id].shape != (0,):
noise = np.zeros(stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape)
noise = np.zeros(stg.BS_cross_section[data_id].shape)
for f, _ in enumerate(noise_data._freq):
noise[f, :, :] = np.mean(
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], axis=(0, 1))
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = noise
stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()] = (
np.divide((stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()] -
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
stg.BS_noise_raw_data[data_id][f, :, :], axis=(0, 1))
stg.BS_noise_averaged_data[data_id] = noise
stg.SNR_cross_section[data_id] = (
np.divide((stg.BS_cross_section[data_id] -
stg.BS_noise_averaged_data[data_id]) ** 2,
stg.BS_noise_averaged_data[data_id] ** 2))
# stg.SNR_reshape = np.reshape(stg.SNR_cross_section, (stg.r.shape[1] * stg.t.shape[1], stg.freq.shape[0]), order="F")
else:
noise = np.zeros(stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape)
noise = np.zeros(stg.BS_raw_data[data_id].shape)
for f, _ in enumerate(noise_data._freq):
noise[f, :, :] = np.mean(
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], axis=(0, 1))
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = noise
stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
np.divide((stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()] -
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
stg.BS_noise_raw_data[data_id][f, :, :], axis=(0, 1))
stg.BS_noise_averaged_data[data_id] = noise
stg.SNR_raw_data[data_id] = (
np.divide((stg.BS_raw_data[data_id] -
stg.BS_noise_averaged_data[data_id]) ** 2,
stg.BS_noise_averaged_data[data_id] ** 2))
def open_plot_noise_window(self):
pnw = PlotNoiseWindow()
pnw.exec()
def compute_noise_from_profile_tail_value(self):
data_id = self.combobox_acoustic_data_choice.currentIndex()
stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] = 1
stg.noise_method[data_id] = 1
stg.noise_value[data_id] = (
float(self.lineEdit_profile_tail_value.text().replace(",", "."))
)
stg.noise_value[self.combobox_acoustic_data_choice.currentIndex()] = (
float(self.lineEdit_profile_tail_value.text().replace(",", ".")))
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()])
if stg.time_cross_section[data_id].shape != (0,):
stg.time_noise[data_id] = (
stg.time_cross_section[data_id]
)
else:
stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.time[self.combobox_acoustic_data_choice.currentIndex()])
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
stg.depth_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()])
stg.time_noise[data_id] = (
stg.time[data_id]
)
if stg.depth_cross_section[data_id].shape != (0,):
stg.depth_noise[data_id] = (
stg.depth_cross_section[data_id]
)
else:
stg.depth_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.depth[self.combobox_acoustic_data_choice.currentIndex()])
stg.depth_noise[data_id] = (
stg.depth[data_id]
)
# --- Compute noise from value and compute SNR ---
if stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
np.full(stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape,
if self._is_correct_shape(stg.BS_stream_bed):
stg.BS_noise_raw_data[data_id] = np.array([])
stg.BS_noise_raw_data[data_id] = (
np.full(stg.BS_stream_bed[data_id].shape,
float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][:, :,
:stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape[2]])
stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] = (
np.divide((stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()]
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
stg.BS_noise_averaged_data[data_id] = (
stg.BS_noise_raw_data[data_id][:, :,
:stg.BS_stream_bed[data_id].shape[2]])
stg.SNR_stream_bed[data_id] = (
np.divide((stg.BS_stream_bed[data_id]
- stg.BS_noise_raw_data[data_id]) ** 2,
stg.BS_noise_raw_data[data_id] ** 2))
elif stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
np.full(stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape,
elif self._is_correct_shape(stg.BS_cross_section):
stg.BS_noise_raw_data[data_id] = (
np.full(stg.BS_cross_section[data_id].shape,
float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][:, :,
:stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape[2]])
stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()] = (
np.divide((stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()]
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2)) #
stg.BS_noise_averaged_data[data_id] = (
stg.BS_noise_raw_data[data_id][:, :,
:stg.BS_cross_section[data_id].shape[2]])
stg.SNR_cross_section[data_id] = (
np.divide((stg.BS_cross_section[data_id]
- stg.BS_noise_raw_data[data_id]) ** 2,
stg.BS_noise_raw_data[data_id] ** 2)) #
else:
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
np.full(stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape,
stg.BS_noise_raw_data[data_id] = (
np.full(stg.BS_raw_data[data_id].shape,
float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = (
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()])
stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
np.divide((stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()]
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
stg.BS_noise_averaged_data[data_id] = (
stg.BS_noise_raw_data[data_id])
stg.SNR_raw_data[data_id] = (
np.divide((stg.BS_raw_data[data_id]
- stg.BS_noise_raw_data[data_id]) ** 2,
stg.BS_noise_raw_data[data_id] ** 2))
self.combobox_frequency_profile.clear()
self.combobox_frequency_profile.addItems(
[f for f in stg.freq_text[self.combobox_acoustic_data_choice.currentIndex()]])
[f for f in stg.freq_text[data_id]])
# --- Trigger graphic widgets ---
if stg.SNR_filter_value[self.combobox_acoustic_data_choice.currentIndex()] == 0:
if stg.SNR_filter_value[data_id] == 0:
self.lineEdit_SNR_criterion.setText("0.00")
else:
self.lineEdit_SNR_criterion.setText(str(stg.SNR_filter_value[self.combobox_acoustic_data_choice.currentIndex()]))
self.lineEdit_SNR_criterion.setText(str(stg.SNR_filter_value[data_id]))
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
self.slider.setMaximum(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape[1])
if stg.time_cross_section[data_id].shape != (0,):
self.slider.setMaximum(stg.time_cross_section[data_id].shape[1])
else:
self.slider.setMaximum(stg.time[self.combobox_acoustic_data_choice.currentIndex()].shape[1])
self.slider.setMaximum(stg.time[data_id].shape[1])
# self.activate_list_of_pre_processed_data()
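As a side note, the SNR computed in this hunk follows the same pattern for the stream-bed, cross-section and raw-data cases: average the noise recording per frequency, then divide the squared excess backscatter by the squared noise. A hedged, self-contained sketch (array names are illustrative; shapes follow the (frequency, depth, time) layout used above):

import numpy as np

bs = np.random.rand(2, 50, 200) + 1.0        # backscatter block, per frequency
bs_noise = 0.1 * np.random.rand(2, 50, 200)  # separate noise recording

# One scalar noise level per frequency, broadcast back to the data shape.
noise = np.zeros_like(bs)
for f in range(bs_noise.shape[0]):
    noise[f, :, :] = np.mean(bs_noise[f, :, :])

# SNR as written in the diff: (signal - noise)^2 / noise^2.
snr = np.divide((bs - noise) ** 2, noise ** 2)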
@@ -1150,14 +1180,17 @@ class SignalProcessingTab(QWidget):
# elif self.canvas_SNR == None:
else:
data_id = self.combobox_acoustic_data_choice.currentIndex()
if ((self.combobox_acoustic_data_choice.currentIndex() != -1)
and (stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,))):
if ((data_id != -1)
and (stg.BS_noise_raw_data[data_id].shape != (0,))):
self.verticalLayout_groupbox_plot_SNR.removeWidget(self.toolbar_SNR)
self.verticalLayout_groupbox_plot_SNR.removeWidget(self.scroll_SNR)
self.fig_SNR, self.axis_SNR = plt.subplots(nrows=stg.freq[self.combobox_acoustic_data_choice.currentIndex()].shape[0], ncols=1, sharex=True, sharey=False, layout='constrained')
self.fig_SNR, self.axis_SNR = plt.subplots(
nrows=stg.freq[data_id].shape[0], ncols=1,
sharex=True, sharey=False, layout='constrained'
)
self.canvas_SNR = FigureCanvas(self.fig_SNR)
self.toolbar_SNR = NavigationToolBar(self.canvas_SNR, self)
@@ -1166,158 +1199,82 @@ class SignalProcessingTab(QWidget):
self.verticalLayout_groupbox_plot_SNR.addWidget(self.toolbar_SNR)
self.verticalLayout_groupbox_plot_SNR.addWidget(self.scroll_SNR)
for f, _ in enumerate(stg.freq[self.combobox_acoustic_data_choice.currentIndex()]):
if stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
else:
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
val_min = np.nanmin(stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == val_max:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else:
if val_min == 0:
val_min = 1e-5
if val_max > 1000:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else:
levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
cf = (self.axis_SNR[f].contourf(x, -y,
stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
levels, cmap='gist_rainbow',
norm=norm))
elif stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
else:
if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
x, y = np.meshgrid(
stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
val_min = np.nanmin(stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == val_max:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else:
if val_min == 0:
val_min = 1e-5
if val_max > 1000:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else:
levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
cf = (self.axis_SNR[f].contourf(x, -y,
stg.SNR_cross_section[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
levels, cmap='gist_rainbow', norm=norm))
for f, _ in enumerate(stg.freq[data_id]):
if stg.SNR_stream_bed[data_id].shape != (0,):
SNR_data = stg.SNR_stream_bed
elif stg.SNR_cross_section[data_id].shape != (0,):
SNR_data = stg.SNR_cross_section
if stg.time_cross_section[data_id].shape != (0,):
time_data = stg.time_cross_section
else:
time_data = stg.time
x, y = np.meshgrid(stg.time[self.combobox_acoustic_data_choice.currentIndex()][0, :],
stg.depth[self.combobox_acoustic_data_choice.currentIndex()][0, :])
if stg.depth_cross_section[data_id].shape != (0,):
depth_data = stg.depth_cross_section
elif stg.depth[data_id].shape != (0,):
depth_data = stg.depth
val_min = np.nanmin(stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
val_max = np.nanmax(stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == val_max:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
x, y = np.meshgrid(
time_data[data_id][f, :],
depth_data[data_id][f, :]
)
val_min = np.nanmin(SNR_data[data_id][f, :, :])
val_max = np.nanmax(SNR_data[data_id][f, :, :])
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
if val_min != val_max:
if val_min == 0:
val_min = 1e-5
else:
if val_min == 0:
val_min = 1e-5
if val_max > 1000:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else:
levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max*1000 + 1])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
levels = np.array(
[00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
)
bounds = [
00.1, 1, 2, 10, 100, 1000,
val_max * 1000 + 1
]
cf = (self.axis_SNR[f].contourf(x, -y,
stg.SNR_raw_data[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
levels, cmap='gist_rainbow', norm=norm))
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
cf = self.axis_SNR[f].contourf(
x, -y,
SNR_data[data_id][f, :, :],
levels, cmap='gist_rainbow',
norm=norm
)
self.axis_SNR[f].text(1, .70, stg.freq_text[self.combobox_acoustic_data_choice.currentIndex()][f],
fontsize=14, fontweight='bold', fontname="DejaVu Sans", c="black", alpha=0.5,
horizontalalignment='right', verticalalignment='bottom',
transform=self.axis_SNR[f].transAxes)
self.axis_SNR[f].text(
1, .70, stg.freq_text[data_id][f],
fontsize=14, fontweight='bold', fontname="DejaVu Sans",
c="black", alpha=0.5,
horizontalalignment='right',
verticalalignment='bottom',
transform=self.axis_SNR[f].transAxes
)
self.fig_SNR.supxlabel('Time (sec)', fontsize=10)
self.fig_SNR.supylabel('Depth (m)', fontsize=10)
cbar = self.fig_SNR.colorbar(cf, ax=self.axis_SNR[:], shrink=1, location='right')
cbar.set_label(label='Signal to Noise Ratio', rotation=270, labelpad=10)
cbar.set_ticklabels(['0', '1', '2', '10', '100', r'10$^3$', r'10$^6$'])
cbar = self.fig_SNR.colorbar(
cf, ax=self.axis_SNR[:],
shrink=1, location='right'
)
cbar.set_label(
label='Signal to Noise Ratio',
rotation=270, labelpad=10
)
cbar.set_ticklabels(
[
'0', '1', '2', '10', '100',
r'10$^3$', r'10$^6$'
]
)
self.fig_SNR.canvas.draw_idle()
else:
self.verticalLayout_groupbox_plot_SNR.removeWidget(self.toolbar_SNR)
self.verticalLayout_groupbox_plot_SNR.removeWidget(self.scroll_SNR)
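The refactored plotting loop above collapses three nearly identical branches into one; the fixed colour scale it relies on can be reproduced in isolation roughly as follows (synthetic data, matplotlib only, outside the application's widgets):

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm

snr = np.abs(np.random.randn(50, 200)) * 1e4   # synthetic SNR field, max well above 1000
val_max = np.nanmax(snr)

# Same decade-style levels as in the diff; the last bound tracks the data maximum.
levels = np.array([0.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [0.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)

fig, ax = plt.subplots(layout='constrained')
x, y = np.meshgrid(np.arange(200), np.arange(50))
cf = ax.contourf(x, -y, snr, levels, cmap='gist_rainbow', norm=norm)
cbar = fig.colorbar(cf, ax=ax, location='right')
cbar.set_label(label='Signal to Noise Ratio', rotation=270, labelpad=10)
plt.show()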
@@ -1439,9 +1396,6 @@ class SignalProcessingTab(QWidget):
x_time = stg.time[data_id]
y_depth = stg.depth[data_id]
logger.debug(f"x_time: {x_time[data_id].shape}")
logger.debug(f"y_depth: {y_depth[data_id].shape}")
for f, _ in enumerate(stg.freq[data_id]):
if stg.BS_stream_bed_pre_process_average[data_id].shape != (0,):
BS_data = stg.BS_stream_bed_pre_process_average
@@ -1462,8 +1416,6 @@ class SignalProcessingTab(QWidget):
elif stg.BS_raw_data[data_id].shape != (0,):
BS_data = stg.BS_raw_data
logger.debug(f"BS_data: {BS_data[data_id].shape}")
val_min = np.nanmin(
BS_data[data_id][f, :, :]
)
@@ -1574,20 +1526,10 @@ class SignalProcessingTab(QWidget):
)
)
if stg.time_cross_section[data_id].shape != (0,):
if stg.depth_cross_section[data_id].shape != (0,):
x_time = stg.time_cross_section[data_id]
y_depth = stg.depth_cross_section[data_id]
elif stg.depth[data_id].shape != (0,):
x_time = stg.time_cross_section[data_id]
y_depth = stg.depth[data_id]
else:
if stg.depth_cross_section[data_id].shape != (0,):
x_time = stg.time[data_id]
y_depth = stg.depth_cross_section[data_id]
elif stg.depth[data_id].shape != (0,):
x_time = stg.time[data_id]
y_depth = stg.depth[data_id]
if stg.depth_cross_section[data_id].shape != (0,):
y_depth = stg.depth_cross_section[data_id]
elif stg.depth[data_id].shape != (0,):
y_depth = stg.depth[data_id]
BS = [
stg.BS_stream_bed_pre_process_SNR,
@@ -1607,11 +1549,6 @@ class SignalProcessingTab(QWidget):
stg.BS_raw_data_pre_process_average,
]
time_shape, = x_time[data_id].shape
depth_shape, = y_depth[data_id].shape
logger.debug(f"time_shape: {time_shape}")
logger.debug(f"depth_shape: {depth_shape}")
BS_data = stg.BS_raw_data
BS_data_ppa = stg.BS_raw_data_pre_process_average
for i in range(len(BS)):
@@ -1620,8 +1557,7 @@ class SignalProcessingTab(QWidget):
if bs[data_id].shape == (0,):
continue
x, y, z = bs[data_id].shape
if y == depth_shape and z == time_shape:
if self._is_correct_shape(bs):
BS_data = bs
BS_data_ppa = BS_ppa[i]
break