Signal processing: Fix #42.

dev-brahim
Pierre-Antoine 2025-03-25 10:03:41 +01:00
parent 74137405fc
commit 5f7c81f866
1 changed file with 205 additions and 269 deletions


@@ -515,7 +515,7 @@ class SignalProcessingTab(QWidget):
 self.combobox_acoustic_data_choice.blockSignals(True)
 self.full_update_fill_text()
- self.update_SignalPreprocessingTab(recompute=True)
+ self.update_SignalPreprocessingTab()
 self.combobox_acoustic_data_choice.blockSignals(False)
 self.blockSignals(False)
@ -535,7 +535,7 @@ class SignalProcessingTab(QWidget):
str(stg.Nb_cells_to_average_BS_signal[data_id]) str(stg.Nb_cells_to_average_BS_signal[data_id])
) )
def update_SignalPreprocessingTab(self, recompute=True): def update_SignalPreprocessingTab(self):
""" The tab is updated in two cases : """ The tab is updated in two cases :
- the user remove a file (in the list widget) in the first tab (Acoustic data), so that the combobox - the user remove a file (in the list widget) in the first tab (Acoustic data), so that the combobox
@@ -573,9 +573,7 @@ class SignalProcessingTab(QWidget):
 stg.freq_text[data_id]
 )
- if recompute:
 self.recompute()
 self.replot()
 logger.debug("Update the Signal preprocessing tab... Done")
@@ -583,6 +581,33 @@ class SignalProcessingTab(QWidget):
 self.combobox_freq_noise_from_profile_tail.blockSignals(False)
 self.combobox_acoustic_data_choice.blockSignals(False)
+ def _is_correct_shape(self, data):
+ data_id = self.combobox_acoustic_data_choice.currentIndex()
+ if stg.time_cross_section[data_id].shape != (0,):
+ x_time = stg.time_cross_section[data_id]
+ else:
+ x_time = stg.time[data_id]
+ if stg.depth_cross_section[data_id].shape != (0,):
+ y_depth = stg.depth_cross_section[data_id]
+ else:
+ y_depth = stg.depth[data_id]
+ time_shape, = x_time[data_id].shape
+ depth_shape, = y_depth[data_id].shape
+ logger.debug(f"_is_correct_shape: time shape: {time_shape}")
+ logger.debug(f"_is_correct_shape: depth shape: {depth_shape}")
+ logger.debug(f"_is_correct_shape: data shape: {data[data_id].shape}")
+ if data[data_id].shape == (0,):
+ return False
+ _, y, z = data[data_id].shape
+ return (y == depth_shape and z == time_shape)
 def recompute(self):
 data_id = self.combobox_acoustic_data_choice.currentIndex()
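The new _is_correct_shape helper only compares dimensions: it picks the active time and depth axes (cross-section ones if present, raw ones otherwise) and checks that a candidate (frequency, depth, time) backscatter array matches them. A minimal standalone sketch of the same consistency check, with plain NumPy arrays standing in for the stg settings module and the Qt combobox (the names below are invented for illustration, not the project's API):

import numpy as np

def is_correct_shape(data, x_time, y_depth):
    # Empty placeholder arrays are stored as np.array([]), whose shape is (0,)
    if data.shape == (0,):
        return False
    time_shape, = x_time.shape       # 1-D time axis
    depth_shape, = y_depth.shape     # 1-D depth axis
    _, n_depth, n_time = data.shape  # (frequency, depth, time)
    return n_depth == depth_shape and n_time == time_shape

# 2 frequencies, 50 depth cells, 200 pings
print(is_correct_shape(np.zeros((2, 50, 200)), np.arange(200), np.arange(50)))  # True
print(is_correct_shape(np.array([]), np.arange(200), np.arange(50)))            # False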
@@ -835,28 +860,29 @@ class SignalProcessingTab(QWidget):
 if len(stg.filename_BS_raw_data) == 0:
 pass
 else:
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.SNR_filter_value[self.combobox_acoustic_data_choice.currentIndex()] = 0
- stg.BS_raw_data_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_raw_data_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_cross_section_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_cross_section_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_stream_bed_pre_process_SNR[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_stream_bed_pre_process_average[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- print("stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()]", stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()])
- if stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] == 0:
+ data_id = self.combobox_acoustic_data_choice.currentIndex()
+ stg.BS_noise_raw_data[data_id] = np.array([])
+ stg.BS_noise_averaged_data[data_id] = np.array([])
+ stg.SNR_raw_data[data_id] = np.array([])
+ stg.SNR_cross_section[data_id] = np.array([])
+ stg.SNR_stream_bed[data_id] = np.array([])
+ stg.time_noise[data_id] = np.array([])
+ stg.SNR_filter_value[data_id] = 0
+ stg.BS_raw_data_pre_process_SNR[data_id] = np.array([])
+ stg.BS_raw_data_pre_process_average[data_id] = np.array([])
+ stg.BS_cross_section_pre_process_SNR[data_id] = np.array([])
+ stg.BS_cross_section_pre_process_average[data_id] = np.array([])
+ stg.BS_stream_bed_pre_process_SNR[data_id] = np.array([])
+ stg.BS_stream_bed_pre_process_average[data_id] = np.array([])
+ print("stg.noise_method[data_id]", stg.noise_method[data_id])
+ if stg.noise_method[data_id] == 0:
 self.lineEdit_noise_file.clear()
- elif stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] == 1:
+ elif stg.noise_method[data_id] == 1:
 self.lineEdit_val1.clear()
 self.lineEdit_val1.setText("0.00")
@@ -973,136 +999,140 @@ class SignalProcessingTab(QWidget):
 def load_noise_data_and_compute_SNR(self):
- stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] = 0
- noise_data = AcousticDataLoader(stg.path_BS_noise_data[self.combobox_acoustic_data_choice.currentIndex()] +
+ data_id = self.combobox_acoustic_data_choice.currentIndex()
+ stg.noise_method[data_id] = 0
+ noise_data = AcousticDataLoader(stg.path_BS_noise_data[data_id] +
 "/" +
- stg.filename_BS_noise_data[self.combobox_acoustic_data_choice.currentIndex()])
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = noise_data._BS_raw_data
- stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = noise_data._time
- stg.depth_noise[self.combobox_acoustic_data_choice.currentIndex()] = noise_data._r
- if stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- noise = np.zeros(stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape)
+ stg.filename_BS_noise_data[data_id])
+ stg.BS_noise_raw_data[data_id] = noise_data._BS_raw_data
+ stg.time_noise[data_id] = noise_data._time
+ stg.depth_noise[data_id] = noise_data._r
+ if stg.BS_stream_bed[data_id].shape != (0,):
+ noise = np.zeros(stg.BS_stream_bed[data_id].shape)
 for f, _ in enumerate(noise_data._freq):
 noise[f, :, :] = np.mean(
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], axis=(0, 1))
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = noise
- stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.divide((stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] -
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
- elif stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- noise = np.zeros(stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape)
+ stg.BS_noise_raw_data[data_id][f, :, :], axis=(0, 1))
+ stg.BS_noise_averaged_data[data_id] = noise
+ stg.SNR_stream_bed[data_id] = (
+ np.divide((stg.BS_stream_bed[data_id] -
+ stg.BS_noise_averaged_data[data_id]) ** 2,
+ stg.BS_noise_averaged_data[data_id] ** 2))
+ elif stg.BS_cross_section[data_id].shape != (0,):
+ noise = np.zeros(stg.BS_cross_section[data_id].shape)
 for f, _ in enumerate(noise_data._freq):
 noise[f, :, :] = np.mean(
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], axis=(0, 1))
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = noise
- stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.divide((stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()] -
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
+ stg.BS_noise_raw_data[data_id][f, :, :], axis=(0, 1))
+ stg.BS_noise_averaged_data[data_id] = noise
+ stg.SNR_cross_section[data_id] = (
+ np.divide((stg.BS_cross_section[data_id] -
+ stg.BS_noise_averaged_data[data_id]) ** 2,
+ stg.BS_noise_averaged_data[data_id] ** 2))
 # stg.SNR_reshape = np.reshape(stg.SNR_cross_section, (stg.r.shape[1] * stg.t.shape[1], stg.freq.shape[0]), order="F")
 else:
- noise = np.zeros(stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape)
+ noise = np.zeros(stg.BS_raw_data[data_id].shape)
 for f, _ in enumerate(noise_data._freq):
 noise[f, :, :] = np.mean(
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], axis=(0, 1))
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = noise
- stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.divide((stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()] -
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
+ stg.BS_noise_raw_data[data_id][f, :, :], axis=(0, 1))
+ stg.BS_noise_averaged_data[data_id] = noise
+ stg.SNR_raw_data[data_id] = (
+ np.divide((stg.BS_raw_data[data_id] -
+ stg.BS_noise_averaged_data[data_id]) ** 2,
+ stg.BS_noise_averaged_data[data_id] ** 2))
 def open_plot_noise_window(self):
 pnw = PlotNoiseWindow()
 pnw.exec()
 def compute_noise_from_profile_tail_value(self):
- stg.noise_method[self.combobox_acoustic_data_choice.currentIndex()] = 1
- stg.noise_value[self.combobox_acoustic_data_choice.currentIndex()] = (
- float(self.lineEdit_profile_tail_value.text().replace(",", ".")))
- if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()])
- else:
- stg.time_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.time[self.combobox_acoustic_data_choice.currentIndex()])
- if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- stg.depth_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()])
- else:
- stg.depth_noise[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.depth[self.combobox_acoustic_data_choice.currentIndex()])
+ data_id = self.combobox_acoustic_data_choice.currentIndex()
+ stg.noise_method[data_id] = 1
+ stg.noise_value[data_id] = (
+ float(self.lineEdit_profile_tail_value.text().replace(",", "."))
+ )
+ if stg.time_cross_section[data_id].shape != (0,):
+ stg.time_noise[data_id] = (
+ stg.time_cross_section[data_id]
+ )
+ else:
+ stg.time_noise[data_id] = (
+ stg.time[data_id]
+ )
+ if stg.depth_cross_section[data_id].shape != (0,):
+ stg.depth_noise[data_id] = (
+ stg.depth_cross_section[data_id]
+ )
+ else:
+ stg.depth_noise[data_id] = (
+ stg.depth[data_id]
+ )
 # --- Compute noise from value and compute SNR ---
- if stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = np.array([])
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.full(stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape,
- float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][:, :,
- :stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape[2]])
- stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.divide((stg.BS_stream_bed[self.combobox_acoustic_data_choice.currentIndex()]
- - stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
- elif stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.full(stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape,
- float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()][:, :,
- :stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape[2]])
- stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.divide((stg.BS_cross_section[self.combobox_acoustic_data_choice.currentIndex()]
- - stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2)) #
- else:
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.full(stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape,
- float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
- stg.BS_noise_averaged_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()])
- stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()] = (
- np.divide((stg.BS_raw_data[self.combobox_acoustic_data_choice.currentIndex()]
- - stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()]) ** 2,
- stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()] ** 2))
+ if self._is_correct_shape(stg.BS_stream_bed):
+ stg.BS_noise_raw_data[data_id] = np.array([])
+ stg.BS_noise_raw_data[data_id] = (
+ np.full(stg.BS_stream_bed[data_id].shape,
+ float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
+ stg.BS_noise_averaged_data[data_id] = (
+ stg.BS_noise_raw_data[data_id][:, :,
+ :stg.BS_stream_bed[data_id].shape[2]])
+ stg.SNR_stream_bed[data_id] = (
+ np.divide((stg.BS_stream_bed[data_id]
+ - stg.BS_noise_raw_data[data_id]) ** 2,
+ stg.BS_noise_raw_data[data_id] ** 2))
+ elif self._is_correct_shape(stg.BS_cross_section):
+ stg.BS_noise_raw_data[data_id] = (
+ np.full(stg.BS_cross_section[data_id].shape,
+ float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
+ stg.BS_noise_averaged_data[data_id] = (
+ stg.BS_noise_raw_data[data_id][:, :,
+ :stg.BS_cross_section[data_id].shape[2]])
+ stg.SNR_cross_section[data_id] = (
+ np.divide((stg.BS_cross_section[data_id]
+ - stg.BS_noise_raw_data[data_id]) ** 2,
+ stg.BS_noise_raw_data[data_id] ** 2)) #
+ else:
+ stg.BS_noise_raw_data[data_id] = (
+ np.full(stg.BS_raw_data[data_id].shape,
+ float(self.lineEdit_profile_tail_value.text().replace(",", "."))))
+ stg.BS_noise_averaged_data[data_id] = (
+ stg.BS_noise_raw_data[data_id])
+ stg.SNR_raw_data[data_id] = (
+ np.divide((stg.BS_raw_data[data_id]
+ - stg.BS_noise_raw_data[data_id]) ** 2,
+ stg.BS_noise_raw_data[data_id] ** 2))
 self.combobox_frequency_profile.clear()
 self.combobox_frequency_profile.addItems(
- [f for f in stg.freq_text[self.combobox_acoustic_data_choice.currentIndex()]])
+ [f for f in stg.freq_text[data_id]])
 # --- Trigger graphic widgets ---
- if stg.SNR_filter_value[self.combobox_acoustic_data_choice.currentIndex()] == 0:
+ if stg.SNR_filter_value[data_id] == 0:
 self.lineEdit_SNR_criterion.setText("0.00")
 else:
- self.lineEdit_SNR_criterion.setText(str(stg.SNR_filter_value[self.combobox_acoustic_data_choice.currentIndex()]))
- if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- self.slider.setMaximum(stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape[1])
+ self.lineEdit_SNR_criterion.setText(str(stg.SNR_filter_value[data_id]))
+ if stg.time_cross_section[data_id].shape != (0,):
+ self.slider.setMaximum(stg.time_cross_section[data_id].shape[1])
 else:
- self.slider.setMaximum(stg.time[self.combobox_acoustic_data_choice.currentIndex()].shape[1])
+ self.slider.setMaximum(stg.time[data_id].shape[1])
 # self.activate_list_of_pre_processed_data()
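Whichever way the noise is obtained (loaded from a noise file or taken as a constant profile-tail value), the SNR arrays filled above are the same element-wise ratio, (BS - noise)^2 / noise^2, computed with np.divide. A small illustrative example with toy numbers (the array names here are made up, not the stg fields):

import numpy as np

bs = np.array([[[4.0, 6.0], [8.0, 2.0]]])   # one frequency, 2 depth cells, 2 pings
noise = np.full_like(bs, 2.0)               # constant noise level, e.g. a profile-tail value

snr = np.divide((bs - noise) ** 2, noise ** 2)
print(snr)   # [[[1. 4.] [9. 0.]]]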
@@ -1150,14 +1180,17 @@ class SignalProcessingTab(QWidget):
 # elif self.canvas_SNR == None:
 else:
- if ((self.combobox_acoustic_data_choice.currentIndex() != -1)
- and (stg.BS_noise_raw_data[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,))):
+ data_id = self.combobox_acoustic_data_choice.currentIndex()
+ if ((data_id != -1)
+ and (stg.BS_noise_raw_data[data_id].shape != (0,))):
 self.verticalLayout_groupbox_plot_SNR.removeWidget(self.toolbar_SNR)
 self.verticalLayout_groupbox_plot_SNR.removeWidget(self.scroll_SNR)
- self.fig_SNR, self.axis_SNR = plt.subplots(nrows=stg.freq[self.combobox_acoustic_data_choice.currentIndex()].shape[0], ncols=1, sharex=True, sharey=False, layout='constrained')
+ self.fig_SNR, self.axis_SNR = plt.subplots(
+ nrows=stg.freq[data_id].shape[0], ncols=1,
+ sharex=True, sharey=False, layout='constrained'
+ )
 self.canvas_SNR = FigureCanvas(self.fig_SNR)
 self.toolbar_SNR = NavigationToolBar(self.canvas_SNR, self)
@@ -1166,158 +1199,82 @@ class SignalProcessingTab(QWidget):
 self.verticalLayout_groupbox_plot_SNR.addWidget(self.toolbar_SNR)
 self.verticalLayout_groupbox_plot_SNR.addWidget(self.scroll_SNR)
- for f, _ in enumerate(stg.freq[self.combobox_acoustic_data_choice.currentIndex()]):
- if stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- else:
- if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- val_min = np.nanmin(stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
- val_max = np.nanmax(stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
- if val_min == val_max:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- else:
- if val_min == 0:
- val_min = 1e-5
- if val_max > 1000:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- else:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- cf = (self.axis_SNR[f].contourf(x, -y,
- stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
- levels, cmap='gist_rainbow',
- norm=norm))
- elif stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- if stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- else:
- if stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- elif stg.depth[self.combobox_acoustic_data_choice.currentIndex()].shape != (0,):
- x, y = np.meshgrid(
- stg.time[self.combobox_acoustic_data_choice.currentIndex()][f, :],
- stg.depth[self.combobox_acoustic_data_choice.currentIndex()][f, :])
- val_min = np.nanmin(stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
- val_max = np.nanmax(stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
- if val_min == val_max:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- else:
- if val_min == 0:
- val_min = 1e-5
- if val_max > 1000:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- else:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- cf = (self.axis_SNR[f].contourf(x, -y,
- stg.SNR_cross_section[
- self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
- levels, cmap='gist_rainbow', norm=norm))
- else:
- x, y = np.meshgrid(stg.time[self.combobox_acoustic_data_choice.currentIndex()][0, :],
- stg.depth[self.combobox_acoustic_data_choice.currentIndex()][0, :])
- val_min = np.nanmin(stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
- val_max = np.nanmax(stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
- if val_min == val_max:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- else:
- if val_min == 0:
- val_min = 1e-5
- if val_max > 1000:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- else:
- levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max*1000 + 1])
- bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
- norm = BoundaryNorm(boundaries=bounds, ncolors=300)
- cf = (self.axis_SNR[f].contourf(x, -y,
- stg.SNR_raw_data[
- self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
- levels, cmap='gist_rainbow', norm=norm))
- self.axis_SNR[f].text(1, .70, stg.freq_text[self.combobox_acoustic_data_choice.currentIndex()][f],
- fontsize=14, fontweight='bold', fontname="DejaVu Sans", c="black", alpha=0.5,
- horizontalalignment='right', verticalalignment='bottom',
- transform=self.axis_SNR[f].transAxes)
+ for f, _ in enumerate(stg.freq[data_id]):
+ if stg.SNR_stream_bed[data_id].shape != (0,):
+ SNR_data = stg.SNR_stream_bed
+ elif stg.SNR_cross_section[data_id].shape != (0,):
+ SNR_data = stg.SNR_cross_section
+ if stg.time_cross_section[data_id].shape != (0,):
+ time_data = stg.time_cross_section
+ else:
+ time_data = stg.time
+ if stg.depth_cross_section[data_id].shape != (0,):
+ depth_data = stg.depth_cross_section
+ elif stg.depth[data_id].shape != (0,):
+ depth_data = stg.depth
+ x, y = np.meshgrid(
+ time_data[data_id][f, :],
+ depth_data[data_id][f, :]
+ )
+ val_min = np.nanmin(SNR_data[data_id][f, :, :])
+ val_max = np.nanmax(SNR_data[data_id][f, :, :])
+ levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
+ bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
+ if val_min != val_max:
+ if val_min == 0:
+ val_min = 1e-5
+ else:
+ levels = np.array(
+ [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
+ )
+ bounds = [
+ 00.1, 1, 2, 10, 100, 1000,
+ val_max * 1000 + 1
+ ]
+ norm = BoundaryNorm(boundaries=bounds, ncolors=300)
+ cf = self.axis_SNR[f].contourf(
+ x, -y,
+ SNR_data[data_id][f, :, :],
+ levels, cmap='gist_rainbow',
+ norm=norm
+ )
+ self.axis_SNR[f].text(
+ 1, .70, stg.freq_text[data_id][f],
+ fontsize=14, fontweight='bold', fontname="DejaVu Sans",
+ c="black", alpha=0.5,
+ horizontalalignment='right',
+ verticalalignment='bottom',
+ transform=self.axis_SNR[f].transAxes
+ )
 self.fig_SNR.supxlabel('Time (sec)', fontsize=10)
 self.fig_SNR.supylabel('Depth (m)', fontsize=10)
- cbar = self.fig_SNR.colorbar(cf, ax=self.axis_SNR[:], shrink=1, location='right')
- cbar.set_label(label='Signal to Noise Ratio', rotation=270, labelpad=10)
- cbar.set_ticklabels(['0', '1', '2', '10', '100', r'10$^3$', r'10$^6$'])
+ cbar = self.fig_SNR.colorbar(
+ cf, ax=self.axis_SNR[:],
+ shrink=1, location='right'
+ )
+ cbar.set_label(
+ label='Signal to Noise Ratio',
+ rotation=270, labelpad=10
+ )
+ cbar.set_ticklabels(
+ [
+ '0', '1', '2', '10', '100',
+ r'10$^3$', r'10$^6$'
+ ]
+ )
 self.fig_SNR.canvas.draw_idle()
 else:
 self.verticalLayout_groupbox_plot_SNR.removeWidget(self.toolbar_SNR)
 self.verticalLayout_groupbox_plot_SNR.removeWidget(self.scroll_SNR)
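The plotting code above builds a stepped colour scale rather than a linear one: BoundaryNorm assigns each SNR band (0.1-1, 1-2, 2-10, 10-100, ...) its own slice of the colormap, and contourf draws the field with those discrete levels. A minimal sketch of that mechanism outside the Qt widgets, with an invented SNR field:

import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm

rng = np.random.default_rng(0)
snr = rng.lognormal(mean=2.0, sigma=1.5, size=(40, 100))   # fake 40 depth cells x 100 pings

levels = np.array([0.1, 1, 2, 10, 100, 1000, 1e6])
norm = BoundaryNorm(boundaries=levels, ncolors=300)        # one colour band per interval

fig, ax = plt.subplots()
cf = ax.contourf(snr, levels, cmap='gist_rainbow', norm=norm)
fig.colorbar(cf, ax=ax, label='Signal to Noise Ratio')
plt.show()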
@@ -1439,9 +1396,6 @@ class SignalProcessingTab(QWidget):
 x_time = stg.time[data_id]
 y_depth = stg.depth[data_id]
- logger.debug(f"x_time: {x_time[data_id].shape}")
- logger.debug(f"y_depth: {y_depth[data_id].shape}")
 for f, _ in enumerate(stg.freq[data_id]):
 if stg.BS_stream_bed_pre_process_average[data_id].shape != (0,):
 BS_data = stg.BS_stream_bed_pre_process_average
@@ -1462,8 +1416,6 @@ class SignalProcessingTab(QWidget):
 elif stg.BS_raw_data[data_id].shape != (0,):
 BS_data = stg.BS_raw_data
- logger.debug(f"BS_data: {BS_data[data_id].shape}")
 val_min = np.nanmin(
 BS_data[data_id][f, :, :]
 )
@@ -1574,19 +1526,9 @@ class SignalProcessingTab(QWidget):
 )
 )
- if stg.time_cross_section[data_id].shape != (0,):
- if stg.depth_cross_section[data_id].shape != (0,):
- x_time = stg.time_cross_section[data_id]
- y_depth = stg.depth_cross_section[data_id]
- elif stg.depth[data_id].shape != (0,):
- x_time = stg.time_cross_section[data_id]
- y_depth = stg.depth[data_id]
- else:
- if stg.depth_cross_section[data_id].shape != (0,):
- x_time = stg.time[data_id]
- y_depth = stg.depth_cross_section[data_id]
- elif stg.depth[data_id].shape != (0,):
- x_time = stg.time[data_id]
- y_depth = stg.depth[data_id]
+ if stg.depth_cross_section[data_id].shape != (0,):
+ y_depth = stg.depth_cross_section[data_id]
+ elif stg.depth[data_id].shape != (0,):
+ y_depth = stg.depth[data_id]
 BS = [
@@ -1607,11 +1549,6 @@ class SignalProcessingTab(QWidget):
 stg.BS_raw_data_pre_process_average,
 ]
- time_shape, = x_time[data_id].shape
- depth_shape, = y_depth[data_id].shape
- logger.debug(f"time_shape: {time_shape}")
- logger.debug(f"depth_shape: {depth_shape}")
 BS_data = stg.BS_raw_data
 BS_data_ppa = stg.BS_raw_data_pre_process_average
 for i in range(len(BS)):
@@ -1620,8 +1557,7 @@ class SignalProcessingTab(QWidget):
 if bs[data_id].shape == (0,):
 continue
- x, y, z = bs[data_id].shape
- if y == depth_shape and z == time_shape:
+ if self._is_correct_shape(bs):
 BS_data = bs
 BS_data_ppa = BS_ppa[i]
 break
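The final hunk is the pattern the whole commit converges on: instead of unpacking each candidate's shape inline and comparing it to time_shape and depth_shape, the loop now asks _is_correct_shape and keeps the first backscatter array that matches the current axes. A self-contained sketch of that selection loop, with invented names rather than the stg lists:

import numpy as np

def pick_matching(candidates, x_time, y_depth):
    # Return the first non-empty (freq, depth, time) array matching the axes.
    for bs in candidates:
        if bs.shape == (0,):
            continue                      # empty placeholder, skip it
        _, n_depth, n_time = bs.shape
        if n_depth == y_depth.size and n_time == x_time.size:
            return bs
    return None

candidates = [np.array([]), np.zeros((2, 50, 200)), np.zeros((2, 10, 10))]
print(pick_matching(candidates, np.arange(200), np.arange(50)).shape)   # (2, 50, 200)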