Compare commits


8 Commits

5 changed files with 57 additions and 378 deletions

View File

@ -5,35 +5,25 @@ import pandas as pd
import matplotlib.pyplot as plt import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm from matplotlib.colors import LogNorm
# path_BS_raw_data = "/home/bmoudjed/Documents/2 Data/Confluence_Rhône_Isere_2018/Acoustic_data/20180107123500.aqa"
# path_BS_raw_data = "/home/bmoudjed/Documents/3 SSC acoustic meas project/Graphical interface project/" \
# "Data/AcousticNoise_data/20180107121600.aqa"
class AcousticDataLoader: class AcousticDataLoader:
def __init__(self, path_BS_raw_data: str): def __init__(self, path_BS_raw_data: str):
self.path_BS_raw_data = path_BS_raw_data self.path_BS_raw_data = path_BS_raw_data
print(self.path_BS_raw_data)
# --- Load Backscatter acoustic raw data with RawAquascatData class --- # --- Load Backscatter acoustic raw data with RawAquascatData class ---
self._data_BS = RawAquascatData(self.path_BS_raw_data) self._data_BS = RawAquascatData(self.path_BS_raw_data)
print(self._data_BS.V.shape)
self._BS_raw_data = np.swapaxes(self._data_BS.V, 0, 1) self._BS_raw_data = np.swapaxes(self._data_BS.V, 0, 1)
print(f"BS raw data shape = {self._BS_raw_data.shape}")
self._freq = self._data_BS.Freq self._freq = self._data_BS.Freq
print(f"freq shape = {self._freq.shape}")
self._freq_text = self._data_BS.freqText self._freq_text = self._data_BS.freqText
self._r = np.repeat(np.transpose(self._data_BS.r), self._freq.shape[0], axis=0) self._r = np.repeat(np.transpose(self._data_BS.r), self._freq.shape[0], axis=0)
print(f"r shape = {self._r.shape}")
self._time = np.repeat( self._time = np.repeat(
np.transpose(np.array([t / self._data_BS.PingRate for t in range(self._data_BS.NumProfiles)])[:, np.newaxis]), np.transpose(np.array([t / self._data_BS.PingRate for t in range(self._data_BS.NumProfiles)])[:, np.newaxis]),
self._freq.shape[0], axis=0) self._freq.shape[0], axis=0)
print(f"time shape = {self._time.shape}")
self._date = self._data_BS.date.date() self._date = self._data_BS.date.date()
self._hour = self._data_BS.date.time() self._hour = self._data_BS.date.time()
@ -48,97 +38,30 @@ class AcousticDataLoader:
self._gain_rx = self._data_BS.RxGain.tolist() self._gain_rx = self._data_BS.RxGain.tolist()
self._gain_tx = self._data_BS.TxGain.tolist() self._gain_tx = self._data_BS.TxGain.tolist()
# print((self._cell_size))
# print((self._nb_pings_averaged_per_profile))
# print(self._r[0, :][1] - self._r[1, :][0])
# print(type(self._nb_cells), self._nb_cells)
# self._snr = np.array([])
# self._snr_reshape = np.array([])
# self._time_snr = np.array([])
# print(type(self._gain_tx))
# print(["BS - " + f for f in self._freq_text])
# print(self._time.shape[0]*self._r.shape[0]*4)
# print(self._time[np.where(np.floor(self._time) == 175)])
# print(np.where((self._time) == 155)[0][0])
# fig, ax = plt.subplots(nrows=1, ncols=1)
# # ax.pcolormesh(self._time[0, :2200], -self._r[0, :], (self._BS_raw_data[0, :, :2200]),
# # cmap='viridis',
# # norm=LogNorm(vmin=1e-5, vmax=np.max(self._BS_raw_data[0, :, :2200]))) # , shading='gouraud')
# ax.pcolormesh(range(self._BS_raw_data.shape[2]), range(self._BS_raw_data.shape[1]), self._BS_raw_data[2, :, :], cmap='viridis',
# norm=LogNorm(vmin=1e-5, vmax=np.max(self._BS_raw_data[:, 0, :]))) # , shading='gouraud')
# ax.set_xticks([])
# ax.set_yticks([])
# plt.show()
# --- Plot vertical profile for bottom detection ---
# fig2, ax2 = plt.subplots(nrows=1, ncols=1, layout="constrained")
# ax2.plot(self._BS_raw_data[0, :, 1], -self._r[0], "k.-")
# plt.show()
# fig, ax = plt.subplots(nrows=1, ncols=1)
# ax.plot(self._BS_raw_data[:, 0, 100] , self._r)
# ax.set_ylim(2, 20)
# plt.show()
# print(self.reshape_BS_raw_cross_section()[0, 0])
# self.reshape_BS_raw_cross_section()
# self.reshape_r()
# self.reshape_t()
# self.compute_r_2D()
def reshape_BS_raw_data(self): def reshape_BS_raw_data(self):
BS_raw_cross_section = np.reshape(self._BS_raw_data, BS_raw_cross_section = np.reshape(self._BS_raw_data,
(self._r.shape[1] * self._time.shape[1], self._freq.shape[0]), (self._r.shape[1] * self._time.shape[1], self._freq.shape[0]),
order="F") order="F")
print(BS_raw_cross_section.shape)
return BS_raw_cross_section return BS_raw_cross_section
def reshape_r(self): def reshape_r(self):
# r = np.reshape(np.repeat(self._r[0, :], self._time.shape[0], axis=1),
# self._r.shape[0]*self._time.shape[0],
# order="F")
r = np.zeros((self._r.shape[1] * self._time.shape[1], self._freq.shape[0])) r = np.zeros((self._r.shape[1] * self._time.shape[1], self._freq.shape[0]))
for i, _ in enumerate(self._freq): for i, _ in enumerate(self._freq):
for j in range(self._time.shape[1]): for j in range(self._time.shape[1]):
r[j*self._r.shape[1]:(j+1)*self._r.shape[1], i] = self._r[i, :] r[j*self._r.shape[1]:(j+1)*self._r.shape[1], i] = self._r[i, :]
# r[:, i] = np.repeat(self._r[i, :], self._time.shape[1])
print(r.shape)
return r return r
def compute_r_2D(self): def compute_r_2D(self):
r2D = np.zeros((self._freq.shape[0], self._r.shape[1], self._time.shape[1])) r2D = np.zeros((self._freq.shape[0], self._r.shape[1], self._time.shape[1]))
for f, _ in enumerate(self._freq): for f, _ in enumerate(self._freq):
r2D[f, :, :] = np.repeat(np.transpose(self._r[f, :])[:, np.newaxis], self._time.shape[1], axis=1) r2D[f, :, :] = np.repeat(np.transpose(self._r[f, :])[:, np.newaxis], self._time.shape[1], axis=1)
print(r2D.shape)
return r2D return r2D
def reshape_t(self): def reshape_t(self):
# t = np.reshape(np.repeat(self._time, self._r.shape[0]), (self._time.shape[0]*self._r.shape[0], 1))
t = np.zeros((self._r.shape[1] * self._time.shape[1], self._freq.shape[0])) t = np.zeros((self._r.shape[1] * self._time.shape[1], self._freq.shape[0]))
for i, _ in enumerate(self._freq): for i, _ in enumerate(self._freq):
t[:, i] = np.repeat(self._time[i, :], self._r.shape[1]) t[:, i] = np.repeat(self._time[i, :], self._r.shape[1])
print(t.shape)
return t return t
# def concatenate_data(self):
# self.reshape_t()
# self.reshape_BS_raw_cross_section()
# # print(self.reshape_t().shape)
# # print(se.lf.reshape_BS_raw_cross_section().shape)
# df = pd.DataFrame(np.concatenate((self.reshape_t(), self.reshape_BS_raw_cross_section()), axis=1),
# columns=["time"] + self._freq_text)
# return df
# if __name__ == "__main__":
# AcousticDataLoader(path_BS_raw_data)
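A minimal sketch of the flattened layout that `reshape_BS_raw_data`, `reshape_r` and `reshape_t` aim to produce (one row per range/time cell, one column per frequency), using toy dimensions; `n_freq`, `n_cells`, `n_prof` and the 8 Hz ping rate are illustrative stand-ins, not values read from an `.aqa` file.

```python
import numpy as np

n_freq, n_cells, n_prof = 2, 3, 4
r = np.tile(np.linspace(0.1, 0.3, n_cells), (n_freq, 1))   # (n_freq, n_cells) cell ranges
t = np.tile(np.arange(n_prof) / 8.0, (n_freq, 1))          # (n_freq, n_prof), assuming an 8 Hz ping rate

# Column i holds the values for frequency i; ranges repeat profile by profile
# (the j*n_cells:(j+1)*n_cells slices of reshape_r), times repeat cell by cell
# (the np.repeat of reshape_t), so each row indexes one (cell, profile) pair.
r_flat = np.zeros((n_cells * n_prof, n_freq))
t_flat = np.zeros_like(r_flat)
for i in range(n_freq):
    for j in range(n_prof):
        r_flat[j * n_cells:(j + 1) * n_cells, i] = r[i, :]
    t_flat[:, i] = np.repeat(t[i, :], n_cells)

print(r_flat.shape, t_flat.shape)   # (12, 2) (12, 2)
```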

View File

@ -21,7 +21,6 @@
# -*- coding: utf-8 -*- # -*- coding: utf-8 -*-
import matplotlib.pyplot as plt
import numpy as np import numpy as np
import settings as stg import settings as stg
from Model.GrainSizeTools import demodul_granulo, mix_gaussian_model from Model.GrainSizeTools import demodul_granulo, mix_gaussian_model
@ -58,17 +57,6 @@ class AcousticInversionMethodHighConcentration():
(np.log(10) / 20) * (freq * 1e-3) ** 2 (np.log(10) / 20) * (freq * 1e-3) ** 2
return alpha return alpha
# ---------- Conmpute FBC ----------
# def compute_FCB(self):
# # print(self.BS_averaged_cross_section_corr.V.shape)
# # print(self.r_2D.shape)
# FCB = np.zeros((256, 4, 1912))
# for f in range(4):
# # print(self.alpha_w_function(self.Freq[f], self.temperature))
# FCB[:, f, :] = np.log(self.BS_averaged_cross_section_corr.V[:, f, :]) + np.log(self.r_3D[:, f, :]) + \
# np.log(2 * self.alpha_w_function(self.Freq[f], self.temperature) * self.r_3D[:, f, :])
# return FCB
# --- Gaussian mixture --- # --- Gaussian mixture ---
def compute_particle_size_distribution_in_number_of_particles(self, num_sample, r_grain, frac_vol_cumul): def compute_particle_size_distribution_in_number_of_particles(self, num_sample, r_grain, frac_vol_cumul):
min_demodul = 1e-6 min_demodul = 1e-6
@ -82,15 +70,6 @@ class AcousticInversionMethodHighConcentration():
sample_demodul.demodul_data_list[2].sigma_list, sample_demodul.demodul_data_list[2].sigma_list,
sample_demodul.demodul_data_list[2].w_list) sample_demodul.demodul_data_list[2].w_list)
# N_modes = 3
# sample_demodul.print_mode_data(N_modes)
# sample_demodul.plot_interpolation()
# sample_demodul.plot_modes(N_modes)
# print(f"mu_list : {sample_demodul.demodul_data_list[3 - 1].mu_list}")
# print(f"sigma_list : {sample_demodul.demodul_data_list[3 - 1].sigma_list}")
# print(f"w_list : {sample_demodul.demodul_data_list[3 - 1].w_list}")
proba_vol_demodul = proba_vol_demodul / np.sum(proba_vol_demodul) proba_vol_demodul = proba_vol_demodul / np.sum(proba_vol_demodul)
ss = np.sum(proba_vol_demodul / np.exp(resampled_log_array) ** 3) ss = np.sum(proba_vol_demodul / np.exp(resampled_log_array) ** 3)
proba_num = proba_vol_demodul / np.exp(resampled_log_array) ** 3 / ss proba_num = proba_vol_demodul / np.exp(resampled_log_array) ** 3 / ss
@ -106,23 +85,9 @@ class AcousticInversionMethodHighConcentration():
x = k * a x = k * a
f = (x ** 2 * (1 - 0.25 * np.exp(-((x - 1.5) / 0.35) ** 2)) * (1 + 0.6 * np.exp(-((x - 2.9) / 1.15) ** 2))) / ( f = (x ** 2 * (1 - 0.25 * np.exp(-((x - 1.5) / 0.35) ** 2)) * (1 + 0.6 * np.exp(-((x - 2.9) / 1.15) ** 2))) / (
42 + 28 * x ** 2) 42 + 28 * x ** 2)
# print(f"form factor = {f}")
return f return f
# def ks(self, num_sample_sand, radius_grain_sand, frac_vol_sand_cumul, freq, C):
def ks(self, proba_num, freq, C): def ks(self, proba_num, freq, C):
# --- Calcul de la fonction de form ---
# form_factor = self.form_factor_function_MoateThorne2012(a, freq)
# print(f"form_factor shape = {form_factor}")
# print(f"form_factor = {form_factor}")
#--- Particle size distribution ---
# proba_num = (
# self.compute_particle_size_distribution_in_number_of_particles(
# num_sample=num_sample_sand, r_grain=radius_grain_sand, frac_vol_cumul=frac_vol_sand_cumul[num_sample_sand]))
# print(f"proba_num : {proba_num}")
# --- Compute k_s by dividing two integrals --- # --- Compute k_s by dividing two integrals ---
resampled_log_array = np.log(np.logspace(-10, -2, 3000)) resampled_log_array = np.log(np.logspace(-10, -2, 3000))
a2f2pdf = 0 a2f2pdf = 0
@ -132,28 +97,17 @@ class AcousticInversionMethodHighConcentration():
a2f2pdf += a**2 * self.form_factor_function_MoateThorne2012(a, freq, C)**2 * proba_num[i] a2f2pdf += a**2 * self.form_factor_function_MoateThorne2012(a, freq, C)**2 * proba_num[i]
a3pdf += a**3 * proba_num[i] a3pdf += a**3 * proba_num[i]
# print("form factor ", self.form_factor_function_MoateThorne2012(a, freq, C))
# print(f"a2f2pdf = {a2f2pdf}")
# print(f"a3pdf = {a3pdf}")
ks = np.sqrt(a2f2pdf / a3pdf) ks = np.sqrt(a2f2pdf / a3pdf)
# ks = np.array([0.04452077, 0.11415143, 0.35533713, 2.47960051])
# ks = ks0[ind]
return ks return ks
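A hedged sketch of the Moate & Thorne (2012) form-factor expression used above and of the `ks` estimate as the square root of the ratio of the two accumulated sums; the wavenumber `k = 2*pi*freq/C` and the log-normal size distribution are assumptions for illustration only.

```python
import numpy as np

def form_factor(a, freq, C=1480.0):
    k = 2 * np.pi * freq / C          # acoustic wavenumber (assumed definition)
    x = k * a
    return (x**2 * (1 - 0.25 * np.exp(-((x - 1.5) / 0.35)**2))
            * (1 + 0.6 * np.exp(-((x - 2.9) / 1.15)**2))) / (42 + 28 * x**2)

radii = np.logspace(-6, -3, 200)      # grain radii (m), illustrative
pdf = np.exp(-0.5 * ((np.log(radii) - np.log(1e-4)) / 0.5)**2)
pdf /= pdf.sum()                      # number PDF, normalised to 1

freq = 1e6                            # 1 MHz
a2f2pdf = np.sum(radii**2 * form_factor(radii, freq)**2 * pdf)
a3pdf = np.sum(radii**3 * pdf)
ks = np.sqrt(a2f2pdf / a3pdf)
print(ks)
```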
# ------------- Computing sv ------------- # # ------------- Computing sv ------------- #
def sv(self, ks, M_sand): def sv(self, ks, M_sand):
# print(f"ks = {ks}")
# print(f"M_sand = {M_sand}")
sv = (3 / (16 * np.pi)) * (ks ** 2) * M_sand sv = (3 / (16 * np.pi)) * (ks ** 2) * M_sand
# sv = np.full((stg.r.shape[1], stg.t.shape[1]), sv0)
return sv return sv
# ------------- Computing X ------------- # # ------------- Computing X ------------- #
def X_exponent(self, freq1, freq2, sv_freq1, sv_freq2): def X_exponent(self, freq1, freq2, sv_freq1, sv_freq2):
# X0 = [3.450428714146802, 3.276478927777019, 3.6864638665972893, 0]
# X = X0[ind]
X = np.log(sv_freq1 / sv_freq2) / np.log(freq1 / freq2) X = np.log(sv_freq1 / sv_freq2) / np.log(freq1 / freq2)
return X return X
@ -174,165 +128,43 @@ class AcousticInversionMethodHighConcentration():
gain = 10 ** ((RxGain + TxGain) / 20) gain = 10 ** ((RxGain + TxGain) / 20)
# Computing Kt # Computing Kt
kt = kt_ref * gain * np.sqrt(tau * cel / (tau_ref * c_ref)) # 1D numpy array kt = kt_ref * gain * np.sqrt(tau * cel / (tau_ref * c_ref)) # 1D numpy array
# kt = np.reshape(kt0, (1, 2)) # convert to 2d numpy array to compute J_cross_section
# print(f"kt = {kt}")
# kt_2D = np.repeat(np.array([kt]), stg.r.shape[1], axis=0)
# print("kt 2D ", kt_2D)
# print("kt 2D shape ", kt_2D.shape)
# # kt_3D = np.zeros((kt_2D.shape[1], kt_2D.shape[0], stg.t.shape[1]))
# # for k in range(kt_2D.shape[1]):
# # kt_3D[k, :, :] = np.repeat(kt_2D, stg.t.shape[1], axis=1)[:, k * stg.t.shape[1]:(k + 1) * stg.t.shape[1]]
# kt_3D = np.repeat(kt_2D.transpose()[:, :, np.newaxis], stg.t.shape[1], axis=2)
# # print("kt 3D ", kt_3D)
# print("kt 3D shape ", kt_3D.shape)
return kt return kt
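A small numeric sketch of the `kt` scaling above; every value is illustrative rather than instrument calibration data.

```python
import numpy as np

kt_ref = np.array([0.012, 0.018, 0.025, 0.040])   # reference kt per frequency (illustrative)
RxGain = np.array([6.0, 6.0, 0.0, 0.0])           # receive gain (dB), illustrative
TxGain = np.array([0.0, 0.0, 0.0, 0.0])           # transmit gain (dB), illustrative
tau, tau_ref = 13e-6, 13e-6                       # pulse lengths (s), taken equal here
cel, c_ref = 1465.0, 1480.0                       # in-situ vs reference sound speed (m/s)

gain = 10 ** ((RxGain + TxGain) / 20)
kt = kt_ref * gain * np.sqrt(tau * cel / (tau_ref * c_ref))
print(kt)
```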
# ------------- Computing J_cross_section ------------- # # ------------- Computing J_cross_section ------------- #
def j_cross_section(self, BS, r2D, kt): def j_cross_section(self, BS, r2D, kt):
# J_cross_section = np.zeros((1, BS.shape[1], BS.shape[2])) # 2 because it's a pair of frequencies
# print("BS.shape", BS.shape)
# print("r2D.shape", r2D.shape)
# print("kt.shape", kt.shape)
# if stg.ABS_name == "Aquascat 1000R":
# print("--------------------------------")
# print("BS : ", BS)
# print("BS min : ", np.nanmin(BS))
# print("BS max : ", np.nanmax(BS))
# print("r2D : ", r2D)
# print("kt shape : ", kt.shape)
# print("kt : ", kt)
# print("--------------------------------")
# for k in range(1):
# J_cross_section[k, :, :] = (3 / (16 * np.pi)) * ((BS[k, :, :]**2 * r2D[k, :, :]**2) / kt[k, :, :]**2)
J_cross_section = (3 / (16 * np.pi)) * ((BS**2 * r2D**2) / kt**2) J_cross_section = (3 / (16 * np.pi)) * ((BS**2 * r2D**2) / kt**2)
# J_cross_section[J_cross_section == 0] = np.nan
# print("J_cross_section.shape", J_cross_section.shape)
# elif stg.ABS_name == "UB-SediFlow":
# for k in range(1):
# J_cross_section[k, :, :] = (3 / (16 * np.pi)) * ((BS[k, :, :]**2 * r2D[0, :, :]**2) / kt[k, :, :]**2)
# print("compute j_cross_section finished")
return J_cross_section return J_cross_section
# ------------- Computing alpha_s ------------- # # ------------- Computing alpha_s ------------- #
def alpha_s(self, sv, j_cross_section, depth, alpha_w): def alpha_s(self, sv, j_cross_section, depth, alpha_w):
alpha_s = (np.log(sv / j_cross_section) / (4 * depth)) - alpha_w alpha_s = (np.log(sv / j_cross_section) / (4 * depth)) - alpha_w
print("----------------------------")
print(f"sv = {sv}")
print(f"j_cross_section = {j_cross_section}")
print(f"depth = {depth}")
print(f"alpha_w = {alpha_w}")
print(f"(np.log(sv / j_cross_section) / (4 * depth)) = {(np.log(sv / j_cross_section) / (4 * depth))}")
print(f"alpha_s {alpha_s}")
return alpha_s return alpha_s
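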
# ------------- Computing interpolation of fine SSC data obtained from water sampling ------------- # ------------- Computing interpolation of fine SSC -------------
# ------------- collected at various depth in the vertical sample -------------
# def M_profile_SCC_fine_interpolated(self, sample_depth, M_profile, range_cells, r_bottom):
# res = np.zeros((len(range_cells),)) * np.nan
# for i in range(len(M_profile) - 1):
# # print(f"i = {i}")
# r_ini = sample_depth[i]
# # print(f"r_ini = {r_ini}")
# c_ini = M_profile[i]
# # print(f"c_ini = {c_ini}")
# r_end = sample_depth[i + 1]
# # print(f"r_end = {r_end}")
# c_end = M_profile[i + 1]
# # print(f"c_end = {c_end}")
#
# # Computing the linear equation
# a = (c_end - c_ini) / (r_end - r_ini)
# # print(f"a = {a}")
# b = c_ini - a * r_ini
# # print(f"b = {b}")
#
# # Finding the indices of r_ini and r_end in the interpolated array
# # print(f"range_cells = {range_cells}")
# loc = (range_cells >= r_ini) * (range_cells < r_end)
# # print(f"loc = {loc}")
# # print(f"loc shape = {len(loc)}")
#
# # Filling the array with interpolation values
# res[loc] = range_cells[loc] * a + b
# # print(res.shape)
# # print(f"res = {res}")
# # print(f"1. res.shape = {res.shape}")
#
# # Filling first and last values
# i = 0
# while np.isnan(res[i]):
# res[i] = M_profile[0]
# i += 1
#
# # Filling the last values
# i = -1
# while np.isnan(res[i]):
# res[i] = M_profile[-1]
# i += -1
# # print(f"res.shape = {res.shape}")
# # print(f"res = {res}")
# # print(f"r_bottom.shape = {r_bottom.shape}")
# # print(f" = {res}")
#
# if r_bottom.shape != (0,):
# res[np.where(range_cells > r_bottom)] = np.nan
#
# loc_point_lin_interp0 = range_cells[np.where((range_cells > sample_depth[0]) & (range_cells < sample_depth[-1]))]
# # print(f"range_cells : {range_cells}")
# # print(f"loc_point_lin_interp0 shape : {len(loc_point_lin_interp0)}")
# # print(f"loc_point_lin_interp0 : {loc_point_lin_interp0}")
# res0 = res[np.where((range_cells > sample_depth[0]) & (range_cells < sample_depth[-1]))]
#
# loc_point_lin_interp = loc_point_lin_interp0[np.where(loc_point_lin_interp0 > range_cells[0])]
# # print(f"loc_point_lin_interp shape : {len(loc_point_lin_interp)}")
# # print(f"loc_point_lin_interp : {loc_point_lin_interp}")
# res = res0[np.where(loc_point_lin_interp0 > range_cells[0])]
#
# # fig, ax = plt.subplots(nrows=1, ncols=1)
# # ax.plot(loc_point_lin_interp, res[:len(loc_point_lin_interp)], marker="*", mfc="blue")
# # ax.plot(sample_depth, M_profile, marker="o", mfc="k", mec="k")
# # plt.show()
#
# return (loc_point_lin_interp, res)
def M_profile_SCC_fine_interpolated(self, sample_depth, M_profile, range_cells, r_bottom): def M_profile_SCC_fine_interpolated(self, sample_depth, M_profile, range_cells, r_bottom):
'''Computing interpolation of fine SSC data obtained from water sampling
collected at various depths in the vertical sample'''
res = np.zeros((len(range_cells),)) * np.nan res = np.zeros((len(range_cells),)) * np.nan
print("range_cells ", range_cells.shape)
l0 = sample_depth l0 = sample_depth
print("l0 = ", l0)
l1 = [l0.index(x) for x in sorted(l0)] l1 = [l0.index(x) for x in sorted(l0)]
print("l1 = ", l1)
l2 = [l0[k] for k in l1] l2 = [l0[k] for k in l1]
print("l2 = ", l2)
c1 = [list(M_profile)[j] for j in l1] c1 = [list(M_profile)[j] for j in l1]
print("c1 = ", c1)
for i in range(len(c1) - 1): for i in range(len(c1) - 1):
# print("i = ", i) # print("i = ", i)
r_ini = l2[i] r_ini = l2[i]
c_ini = c1[i] c_ini = c1[i]
r_end = l2[i + 1] r_end = l2[i + 1]
c_end = c1[i + 1] c_end = c1[i + 1]
print("r_ini ", r_ini, "c_ini ", c_ini, "r_end ", r_end, "c_end ", c_end)
# Computing the linear equation # Computing the linear equation
a = (c_end - c_ini) / (r_end - r_ini) a = (c_end - c_ini) / (r_end - r_ini)
b = c_ini - a * r_ini b = c_ini - a * r_ini
print("range_cells ", (range_cells))
# Finding the indices of r_ini and r_end in the interpolated array # Finding the indices of r_ini and r_end in the interpolated array
loc = (range_cells >= r_ini) * (range_cells < r_end) loc = (range_cells >= r_ini) * (range_cells < r_end)
print("range_cells >= r_ini ", range_cells >= r_ini)
print("range_cells < r_end ", range_cells < r_end)
print("loc ", loc)
# Filling the array with interpolation values # Filling the array with interpolation values
res[loc] = range_cells[loc] * a + b res[loc] = range_cells[loc] * a + b
print("a = ", a, "b = ", b)
print("res ", res)
# Filling first and last values # Filling first and last values
i = 0 i = 0
while np.isnan(res[i]): while np.isnan(res[i]):
@ -346,9 +178,6 @@ class AcousticInversionMethodHighConcentration():
i += -1 i += -1
if r_bottom.size != 0: if r_bottom.size != 0:
print("res ", res.shape)
print("range_cells ", len(range_cells))
# print("r_bottom ", len(r_bottom))
res[np.where(range_cells > r_bottom)] = np.nan res[np.where(range_cells > r_bottom)] = np.nan
loc_point_lin_interp0 = range_cells[np.where((range_cells > l2[0]) & (range_cells < l2[-1]))] loc_point_lin_interp0 = range_cells[np.where((range_cells > l2[0]) & (range_cells < l2[-1]))]
@ -357,13 +186,6 @@ class AcousticInversionMethodHighConcentration():
loc_point_lin_interp = loc_point_lin_interp0[np.where(loc_point_lin_interp0 > l2[0])] loc_point_lin_interp = loc_point_lin_interp0[np.where(loc_point_lin_interp0 > l2[0])]
res = res0[np.where(loc_point_lin_interp0 > l2[0])] res = res0[np.where(loc_point_lin_interp0 > l2[0])]
# fig, ax = plt.subplots(nrows=1, ncols=1)
# ax.plot(res[:len(loc_point_lin_interp)], -loc_point_lin_interp, marker="*", mfc="blue")
# ax.plot(c1, [-x for x in l2], marker="o", mfc="k", mec="k", ls="None")
# ax.set_xlabel("Concentration (g/L)")
# ax.set_ylabel("Depth (m)")
# plt.show()
return (loc_point_lin_interp, res) return (loc_point_lin_interp, res)
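For the usual case (distinct sampling depths, linear variation between samples), the piecewise fill above can be sketched with `np.interp`, which also clamps to the end values the way the leading/trailing NaN fill does; the depths, concentrations and bottom range below are illustrative.

```python
import numpy as np

sample_depth = [2.0, 0.5, 4.0]           # sampling depths (m), unsorted on purpose
M_profile = [0.08, 0.05, 0.12]           # fine SSC at those depths (g/L)
range_cells = np.linspace(0.0, 6.0, 25)  # acoustic cell depths (m)
r_bottom = np.array([5.0])               # detected bottom range (m)

order = np.argsort(sample_depth)
depths = np.asarray(sample_depth)[order]
conc = np.asarray(M_profile)[order]

res = np.interp(range_cells, depths, conc)   # linear between samples, end values held outside
if r_bottom.size != 0:
    res[range_cells > r_bottom[0]] = np.nan  # discard cells below the stream bed

keep = (range_cells > depths[0]) & (range_cells < depths[-1])
print(range_cells[keep], res[keep])
```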
# ------------- Computing zeta ------------- # # ------------- Computing zeta ------------- #
@ -372,39 +194,6 @@ class AcousticInversionMethodHighConcentration():
delta_r = r[1] - r[0] delta_r = r[1] - r[0]
zeta = alpha_s / (np.sum(np.array(M_profile_fine)*delta_r)) zeta = alpha_s / (np.sum(np.array(M_profile_fine)*delta_r))
# print(f"np.sum(M_profile_fine*delta_r) : {np.sum(M_profile_fine*delta_r)}")
# zeta0 = np.array([0.021, 0.035, 0.057, 0.229])
# zeta = zeta0[ind]
# zeta0 = np.array([0.04341525, 0.04832906, 0.0847188, np.nan])
# zeta = zeta0[[ind1, ind2]]
# for k in range(3):
# for p in range(3):
# if np.isnan(ind_X_min_around_sample[p, k]):
# zeta_list_exp.append(np.nan)
# else:
# ind_X_min = int(ind_X_min_around_sample[p, k])
# ind_X_max = int(ind_X_max_around_sample[p, k])
# ind_r_min = int(ind_r_min_around_sample[p, k])
# ind_r_max = int(ind_r_max_around_sample[p, k])
#
# R_temp = R_cross_section[ind_r_min:ind_r_max, :, ind_X_min:ind_X_max]
# J_temp = J_cross_section[ind_r_min:ind_r_max, :, ind_X_min:ind_X_max]
# aw_temp = aw_cross_section[ind_r_min:ind_r_max, :, ind_X_min:ind_X_max]
# sv_temp_1 = np.repeat([sv_list_temp[3 * k + p]], np.shape(R_temp)[0], axis=0)
# sv_temp = np.swapaxes(np.swapaxes(np.repeat([sv_temp_1], np.shape(R_temp)[2], axis=0), 1, 0), 2, 1)
# ind_depth = np.where(R_cross_section[:, 0, 0] >= M_list_temp[k][0, p + 1])[0][0]
# # Using concentration profile
# zeta_temp = alpha_s / ((1 / M_list_temp[k][0, p + 1]) * (R_cross_section[0, 0, 0] * M_list_temp[k][1, 0] +
# delta_r * np.sum(M_interpolate_list[k][:ind_depth])))
# zeta_temp = (1 / (4 * R_temp) *
# np.log(sv_temp / J_temp) - aw_temp) / ((1 / M_list_temp[k][0, p + 1]) *
# (R_cross_section[0, 0, 0] * M_list_temp[k][
# 1, 0] +
# delta_r * np.sum(
# M_interpolate_list[k][:ind_depth])))
# zeta_list_exp.append(np.mean(np.mean(zeta_temp, axis=0), axis=1))
return zeta return zeta
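A hedged worked example tying `alpha_s()` and `zeta()` together on a single cell; all inputs are illustrative.

```python
import numpy as np

sv = 2.0e-4                  # sand backscatter strength at the calibration cell
j_cross_section = 5.0e-5     # corrected backscatter J at the same cell
depth = 3.0                  # range to the cell (m)
alpha_w = 0.05               # water attenuation (Np/m)

alpha_s = np.log(sv / j_cross_section) / (4 * depth) - alpha_w   # sediment attenuation (Np/m)

r = np.linspace(0.1, 3.0, 30)                 # cell ranges down to the sample (m)
M_profile_fine = np.full(r.shape, 0.06)       # interpolated fine SSC profile (g/L)
delta_r = r[1] - r[0]
zeta = alpha_s / np.sum(M_profile_fine * delta_r)   # attenuation per unit fine SSC
print(alpha_s, zeta)
```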
# ------------- Computing VBI ------------- # # ------------- Computing VBI ------------- #
@ -415,21 +204,6 @@ class AcousticInversionMethodHighConcentration():
water_attenuation_freq1, water_attenuation_freq2, water_attenuation_freq1, water_attenuation_freq2,
X): X):
# print('self.zeta_exp[ind_j].shape', self.zeta_exp[ind_j])
# print('np.log(self.j_cross_section[:, ind_i, :]).shape', np.log(self.j_cross_section[:, ind_i, :]).shape)
# print('self.r_3D[:, ind_i, :]', self.r_3D[:, ind_i, :].shape)
# print('self.water_attenuation[ind_i]', self.water_attenuation[ind_i])
# print('self.x_exp[0.3-1 MHz]', self.x_exp['0.3-1 MHz'].values[0])
# print("start computing VBI")
# print("================================")
# print(f"zeta_freq2 : {zeta_freq2}")
# print(f"j_cross_section_freq1 : {j_cross_section_freq1.shape}")
# print(f"r2D : {r2D.shape}")
# print(f"water_attenuation_freq1 : {water_attenuation_freq1}")
# print(f"freq1 : {freq1}")
# print(f"X : {X}")
# print("================================")
logVBI = ((zeta_freq2 * logVBI = ((zeta_freq2 *
np.log(j_cross_section_freq1 * np.exp(4 * r2D * water_attenuation_freq1) / np.log(j_cross_section_freq1 * np.exp(4 * r2D * water_attenuation_freq1) /
(freq1 ** X)) - (freq1 ** X)) -
@ -438,31 +212,16 @@ class AcousticInversionMethodHighConcentration():
(freq2 ** X))) / (freq2 ** X))) /
(zeta_freq2 - zeta_freq1)) (zeta_freq2 - zeta_freq1))
# logVBI = (freq2**2 * np.log(j_cross_section_freq1 / freq1**X) -
# freq1**2 * np.log(j_cross_section_freq2 / freq2**X)) / (freq2**2 - freq1**2)
# logVBI = (( np.full((stg.r.shape[1], stg.t.shape[1]), zeta_freq2) *
# np.log(j_cross_section_freq1 * np.exp(4 * r2D * np.full((stg.r.shape[1], stg.t.shape[1]), water_attenuation_freq1)) /
# (freq1 ** X)) -
# np.full((stg.r.shape[1], stg.t.shape[1]), zeta_freq1) *
# np.log(j_cross_section_freq2 * np.exp(4 * r2D * np.full((stg.r.shape[1], stg.t.shape[1]), water_attenuation_freq2)) /
# (freq2 ** X))) /
# (zeta_freq2 - zeta_freq1))
print("compute VBI finished")
return np.exp(logVBI) return np.exp(logVBI)
# ------------- Computing SSC fine ------------- # # ------------- Computing SSC fine ------------- #
def SSC_fine(self, zeta, r2D, VBI, freq, X, j_cross_section, alpha_w): def SSC_fine(self, zeta, r2D, VBI, freq, X, j_cross_section, alpha_w):
SSC_fine = (1/zeta) * ( 1/(4 * r2D) * np.log((VBI * freq**X) / j_cross_section) - alpha_w) SSC_fine = (1/zeta) * ( 1/(4 * r2D) * np.log((VBI * freq**X) / j_cross_section) - alpha_w)
print("compute SSC fine finished")
return SSC_fine return SSC_fine
# ------------- Computing SSC sand ------------- # # ------------- Computing SSC sand ------------- #
def SSC_sand(self, VBI, freq, X, ks): def SSC_sand(self, VBI, freq, X, ks):
SSC_sand = (16 * np.pi * VBI * freq ** X) / (3 * ks**2) SSC_sand = (16 * np.pi * VBI * freq ** X) / (3 * ks**2)
print("compute SSC sand finished")
return SSC_sand return SSC_sand
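A hedged end-to-end sketch of the dual-frequency chain implemented above (`j_cross_section` → `X_exponent` → `VBI` → `SSC_fine` / `SSC_sand`), evaluated on one cell with illustrative scalars.

```python
import numpy as np

freq1, freq2 = 1.0e6, 2.5e6          # the two acoustic frequencies (Hz)
BS1, BS2 = 0.02, 0.015               # backscatter amplitudes at the cell
r = 2.5                              # range (m)
kt1, kt2 = 0.012, 0.025              # system constants from kt()
alpha_w1, alpha_w2 = 0.05, 0.30      # water attenuation (Np/m)
zeta1, zeta2 = 0.02, 0.06            # fine-sediment attenuation constants
ks1 = 0.11                           # sand scattering constant at freq1
sv1, sv2 = 2.0e-4, 8.0e-4            # sand backscatter strengths near a sample

J1 = (3 / (16 * np.pi)) * (BS1**2 * r**2) / kt1**2
J2 = (3 / (16 * np.pi)) * (BS2**2 * r**2) / kt2**2
X = np.log(sv1 / sv2) / np.log(freq1 / freq2)

logVBI = (zeta2 * np.log(J1 * np.exp(4 * r * alpha_w1) / freq1**X)
          - zeta1 * np.log(J2 * np.exp(4 * r * alpha_w2) / freq2**X)) / (zeta2 - zeta1)
VBI = np.exp(logVBI)

SSC_fine = (1 / zeta1) * (1 / (4 * r) * np.log(VBI * freq1**X / J1) - alpha_w1)
SSC_sand = (16 * np.pi * VBI * freq1**X) / (3 * ks1**2)
print(X, VBI, SSC_fine, SSC_sand)
```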

View File

@ -1,8 +1,17 @@
# AcouSed

AcouSed (**Acou**stic Backscattering for Concentration of Suspended **Sed**iments in Rivers) is software developed by INRAE, in collaboration with CNR.

![](icons/Logo-INRAE.jpg)

The interface is organised into the following tabs:

- Acoustic data: raw acoustic data are loaded and visualised
- Signal preprocessing: the raw acoustic signal is preprocessed with filters
- Sample data: fine and sand sediment sample data are loaded and visualised
- Calibration: calibration parameters are computed
- Inversion: the inversion method is applied to provide fine and sand sediment concentration fields

## Software documentation
### Installation
@ -11,8 +20,9 @@ greater. By default, Acoused is developed with PyPI package
dependencies, but it is also possible to use the Guix package manager to run
Acoused.
## Development documentation

### **TODO** Windows
### Linux
@ -39,33 +49,34 @@ script `guix.sh` to run the program.
guix shell sqlitebrowser -- ./guix.sh
```
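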
## **TODO** Documentation
## Authors and acknowledgment
### Development
- Brahim MOUDJED ????-2025 ([INRAE](https://www.inrae.fr/))
- Pierre-Antoine ROUBY 2025 ([TECC](https://parouby.fr))
### **TODO** Funding
- [INRAE](https://www.inrae.fr/)
- CNR
## License
AcouSed

Copyright (C) 2024 - INRAE

This program is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version.

This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.

You should have received a copy of the GNU General Public License along with this program. If not, see <https://www.gnu.org/licenses/>.
## Authors & Contacts
- Brahim MOUDJED 2022-2025 ([INRAE](https://www.inrae.fr/))
- Pierre-Antoine ROUBY 2025 ([TECC](https://parouby.fr))
If you have any questions or suggestions, please contact us at celine.berni@inrae.fr and/or jerome.lecoz@inrae.fr.
## Acknowledgment (Funding)
This study was conducted within the [Rhône Sediment Observatory](https://observatoire-sediments-rhone.fr/) (OSR), a multi-partner research program funded through the Plan Rhône by the European Regional Development Fund (ERDF), Agence de l'Eau RMC, CNR, EDF and three regional councils (Auvergne-Rhône-Alpes, PACA and Occitanie). It was also supported by CNR.
## Support files & References
- [Acoustic inversion method diagram](https://forgemia.inra.fr/theophile.terraz/acoused/-/blob/main/Acoustic_Inversion_theory.pdf?ref_type=heads)
- [Tutorial AQUAscat software: AQUAtalk](https://forgemia.inra.fr/theophile.terraz/acoused/-/blob/main/Tutorial_AQUAscat_software.pdf?ref_type=heads)
- [Adrien Vergne thesis (2018)](https://theses.fr/2018GREAU046)
- [Vergne A., Le Coz J., Berni C., & Pierrefeu G. (2020), Water Resources Research, 56(2)](https://doi.org/10.1029/2019WR024877)
- [Vergne A., Berni C., Le Coz J., & Tencé F. (2021), Water Resources Research, 57(9)](https://doi.org/10.1029/2021WR029589)

View File

@ -1,8 +1,7 @@
from PyQt5.QtGui import QIcon, QPixmap from PyQt5.QtGui import QIcon, QPixmap
from PyQt5.QtWidgets import (QWidget, QLabel, QHBoxLayout, QVBoxLayout, QApplication, QMainWindow, QGridLayout, from PyQt5.QtWidgets import (QWidget, QLabel, QHBoxLayout, QVBoxLayout, QApplication, QMainWindow, QGridLayout,
QDialog, QDialogButtonBox, QPushButton, QTextEdit, QFrame, QTabWidget, QScrollArea) QDialog, QFrame, QTabWidget, QScrollArea)
from PyQt5.QtCore import Qt
import numpy as np import numpy as np
@ -11,13 +10,8 @@ from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolBar from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolBar
from matplotlib.colors import LogNorm, BoundaryNorm from matplotlib.colors import LogNorm, BoundaryNorm
import datetime
import settings as stg import settings as stg
from Translation.constant_string import HORIZONTAL
from settings import depth_cross_section
class PlotNoiseWindow(QDialog): class PlotNoiseWindow(QDialog):
@ -55,12 +49,10 @@ class PlotNoiseWindow(QDialog):
val_min = np.nanmin(stg.BS_noise_raw_data[i][freq_ind, :, :]) val_min = np.nanmin(stg.BS_noise_raw_data[i][freq_ind, :, :])
val_max = np.nanmax(stg.BS_noise_raw_data[i][freq_ind, :, :]) val_max = np.nanmax(stg.BS_noise_raw_data[i][freq_ind, :, :])
print("val_min = ", val_min, "val_max = ", val_max)
if val_min == val_max: if val_min == val_max:
exec("pcm = self.ax" + str(i) + "[" + str(freq_ind) + "]" + ".pcolormesh(" + exec("pcm = self.ax" + str(i) + "[" + str(freq_ind) + "]" + ".pcolormesh(" +
"stg.time[" + str(i) + "][" + str(freq_ind) + ", :]," + "stg.time_noise[" + str(i) + "][" + str(freq_ind) + ", :]," +
"-stg.depth[" + str(i) + "][" + str(freq_ind) + ", :]," + "-stg.depth_noise[" + str(i) + "][" + str(freq_ind) + ", :]," +
"stg.BS_noise_raw_data[" + str(i) + "][" + str(freq_ind) + ", :, :]," + "stg.BS_noise_raw_data[" + str(i) + "][" + str(freq_ind) + ", :, :]," +
"cmap='hsv')") "cmap='hsv')")
else: else:
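For readability, the string-built `exec(...)` call above is equivalent to a direct `pcolormesh` call on the selected axes; a sketch with toy arrays standing in for the `stg.*` globals follows.

```python
import numpy as np
import matplotlib.pyplot as plt

time_noise = np.linspace(0, 60, 50)        # stand-in for stg.time_noise[i][freq_ind, :]
depth_noise = np.linspace(0.1, 5, 40)      # stand-in for stg.depth_noise[i][freq_ind, :]
bs_noise = np.random.default_rng(0).random((39, 49))  # stand-in for stg.BS_noise_raw_data[i][freq_ind, :, :]

fig, ax = plt.subplots()
pcm = ax.pcolormesh(time_noise, -depth_noise, bs_noise, cmap='hsv')
fig.colorbar(pcm, ax=ax)
plt.show()
```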

View File

@ -1084,23 +1084,20 @@ class SignalProcessingTab(QWidget):
val_max = np.nanmax(stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :]) val_max = np.nanmax(stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == val_max: if val_min == val_max:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6]) levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else: else:
if val_min == 0: if val_min == 0:
val_min = 1e-5 val_min = 1e-5
if val_max > 1000: if val_max > 1000:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6]) levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else: else:
levels = np.array([00.1, 1, 2, 10, 100, val_max]) levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
cf = (self.axis_SNR[f].contourf(x, -y, cf = (self.axis_SNR[f].contourf(x, -y,
stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :], stg.SNR_stream_bed[self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
levels, cmap='gist_rainbow', levels, cmap='gist_rainbow',
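The `levels` / `bounds` / `BoundaryNorm` selection above (repeated in the following hunks) can be illustrated in isolation; a sketch with a synthetic SNR field, all values illustrative:

```python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import BoundaryNorm

x = np.linspace(0, 100, 120)    # time axis (s), illustrative
y = np.linspace(0.1, 6, 80)     # depth axis (m), illustrative
snr = 10 ** (3 * np.random.default_rng(1).random((80, 120)))   # synthetic SNR field

val_max = np.nanmax(snr)
levels = np.array([0.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [0.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)

fig, ax = plt.subplots()
cf = ax.contourf(x, -y, snr, levels, cmap='gist_rainbow', norm=norm, extend='both')
fig.colorbar(cf, ax=ax)
plt.show()
```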
@ -1140,24 +1137,21 @@ class SignalProcessingTab(QWidget):
val_max = np.nanmax(stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :]) val_max = np.nanmax(stg.SNR_cross_section[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == val_max: if val_min == val_max:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6]) levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else: else:
if val_min == 0: if val_min == 0:
val_min = 1e-5 val_min = 1e-5
if val_max > 1000: if val_max > 1000:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6]) levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else: else:
levels = np.array([00.1, 1, 2, 10, 100, val_max]) levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300)
cf = (self.axis_SNR[f].contourf(x, -y, cf = (self.axis_SNR[f].contourf(x, -y,
stg.SNR_cross_section[ stg.SNR_cross_section[
self.combobox_acoustic_data_choice.currentIndex()][f, :, :], self.combobox_acoustic_data_choice.currentIndex()][f, :, :],
@ -1172,18 +1166,18 @@ class SignalProcessingTab(QWidget):
val_max = np.nanmax(stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :]) val_max = np.nanmax(stg.SNR_raw_data[self.combobox_acoustic_data_choice.currentIndex()][f, :, :])
if val_min == val_max: if val_min == val_max:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6]) levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else: else:
if val_min == 0: if val_min == 0:
val_min = 1e-5 val_min = 1e-5
if val_max > 1000: if val_max > 1000:
levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6]) levels = np.array([00.1, 1, 2, 10, 100, 1000, 1e6])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max, val_max * 1.2] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1.2]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
else: else:
levels = np.array([00.1, 1, 2, 10, 100, val_max]) levels = np.array([00.1, 1, 2, 10, 100, 1000, val_max*1000 + 1])
bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000] bounds = [00.1, 1, 2, 10, 100, 1000, val_max * 1000 + 1]
norm = BoundaryNorm(boundaries=bounds, ncolors=300) norm = BoundaryNorm(boundaries=bounds, ncolors=300)
cf = (self.axis_SNR[f].contourf(x, -y, cf = (self.axis_SNR[f].contourf(x, -y,