Implementation of UBSediFlow data loading in Acoused. The data are organized in dictionaries and converted to numpy arrays for Acoused.

dev-brahim
brahim 2023-10-10 14:48:37 +02:00
parent 4537a7d414
commit 92a0b5fa54
40 changed files with 2836 additions and 157 deletions

View File

@ -2,18 +2,22 @@ from Model.AquascatDataLoader import RawAquascatData
import numpy as np
import pandas as pd
import datetime
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm
from udt_extract.raw_extract import raw_extract
# raw_20210519_102332.udt raw_20210520_135452.udt raw_20210525_092759.udt
# path_BS_raw_data = ("/home/bmoudjed/Documents/3 SSC acoustic meas project/Graphical interface project/Data/APAVER_2021/"
# "Rhone_20210519/Rhone_20210519/record/raw_20210525_092759.udt")
from Model.udt_extract.raw_extract import raw_extract
# raw_20210519_102332.udt raw_20210520_135452.udt raw_20210525_092759.udt raw_20210525_080454.udt
path_BS_raw_data0 = ("/home/bmoudjed/Documents/3 SSC acoustic meas project/Graphical interface project/Data/APAVER_2021/"
"Rhone_20210519/Rhone_20210519/record/")
filename0 = "raw_20210526_153310.udt"
class AcousticDataLoaderUBSediFlow():
def __init__(self, path_BS_raw_data: str):
path_BS_raw_data = path_BS_raw_data0 + filename0
self.path_BS_raw_data = path_BS_raw_data
# --- Extract Backscatter acoustic raw data with class ---
@ -23,7 +27,20 @@ class AcousticDataLoaderUBSediFlow():
device_name, time_begin, time_end, param_us_dicts, data_us_dicts, data_dicts, settings_dict \
= raw_extract(self.path_BS_raw_data)
self._freq = []
# --- Date and Hour of measurements read on udt data file ---
filename = self.path_BS_raw_data[-23:]
date_and_time = datetime.datetime(year=int(filename[4:8]),
month=int(filename[8:10]),
day=int(filename[10:12]),
hour=int(filename[13:15]),
minute=int(filename[15:17]),
second=int(filename[17:19]))
self._date = date_and_time.date()
print(f"date : {self._date}")
self._hour = date_and_time.time()
print(f"time : {self._hour}")
self._freq = np.array([[]])
self._r = np.array([[]])
self._time = np.array([[]])
self._BS_raw_data = np.array([[[]]])
@ -32,11 +49,13 @@ class AcousticDataLoaderUBSediFlow():
# print("-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x")
# print(f"config : {config} \n")
for channel in param_us_dicts[config].keys():
# print("-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x")
# print(f"channel : {channel} \n")
print("-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x-x")
print(f"channel : {channel} \n")
print(data_us_dicts[config][channel].keys())
# print(data_us_dicts[config][channel]['echo_avg_profile'])
# --- Frequencies ---
self._freq.append(param_us_dicts[config][channel]['f0'])
self._freq = np.append(self._freq, param_us_dicts[config][channel]['f0'])
# --- Depth for each frequencies ---
depth = [param_us_dicts[config][channel]['r_cell1'] * i
@ -95,8 +114,9 @@ class AcousticDataLoaderUBSediFlow():
print("self._r.shape ", self._r.shape)
self._freq_text = [str(f) for f in [np.round(f*1e-6, 2) for f in self._freq]]
self._freq_text = np.array([str(f) + " MHz" for f in [np.round(f*1e-6, 2) for f in self._freq]])
print("self._freq_text ", self._freq_text)
print("self._freq_text ", self._freq)
# self._BS_raw_data = np.array(np.reshape(self._BS_raw_data, (len(self._freq), self._r.shape[1], self._time.shape[1])))
print("self._BS_raw_data.shape ", self._BS_raw_data.shape)
@ -125,9 +145,6 @@ class AcousticDataLoaderUBSediFlow():
# self._data_BS = RawAquascatData(self.path_BS_raw_data)
#
# self._date = self._data_BS.date.date()
# self._hour = self._data_BS.date.time()
# self._nb_profiles = self._data_BS.NumProfiles
# self._nb_profiles_per_sec = self._data_BS.ProfileRate
# self._nb_cells = self._data_BS.NumCells
@ -151,48 +168,55 @@ class AcousticDataLoaderUBSediFlow():
# print(self._time[np.where(np.floor(self._time) == 175)])
# print(np.where((self._time) == 155)[0][0])
fig, ax = plt.subplots(nrows=len(self._freq), ncols=1)
for f, freq in enumerate(self._freq):
# print(f"{f} : {freq} \n")
pcm = ax[f].pcolormesh(self._time[f, :], self._r[f, :], (self._BS_raw_data[f, :, :self._time.shape[1]]),
cmap='viridis',
norm=LogNorm(vmin=np.min(self._BS_raw_data[f, :, :]), vmax=np.max(self._BS_raw_data[f, :, :])), shading='gouraud') # )
# ax.pcolormesh(range(self._BS_raw_data.shape[2]), range(self._BS_raw_data.shape[0]), self._BS_raw_data[:, 1, :], cmap='viridis',
# norm=LogNorm(vmin=1e-5, vmax=np.max(self._BS_raw_data[:, 0, :]))) # , shading='gouraud')
fig.colorbar(pcm, ax=ax[:], shrink=1, location='right')
plt.show()
# fig, ax = plt.subplots(nrows=len(self._freq), ncols=1)
# for f, freq in enumerate(self._freq):
# # print(f"{f} : {freq} \n")
# pcm = ax[f].pcolormesh(self._time[f, :], self._r[f, :], (self._BS_raw_data[f, :, :self._time.shape[1]]),
# cmap='viridis',
# norm=LogNorm(vmin=np.min(self._BS_raw_data[f, :, :]), vmax=np.max(self._BS_raw_data[f, :, :])), shading='gouraud') # )
# # ax.pcolormesh(range(self._BS_raw_data.shape[2]), range(self._BS_raw_data.shape[0]), self._BS_raw_data[:, 1, :], cmap='viridis',
# # norm=LogNorm(vmin=1e-5, vmax=np.max(self._BS_raw_data[:, 0, :]))) # , shading='gouraud')
# fig.colorbar(pcm, ax=ax[:], shrink=1, location='right')
# plt.show()
# fig, ax = plt.subplots(nrows=1, ncols=1)
# ax.plot(list(range(self._time.shape[1])), self._time[0, :])
# # ax.set_ylim(2, 20)
# plt.show()
# print(self.reshape_BS_raw_cross_section()[0, 0])
# print(self.reshape_BS_raw_cross_section())
# self.reshape_BS_raw_cross_section()
# self.reshape_r()
# self.reshape_t()
# def reshape_BS_raw_cross_section(self):
# BS_raw_cross_section = np.reshape(self._BS_raw_data,
# (self._r.shape[0]*len(self._time), self._freq.shape[0]),
# order="F")
# return BS_raw_cross_section
#
# def reshape_r(self):
# r = np.reshape(np.repeat(self._r, self._time.shape[0], axis=1),
# self._r.shape[0]*self._time.shape[0],
# order="F")
# return r
#
def reshape_BS_raw_cross_section(self):
    """Flatten the 3-D backscatter cube into a 2-D (cell*profile, frequency) matrix.

    Returns a Fortran-ordered reshape of ``self._BS_raw_data`` with one
    column per frequency and one row per (depth cell, time sample) pair.
    """
    n_rows = self._r.shape[1] * self._time.shape[1]
    n_cols = len(self._freq)
    # Fortran order: first axis varies fastest, matching the cube's layout.
    return np.reshape(self._BS_raw_data, (n_rows, n_cols), order="F")
def reshape_r(self):
    """Build the depth-coordinate matrix matching the flattened backscatter data.

    Column ``i`` holds the depth vector of frequency ``i``, each depth value
    repeated once per time sample, so rows line up with
    ``reshape_BS_raw_cross_section``.
    """
    n_times = self._time.shape[1]
    columns = [np.repeat(self._r[i, :], n_times) for i in range(len(self._freq))]
    # astype(float) keeps the original float dtype (np.zeros default).
    return np.column_stack(columns).astype(float)
# def compute_r_2D(self):
# r2D = np.repeat(self._r, self._time.size, axis=1)
# return r2D
#
# def reshape_t(self):
# t = np.reshape(np.repeat(self._time, self._r.shape[0]), (self._time.shape[0]*self._r.shape[0], 1))
# return t
def reshape_t(self):
    """Build the time-coordinate matrix matching the flattened backscatter data.

    Column ``i`` holds the time vector of frequency ``i``, each time value
    repeated once per depth cell, so rows line up with
    ``reshape_BS_raw_cross_section``.
    """
    n_cells = self._r.shape[1]
    columns = [np.repeat(self._time[i, :], n_cells) for i in range(len(self._freq))]
    # astype(float) keeps the original float dtype (np.zeros default).
    return np.column_stack(columns).astype(float)
# def concatenate_data(self):
# self.reshape_t()
# self.reshape_BS_raw_cross_section()
# # print(self.reshape_t().shape)
# # print(se.lf.reshape_BS_raw_cross_section().shape)
@ -201,7 +225,7 @@ class AcousticDataLoaderUBSediFlow():
# return df
# if __name__ == "__main__":
# AcousticDataLoaderUBSediFlow(path_BS_raw_data)
# Manual test entry point: load the hard-coded example UDT file when the
# module is run directly (path_BS_raw_data0 / filename0 are defined above).
if __name__ == "__main__":
    AcousticDataLoaderUBSediFlow(path_BS_raw_data0 + filename0)

View File

Binary file not shown.

View File

@ -0,0 +1,23 @@
{
"ADDR_SOUND_SPEED_AUTO" : "0x0004",
"ADDR_SOUND_SPEED_SET" : "0x0005",
"ADDR_CONFIG_ID" : "0x0010",
"ADDR_CONFIG" : "0x0011",
"OFFSET_CONFIG" : 20,
"SIZE_CONFIG" : 17,
"ADDR_TANGAGE" : "0x0058",
"ADDR_ROULIS" : "0x0059",
"ADDR_TEMP_MOY" : "0x005A",
"ADDR_SOUND_SPEED" : "0x005B",
"ADDR_GAIN_CA0" : "0x005C",
"ADDR_GAIN_CA1" : "0x005D",
"ADDR_NOISE_GMAX" : "0x005E",
"ADDR_NOISE_GMID" : "0x005F",
"ADDR_PROFILE_HEADER" : "0x0058",
"SIZE_PROFILE_HEADER" : 8,
"ADDR_PROFILE_DATA" : "0x0060"
}

View File

@ -0,0 +1,24 @@
{
"ADDR_SOUND_SPEED_AUTO" : "0x0004",
"ADDR_SOUND_SPEED_SET" : "0x0005",
"ADDR_SNR_FILTER_THRESHOLD" : "0x0006",
"ADDR_CONFIG_ID" : "0x0010",
"ADDR_CONFIG" : "0x0011",
"OFFSET_CONFIG" : 20,
"SIZE_CONFIG" : 17,
"ADDR_TANGAGE" : "0x02C3",
"ADDR_ROULIS" : "0x02C4",
"ADDR_TEMP_MOY" : "0x02C5",
"ADDR_SOUND_SPEED" : "0x02C6",
"ADDR_GAIN_CA0" : "0x02C7",
"ADDR_GAIN_CA1" : "0x02C8",
"ADDR_NOISE_GMAX" : "0x02C9",
"ADDR_NOISE_GMID" : "0x02CA",
"ADDR_PROFILE_HEADER" : "0x02C3",
"SIZE_PROFILE_HEADER" : 8,
"ADDR_PROFILE_DATA" : "0x02CB"
}

View File

@ -0,0 +1,124 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
import os
import json
import logging
# NOTE(review): calling basicConfig at import time configures the root logger
# for the whole application — presumably intentional for this tool; confirm
# before reusing this module as a library.
logging.basicConfig(level=logging.DEBUG)
## APF04 addresses and commands
# idle command number (loops while waiting for a command)
CMD_NULL = 0
# command number to stop the current action
CMD_STOP = 1
# blocking-mode command number
CMD_PROFILE_BLOCKING = 3
# non-blocking-mode command number
CMD_PROFILE_NON_BLOCKING = 4
# command number for a measurement with IQ data readback
CMD_PROFILE_IQ = 6
# command number to start the automatic mode
CMD_START_AUTO = 2
# command number to check the current configuration
CMD_CHECK_CONFIG = 5
# command number to reset the settings
CMD_INIT_SETTINGS = 7
# level measurement
CMD_MEAS_LEVEL = 20
# command number for a LED test
CMD_TEST_LED = 190
# command number for a temperature + pitch + roll measurement
CMD_TEST_I2C = 195
# these 5 addresses are considered fixed and will never change
ADDR_ACTION = 0xFFFD
ADDR_VERSION_C = 0x0000  # needed to determine the dict of the other addresses
ADDR_VERSION_VHDL = 0xFFFE
ADDR_MODEL_YEAR = 0x0001
ADDR_SERIAL_NUM = 0x0002
def get_addr_dict(version_c, addr_json=None):
    """Get the APF04 addresses in RAM for a given firmware version.

    Args:
        version_c: two digits firmware version number (coerced with ``int``).
        addr_json: optional path to a JSON address map; when given it is
            loaded directly and the version-based file lookup is skipped.

    Returns:
        Dictionary mapping address names to integer addresses (hex strings
        such as ``"0x0010"`` are converted to ``int``), or ``None`` when no
        address map is known for this firmware version.
    """
    version_c = int(version_c)
    if addr_json:
        with open(addr_json) as json_file:
            addr_dict = json.load(json_file)
    else:
        package_dir = os.path.abspath(__file__).split('/peacock_uvp/')[0] + "/peacock_uvp/"
        # firmware versions 47..52 share the same address map
        if 47 <= version_c <= 52:
            addr_json = package_dir + "addr_S-Firmware-47.json"
        else:
            addr_json = package_dir + "addr_S-Firmware-" + str(version_c) + ".json"
        if addr_json.split("/")[-1] in os.listdir(package_dir):
            with open(addr_json) as json_file:
                addr_dict = json.load(json_file)
        else:
            # TODO mb 20/10/2021: decide between a default behaviour and raising an exception
            logging.debug("WARNING: Unknown Addresses for this S-Firmware version.")
            addr_dict = None
            logging.debug(os.listdir("."))
        logging.debug("addr json: %s", addr_json)
        logging.debug("addr dict: %s", addr_dict)
    # conversion of hexadecimal strings ("0x....") to integers, in place
    if addr_dict:
        for key, value in addr_dict.items():
            if isinstance(value, str) and "x" in value:
                addr_dict[key] = int(value, 16)
        logging.debug("addr dict converted: %s", addr_dict)
    return addr_dict
# ===============================================
# DESCRIPTION OF AVAILABLE ADDRESSES IN THE DICT:
# ===============================================
#ADDR_SOUND_SPEED_AUTO
#ADDR_SOUND_SPEED_SET
#Adresse contenant l'adresse de la configuration de séquencement ultrasons demandée par Modbus. Elle est suivie par les 3 config partagées avec Modbus.
#ADDR_CONFIG_ID
#Adresse de départ de la zone contenant les config
#ADDR_CONFIG
#Décallage entre chaque config
#OFFSET_CONFIG
#SIZE_CONFIG
# ----- Mesures Sensors -----
#Adresse du tanguage moyen mesuré (à destination du Modbus)
#ADDR_TANGAGE
#Adresse du roulis moyen mesuré (à destination du Modbus)
#ADDR_ROULIS
#Adresse de la température moyenne mesurée (à destination du Modbus)
#ADDR_TEMP_MOY
# ---- En-tête des profils ----
#ADDR_SOUND_SPEED
#ADDR_GAIN_CA0
#ADDR_GAIN_CA1
#Adresse des profils de vitesse et amplitude
#ADDR_PROFILE_HEADER # adresse du tangage, 1er sensor
#SIZE_PROFILE_HEADER
#ADDR_PROFILE_DATA # le début des données

View File

@ -0,0 +1,293 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Marie Burckbuchler, Stéphane Fischer
import logging
from math import ceil
from .apf04_gain import convert_dB_m2code, convert_code2dB_m, convert_code2dB, convert_dB2code, APF04_CODE_MAX_APPLIED
#from .ap_exception import ap_protocol_error
from .apf_type import cast_int16, cast_uint16
#ap_protocol_error(3300, "Warning: v_min has to be in [-Nyquist_Range, 0].")
class ConfigHw ():
    # @brief Hardware-level acoustic configuration of the APF04 board.
    # To instantiate an object of this class, you can give no parameter to get attributes set to zero, or you can give a settings, the ID of the config and the sound_speed to use.
    # TODO (san 21/04/2020) it would surely be more appropriate and cleaner (and more compact) to store the configHW in a dict
    # together with a list giving the order of the values in memory:
    # order = ['div_f0', 'n_tir', 'c_prf', 'n_em', 'n_vol', 'c_vol1', 'c_dvol' ...]
    def __init__(self, _f_sys):
        """Create a zeroed hardware config tied to system frequency ``_f_sys`` (Hz)."""
        logging.debug("f_sys = %.1e"%_f_sys)
        self.f_sys = _f_sys
        self.div_f0 = 0
        self.n_tir = 0
        self.c_prf = 0
        self.n_em = 0
        self.n_vol = 0
        self.c_vol1 = 0
        self.c_dvol = 0
        self.gain_ca0 = 0
        self.gain_ca1 = 0
        self.tr = 0
        self.phi_min = 0
        self.method = 0
        self.reserved1 = 0
        self.reserved2 = 0
        self.n_avg = 0
        self.blind_ca0 = 0
        self.blind_ca1 = 0

    def set(self, _config_data, _sound_speed=1480, _gain_blind_zone=None):
        """Load the config from either a dict (user settings) or a list (hardware words).

        Returns ``self`` so calls can be chained.
        """
        if type(_config_data) is dict :
            logging.debug ("call from_dict")
            self.from_dict(_config_data, _sound_speed, _gain_blind_zone)
        elif type(_config_data) is list :
            # NOTE(review): log message says "from_dict" but this branch calls from_list
            logging.debug ("call from_dict")
            self.from_list(_config_data)
        else :
            logging.info("wrong data type for _config_data")
        return self

    # @brief Load the parameters of an acoustic configuration.
    # The user-level parameter is recomputed each time from the hardware parameter (to take into
    # account the modifications due to casts etc., when parameters are interdependent).
    # @param _config_dict : dict of the user settings (f0, prf, r_vol1, gain_function, ...)
    # @param _sound_speed : sound speed used for the distance <-> cell-count conversions
    # @param _gain_blind_zone : optional dict with the blind-zone gain limits (a0_max, a1_max)
    def from_dict(self, _config_dict, _sound_speed=1480, _gain_blind_zone=None):
        logging.debug("start import dict")
        self.div_f0 = cast_int16(self.f_sys / _config_dict['f0'] -1)
        f0 = self.f_sys / (self.div_f0+1)
        self.c_prf = cast_int16(f0 / _config_dict['prf'])
        # NOTE(review): prf is recomputed from _config_dict['f0'], not from the
        # recomputed f0 above — confirm this asymmetry is intentional.
        prf = _config_dict['f0']/self.c_prf
        self.n_tir = cast_int16(_config_dict['n_ech'])
        # n_em is equal to 0 only if r_em = 0. If not, n_em is at least equal to 1.
        if _config_dict['r_em'] == 0:
            self.n_em = cast_int16(0)
        else:
            self.n_em = cast_int16(round(2./_sound_speed * f0 *_config_dict['r_em']))
            if self.n_em == 0:
                self.n_em = cast_int16(1)
        r_em = _sound_speed/(2.*f0)*self.n_em
        self.n_vol = cast_int16(_config_dict['n_vol'])
        self.c_vol1 = cast_uint16(2./_sound_speed * f0 * (_config_dict['r_vol1'] - r_em/2.))
        r_vol1 = _sound_speed/(2.*f0)*self.c_vol1 + r_em/2.
        self.c_dvol = cast_int16(2./_sound_speed * f0 *_config_dict['r_dvol'])
        if self.c_dvol < 2: # constraint from APF04 hardware
            self.c_dvol = cast_int16(2)
        r_dvol = _sound_speed/(2.*f0)*self.c_dvol
        self.gain_ca1 = cast_int16(convert_dB_m2code(_config_dict['gain_function']['a1'], r_dvol))
        a1 = convert_code2dB_m(self.gain_ca1, r_dvol)
        if _gain_blind_zone :
            self.blind_ca1 = cast_int16(convert_dB_m2code(_gain_blind_zone['a1_max'], r_dvol))
        else :
            self.blind_ca1 = 0
        # For a1 max, we do not recompute backwards, since it is a constant and this
        # value is only useful a few lines below in the computation of ca0_max.
        a1_max = convert_code2dB_m(self.blind_ca1, r_dvol)
        r_ny = _sound_speed*prf/(2*f0)
        self.phi_min = cast_int16(_config_dict['v_min']*65535/(2*r_ny))
        self.gain_ca0 = cast_int16(convert_dB2code(_config_dict['gain_function']['a0'] + r_vol1*a1))
        if _gain_blind_zone :
            self.blind_ca0 = cast_int16(convert_dB2code(_gain_blind_zone['a0_max'] + r_vol1 * a1_max))
        else :
            self.blind_ca0 = APF04_CODE_MAX_APPLIED
        # simpler ways to write this exist ... (extracts the digits of e.g. "tr1")
        self.tr = cast_int16(int(''.join(ele for ele in _config_dict['tr_out'] if ele.isdigit())))-1
        # TODO this limits tr to 9, beware
        if _config_dict['method'] == "ppc_cont":
            self.burst_mode = False
        else: # hence method == "corr_ampl"
            self.burst_mode = True
        self.phase_coding = _config_dict['phase_coding']
        self.static_echo_filter = _config_dict['static_echo_filter']
        self.gain_auto = _config_dict['gain_function']['auto']
        # To switch the processing-method parameter, replace the last parenthesis by a 2 or a 0.
        if(self.gain_auto == True):
            logging.debug("gain auto is set")
            # +2048 to enable the I2C (for firmware >C51)
            self.method = cast_int16(512 + (cast_int16(self.static_echo_filter)<<8) + (cast_int16(self.phase_coding)<<2) + cast_int16(self.burst_mode) + (cast_int16(self.burst_mode)<<1))
        else:
            logging.debug("gain is set to manual")
            self.method = cast_int16(0 + (cast_int16(self.static_echo_filter)<<8) + (cast_int16(self.phase_coding)<<2) + cast_int16(self.burst_mode) + (cast_int16(self.burst_mode)<<1))
        self.n_avg = cast_int16(_config_dict['n_profile'])
        self.reserved1 = 0
        self.reserved2 = 0

    # @brief Load the parameters from a table read from the hardware.
    # @param _param_table : table of the values in the order given below; see also dt_protocole of the APF04.
    def from_list(self, _param_table):
        logging.debug("start import list")
        if len(_param_table)==17:
            self.div_f0 = _param_table[0]
            self.n_tir = _param_table[1]
            self.c_prf = _param_table[2]
            self.n_em = _param_table[3]
            self.n_vol = _param_table[4]
            self.c_vol1 = _param_table[5]
            self.c_dvol = _param_table[6]
            self.gain_ca0 = _param_table[7]
            self.gain_ca1 = _param_table[8]
            self.tr = _param_table[9]
            self.phi_min = _param_table[10]
            self.method = _param_table[11]
            # indices 12 and 13 are the reserved words, not read back
            self.n_avg = _param_table[14]
            self.blind_ca0 = _param_table[15]
            self.blind_ca1 = _param_table[16]
            # Other useful parameters (coded in method bits array) :
            if (self.method & 0x0001) == 0:
                self.burst_mode = False
            else:
                self.burst_mode = True
            # self.method & 0x0002 holds the processing-method indication.
            if (self.method & 0x0004) == 0:
                self.phase_coding = False
            else:
                self.phase_coding = True
            if (self.method & 0x0100) == 0:
                self.static_echo_filter = False
            else:
                self.static_echo_filter = True
            if (self.method & 0x0200) == 0:
                self.gain_auto = False
            else:
                self.gain_auto = True
        #else :
        #    logging.info("WARNING")
        # TODO raise error

    # @brief Export the current config_hw as a user-level settings dict.
    # @param _sound_speed: sound speed used for the cell-count -> distance conversions
    def to_dict(self, _sound_speed):
        # TODO if div_f0 is not initialised -> ERROR
        config = {}
        f0_ = (self.f_sys/(self.div_f0+1))
        config['f0'] = f0_
        config['tr_out'] = 'tr'+str(self.tr+1)
        config['prf'] = f0_/self.c_prf
        config['r_vol1'] = _sound_speed*((self.c_vol1+self.n_em/2.)/f0_)/2.
        config['r_dvol'] = _sound_speed*(self.c_dvol/f0_)/2.
        config['n_vol'] = self.n_vol
        config['r_em'] = _sound_speed*(self.n_em/f0_)/2.
        config['n_ech'] = self.n_tir
        if self.burst_mode:
            config['method'] = "corr_ampl"
        else:
            config['method'] = "ppc_cont"
        # self.method & 0x0002 holds the processing-method indication.
        if self.phase_coding:
            config['phase_coding'] = True
        else:
            config['phase_coding'] = False
        if self.static_echo_filter:
            config['static_echo_filter'] = True
        else:
            config['static_echo_filter'] = False
        config['gain_function'] = {}
        if self.gain_auto:
            config['gain_function']['auto'] = True
        else:
            config['gain_function']['auto'] = False
        config['n_profile'] = self.n_avg
        rdvol = _sound_speed*(self.c_dvol/f0_)/2.
        rvol1 = _sound_speed*((self.c_vol1+self.n_em/2.)/f0_)/2.
        a1 = convert_code2dB_m(self.gain_ca1, rdvol)
        config['gain_function']['a0'] = convert_code2dB(self.gain_ca0)-a1*rvol1
        config['gain_function']['a1'] = a1
        config['v_min'] = 2*_sound_speed*config['prf']*self.phi_min/(2*65535*f0_)
        return config

    def get_bloc_duration(self):
        """Duration (s) of one measurement block derived from the timing codes."""
        # TODO san 27/09/2017 beware, this increases if n_vol > 100
        return self.n_tir * (self.n_avg) * (self.div_f0 + 1) * self.c_prf / self.f_sys

    # @brief Display of the current configuration (debug log).
    def print_config_hw(self):
        logging.debug("div_F0 = %s", self.div_f0)
        logging.debug("n_tir = %s", self.n_tir)
        logging.debug("c_PRF = %s", self.c_prf)
        logging.debug("n_Em = %s", self.n_em)
        logging.debug("n_vol = %s", self.n_vol)
        logging.debug("c_vol1 = %s", self.c_vol1)
        logging.debug("c_dvol = %s", self.c_dvol)
        logging.debug("CA0_dac = %s", self.gain_ca0)
        logging.debug("CA1_dac = %s", self.gain_ca1)
        logging.debug("CA0_max_dac = %s", self.blind_ca0)
        logging.debug("CA1_max_dac = %s", self.blind_ca1)
        logging.debug("Cs_Tr = %s", self.tr)
        logging.debug("phi_min = %s", self.phi_min)
        logging.debug("Methode = %s", self.method)
        logging.debug("n_avg = %s", self.n_avg)
        # logging.debug("gain auto : %s", self.gain_auto)
        # logging.debug("static_echo_fiter : %s", self.static_echo_filter)
        # logging.debug("burst_mode : %s", self.burst_mode)
        # logging.debug("phase_coding : %s", self.phase_coding)

    def to_list(self):
        """Serialize the config to the 17-word list expected by the hardware."""
        buf=[]
        buf.append(self.div_f0)
        buf.append(self.n_tir)
        buf.append(self.c_prf)
        buf.append(self.n_em)
        buf.append(self.n_vol)
        buf.append(self.c_vol1)
        buf.append(self.c_dvol)
        buf.append(self.gain_ca0)
        buf.append(self.gain_ca1)
        buf.append(self.tr)
        buf.append(self.phi_min)
        buf.append(self.method)
        buf.append(self.reserved1)
        buf.append(self.reserved2)
        buf.append(self.n_avg)
        buf.append(self.blind_ca0)
        buf.append(self.blind_ca1)
        return buf

    def __str__(self):
        return str(self.__dict__)

    def __eq__(self, other):
        # equality on the 17 hardware words only (f_sys and derived flags excluded)
        if not isinstance(other, type(self)):
            logging.info("NOT IMPLEMENTED")
            return NotImplemented
        return ((self.div_f0, self.tr, self.method, self.c_prf, self.phi_min, self.n_tir, self.c_vol1, self.c_dvol, self.n_em, self.n_vol, self.reserved1, self.reserved2, self.n_avg, self.gain_ca0, self.gain_ca1, self.blind_ca0, self.blind_ca1) == (other.div_f0, other.tr, other.method, other.c_prf, other.phi_min, other.n_tir, other.c_vol1, other.c_dvol, other.n_em, other.n_vol, other.reserved1, other.reserved2, other.n_avg, other.gain_ca0, other.gain_ca1, other.blind_ca0, other.blind_ca1))

    def __ne__(self, other):
        return not self == other

View File

@ -0,0 +1,177 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
from datetime import datetime
import struct
import logging
from .apf04_modbus import Apf04Modbus
from .apf04_addr_cmd import *
from .apf04_config_hw import ConfigHw
from .apf_timestamp import encode_timestamp
from .apf04_exception import apf04_exception
# TODO gérer ici les erreur spécifiques au HW
class Apf04Driver (Apf04Modbus):
    """ @brief Drives the APF04 instrument over Modbus (register map + action commands).
    """
    # TODO : test the communication in an init by reading the version
    def __init__(self, _baudrate, _f_sys, _dev=None, _addr_dict=None):
        # _f_sys: system frequency (Hz), forwarded to every ConfigHw created here
        self.f_sys=_f_sys
        Apf04Modbus.__init__(self, _baudrate, _dev)
        # address map (name -> RAM address), firmware-version dependent
        self.addr = _addr_dict

    def new_config (self):
        """ @brief create an empty config
        """
        # TODO could also be called create_config or empty_config @marie : any opinion?
        return ConfigHw(self.f_sys)

    def read_config (self, _id_config=0):
        """ @brief read the parameters of a configuration
        @param _id_config : configuration id [0..2] (by default config n°1/3)
        mainly used to re-read the config after a check_config
        """
        self.config = ConfigHw(self.f_sys)
        self.config.id_config = _id_config
        # all the settings parameters are signed
        self.config.from_list(self.read_list_i16(int(self.addr["ADDR_CONFIG"])+_id_config*int(self.addr["OFFSET_CONFIG"]), int(self.addr["SIZE_CONFIG"]))) # in words
        return self.config

    # TODO .to_list() to be done by the caller? APF04Driver does not know config_hw — or move config_hw into self.config (currently one level above)?
    def write_config (self, _config, _id_config):
        """ @brief write the parameters of a configuration
        @param _config : configuration (of type ConfigHw)
        @param _id_config : configuration id [0..2]
        """
        logging.debug("%s"%(_config.to_list()))
        self.write_buf_i16(_config.to_list(), self.addr["ADDR_CONFIG"]+_id_config*self.addr["OFFSET_CONFIG"])

    # SELECTS THE CONFIG USED BY THE DEVICE
    # _config = [0..2]
    def select_config (self, _id_config):
        logging.debug("selecting config %d [0..N-1]"%(_id_config))
        self.write_i16(_id_config, self.addr["ADDR_CONFIG_ID"])

    def read_version (self):
        """ @brief Read the C and VHDL firmware versions (also fills model/year/serial_num)
        """
        self.version_vhdl = self.read_i16(ADDR_VERSION_VHDL)
        self.version_c = self.read_i16(ADDR_VERSION_C)
        logging.debug("Version VHDL=%s", self.version_vhdl)
        logging.debug("Version C=%s", self.version_c)
        if self.version_c < 45:
            print ("WARNING firmware version %d do not provide noise measurements in profile's header" % self.version_c)
            # old firmware: identification registers are not available
            self.model = 0
            self.year = 2018
            self.serial_num = 0
        else :
            model_year = self.read_i16(ADDR_MODEL_YEAR)
            self.model = (model_year & 0xFF00)>>8
            self.year = 2000 + (model_year & 0x00FF)
            if self.model == 0x01 :
                logging.debug("Model is Peacock UVP")
            else :
                logging.info("Warning, model (id %s) is not defined"%self.model)
            logging.debug("Year of production = %s", self.year)
            self.serial_num = self.read_i16(ADDR_SERIAL_NUM)
            logging.debug("Serial number=%s", self.serial_num)
        return self.version_vhdl, self.version_c

    def write_sound_speed (self, sound_speed=1480, sound_speed_auto=False):
        """ @brief Writing of the sound speed global parameter in RAM
        """
        addr_ss_auto = self.addr["ADDR_SOUND_SPEED_AUTO"]
        addr_ss_set = self.addr["ADDR_SOUND_SPEED_SET"]
        # fix for firmware prior to 45
        if self.version_c < 45:
            addr_ss_auto -= 2
            addr_ss_set -= 2
        if sound_speed_auto:
            self.write_i16(1, addr_ss_auto)
        else:
            self.write_i16(0, addr_ss_auto)
            self.write_i16(sound_speed, addr_ss_set)

    def __action_cmd__(self, _cmd, _timeout=0.0):
        """ @brief generic action function
        send a command asking for a given action. Unless specific case,
        the function is released when the action is finished. The timeout
        should be set consequently. """
        try:
            self.write_i16(_cmd, ADDR_ACTION, _timeout)
        except apf04_exception as ae:
            logging.info("apf04_exception catched with command %s with timeout %e"%(_cmd, _timeout))
            raise ae

    def act_stop (self):
        """ @brief Stop the measurement (only in non blocking mode)"""
        self.__action_cmd__(CMD_STOP, 5.0)

    def act_meas_I2C (self):
        """ @brief Make one measure of pitch, roll and temp. Those values are then updated in the RAM.
        """
        self.__action_cmd__(CMD_TEST_I2C, 2.0)

    def act_test_led (self):
        self.__action_cmd__(CMD_TEST_LED, 1.5)
        # timeout set to 1.5 seconds to let the Led blink

    def act_meas_IQ (self):
        self.__action_cmd__(CMD_PROFILE_IQ) # TODO timeout

    def act_meas_profile (self, _timeout=0.):
        """ @brief start to measure a block of profiles
        @param _timeout maximum delay to get an answer from the board
        """
        # get UTC timestamp just before starting the measurements
        self.timestamp_profile = datetime.utcnow()
        logging.debug ("setting timeout to %f"%_timeout)
        self.__action_cmd__(CMD_PROFILE_BLOCKING, _timeout)

    def act_check_config (self):
        self.__action_cmd__(CMD_CHECK_CONFIG, 0.2)

    def act_start_auto_mode (self):
        self.__action_cmd__(CMD_START_AUTO) # TODO timeout

    def read_temp (self):
        return self.read_i16(self.addr["ADDR_TEMP_MOY"])

    def read_pitch (self):
        return self.read_i16(self.addr["ADDR_TANGAGE"])

    def read_roll (self):
        return self.read_i16(self.addr["ADDR_ROULIS"])

    def read_profile (self, _n_vol):
        """Read one measured profile block (header + _n_vol cells of 4 words) and
        return it as little-endian bytes prefixed by the encoded timestamp."""
        logging.debug("timestamp: %s"%self.timestamp_profile)
        #logging.debug("pitch: %s, roll: %s,"%(self.read_i16(self.addr["ADDR_TANGAGE"]), self.read_i16(self.addr["ADDR_ROULIS"])))
        #logging.debug("pitch: %s, roll: %s, temps: %s, sound_speed: %s, ca0: %s, ca1: %s"%(self.read_i16(self.addr["ADDR_TANGAGE"]), self.read_i16(self.addr["ADDR_ROULIS"]), self.read_i16(self.addr["ADDR_TEMP_MOY"]), self.read_i16(self.addr["ADDR_SOUND_SPEED"]), self.read_i16(self.addr["ADDR_GAIN_CA0"]), self.read_i16(self.addr["ADDR_GAIN_CA1"])))
        data_list = self.read_buf_i16(self.addr["ADDR_PROFILE_HEADER"], self.addr["SIZE_PROFILE_HEADER"] + _n_vol*4)
        logging.debug("processing+transfert delay = %fs"%(datetime.utcnow()-self.timestamp_profile).total_seconds())
        # switch to little endian (the original data are big endian)
        # processing < 1 ms for 50 cells on a macbook pro
        data_packed = struct.pack('<%sh'%int(len(data_list)/2), \
            *struct.unpack('>%sh'%int(len(data_list)/2), data_list))
        logging.debug("pack string = '%s'"%'>%sh'%int(len(data_list)/2))
        logging.debug("processing+transfert+swap delay = %fs"%(datetime.utcnow()-self.timestamp_profile).total_seconds())
        return encode_timestamp(self.timestamp_profile) + data_packed

View File

@ -0,0 +1,25 @@
#!/usr/bin/env python
# -*- coding: UTF_8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
class apf04_base_exception (Exception):
    """@brief base class for APF04 specific exceptions

    Carries a numeric error ``code`` and a human-readable ``message``;
    subclasses only override the string rendering.
    """

    def __init__(self, _code, _message):
        self.code = _code
        self.message = _message

    def __str__(self):
        return "base_exception {:d} : {}".format(self.code, self.message)
class apf04_exception (apf04_base_exception):
    # something that happens occasionally (recoverable; callers may catch and retry)
    def __str__(self):
        return "apf04_exception %d : %s"%(self.code, self.message)
class apf04_error (apf04_base_exception):
    # something that should not happen (programming or hardware fault)
    def __str__(self):
        return "apf04_error %d : %s"%(self.code, self.message)

View File

@ -0,0 +1,132 @@
#!/usr/bin/env python
# -*- coding: UTF_8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer, Alexandre Schaeffer, Marie Burckbuchler
from math import pow
APF04_RECEPTION_CHAIN_CONSTANT_GAIN = 11.72 # dB (after DAC+LNA)
# currently on hardware after 05/2021 the max value is 14.5 and depend on f0 (filter bandwidth)
APF04_GAIN_CODE_RATIO = 4.029E-2 # in the chain (DAC + LNA), where DAC (12bits-->4096, 3.3V) and gain LNA (50dB/V).
APF04_CODE_MAX_APPLIED = 1241
APF04_CODE_MAX_USER = 4095
APF04_CODE_MIN_USER = -4096
APF04_CODE_MIN_APPLIED = 50


def convert_dB_m2code(_gain_dB, _r_dvol):
    """Convert a gain slope a1 (dB/m) to its code ca1.

    A 4-bit shift is used for precision reasons; the code is truncated
    to the user range.

    Args:
        _gain_dB(float): gain slope in dB/m
        _r_dvol(float): inter-volume size in m
    Returns:
        code (int)
    """
    raw_code = (16. * _gain_dB * _r_dvol) / APF04_GAIN_CODE_RATIO
    return _truncate(int(round(raw_code, 1)), APF04_CODE_MAX_USER, APF04_CODE_MIN_USER)


def convert_code2dB_m(_code, _r_dvol):
    """Convert any code ca1 to a gain slope a1 (dB/m).

    A 4-bit shift is used for precision reasons.

    Args:
        _code(int): gain code
        _r_dvol(float): inter-volume size in m
    Returns:
        gain slope in dB/m (float)
    """
    return _code * (APF04_GAIN_CODE_RATIO / (16. * _r_dvol))


def convert_dB2code(_gain_dB):
    """Convert a gain intercept (dB) to its code, truncated to the available range.

    Args:
        _gain_dB(float): gain intercept in dB
    Returns:
        gain code (int)
    """
    raw_code = (_gain_dB - APF04_RECEPTION_CHAIN_CONSTANT_GAIN) / APF04_GAIN_CODE_RATIO
    return _truncate(int(round(raw_code, 1)), APF04_CODE_MAX_APPLIED, APF04_CODE_MIN_USER)


def convert_code2dB(_code):
    """Convert any code to a theoretical gain intercept (dB).

    Args:
        _code(int): gain code
    Returns:
        gain intercept in dB (float)
    """
    return APF04_RECEPTION_CHAIN_CONSTANT_GAIN + _code * APF04_GAIN_CODE_RATIO


def _convert_code2dB_trunc(_code):
    """Effective (truncated) gain in dB applied in a cell for a given code.

    Args:
        _code (int): gain code
    Returns:
        gain in dB applied in a cell
    """
    applied_code = _truncate(_code, APF04_CODE_MAX_APPLIED, APF04_CODE_MIN_APPLIED)
    return convert_code2dB(applied_code)


def calc_gain(_n_vol, _gain_ca0, _gain_ca1, _gain_max_ca0, _gain_max_ca1):
    """Build the table of linear amplitude gains applied to each cell of the profile.

    Args:
        _n_vol(int): number of cells in the profile
        _gain_ca0(int): code of the gain intercept
        _gain_ca1(int): code of the gain slope
        _gain_max_ca0(int): code of the blind zone gain limit intercept
        _gain_max_ca1(int): code of the blind zone gain limit slope
    Returns:
        list of linear gains (10^(dB/20)) to apply to each cell of the profile
    """
    tab_gain = []
    for cell in range(_n_vol):
        cell_gain = _convert_code2dB_trunc(_gain_ca0 + (cell * _gain_ca1) / 16.)
        blind_limit = _convert_code2dB_trunc(_gain_max_ca0 + (cell * _gain_max_ca1) / 16.)
        # the blind zone limitation caps the programmed gain
        tab_gain.append(pow(10, min(cell_gain, blind_limit) / 20.))
    return tab_gain


def _truncate(value, limit_max, limit_min):
    """Clamp value into [limit_min, limit_max].

    Args:
        value: value to clamp
        limit_max: max limit
        limit_min: min limit
    Returns:
        the clamped value
    """
    if value > limit_max:
        return limit_max
    if value < limit_min:
        return limit_min
    return value

View File

@ -0,0 +1,141 @@
#!/usr/bin/env python
# -*- coding: UTF_8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Marie Burckbuchler
# @date 20 Aout 2020
from array import *
from struct import calcsize, unpack
from math import sqrt, pi, pow
from .apf_timestamp import decode_timestamp
from .apf04_gain import _convert_code2dB_trunc, convert_code2dB_m, convert_code2dB, calc_gain
# @brief Use one frame to retrieve a requested profile (UDT005 format).
# A profile line in raw UDT005 contains a header followed by the coded profile;
# the header holds scalar values which must be recorded as well.
# @param data : the binary data block of one profile
# @param config_hw : hardware configuration (provides n_vol, c_prf, n_avg, blind_ca0, blind_ca1)
def extract_measures (data, config_hw) :
    """Decode one raw UDT005 profile frame into a dict of scalars and vectors.

    Returns a dict holding the timestamp, the scalar channels (pitch, roll,
    temp, gain codes, noise levels) and the four profile vectors (velocity,
    std, amplitude, snr), with the vectors converted via conversion_profile().
    Raises Exception when the frame size does not match config_hw.n_vol.
    """
    size = len(data)
    data_dict = {
        "velocity" : [],
        "amplitude" : [],
        "snr" : [],
        "std" : []
    }
    # Note: there is no reference channel to read at this stage.
    # Read the timestamp; its byte length becomes the header size.
    data_dict["timestamp"], offset = decode_timestamp( data )
    head_size = offset
    # eight int16 scalars follow the timestamp
    scalars_size = calcsize('hhhhhhhh')
    data_dict['pitch'], data_dict['roll'], data_dict['temp'], \
    sound_speed, data_dict['gain_ca0'], data_dict['gain_ca1'], \
    data_dict['noise_g_max'], data_dict['noise_g_mid'] \
        = unpack('hhhhhhhh', data[head_size:head_size+scalars_size])
    # A few acoustic parameters which are needed for the following calculations
    n_vol = config_hw.n_vol
    c_prf = config_hw.c_prf
    n_avg = config_hw.n_avg
    #r_dvol = to_dict(self.config_hw[self.current_config - 1].config_hw, sound_speed)['r_dvol']
    #r_vol1 = to_dict(self.config_hw[self.current_config - 1].config_hw, sound_speed)['r_vol1']
    blind_ca0 = config_hw.blind_ca0
    blind_ca1 = config_hw.blind_ca1
    # each cell stores 4 int16 values (velocity, std, amplitude, snr),
    # i.e. 4 values of 2 bytes per cell
    if (size-(head_size+scalars_size))/4/2 != n_vol:
        raise Exception('volume number', "expected %d volumes, but profile data contains %d"%(n_vol, ((size-(head_size+scalars_size))/4/2)))
    tab_size = calcsize('h')
    offset = head_size+scalars_size
    # vectors are interleaved per cell: velocity, std, amplitude, snr
    for i in range(n_vol):
        data_dict['velocity'].append(unpack('h', data[offset: offset + tab_size])[0])
        offset += calcsize('h')
        data_dict['std'].append(unpack('h', data[offset: offset + tab_size])[0])
        offset += calcsize('h')
        data_dict['amplitude'].append(unpack('h', data[offset: offset + tab_size])[0])
        offset += calcsize('h')
        data_dict['snr'].append(unpack('h', data[offset: offset + tab_size])[0])
        offset += calcsize('h')
    # conversion of the coded values:
    # Note: the scalars must be converted afterwards, so that the gains are
    # still in coded form when the echo profile is converted
    conversion_profile(data_dict, sound_speed, n_vol, n_avg, c_prf, data_dict['gain_ca0'], data_dict['gain_ca1'], blind_ca0, blind_ca1)
    #conversion_scalar(scalars_dict)
    #conversion_us_scalar(scalars_us_dict, n_avg, r_dvol, r_vol1)
    return data_dict
def conversion_profile(data_dict, sound_speed, n_vol, n_avg, c_prf, gain_ca0, gain_ca1, blind_ca0, blind_ca1):
    """Convert the coded profile vectors of data_dict in place.

    velocity/std: code -> m/s with scale sound_speed / (c_prf * 65535);
    the code -32768 is a "no data" sentinel mapped to None.
    snr: code (tenths of dB) -> dB; -32768 mapped to None.
    amplitude: code -> volts, compensating averaging and the per-cell gain.
    A negative std codes a Nyquist jump and a negative amplitude codes
    saturation; both flags are collected in local arrays only (discarded).
    """
    sat = array('f')
    ny_jump = array('f')
    # ADC reference voltage
    v_ref = 1.25
    fact_code2velocity = sound_speed / (c_prf * 65535.)
    # print("factor code to velocity %f"%fact_code2velocity)
    tab_gain = calc_gain(n_vol, gain_ca0, gain_ca1, blind_ca0, blind_ca1)
    for i in range(n_vol):
        # Velocity standard deviation (-32768 = no data)
        if data_dict['std'][i] == -32768:
            data_dict['std'][i] = None
        else:
            if data_dict['std'][i] < 0:
                # a negative code flags a Nyquist jump
                ny_jump.append(True)
                data_dict['std'][i] *= -1
            else:
                ny_jump.append(False)
            data_dict['std'][i] = data_dict['std'][i]*fact_code2velocity
        # Velocity (-32768 = no data)
        if data_dict['velocity'][i] == -32768:
            data_dict['velocity'][i] = None
        else:
            data_dict['velocity'][i] *= fact_code2velocity
        # SNR Doppler, coded in tenths of dB (-32768 = no data)
        if data_dict['snr'][i] == -32768:
            data_dict['snr'][i] = None
        else:
            data_dict['snr'][i] /= 10.
        # Echo amplitude: a negative code flags saturation
        if data_dict['amplitude'][i] < 0:
            sat.append(True)
            data_dict['amplitude'][i] *= -1
        else:
            sat.append(False)
        # code -> volts: ADC scale, averaging and per-cell gain compensation
        data_dict['amplitude'][i] *= ((v_ref*2)/4096) / sqrt(n_avg) / tab_gain[i]
def conversion_scalar(data_dict):
    """Convert the scalar sensor values of data_dict in place to SI units:
    temperature from Celsius to Kelvin, pitch and roll from degrees to radians.
    """
    # Celsius -> Kelvin
    data_dict["temp"] += 273.15
    # degrees -> radians
    to_radians = pi / 180.
    data_dict['pitch'] *= to_radians
    data_dict['roll'] *= to_radians
def conversion_us_scalar(self, data_dict, n_avg, r_dvol, r_vol1):
    """Convert the coded US scalar values of data_dict in place.

    NOTE(review): this module-level function takes a stray 'self' first
    argument (apparently copied from a method); callers must pass a dummy
    value. Left unchanged here to preserve the call signature.

    gain_ca1/gain_ca0 codes are replaced by a1 (dB/m) and a0 (dB, referenced
    to the first cell position r_vol1); coded noise levels noise_g_max /
    noise_g_mid are replaced by noise_g_high / noise_g_low in volts.
    """
    # convert coded gain to dB and dB/m
    data_dict["a1"] = convert_code2dB_m(data_dict["gain_ca1"], r_dvol)
    del data_dict["gain_ca1"]
    # a0 is referenced back to the first cell position (r_vol1)
    data_dict["a0"] = convert_code2dB(data_dict["gain_ca0"])-data_dict["a1"]*r_vol1
    del data_dict["gain_ca0"]
    # convert coded noise values to V
    v_ref = 1.25
    gain = pow(10, ((_convert_code2dB_trunc(1241)) / 20.)) # gain max
    data_dict["noise_g_high"] = sqrt(data_dict["noise_g_max"]) * ((v_ref*2)/4096) / sqrt(n_avg) / gain
    del data_dict["noise_g_max"]
    gain = pow(10, ((_convert_code2dB_trunc(993)) / 20.)) # gain max - 10dB
    data_dict["noise_g_low"] = sqrt(data_dict["noise_g_mid"]) * ((v_ref*2)/4096) / sqrt(n_avg) / gain
    del data_dict["noise_g_mid"]

View File

@ -0,0 +1,334 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer, Marie Burckbuchler
import struct # Struct est utilisée pour extraite les données séries
import serial # Utilisé pour récuperer les donnée vennant de la liaison Série RS485
from sys import platform
import traceback
import logging
from time import time, sleep
from .apf04_exception import apf04_error, apf04_exception
from .modbus_crc import crc16
def hex_print (_bytes):
    """ @brief print a byte array as a lowercase hexadecimal string
    """
    hex_repr = ''.join('%02x' % byte for byte in _bytes)
    print(hex_repr)
def autodetect_usb_device() :
    """ @brief Auto-detect the serial port of the APF04.
    @return the detected device port (e.g. '/dev/ttyUSB0')
    @raise apf04_error (1000) when no serial device can be found
    Scans the serial ports (linux / Mac OS / cygwin only) and picks the port
    whose USB VID:PID matches a known RS485 adapter; otherwise falls back to
    the first unknown serial device found.
    """
    # In case of a *nux system, we can find the port of the APF04
    # automatically thanks to the serial.tools.list_ports library
    # and knowing that the RS485 to USB adapter has PID:VID = 0403:6001
    usb_device = None
    # Known USB chips defined by VID/PID
    known_chips ={"0403:6001", "1486:5523", "1A86:5523", "1A86:7523"}
    if platform in ["linux","linux2","darwin","cygwin"]: # linux and Mac OS
        import serial.tools.list_ports as lPort
        reslt = lPort.comports()
        for res in reslt:
            logging.debug("checking %s / %s"%(res[0],res[2]))
            # get USB device id from the port description ("... VID:PID=xxxx:yyyy ...")
            try:
                device_id = res[2].split("VID:PID=")[1].split(" ")[0]
                logging.debug("usb_device_id = %s"%device_id)
            except:
                device_id = None
            # check if the device is known
            if device_id in known_chips : # USB dongle, with or without power supply
                logging.debug("APF04 detected on serial port: %s", res[2])
                usb_device = res[0]
            elif usb_device == None : #if no device has been detected yet
                print("unknown device detected on serial port: %s (the last found will be selected)"%(res))
                print("You should add the device manually in 'known_chips' dict")
                usb_device = res[0]
    # for platform == "cygwin" and "win32", the serial port should be modified manually:
    # for example "/dev/ttyS3" on cygwin or "COM10" on Windows
    if usb_device is None : # usb device could not be detected
        logging.critical("USB device cannot be detected automatically, check the wiring or specify the device port.")
        raise apf04_error (1000, "No device port defined.")
    return usb_device
class Apf04Modbus ():
    """ @brief Modbus communication layer.
    Modbus is big-endian (default); addressing is done in 16-bit words.
    """
    def __init__(self, _baudrate=None, _dev=None):
        """ @brief Initialise the communication layer of the instrument.
        @param _baudrate : communication speed, e.g. 230400, 115200, 57600 ...
        (no connection is opened when None)
        @param _dev : serial device port; auto-detected when None
        """
        # Default device address on modbus
        self.apf04_addr = 0x04
        # block reads and writes are segmented in chunks of 123 words:
        # Modbus limits blocks to a maximum of 123 words when writing
        # and 125 words when reading
        self.max_seg_size = 123
        logging.debug("Platform is %s", platform)
        self.usb_device = _dev
        if self.usb_device is None :
            print ("Getting the USB device automatically")
            self.usb_device = autodetect_usb_device()
        logging.debug("usb_device is at %s with baudrate %s"%(self.usb_device, _baudrate))
        if _baudrate :
            self.connect(_baudrate)
        # In order to reduce serial latency of the linux driver, you may set the ASYNC_LOW_LATENCY flag :
        # setserial /dev/<tty_name> low_latency
        logging.debug("end init")
    def connect (self, _baudrate):
        """ @brief Open the serial link at the given baudrate.
        @param _baudrate : communication speed in bits per second
        @raise apf04_error (1005) when the port cannot be opened
        """
        try :
            # Create an instance of the Peacock's driver at a given baudrate
            self.ser = serial.Serial(self.usb_device, _baudrate, timeout=0.5, \
                bytesize=8, parity='N', stopbits=1, xonxoff=0, rtscts=0)
            # serial timeout is set to 500 ms. This can be changed by setting
            # self.ser.timeout to balance between performance and efficiency
        except serial.serialutil.SerialException :
            raise apf04_error (1005, "Unable to connect to the device.")
    def __del__(self):
        """ @brief close serial port if necessary """
        try : # in case the constructor crashed before self.ser existed
            self.ser.close()
        except :
            pass
    def autobaud (self):
        """ @brief automatically detect the baudrate
        @return baudrate if detected, None instead
        If the baudrate is found, the connexion to the device is active
        WARNING : be carefull, this method is not robust, you may try several times to get the baudrate
        """
        # Scan available baudrates for the Peacock UVP
        for baudrate in [57600, 115200, 230400, 750000]:
            try:
                logging.debug("try if baudrate = %d"%baudrate)
                self.connect(baudrate)
                # Read the firmware version
                self.read_i16(0)
            except:
                # if failed, the baudrate is wrong
                self.ser.close()
                continue
            # if success, the baudrate is correct
            return baudrate
        logging.debug("Fail to detect the baudrate automatically")
        return None
    def __check_addr_range(self, _begin, _size):
        """ @brief check if the address range is allowed
        @param _begin : start address in bytes
        @param _size : block size in words (16 bits)
        """
        addr_ram_begin = 0x0000
        addr_ram_end = 0x07FF
        addr_reg_action = 0xFFFD # also defined as ADDR_ACTION in apf04_addr_cmd.py
        # memory block addresses :
        assert(_begin>=addr_ram_begin)
        if _begin>addr_ram_end:
            # NOTE(review): as written this rejects a 1-word access to the
            # action register above RAM — confirm the condition is not meant
            # to be the opposite (allow only that access).
            assert _begin!=addr_reg_action and _size!=1, "Warning, access at %d, size= %d bytes not allowed"%(_begin, _size)
    def __read__(self, _size, _timeout=0.0):
        """ @brief Low level read method
        @param _size number of bytes to read
        @param _timeout : extra time (in seconds) allowed to complete the read
        @return the bytes read
        @raise apf04_error (2002, 1010) / apf04_exception (2003, 2004)
        """
        if _size == 0:
            raise apf04_error(2002, "ask to read null size data." )
        try :
            read_data = b''
            start_time = time()
            # the read of modbus is not interuptible
            while (True):
                read_data += self.ser.read(_size)
                if len (read_data) == _size or time() - start_time > _timeout:
                    break
        except serial.serialutil.SerialException:
            #self.log("hardware apparently disconnected")
            #read_data = b''
            raise apf04_error(1010, "Hardware apparently disconnected." )
        if len (read_data) != _size :
            if len (read_data) == 0:
                logging.debug ("WARNING timeout, no answer from device")
                raise apf04_exception(2003, "timeout : device do not answer (please check cable connexion, timeout or baudrate)" )
            else :
                logging.debug ("WARNING, uncomplete answer from device (%d/%d)"%(len (read_data), _size))
                raise apf04_exception(2004, "timeout : uncomplete answer from device (please check timeout or baudrate) (%d/%d)"%(len (read_data), _size))
        return read_data
    ############## Read functions ###############################################
    def read_i16 (self, _addr):
        """ @brief Read one word (signed 16 bits)
        @param _addr : data address (given in bytes)
        @return : integer
        """
        # data are transmitted in big endian
        return struct.unpack(">h",self.read_seg_16(_addr , 1))[0]
    def read_list_i16(self, _addr, _size):
        """ @brief Read several words (signed 16 bits)
        @param _addr : data address (given in bytes)
        @param _size : number of word to read
        @return : list of integers
        """
        # TODO use read_buf_i16
        return struct.unpack(">%dh"%_size,self.read_seg_16(_addr , _size))
    # TODO make this method private
    def read_seg_16(self, _addr, _size):
        """ @brief Low level read (in a single modbus frame)
        @param _addr : data address (given in bytes)
        @param _size : number of word to read
        @return : byte array
        """
        assert (_size <= self.max_seg_size) # segment of 125 words (maximum for a read)
        logging.debug ("reading %d words at %d"%(_size, _addr))
        # modbus function 3 is used to read the bytes
        #self.__check_addr_range(_addr, 2 * _size)
        # request read
        read_query = struct.pack(">BBHh",self.apf04_addr, 0x03, _addr, _size )
        read_query += struct.pack(">H",crc16 (read_query) )
        #print ("read query = ")
        #hex_print(read_query)
        try :
            self.ser.write(read_query)
        except serial.serialutil.SerialException:
            #self.log("hardware apparently disconnected")
            # TODO handle the various errors; switch to 3 MBaud on R0W (lots of buffer overflows!)
            raise apf04_error(1010, "Hardware apparently disconnected." )
        # read answer header: slave address, function code, byte count
        slave_response = self.__read__(3)
        if slave_response[1] != 3:
            logging.info ("WARNING error while reading %s"%slave_response)
        # read the payload (byte count announced in the header) plus the 2 CRC bytes
        slave_response += self.__read__(slave_response[2]+2)
        #print ("slave answer = ")
        #hex_print(slave_response)
        # check crc
        #print ("%X"%crc16 (slave_response[0:-2]))
        #print ("%X"%struct.unpack(">H",slave_response[-2:]))
        assert (crc16 (slave_response[0:-2]) == struct.unpack(">H",slave_response[-2:])[0])
        return slave_response[3:-2]
    def read_buf_i16 (self, _addr , _size):
        """ @brief Read buffer
        @param _addr : data address (given in bytes)
        @param _size : number of word to read
        @return : byte array
        Note : data are transmitted in big endian
        """
        data = b''
        addr = _addr
        remind = _size
        logging.debug ("reading %d words at %d"%(_size, _addr))
        # read full segments of max_seg_size words, then the remainder
        while remind :
            logging.debug ("remind = %s ; self.max_seg_size = %s ; div : %s"%(remind, self.max_seg_size, remind/self.max_seg_size))
            if remind/self.max_seg_size>=1:
                logging.debug ("read max_seg_size")
                seg_size=self.max_seg_size
            else :
                seg_size = remind
                logging.debug ("read remind")
            data+=self.read_seg_16(addr , seg_size)
            addr+=seg_size # addr counted in 16-bit words
            remind-=seg_size
        #print( "__Read_buf : %d readed"%(int(addr - _addr)) )
        return data
    ############## Write functions ##############################################
    def write_i16 (self, _value, _addr, _timeout=0.0):
        """ @brief Write one word (signed 16 bits)
        @param _value : value of the word
        @param _addr : destination data address (given in bytes)
        @param _timeout : extra time (in seconds) allowed for the device answer
        @raise apf04_error (3000) on unexpected failure
        """
        try:
            self.write_buf_i16 ([_value], _addr, _timeout)
        except apf04_exception as ae:
            raise ae # apf04_exception are simply raised upper
        except :
            print(traceback.format_exc())
            raise apf04_error(3000, "write_i16 : FAIL to write 0%04x at %d\n"%(_value, _addr))
    def write_buf_i16 (self, _data, _addr, _timeout=0.0):
        """ @brief Write buffer
        @param _data : list of words (max size : 123 words)
        @param _addr : data address (given in bytes)
        @param _timeout : extra time (in seconds) allowed for the device answer
        @raise apf04_error (3004, 3001) / apf04_exception
        """
        # CAUTION: no loop over a "write_seg_16" here because large data
        # blocks never need to be written;
        # segmented in blocks of 123 words (maximum for a write)
        assert (len(_data)<=self.max_seg_size)
        try:
            # build the write request (modbus function 16) followed by its CRC
            write_query = struct.pack(">BBHhB%sh"%len(_data),self.apf04_addr, 16, _addr, len(_data), 2*len(_data), *_data )
            write_query += struct.pack(">H",crc16 (write_query) )
            try:
                #print (write_query)
                self.ser.write(write_query)
            except serial.serialutil.SerialException:
                logging.error("hardware apparently disconnected")
                raise apf04_error(3004, "write_buf_i16 : hardware apparently disconnected")
            # read answer
            slave_response = self.__read__(2, _timeout)
            # TODO 1 : error frame format and error codes actually handled
            if slave_response[1] == 16 :
                slave_response += self.__read__(6)
                # TODO in principle we should check that the right number of words was written
            else:
                # TODO handle the errors according to the documentation
                size = struct.unpack("B",self.__read__(1))[0]
                print ("size following : %d"%size)
                self.__read__(size)
                print("error while writing")
                print (slave_response)
        except apf04_exception as ae:
            raise ae # apf04_exception are simply raised upper
        except :
            print(traceback.format_exc())
            raise apf04_error(3001, "write_buf_i16 : Fail to write")

View File

@ -0,0 +1,50 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
from datetime import datetime, timezone, timedelta
from time import mktime
from struct import pack, unpack, calcsize
# time ZERO (Ubertone Epoch)
UBT_EPOCH = datetime(2020, 1, 1, tzinfo=timezone.utc)


def encode_timestamp(_datetime):
    """Encode a timestamp as three consecutive int16 words.

    Args:
        _datetime: the timestamp to encode
    Returns:
        bytes with the encoded timestamp

    Word layout (native endianness):
      word 0: seconds since the Ubertone epoch, bits 15 and above
      word 1: seconds since the Ubertone epoch, low 15 bits
      word 2: sub-second part, in milliseconds
    Ubertone's epoch is 01/01/2020 starting from version 2.01.
    """
    # seconds elapsed since the Ubertone epoch; mktime works in local time,
    # the local offsets cancel out in the subtraction
    elapsed = mktime(_datetime.timetuple()) + _datetime.microsecond/1e6 - mktime(UBT_EPOCH.timetuple())
    whole_seconds = int(elapsed)
    milliseconds = int(1000. * (elapsed % 1))
    # DIRECTIVE WARNING: keep three plain int16 ('hhh'); mixing int16 and
    # int32 fields would let the machine align data on 32 bits and pad the
    # frame (a "hih" pack inserts 0x0000 between the fields).
    # TODO force the endianness ?
    return pack("hhh", (whole_seconds >> 15) & 0x0000FFFF, whole_seconds & 0x00007FFF, milliseconds)


def decode_timestamp(_encoded_datetime):
    """Extract a timestamp from a byte array.

    Args:
        _encoded_datetime: buffer starting with three encoded int16 words
    Returns:
        (decoded datetime, number of bytes consumed)
    """
    header_size = calcsize('hhh')
    seconds_high, seconds_low, milliseconds = unpack('hhh', _encoded_datetime[0:header_size])
    elapsed_seconds = (int(seconds_high) << 15) | seconds_low
    return UBT_EPOCH + timedelta(seconds=elapsed_seconds, milliseconds=milliseconds), header_size

View File

@ -0,0 +1,19 @@
#!/usr/bin/env python
# -*- coding: UTF_8 -*-
# from types import IntType
def cast_int16 (_value):
    """Round _value to the nearest integer and clamp it into the signed
    16-bit range [-32768, 32767]."""
    rounded = int(round(_value))
    return max(-32768, min(32767, rounded))
def cast_uint16 (_value):
    """Round _value to the nearest integer and clamp it into the unsigned
    16-bit range [0, 65535]."""
    rounded = int(round(_value))
    return min(65535, max(0, rounded))

View File

@ -0,0 +1,46 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @author Stéphane Fischer
def __generate_crc16_table():
    """ Generate the 256-entry lookup table for the reflected CRC-16
    polynomial 0xa001 (Modbus).
    .. note:: This will only be generated once
    src : pymodbus
    """
    table = []
    for seed in range(256):
        crc = 0x0000
        value = seed
        for _ in range(8):
            if (value ^ crc) & 0x0001:
                crc = (crc >> 1) ^ 0xa001
            else:
                crc >>= 1
            value >>= 1
        table.append(crc)
    return table


__crc16_table = __generate_crc16_table()


def crc16(data):
    """ Compute the Modbus RTU crc16 of the passed-in bytes. For modbus,
    this is only used on the binary serial protocols (in this case RTU).
    The difference between modbus's crc16 and a normal crc16
    is that modbus starts the crc value out at 0xffff.
    The result is returned byte-swapped so that packing it big-endian
    yields the on-wire transmission order.
    :param data: The data to create a crc16 of
    :returns: The calculated CRC, byte-swapped
    src : pymodbus
    crc16 (modbus) verification :
    https://crccalc.com/
    https://www.lammertbies.nl/comm/info/crc-calculation
    """
    crc = 0xffff
    for octet in data:
        crc = ((crc >> 8) & 0xff) ^ __crc16_table[(crc ^ octet) & 0xff]
    return ((crc << 8) & 0xff00) | ((crc >> 8) & 0x00ff)

View File

@ -0,0 +1,70 @@
from array import array
import numpy as np
from Model.peacock_uvp.apf04_gain import calc_gain, convert_code2dB_m, convert_code2dB, _convert_code2dB_trunc
class apf04_hardware ():
    def conversion_profile(self, vectors_dict, sound_speed, n_vol, n_avg, c_prf, gain_ca0, gain_ca1, blind_ca0, blind_ca1):
        """Convert the raw coded US profile vectors in place to SI units.

        Args:
            vectors_dict (dict): unique vectors keyed by datatype, modified in place
            sound_speed (float): sound speed used for this measurement
            n_vol, n_avg, c_prf, gain_ca0, gain_ca1 (floats): parameters of the
                ongoing param_us (one config, one channel): number of cells,
                measures per block, coded PRF, gain intercept and gain slope
            blind_ca0, blind_ca1 (floats): intercept and slope of the gain
                limitation in the blind zone
        Returns:
            None
        """
        #APF04 or APF04S
        self.sat = array('f')
        self.ny_jump = array('f')
        ref_voltage = 1.25
        # per-cell gain compensation table
        gain_table = calc_gain(n_vol, gain_ca0, gain_ca1, blind_ca0, blind_ca1)
        code2vel = sound_speed / (c_prf * 65535.)
        # a negative raw velocity standard deviation flags a Nyquist jump
        self.ny_jump = vectors_dict['std'] < 0
        # raw velocity standard deviation and raw velocity -> m/s
        vectors_dict['std'] = (np.absolute(vectors_dict['std']) - 1) * code2vel
        vectors_dict['velocity'] = vectors_dict['velocity'] * code2vel
        # a negative raw echo amplitude flags saturation
        vectors_dict['sat'] = vectors_dict['amplitude'] < 0
        # raw echo amplitude -> volts, compensating averaging and per-cell gain
        vectors_dict['amplitude'] = np.absolute(vectors_dict['amplitude']) * ((ref_voltage*2)/4096) / np.sqrt(n_avg) / gain_table
        # raw snr (tenths of dB) -> dB
        vectors_dict['snr'] = vectors_dict['snr'] / 10.
    def conversion_us_scalar(self, scalars_dict, n_avg, r_dvol, r_vol1):
        """Convert the raw coded scalar US values in place to SI units.

        Args:
            scalars_dict (dict): scalar US values keyed by datatype, modified in place
            n_avg, r_dvol, r_vol1 (floats): parameters of the ongoing param_us
                (one config, one channel): number of measurements per block,
                intercell distance and first cell position
        Returns:
            None
        """
        # coded gain -> slope a1 (dB/m) and intercept a0 (dB, referenced to the first cell)
        scalars_dict["a1"] = convert_code2dB_m(scalars_dict["gain_ca1"], r_dvol)
        del scalars_dict["gain_ca1"]
        scalars_dict["a0"] = convert_code2dB(scalars_dict["gain_ca0"]) - scalars_dict["a1"] * r_vol1
        del scalars_dict["gain_ca0"]
        # coded noise levels -> volts
        ref_voltage = 1.25
        max_gain = pow(10, ((_convert_code2dB_trunc(1241)) / 20.)) # gain max
        scalars_dict["noise_g_high"] = scalars_dict["noise_g_max"] * ((ref_voltage*2)/4096) / np.sqrt(n_avg) / max_gain
        del scalars_dict["noise_g_max"]
        mid_gain = pow(10, ((_convert_code2dB_trunc(993)) / 20.)) # gain max - 10dB
        scalars_dict["noise_g_low"] = scalars_dict["noise_g_mid"] * ((ref_voltage*2)/4096) / np.sqrt(n_avg) / mid_gain
        del scalars_dict["noise_g_mid"]

View File

@ -0,0 +1,127 @@
#!/usr/bin/env python
# -*- coding: UTF_8 -*-
from math import pow
APF06_RECEPTION_CHAIN_CONSTANT_GAIN = 14.6 # dB
APF06_GAIN_CODE_RATIO = 1.5e-3 # dB/quantum
APF06_CODE_MAX_APPLIED = 32767
APF06_CODE_MAX_USER = 65535
APF06_CODE_MIN_USER = -65535
APF06_CODE_MIN_APPLIED = 1280


def convert_dB_m2code(_gain_dB, _r_dvol):
    """Convert a gain slope a1 (dB/m) to its code ca1, truncated to the user range.
    (difference with APF04 : the 4-bit shift is not used)

    Args:
        _gain_dB(float): gain slope in dB/m
        _r_dvol(float): inter-volume size in m
    Returns:
        code (int)
    """
    raw_code = (_gain_dB * _r_dvol) / APF06_GAIN_CODE_RATIO
    return _truncate(int(round(raw_code, 1)), APF06_CODE_MAX_USER, APF06_CODE_MIN_USER)


def convert_code2dB_m(_code, _r_dvol):
    """Convert any code ca1 to a gain slope a1 (dB/m).
    (difference with APF04 : the 4-bit shift is not used)

    Args:
        _code(int): gain code
        _r_dvol(float): inter-volume size in m
    Returns:
        gain slope in dB/m (float)
    """
    return _code * (APF06_GAIN_CODE_RATIO / _r_dvol)


def convert_dB2code(_gain_dB):
    """Convert a gain intercept (dB) to its code, truncated to the available range.

    Args:
        _gain_dB(float): gain intercept in dB
    Returns:
        gain code (int)
    """
    raw_code = (_gain_dB - APF06_RECEPTION_CHAIN_CONSTANT_GAIN) / APF06_GAIN_CODE_RATIO
    return _truncate(int(round(raw_code, 1)), APF06_CODE_MAX_APPLIED, APF06_CODE_MIN_USER)


def convert_code2dB(_code):
    """Convert any code to a theoretical gain intercept (dB).

    Args:
        _code(int): gain code
    Returns:
        gain intercept in dB (float)
    """
    return APF06_RECEPTION_CHAIN_CONSTANT_GAIN + _code * APF06_GAIN_CODE_RATIO


def _convert_code2dB_trunc(_code):
    """Effective (truncated) gain in dB applied in a cell for a given code.

    Args:
        _code (int): gain code
    Returns:
        gain in dB applied in a cell
    """
    applied_code = _truncate(_code, APF06_CODE_MAX_APPLIED, APF06_CODE_MIN_APPLIED)
    return convert_code2dB(applied_code)


def calc_gain(_n_vol, _gain_ca0, _gain_ca1, _gain_max_ca0, _gain_max_ca1):
    """Build the table of linear amplitude gains applied to each cell of the profile.
    (difference with APF04 : the 4-bit shift is not used)

    Args:
        _n_vol(int): number of cells in the profile
        _gain_ca0(int): code of the gain intercept
        _gain_ca1(int): code of the gain slope
        _gain_max_ca0(int): code of the blind zone gain limit intercept
        _gain_max_ca1(int): code of the blind zone gain limit slope
    Returns:
        list of linear gains (10^(dB/20)) to apply to each cell of the profile
    """
    tab_gain = []
    for cell in range(_n_vol):
        cell_gain = _convert_code2dB_trunc(_gain_ca0 + cell * _gain_ca1)
        blind_limit = _convert_code2dB_trunc(_gain_max_ca0 + cell * _gain_max_ca1)
        # the blind zone limitation caps the programmed gain
        tab_gain.append(pow(10, min(cell_gain, blind_limit) / 20.))
    return tab_gain


def _truncate(value, limit_max, limit_min):
    """Clamp value into [limit_min, limit_max].

    Args:
        value: value to clamp
        limit_max: max limit
        limit_min: min limit
    Returns:
        the clamped value
    """
    if value > limit_max:
        return limit_max
    if value < limit_min:
        return limit_min
    return value

View File

@ -0,0 +1,61 @@
from array import array
import numpy as np
from .apf06_gain import calc_gain, convert_code2dB_m, convert_code2dB, APF06_CODE_MAX_APPLIED
class apf06_hardware ():
    def conversion_profile(self, vectors_dict, sound_speed, n_vol, n_avg, c_prf, gain_ca0, gain_ca1, blind_ca0, blind_ca1):
        """Convert the raw coded US profile vectors in place to SI units.

        Args:
            vectors_dict (dict): unique vectors keyed by datatype, modified in place
            sound_speed (float): sound speed used for this measurement
            n_vol, n_avg, c_prf, gain_ca0, gain_ca1 (floats): parameters of the
                ongoing param_us (one config, one channel)
            blind_ca0, blind_ca1 (floats): blind zone gain limitation — accepted
                but not used yet
        Returns:
            None
        """
        #APF04 or APF04S
        self.sat = array('f')
        self.ny_jump = array('f')
        code2vel = sound_speed / (c_prf * 65535.)
        # the blind zone limitation is not used yet: the gain codes are
        # deliberately passed twice instead of blind_ca0/blind_ca1
        gain_table = calc_gain(n_vol, gain_ca0, gain_ca1, gain_ca0, gain_ca1) #blind_ca0, blind_ca1)
        vectors_dict['velocity'] = vectors_dict['velocity'] * code2vel
        # a negative raw echo amplitude flags saturation
        vectors_dict['sat'] = vectors_dict['amplitude'] < 0
        # raw echo amplitude -> volts, compensating the per-cell gain
        vectors_dict['amplitude'] = np.absolute(vectors_dict['amplitude']) * (2./4096) / gain_table
        # raw snr (tenths of dB) -> dB
        vectors_dict['snr'] = vectors_dict['snr'] / 10.
    def conversion_us_scalar(self, scalars_dict, n_avg, r_dvol, r_vol1):
        """Convert the raw coded scalar US values in place to SI units.

        Noise conversion is not implemented yet: noise_g_high / noise_g_low
        are set to 0 and the coded inputs are dropped.

        Args:
            scalars_dict (dict): scalar US values keyed by datatype, modified in place
            n_avg, r_dvol, r_vol1 (floats): parameters of the ongoing param_us
        Returns:
            None
        """
        # coded gain -> slope a1 (dB/m) and intercept a0 (dB, referenced to the first cell)
        scalars_dict["a1"] = convert_code2dB_m(scalars_dict["gain_ca1"], r_dvol)
        del scalars_dict["gain_ca1"]
        scalars_dict["a0"] = convert_code2dB(scalars_dict["gain_ca0"]) - scalars_dict["a1"] * r_vol1
        del scalars_dict["gain_ca0"]
        # coded noise values to V : not implemented yet
        scalars_dict["noise_g_high"] = 0
        del scalars_dict["noise_g_max"]
        scalars_dict["noise_g_low"] = 0
        del scalars_dict["noise_g_mid"]

View File

@ -0,0 +1,133 @@
#!/usr/bin/env python3
# -*- coding: UTF_8 -*-
import warnings
import json
import os
# The import script raises a Warning whenever no parameter matches.
# The Warning names the imported file concerned and reports the value
# that was not accepted.
def warning_style(message, category, filename, lineno, file=None, line=None):
    """Custom warnings formatter producing ' <filename>:<lineno>: <Category>: <message>\\n'."""
    return ' {}:{}: {}: {}\n'.format(filename, lineno, category.__name__, message)
warnings.formatwarning = warning_style
def clean_type(measure_type_string):
    """
    Normalize a measure-type label for lookup.

    Lower-cases the input, replaces every '-' by '_', strips leading and
    trailing spaces, then replaces the remaining (internal) spaces by
    underscores.

    Fixed: the original character-by-character stripping loop raised
    IndexError on an empty or all-space input; str.strip(" ") handles
    those cases and returns "".

    Parameters
    ----------
    measure_type_string: str
        The input, what we want to clean.

    Returns
    -------
    res: str
        What we just cleaned.
    """
    res = measure_type_string.lower().replace("-", "_")
    # remove spaces at beginning and end of the string (safe on empty input)
    res = res.strip(" ")
    # replace white spaces in the middle of the string by underscores
    return res.replace(" ", "_")
def translate_key(raw_key, _type="data"):
    """
    Translate data_types through a translation json file. Returns None if raw_key not in json file.

    Fixed: the file handle was opened without ever being closed; a 'with'
    block now guarantees it is released.

    Parameters
    ----------
    raw_key: string
    _type: string
        section of the translation file to search ("data", "param_var", ...)

    Returns
    -------
    translated_key: string or None
    """
    translated_key = None
    # use of a json file for the translation in valid data type names for the DB
    _translation_path = os.path.dirname(os.path.realpath(__file__)) + '/translation.json'
    with open(_translation_path) as f:
        translation_dict = json.load(f)[_type]
    for key, value in translation_dict.items():
        # leave unchanged the already valid data type names,
        # translate those which are translatable
        if raw_key == key:
            translated_key = key
            break
        elif value["alter_ego"] is not None:
            if raw_key in value["alter_ego"]:
                translated_key = key
                break
    #if translated_key == None:
    #	print("delete %s"%raw_key)
    return translated_key
def translate_paramdict(param_dict):
    """
    Parse a dict, translate the param names and clean out those not to be
    imported in the ORM.

    A name is first looked up in the "param_var" namespace and then, when
    absent, in "param_const"; untranslatable names are dropped.
    (Improved: each namespace is now queried once per key instead of twice.)

    Parameters
    ----------
    param_dict: dict

    Returns
    -------
    translated_param_dict: dict
    """
    translated_param_dict = {}
    for key, elem in param_dict.items():
        cleaned = clean_type(key)
        translated = translate_key(cleaned, _type="param_var")
        if translated is None:
            translated = translate_key(cleaned, _type="param_const")
        if translated is not None:
            translated_param_dict[translated] = elem
    return translated_param_dict
def translate_datadictslist(data_dicts):
    """
    Parse a list of dicts, translate their data_types and drop those not to
    be imported in the ORM. Can be used on scalar or vector dicts.

    Note: the 'name' key of each input dict is rewritten in place (cleaned
    and translated, or None when the type is unknown), as in the original
    implementation.

    Parameters
    ----------
    data_dicts: list(dict)

    Returns
    -------
    importable_data_dicts: list(dict)
        the input dicts whose translated name is known
    """
    importable_data_dicts = []
    for data_dict in data_dicts:
        # standardisation of the data type name (lower case, underscores)
        # then translation (cf switcher in convert_type.py); unknown -> None
        data_dict['name'] = translate_key(clean_type(data_dict['name']), _type="data")
        # keep only the entries whose type exists in the importable list
        # (the original appended first and deleted by index afterwards)
        if data_dict['name'] is not None:
            importable_data_dicts.append(data_dict)
    return importable_data_dicts

View File

@ -0,0 +1,12 @@
#!/usr/bin/env python3
# -*- coding: UTF_8 -*-
from dateutil.parser import parse
def date_parse(date_str):
    """Parse a date string, inferring the field order from its prefix.

    A string starting with four digits is taken as year-first
    (e.g. "2019-12-09"); anything else as day-first (e.g. "09-12-2019").
    """
    year_leading = date_str[0:4].isdigit()
    if year_leading:
        return parse(date_str, yearfirst=True, dayfirst=False)
    return parse(date_str, dayfirst=True, yearfirst=False)

View File

@ -0,0 +1,113 @@
# -*- coding: UTF_8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
import json
from datetime import datetime # pour time count
from .ubt_raw_file import ubt_raw_file
from .ubt_raw_data import ubt_raw_data
from .ubt_raw_flag import *
def raw_extract(_raw_file):
    """Extract the data from a raw.udt file and convert it to dicts which are
    easy to go through and to import in the DB.

    Parameters
    ----------
    _raw_file : string
        path to the .udt file

    Returns
    -------
    device_name : string
    time_begin : datetime
        timestamp of the first profile
    time_end : datetime
        timestamp of the last profile
    param_us_dicts : dict
        param_us for us related data, by config then receiving channel
    data_us_dicts : dict
        data, us related, with param_us associated
    data_dicts : dict
        data not us related, with no param_us associated
    settings_dict : dict
        settings as read in the file

    Raises
    ------
    EOFError
        when the file contains no profile chunk (the original code failed
        with a NameError in that case)
    """
    fileraw = ubt_raw_file(_raw_file)
    profile_id = 0
    timestamp = None
    time_begin = None
    try:
        while True:
            flag, size, data = fileraw.read_chunk()
            # Raw UDT005 files (UB-Lab P, UB-SediFlow, UB-Lab 3C) may hold
            # 4 kinds of chunks: const, settings json, HW configs, profiles.
            if flag == CONST_TAG:
                const_dict = _lenient_json_loads(data)
                print("const: %s" % const_dict)
                ubt_data = ubt_raw_data(const_dict)
            if flag == SETTINGS_JSON_TAG:
                settings_dict = _lenient_json_loads(data)
                print("settings: %s" % settings_dict)
                ubt_data.set_config(settings_dict)
            if flag == CONFIG_TAG:
                # what is needed from here and which is not in param_us_dict
                # is only blind_ca0 and blind_ca1
                # note: this is not useful on APF06, but could be used for double check
                ubt_data.set_confighw(size, data)
            if flag == PROFILE_TAG or flag == PROFILE_INST_TAG:
                timestamp = ubt_data.read_line(size, data, flag == PROFILE_INST_TAG)
                profile_id += 1
                # first timestamp of the udt file defines time_begin of the run
                if profile_id == 1:
                    time_begin = timestamp
    except KeyboardInterrupt:
        print("read interrupted by user")
    except EOFError:
        print("End of file")
    except:
        # anything else is unexpected: report and re-raise
        print("Error")
        raise
    #print("%d profiles read" % profile_id)
    # last timestamp of the udt file defines time_end of the run
    # (based on the last profile processed)
    if timestamp is None:
        raise EOFError("no profile found in %s" % _raw_file)
    time_end = timestamp
    return (
        const_dict["product_id"],
        time_begin,
        time_end,
        ubt_data.param_us_dicts,
        ubt_data.data_us_dicts,
        ubt_data.data_dicts,
        settings_dict,
    )


def _lenient_json_loads(_data):
    """Decode a JSON chunk, tolerating Python-style dict reprs.

    Some firmwares write dict reprs instead of JSON: single quotes and
    True/False literals. Try strict JSON first, then the repaired text.
    """
    text = _data.decode("utf-8")
    try:
        return json.loads(text)
    except ValueError:  # narrowed from the original bare except
        return json.loads(
            text.replace("'", '"')
            .replace("True", "true")
            .replace("False", "false")
        )

View File

@ -0,0 +1,205 @@
{
"data" :
{
"echo_avg_profile": {
"alter_ego":["amplitude"],
"unit": "V",
"graph_title": "Echo"
},
"velocity_avg_profile": {
"alter_ego":["velocity"],
"unit": "m/s",
"graph_title": "Velocity"
},
"snr_doppler_avg_profile": {
"alter_ego":["snr"],
"unit": "dB",
"graph_title": "SNR <br> Doppler"
},
"velocity_std_profile": {
"alter_ego":["std_velocity", "std"],
"unit": "m/s",
"graph_title": "Velocity <br> standard deviation"
},
"turbidity_avg_profile": {
"alter_ego":["turbi"],
"unit": "1/m"
},
"saturation_avg_profile": {
"alter_ego":["saturation"],
"unit": null
},
"ny_jump_avg_profile": {
"alter_ego":null,
"unit": null
},
"saturation_profile": {
"alter_ego":null,
"unit": null
},
"ny_jump_profile": {
"alter_ego":null,
"unit": null
},
"echo_profile": {
"alter_ego":["instamplitude"],
"unit": "V",
"graph_title": "Echo"
},
"velocity_profile": {
"alter_ego":["instvelocity"],
"unit": "m/s",
"graph_title": "Velocity"
},
"snr_doppler_profile": {
"alter_ego":["instsnr"],
"unit": "dB",
"graph_title": "SNR <br> Doppler"
},
"turbidity_profile": {
"alter_ego":["instturbi"],
"unit": "1/m"
},
"temperature": {
"alter_ego":["temp"],
"unit": "K"
},
"sound_speed": {
"alter_ego": null,
"unit": "m/s"
},
"roll": {
"alter_ego": null,
"unit": "rad"
},
"pitch": {
"alter_ego": null,
"unit": "rad"
},
"velocity_avg": {
"alter_ego": ["v_moy"],
"unit": "m/s"
},
"velocity_max": {
"alter_ego": ["v_max"],
"unit": "m/s"
},
"velocity_min": {
"alter_ego": ["v_min"],
"unit": "m/s"
},
"velocity_std": {
"alter_ego": ["v_std"],
"unit": "m/s"
},
"snr_doppler_avg": {
"alter_ego": ["snr_doppler"],
"unit": "dB"
},
"snr_echo_avg": {
"alter_ego": ["snr_echo"],
"unit": "dB"
},
"rate_doppler": {
"alter_ego": ["n_good"],
"unit": "%"
},
"velocity_distribution": {
"alter_ego": ["tests_av_distrib_in_air","tests_av_distrib_in_flume", "distrib"],
"unit": null
},
"noise_g_high": {
"alter_ego": ["noise"],
"unit": "V"
},
"noise_g_low": {
"alter_ego": null,
"unit": "V"
}
},
"param_const":
{
"operator" : {
"alter_ego": null,
"unit": null
},
"comments" : {
"alter_ego": null,
"unit": null
},
"emitter" : {
"alter_ego": ["tr_out"],
"unit": null
},
"receiver" : {
"alter_ego": ["tr_in"],
"unit": null
}
},
"param_var": {
"f0": {
"alter_ego": null,
"unit": "Hz"
},
"v_min": {
"alter_ego": ["v_min_1"],
"unit": "m/s"
},
"v_max": {
"alter_ego": null,
"unit": "m/s"
},
"prf": {
"alter_ego": null,
"unit": "Hz"
},
"r_cell1": {
"alter_ego": ["r_vol1"],
"unit": "m"
},
"r_dcell": {
"alter_ego": ["r_dvol"],
"unit": "m"
},
"n_cell": {
"alter_ego": ["n_vol"],
"unit": null
},
"r_em": {
"alter_ego": null,
"unit": "m"
},
"n_p": {
"alter_ego": ["n_ech"],
"unit": null
},
"v_em": {
"alter_ego": null,
"unit": "V"
},
"n_avg": {
"alter_ego": ["n_profile", "n_profil"],
"unit": null
},
"a0": {
"alter_ego": ["gain_a0"],
"unit": "dB"
},
"a1": {
"alter_ego": ["gain_a1"],
"unit": "dB/m"
},
"phase_coding": {
"alter_ego": null,
"unit": null
},
"static_echo_filter": {
"alter_ego": null,
"unit": null
},
"sound_speed": {
"alter_ego": null,
"unit": "m/s"
}
}
}

View File

@ -0,0 +1,72 @@
# -*- coding: UTF_8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
from copy import deepcopy
from .convert_type import translate_paramdict
def paramus_rawdict2ormdict(settings_dict):
    """Convert a settings dict read from a raw file (webui2, UB-Lab P) to a
    formatted dict for data processing.

    Args:
        settings_dict (dict): original settings dict in raw.udt file

    Returns:
        paramus (dict): keyed by config number (int), with subkey the channel
            number (int) and value the paramus dict of that channel
    """
    # dict of config parameters of channel_id of config_id in this settings dict
    paramus = {}
    # keep order of configuration_order:
    for config_num in settings_dict["global"]["configuration_order"]:
        # trailing digits instead of config_num[-1]: "config10" parsed as 10, not 0
        config_id = _trailing_int(config_num)
        paramus[config_id] = {}
        # shallow copy: the cleanup below must not mutate the caller's settings_dict
        # (the original del'd keys directly inside settings_dict["configs"])
        temp_param = dict(settings_dict["configs"][config_num])
        # flatten sub-dict parts (e.g. gain management):
        key2delete = []
        item2add = {}
        for key, elem in temp_param.items():
            if isinstance(elem, dict):
                for param_key, param_elem in elem.items():
                    item2add[param_key] = param_elem
                key2delete.append(key)
        for key in key2delete:
            del temp_param[key]
        temp_param.update(item2add)
        if "tr_in" not in temp_param.keys():
            # tr_in not defined: monostatic mode, same transducer as tr_out
            temp_param["tr_in"] = temp_param["tr_out"]
        # translate for orm param names:
        temp_param = translate_paramdict(temp_param)
        # add global settings elements:
        temp_param["operator"] = settings_dict["global"]["operator"]
        temp_param["comments"] = settings_dict["global"]["comments"]
        temp_param["sound_speed"] = settings_dict["global"]["sound_speed"]["value"]
        if isinstance(temp_param["receiver"], list):
            # TODO going through a dict loses the receiver order
            for receiver in temp_param["receiver"]:
                # deepcopy is mandatory to avoid multiple references on the same dict
                paramus[config_id][_trailing_int(receiver)] = deepcopy(temp_param)
                paramus[config_id][_trailing_int(receiver)]["receiver"] = receiver
        else:
            paramus[config_id][_trailing_int(temp_param["receiver"])] = temp_param
        #print(sorted(paramus[config_id].keys()))
    return paramus


def _trailing_int(name):
    """Return the integer made of the trailing digits of *name*.

    Replaces the original int(name[-1]) / int(name[2:]) parsing, which
    broke for numbers >= 10 or prefixes longer than two characters.
    """
    i = len(name)
    while i > 0 and name[i - 1].isdigit():
        i -= 1
    return int(name[i:])

271
Model/udt_extract/ubt_raw_data.py Executable file
View File

@ -0,0 +1,271 @@
#!/usr/bin/env python3
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
#from ctypes import sizeof
from struct import calcsize, unpack
from string import ascii_letters
import numpy as np
from numpy import asarray as ar
from Model.peacock_uvp.apf_timestamp import decode_timestamp
from .convert_type import translate_key
from .date_parser import date_parse
from .ubt_raw_config import paramus_rawdict2ormdict
class ubt_raw_data () :
    """Decoder and container for the profile data of a raw .udt file.

    Non-US data is stored in ``data_dicts``, keyed by datatype.
    US data is stored in ``data_us_dicts``, keyed by config number, then
    receiving channel, then datatype. Every datatype entry is a dict with
    two parallel lists under the keys "time" and "data".
    """
    def __init__ (self, _const):
        """Initiate a ubt_raw_data object which will hold the data read in a raw.udt file.

        Args:
            _const (dict): device constants chunk (CONST_TAG); only
                _const["hardware"]["board_version"] is read here.

        Returns:
            None
        """
        # list of the standardized dicts of the non-US data (model Measure)
        self.data_dicts = {}
        # dict, ordered by config number and channel id, of the lists of the
        # standardized dicts of the US data (model MeasureUs)
        self.data_us_dicts = {}
        print (_const["hardware"]["board_version"])
        # TODO the hardware revision suffix should be removed upstream;
        # meanwhile strip trailing letters, e.g. "...APF04b" -> "apf04"
        self.board = "apf" + _const["hardware"]["board_version"].lower().split("apf")[-1].rstrip(ascii_letters)
        print("initiating ubt_raw_data for board %s" %self.board)
        assert (self.board in ["apf04", "apf06"])
        # board-specific conversion backend (coded values -> SI units)
        if self.board == "apf04" :
            from .apf04_hardware import apf04_hardware
            self.hardware = apf04_hardware()
        elif self.board == "apf06" :
            from .apf06_hardware import apf06_hardware
            self.hardware = apf06_hardware()
        # set by read_line for each decoded profile chunk
        self.current_config = None
        self.current_channel = None

    def set_config (self, _settings):
        """Store the param_us dicts built from the settings chunk and
        pre-create the per-config / per-channel datatype containers.

        Args:
            _settings (dict): settings dict (SETTINGS_JSON_TAG chunk)
        """
        param_us_dicts = paramus_rawdict2ormdict(_settings)
        self.param_us_dicts = param_us_dicts
        # lists of blind zone gain parameters, filled by set_confighw
        # (one entry per config, in file order):
        self.blind_ca0 = []
        self.blind_ca1 = []
        for config in self.param_us_dicts.keys():
            self.data_us_dicts[config] = {}
            for channel in self.param_us_dicts[config].keys():
                self.data_us_dicts[config][channel] = {}
                # not an ideal test, but works in the current state:
                # apf06 records instantaneous profiles, apf04 averaged ones
                if self.board == "apf06" :
                    for datatype in ["echo_profile", "saturation_profile", "velocity_profile", "snr_doppler_profile"]:
                        # print(f"datatype : {datatype} \n")
                        self.data_us_dicts[config][channel][datatype] = {"time": [], "data": []}
                else :
                    for datatype in ["echo_avg_profile", "saturation_avg_profile", "velocity_avg_profile", "snr_doppler_avg_profile", "velocity_std_profile"]:
                        self.data_us_dicts[config][channel][datatype] = {"time":[], "data":[]}

    def set_confighw (self, _size, _data):
        """Extract the blind zone gain limits from a HW config chunk (CONFIG_TAG).

        Only the last two int16 of the chunk are read: blind_ca0 (intercept)
        and blind_ca1 (slope) of the gain limitation in the blind zone.
        The HW configs are always written in config order in the raw file,
        so list index 0..N-1 matches config numbers 1..N.
        NOTE(review): assumes set_config() ran first (it creates the
        blind_ca0/blind_ca1 lists) — confirm the chunk order in the file.
        """
        blind_ca0, blind_ca1 = unpack('%dh'%2, _data[_size-2*calcsize('h'):_size])
        self.blind_ca0.append(blind_ca0)
        self.blind_ca1.append(blind_ca1)

    def read_line (self, size, data, _inst=False) :
        """Decode one profile chunk of a UDT005 file and store its content.

        A profile line in raw UDT005 contains (ref&0x000007FF)<<4 or
        int(config_key), followed by the raw profile; the raw profile holds
        a header with scalar values, then the coded profile vectors.

        US data is filed in data_us_dicts hierarchically by config, receiving
        channel and datatype; non-US data goes to data_dicts by datatype.
        Each datatype keeps its values in the "data" list and the matching
        timestamps in the "time" list. The "time" values are therefore highly
        duplicated, but this allows greater freedom in how the data is used
        afterwards and covers the case of data that is not systematically
        recorded (desynchronized) during a record. Example: APF02 produces
        instantaneous profile data, but the auto gain is only recomputed at
        each block.

        Args:
            size (int): size of the chunk in bytes
            data (bytes): the binary chunk
            _inst (bool): True for instantaneous profiles (3 values per
                cell), False for averaged profiles (4 values per cell)

        Returns:
            timestamp of this profile line (parsed datetime)
        """
        if _inst :
            data_per_cell = 3
        else :
            data_per_cell = 4
        ##################################################
        # header reading: timestamp and config reference
        ##################################################
        head_size = calcsize('hhhh')
        ref = unpack('h', data[0:2])
        # ref_config: the settings reference (unique number)
        # print("ref %s" % (ref >> 4))
        # self.current_config: the number of the configuration in use (1 to 3)
        self.current_config = int(ref[0] & 0x0000000F) + 1
        # get the first channel :
        # TODO handle the whole list, like translator_udt001234 does for the 2C
        self.current_channel = list(self.param_us_dicts[self.current_config].keys())[0]
        #print (self.param_us_dicts[self.current_config].keys())
        # print("num config %s" % self.current_config)
        if self.current_config not in self.data_us_dicts.keys():
            raise Exception('chunk', "unexpected number of configurations (%d)" % self.current_config)
        #print(convert_packed_timestamp(nsec_pF, nsec_pf, msec))
        #print(convert_packed_timestamp(nsec_pF, nsec_pf, msec).strftime("%Y-%m-%dT%H:%M:%S.%f"))
        dt_timestamp, _ = decode_timestamp(data[2:head_size])
        time = date_parse(dt_timestamp.strftime("%Y-%m-%dT%H:%M:%S.%f"))
        #print("time", type(time))
        #print("time", time)
        # A few acoustic parameters which are needed for the following calculations
        n_vol = self.param_us_dicts[self.current_config][self.current_channel]["n_cell"]
        c_prf = self.param_us_dicts[self.current_config][self.current_channel]["f0"] / \
            self.param_us_dicts[self.current_config][self.current_channel]["prf"]
        n_avg = self.param_us_dicts[self.current_config][self.current_channel]["n_avg"]
        r_dvol = self.param_us_dicts[self.current_config][self.current_channel]['r_dcell']
        r_vol1 = self.param_us_dicts[self.current_config][self.current_channel]['r_cell1']
        nb_rx = len(self.param_us_dicts[self.current_config])
        #print ("n_vol = %d ; nb_rx = %d"%(n_vol, nb_rx))
        ###################
        # scalars reading
        ###################
        scalars_size = calcsize('hhhhhh')
        scalars_us_dict = {}
        scalars_dict = {}
        scalars_dict['pitch'], scalars_dict['roll'], scalars_dict['temp'], sound_speed, scalars_us_dict['gain_ca0'], scalars_us_dict['gain_ca1'] = unpack('hhhhhh', data[head_size:head_size+scalars_size])
        for _ in range(nb_rx):
            # TODO the noise of each line will have to be handled individually
            # NOTE(review): each iteration overwrites the previous values, so
            # only the last receiver's noise is kept — confirm this is intended
            scalars_us_dict['noise_g_max'], scalars_us_dict['noise_g_mid'] = unpack("hh", data[head_size+scalars_size:head_size+scalars_size+calcsize('hh')])
            scalars_size += calcsize('hh')
        # consistency check: the remaining bytes must hold n_vol cells of
        # data_per_cell int16 values for each of the nb_rx receivers
        if (size - (head_size+scalars_size)) / (data_per_cell * 2) != n_vol * nb_rx:
            raise Exception('volume number', "expected %d volumes, but profile data contains %d" % (
                n_vol, ((size - (head_size + scalars_size)) / (data_per_cell * 2 * nb_rx))))
        ###################
        # vectors reading
        ###################
        vectors_dict = {}
        offset = head_size+scalars_size
        unpacked_data = ar(unpack('%dh'%(data_per_cell*n_vol*nb_rx), data[offset:offset + data_per_cell*n_vol*nb_rx*calcsize('h')]))
        channels = sorted(self.param_us_dicts[self.current_config].keys())
        for channel_id in range(len(channels)):
            #print ("processing %d"%channel_id)
            self.current_channel = channels[channel_id]
            # the cells are interleaved; value of rank r for channel c and cell i is at
            # [offset + i*data_per_cell*nb_tr_rx + c*data_per_cell + r]
            if _inst :
                vectors_dict['amplitude'] = unpacked_data[0+3*channel_id::3*nb_rx]
                vectors_dict['velocity'] = unpacked_data[1+3*channel_id::3*nb_rx]
                vectors_dict['snr'] = unpacked_data[2+3*channel_id::3*nb_rx]
            else :
                #print(unpacked_data)
                # TODO the ORM names could be used directly here (it would simplify the datatype loop)
                vectors_dict['velocity'] = unpacked_data[0+4*channel_id::4*nb_rx]
                vectors_dict['std'] = unpacked_data[1+4*channel_id::4*nb_rx]
                vectors_dict['amplitude'] = unpacked_data[2+4*channel_id::4*nb_rx]
                vectors_dict['snr'] = unpacked_data[3+4*channel_id::4*nb_rx]
            # print(vectors_dict)
            ##################################
            # conversion of the coded values:
            ##################################
            # Note: the scalars must be converted afterwards, so that the gains
            # are still in coded form for the echo profile conversion
            self.hardware.conversion_profile(vectors_dict, sound_speed, n_vol, n_avg, c_prf, scalars_us_dict['gain_ca0'], scalars_us_dict['gain_ca1'], self.blind_ca0[self.current_config-1], self.blind_ca1[self.current_config-1])
            # elif self.board == "apf06" :
            #     self.conversion_profile_apf06(vectors_dict, sound_speed, n_vol, c_prf, scalars_us_dict['gain_ca0'], scalars_us_dict['gain_ca1'])
            ###################################################################################################
            # filing in the dicts of US data (here all the profiles are US data)
            ###################################################################################################
            # NOTE(review): 'sat' is not set in this method; it is presumably
            # added to vectors_dict by hardware.conversion_profile — confirm
            if _inst :
                for datatype in ["echo_profile", "saturation_profile", "velocity_profile", "snr_doppler_profile"]:
                    self.data_us_dicts[self.current_config][self.current_channel][datatype]["time"].append(time)
                self.data_us_dicts[self.current_config][self.current_channel]["echo_profile"]["data"].append(vectors_dict['amplitude'])
                self.data_us_dicts[self.current_config][self.current_channel]["saturation_profile"]["data"].append(vectors_dict['sat'])
                self.data_us_dicts[self.current_config][self.current_channel]["velocity_profile"]["data"].append(vectors_dict['velocity'])
                self.data_us_dicts[self.current_config][self.current_channel]["snr_doppler_profile"]["data"].append(vectors_dict['snr'])
            else:
                for datatype in ["echo_avg_profile", "saturation_avg_profile", "velocity_avg_profile", "snr_doppler_avg_profile",
                        "velocity_std_profile"]:
                    self.data_us_dicts[self.current_config][self.current_channel][datatype]["time"].append(time)
                self.data_us_dicts[self.current_config][self.current_channel]["echo_avg_profile"]["data"].append(vectors_dict['amplitude'])
                self.data_us_dicts[self.current_config][self.current_channel]["saturation_avg_profile"]["data"].append(vectors_dict['sat'])
                self.data_us_dicts[self.current_config][self.current_channel]["velocity_avg_profile"]["data"].append(vectors_dict['velocity'])
                self.data_us_dicts[self.current_config][self.current_channel]["snr_doppler_avg_profile"]["data"].append(vectors_dict['snr'])
                self.data_us_dicts[self.current_config][self.current_channel]["velocity_std_profile"]["data"].append(vectors_dict['std'])
        # get the first channel again:
        #self.current_channel = list(self.param_us_dicts[self.current_config].keys())[0]
        self.hardware.conversion_us_scalar(scalars_us_dict, n_avg, r_dvol, r_vol1)
        # translation of the names of the US data types:
        for key, value in scalars_us_dict.items():
            translated_key = translate_key(key)
            # handling of the scalars which are variable (auto) us parameters
            if translated_key == None:
                translated_key = translate_key(key, _type="param_var")
                if translated_key:
                    translated_key = translated_key+"_param"
            if translated_key:
                # note: common to all channels in multichannel mode
                for channel in list(self.param_us_dicts[self.current_config].keys()):
                    if translated_key not in self.data_us_dicts[self.current_config][channel].keys():
                        self.data_us_dicts[self.current_config][channel][translated_key] = {"time":[], "data":[]}
                    self.data_us_dicts[self.current_config][channel][translated_key]["data"].append(value)
                    self.data_us_dicts[self.current_config][channel][translated_key]["time"].append(time)
        self.conversion_scalar(scalars_dict)
        # translation of the names of the non-US data types:
        for key, value in scalars_dict.items():
            translated_key = translate_key(key)
            if translated_key:
                if translated_key not in self.data_dicts.keys():
                    self.data_dicts[translated_key] = {"time":[], "data":[]}
                self.data_dicts[translated_key]["data"].append(value)
                self.data_dicts[translated_key]["time"].append(time)
        return time

    def conversion_scalar(self, scalars_dict):
        """Convert the scalar values from raw coded values to human readable SI units, in place.

        Args:
            scalars_dict (dict): dict of scalars keyed by datatype

        Returns:
            None
        """
        # convert temperature to Kelvin
        # (assumes the raw value is in degrees Celsius — TODO confirm coding)
        scalars_dict["temp"] += 273.15
        # convert angles to rad
        scalars_dict['pitch'] *= np.pi/180.
        scalars_dict['roll'] *= np.pi/180.

View File

@ -0,0 +1,83 @@
#!/usr/bin/env python3
# -*- coding: UTF_8 -*-
# @copyright this code is the property of Ubertone.
# You may use this code for your personal, informational, non-commercial purpose.
# You may not distribute, transmit, display, reproduce, publish, license, create derivative works from, transfer or sell any information, software, products or services based on this code.
# @author Stéphane Fischer
# lecture du fichier de données de données brutes
from struct import calcsize, unpack
class ubt_raw_file:
    """Sequential reader for raw .udt files (UDT005 format only).

    The file starts with a 42-byte ASCII header ("UDT005 <board> <webui2>/<ext...>"),
    followed by chunks of the form: int16 flag, int16 payload size, payload.
    """
    def __init__(self, _filename):
        """Open a raw.udt file and parse its 42-byte header.
        Works only for raw.udt files from webui2 (UB-Lab P).

        Args:
            _filename (string): file path of raw.udt file

        Returns:
            None
        """
        self.fd = open(_filename, 'rb')  # binary mode: all reads return bytes
        self.total_size = 0
        header = self.fd.read(42).decode("utf-8").split(" ")
        self.version = header[0]
        print("raw header : ", self.version)
        assert (self.version == "UDT005")
        self.board = header[1]
        print("board : ", self.board)
        header = header[2].split("/")
        self.webui2 = header[0]
        print("webui2 : ", self.webui2)
        print("header extension : ", header[1:])

    def __read_file__(self, _size):
        """Read exactly _size bytes from the file.

        Args:
            _size (int): number of bytes to read

        Returns:
            _data (bytes): the bytes read

        Raises:
            EOFError: when the end of file is reached (empty or short read)
        """
        _data = self.fd.read(_size)
        # bugfix: the file is binary, so EOF must be compared against b"";
        # the original compared with "" which never matches a bytes object
        if _data == b'':
            print("%d byte read in the file" % (self.total_size))
            raise EOFError
        if _size != len(_data):
            # short read: truncated file
            raise EOFError
        self.total_size += _size
        #print("total size in read file %d"%self.total_size)
        return _data

    def read_chunk(self):
        """Read the next chunk of the file.

        Returns:
            flag (int): identification flag for the data in the chunk
            size (int): size of the data in the chunk
            data (bytes): data in the chunk (b'' for an empty chunk)

        Raises:
            EOFError: when no complete chunk remains
        """
        flag = unpack('h', self.__read_file__(calcsize('h')))[0]
        size = unpack('h', self.__read_file__(calcsize('h')))[0]
        if size:
            data = self.__read_file__(size)
        else:
            print("chunck vide")
            # bugfix: return bytes (b''), not str, to match the normal path
            data = b''
        # crc = unpack('h', self.__read_file__(calcsize('h')))[0]
        return flag, size, data

View File

@ -0,0 +1,9 @@
#!/usr/bin/env python3
# -*- coding: UTF_8 -*-
# Flags APF04 and APF06
# Chunk-type tags of raw .udt files: each chunk starts with one of these
# int16 flags, telling the reader how to decode the payload (see raw_extract).
PROFILE_TAG = 100       # averaged profile line
PROFILE_INST_TAG = 101  # instantaneous profile line
CONFIG_TAG = 200        # hardware configuration (binary payload)
SETTINGS_JSON_TAG = 201 # settings dictionary (JSON payload)
CONST_TAG = 300         # device constants (JSON payload)

View File

@ -28,6 +28,7 @@ import Translation.constant_string as cs
from Model.TableModel import TableModel
from Model.AquascatDataLoader import RawAquascatData
from Model.acoustic_data_loader import AcousticDataLoader
from Model.acoustic_data_loader_UBSediFlow import AcousticDataLoaderUBSediFlow
# from View.window_noise_level_averaged_profile import WindowNoiseLevelTailAveragedProfile
from View.sample_data_tab import SampleDataTab
@ -204,8 +205,8 @@ class AcousticDataTab(QWidget):
self.label_rx = QLabel()
self.label_tx = QLabel()
self.label_to_do = QLabel()
self.label_to_do.setText("UBSediFlow data : to do for Oct. 20th")
# self.label_to_do = QLabel()
# self.label_to_do.setText("UBSediFlow data : to do for Oct. 20th")
# self.groupbox_measurement_information_Aquascat()
self.combobox_ABS_system_choice.currentTextChanged.connect(self.ABS_system_choice)
@ -611,16 +612,21 @@ class AcousticDataTab(QWidget):
self.lineEdit_acoustic_file.clear()
self.label_date_groupbox_acoustic_file.clear()
self.label_date_groupbox_acoustic_file.setText(_translate("CONSTANT_STRING", cs.DATE) + ": ")
self.label_hour_groupbox_acoustic_file.clear()
self.label_hour_groupbox_acoustic_file.setText(_translate("CONSTANT_STRING", cs.HOUR) + ": ")
elif self.combobox_ABS_system_choice.currentText() == "UB-SediFlow":
self.groupbox_measurement_information_UBSediFlow()
self.lineEdit_acoustic_file.clear()
self.label_date_groupbox_acoustic_file.clear()
self.label_date_groupbox_acoustic_file.setText(_translate("CONSTANT_STRING", cs.DATE) + ": ")
self.label_hour_groupbox_acoustic_file.clear()
self.label_hour_groupbox_acoustic_file.setText(_translate("CONSTANT_STRING", cs.HOUR) + ": ")
def groupbox_measurement_information_Aquascat(self):
# self.gridLayout_goupbox_info.itemAt(0).widget().deleteLater()
self.label_to_do.hide()
# self.label_to_do.hide()
self.label_freq.hide()
self.label_profiles.show()
self.label_profiles_per_sec.show()
@ -661,9 +667,15 @@ class AcousticDataTab(QWidget):
self.label_rx.hide()
self.label_tx.hide()
self.label_to_do.show()
# self.label_to_do.show()
#
# self.gridLayout_goupbox_info.addWidget(self.label_to_do, 0, 0, 1, 1)
self.label_freq.show()
self.gridLayout_goupbox_info.addWidget(self.label_freq, 0, 0, 1, 1)
self.gridLayout_goupbox_info.addWidget(self.label_to_do, 0, 0, 1, 1)
def clicked_pushbutton_noise_level(self):
self.WindowNoiseLevelTailAveragedProfile().show()
@ -683,11 +695,13 @@ class AcousticDataTab(QWidget):
"Aquascat file (*.aqa)")
dir_name = path.dirname(filename[0])
name = path.basename(filename[0])
elif self.combobox_ABS_system_choice.currentIndex() == 2:
filename = QFileDialog.getOpenFileName(self, "Open file", "", "UBSediFlow file (*.udt)")
dir_name = path.dirname(filename[0])
name = path.basename(filename[0])
# --- Fill lineEdit with path and file names + load acoustic data ---
# --- fill date, hour and measurements information + fill frequency combobox for bottom detection ---
if self.combobox_ABS_system_choice.currentIndex() != 0:
@ -734,91 +748,148 @@ class AcousticDataTab(QWidget):
_translate("CONSTANT_STRING", cs.HOUR) + ": " + str(stg.hour_noise))
def load_BS_acoustic_raw_data(self):
acoustic_data = AcousticDataLoader(stg.path_BS_raw_data + "/" + stg.filename_BS_raw_data)
stg.BS_raw_data = acoustic_data._BS_raw_data
stg.BS_raw_data_reshape = acoustic_data.reshape_BS_raw_cross_section()
stg.r = acoustic_data._r
stg.r_2D = acoustic_data.compute_r_2D()
stg.r_reshape = acoustic_data.reshape_r()
stg.time = acoustic_data._time
stg.time_reshape = acoustic_data.reshape_t()
stg.freq = acoustic_data._freq
stg.freq_text = acoustic_data._freq_text
stg.date = acoustic_data._date
stg.hour = acoustic_data._hour
stg.nb_profiles = acoustic_data._nb_profiles
stg.nb_profiles_per_sec = acoustic_data._nb_profiles_per_sec
stg.nb_cells = acoustic_data._nb_cells
stg.cell_size = acoustic_data._cell_size
stg.pulse_length = acoustic_data._cell_size
stg.nb_pings_per_sec = acoustic_data._nb_pings_per_sec
stg.nb_pings_averaged_per_profile = acoustic_data._nb_pings_averaged_per_profile
stg.kt = acoustic_data._kt
stg.gain_rx = acoustic_data._gain_rx
stg.gain_tx = acoustic_data._gain_tx
if self.combobox_ABS_system_choice.currentIndex() == 1:
acoustic_data = AcousticDataLoader(stg.path_BS_raw_data + "/" + stg.filename_BS_raw_data)
stg.BS_raw_data = acoustic_data._BS_raw_data
stg.BS_raw_data_reshape = acoustic_data.reshape_BS_raw_cross_section()
stg.r = acoustic_data._r
stg.r_2D = acoustic_data.compute_r_2D()
stg.r_reshape = acoustic_data.reshape_r()
stg.time = acoustic_data._time
stg.time_reshape = acoustic_data.reshape_t()
stg.freq = acoustic_data._freq
stg.freq_text = acoustic_data._freq_text
stg.date = acoustic_data._date
stg.hour = acoustic_data._hour
stg.nb_profiles = acoustic_data._nb_profiles
stg.nb_profiles_per_sec = acoustic_data._nb_profiles_per_sec
stg.nb_cells = acoustic_data._nb_cells
stg.cell_size = acoustic_data._cell_size
stg.pulse_length = acoustic_data._cell_size
stg.nb_pings_per_sec = acoustic_data._nb_pings_per_sec
stg.nb_pings_averaged_per_profile = acoustic_data._nb_pings_averaged_per_profile
stg.kt = acoustic_data._kt
stg.gain_rx = acoustic_data._gain_rx
stg.gain_tx = acoustic_data._gain_tx
elif self.combobox_ABS_system_choice.currentIndex() == 2:
acoustic_data = AcousticDataLoaderUBSediFlow(stg.path_BS_raw_data + "/" + stg.filename_BS_raw_data)
stg.date = acoustic_data._date
stg.hour = acoustic_data._hour
stg.freq = acoustic_data._freq
stg.time = acoustic_data._time
stg.r = acoustic_data._r
stg.freq_text = acoustic_data._freq_text
stg.BS_raw_data = acoustic_data._BS_raw_data
stg.BS_raw_data_reshape = acoustic_data.reshape_BS_raw_cross_section()
stg.r_reshape = acoustic_data.reshape_r()
stg.time_reshape = acoustic_data.reshape_t()
def load_noise_data_and_compute_SNR(self):
noise_data = AcousticDataLoader(stg.path_BS_noise_data + "/" + stg.filename_BS_noise_data)
# stg.BS_noise_data = noise_data._BS_raw_data
stg.date_noise = noise_data._date
stg.hour_noise = noise_data._hour
stg.time_snr = noise_data._time
noise = np.zeros(stg.BS_raw_data.shape)
for f in range(noise_data._freq.shape[0]):
noise[:, f, :] = np.mean(noise_data._BS_raw_data[:, f, :], axis=(0, 1))
stg.BS_noise_data = noise
stg.snr = np.divide((stg.BS_raw_data - stg.BS_noise_data) ** 2, stg.BS_noise_data ** 2)
stg.snr_reshape = np.reshape(stg.snr, (stg.r.shape[0] * stg.time.shape[0], stg.freq.shape[0]), order="F")
if self.combobox_ABS_system_choice.currentIndex() == 1:
noise_data = AcousticDataLoader(stg.path_BS_noise_data + "/" + stg.filename_BS_noise_data)
# stg.BS_noise_data = noise_data._BS_raw_data
stg.date_noise = noise_data._date
stg.hour_noise = noise_data._hour
stg.time_snr = noise_data._time
noise = np.zeros(stg.BS_raw_data.shape)
for f in range(noise_data._freq.shape[0]):
noise[:, f, :] = np.mean(noise_data._BS_raw_data[:, f, :], axis=(0, 1))
stg.BS_noise_data = noise
stg.snr = np.divide((stg.BS_raw_data - stg.BS_noise_data) ** 2, stg.BS_noise_data ** 2)
stg.snr_reshape = np.reshape(stg.snr, (stg.r.shape[0] * stg.time.shape[0], stg.freq.shape[0]), order="F")
elif self.combobox_ABS_system_choice.currentIndex() == 2:
noise_data = AcousticDataLoaderUBSediFlow(stg.path_BS_noise_data + "/" + stg.filename_BS_noise_data)
stg.date_noise = noise_data._date
stg.hour_noise = noise_data._hour
stg.time_snr = noise_data._time
noise = np.zeros(stg.BS_raw_data.shape)
for f in range(noise_data._freq.shape[0]):
noise[:, f, :] = np.mean(noise_data._BS_raw_data[:, f, :], axis=(0, 1))
stg.BS_noise_data = noise
stg.snr = np.divide((stg.BS_raw_data - stg.BS_noise_data) ** 2, stg.BS_noise_data ** 2)
stg.snr_reshape = np.reshape(stg.snr, (stg.r.shape[0] * stg.time.shape[0], stg.freq.shape[0]), order="F")
def fill_measurements_information_groupbox(self):
    """Fill the measurement-information group box labels from settings (stg).

    Both supported ABS systems (combo index 1: Aquascat, index 2: UB-SediFlow)
    display the same set of fields, so a single merged branch handles both.

    Fix: the UB-SediFlow branch previously set the frequency label twice and
    otherwise duplicated the Aquascat branch line for line; the duplicate call
    is removed and the two identical branches are merged.
    """
    if self.combobox_ABS_system_choice.currentIndex() in (1, 2):
        self.label_profiles.setText(
            _translate("CONSTANT_STRING", cs.NB_PROFILES) + ": " + str(stg.nb_profiles))
        self.label_profiles_per_sec.setText(
            _translate("CONSTANT_STRING", cs.NB_PROFILES_PER_SEC) + ": " +
            str(stg.nb_profiles_per_sec) + " Hz")
        self.label_freq.setText(
            _translate("CONSTANT_STRING", cs.FREQUENCY) + ": " + ', '.join(stg.freq_text))
        self.label_cells.setText(
            _translate("CONSTANT_STRING", cs.NB_CELLS) + ": " + str(stg.nb_cells))
        # Cell size is stored in metres; shown in centimetres.
        self.label_cell_size.setText(
            _translate("CONSTANT_STRING", cs.CELL_SIZE) + ": " + str(100*round(stg.cell_size, 3)) + " cm")
        self.label_pulse_length.setText(
            _translate("CONSTANT_STRING", cs.PULSE_LENGHT) + ": " + str(round(stg.pulse_length,6)) + "sec")
        self.label_pings_per_sec.setText(
            _translate("CONSTANT_STRING", cs.NB_PINGS_PER_SEC) + ": " + str(stg.nb_pings_per_sec) + " Hz")
        self.label_pings_per_profile.setText(
            _translate("CONSTANT_STRING", cs.NB_PINGS_PER_PROFILE) + ": " +
            str(stg.nb_pings_averaged_per_profile))
        self.label_kt.setText(
            _translate("CONSTANT_STRING", cs.KT) + ": " + ', '.join(map(str, stg.kt)))
        self.label_rx.setText(
            _translate("CONSTANT_STRING", cs.GAIN_RX) + ": " + ', '.join(map(str, stg.gain_rx)))
        self.label_tx.setText(
            _translate("CONSTANT_STRING", cs.GAIN_TX) + ": " + ', '.join(map(str, stg.gain_tx)))
def fill_table(self):
    """Build the acoustic DataFrame and show it in the GUI table view.

    Columns depend on which files are loaded: time + BS columns, plus SNR
    columns when a noise file is also loaded. If no acoustic file is loaded,
    a warning dialog is shown instead.

    Fix: a stale unconditional copy of the Aquascat branch ran before the
    system check (diff/merge residue), rebuilding the table twice; only the
    branched version is kept, and the identical warning dialog is factored
    into a helper.
    """
    if self.combobox_ABS_system_choice.currentIndex() == 1:
        if ((self.lineEdit_acoustic_file.text()) and (self.lineEdit_noise_file.text())):
            stg.DataFrame_acoustic = pd.DataFrame(
                np.concatenate((stg.time_reshape, stg.BS_raw_data_reshape, stg.snr_reshape), axis=1),
                columns=list(map(str, ["Time"] + ["BS - " + f for f in stg.freq_text] +
                                 ["SNR - " + f for f in stg.freq_text])))
            self.tableModel = TableModel(stg.DataFrame_acoustic)
            self.tableView.setModel(self.tableModel)
        elif self.lineEdit_acoustic_file.text():
            stg.DataFrame_acoustic = pd.DataFrame(
                np.concatenate((stg.time_reshape, stg.BS_raw_data_reshape), axis=1),
                columns=list(map(str, ["Time"] + ["BS - " + f for f in stg.freq_text])))
            self.tableModel = TableModel(stg.DataFrame_acoustic)
            self.tableView.setModel(self.tableModel)
        else:
            self._warn_files_missing_for_table()
    elif self.combobox_ABS_system_choice.currentIndex() == 2:
        if ((self.lineEdit_acoustic_file.text()) and (self.lineEdit_noise_file.text())):
            stg.DataFrame_acoustic = pd.DataFrame(
                np.concatenate((stg.time_reshape, stg.BS_raw_data_reshape, stg.snr_reshape), axis=1),
                columns=list(map(str, ["Time"] + ["BS - " + f for f in stg.freq_text] +
                                 ["SNR - " + f for f in stg.freq_text])))
            self.tableModel = TableModel(stg.DataFrame_acoustic)
            self.tableView.setModel(self.tableModel)
        elif self.lineEdit_acoustic_file.text():
            # UB-SediFlow: one "Time" column per frequency (time axes are
            # stored per frequency for this system).
            stg.DataFrame_acoustic = pd.DataFrame(
                np.concatenate((stg.time_reshape, stg.BS_raw_data_reshape), axis=1),
                columns=list(map(str, ["Time - " + f for f in stg.freq_text] + ["BS - " + f for f in stg.freq_text])))
            self.tableModel = TableModel(stg.DataFrame_acoustic)
            self.tableView.setModel(self.tableModel)
        else:
            self._warn_files_missing_for_table()

def _warn_files_missing_for_table(self):
    """Show a warning dialog asking the user to load files before filling the table."""
    msgBox = QMessageBox()
    msgBox.setWindowTitle("Fill table Error")
    msgBox.setIcon(QMessageBox.Warning)
    msgBox.setText("Download files before fill table")
    msgBox.setStandardButtons(QMessageBox.Ok)
    msgBox.exec()
def export_table(self):
if self.tableWidget.columnCount() == 10:
@ -876,23 +947,38 @@ class AcousticDataTab(QWidget):
self.scroll_BS.setAlignment(Qt.AlignCenter)
self.verticalLayout_groupbox_transect_2Dplot_raw_BS_data.addWidget(self.scroll_BS)
self.spinbox_tmin.setValue(np.min(stg.time))
self.spinbox_tmax.setValue(np.round(np.max(stg.time), 2))
self.spinbox_tmin.setValue(np.round(np.min(stg.time[0, :]), 2))
self.spinbox_tmax.setValue(np.round(np.max(stg.time[0, :]), 2))
for f in range(stg.freq.shape[0]):
stg.tmin = np.array([])
stg.tmax = np.array([])
val_min = np.nanmin(stg.BS_raw_data[:, f, :])
val_max = np.nanmax(stg.BS_raw_data[:, f, :])
for f, _ in enumerate(stg.freq):
val_min = np.nanmin(stg.BS_raw_data[f, :, :])
val_max = np.nanmax(stg.BS_raw_data[f, :, :])
if val_min == 0:
val_min = 1e-5
stg.tmin = (
np.append(stg.tmin,
np.where(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmin.value()) ==
np.nanmin(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmin.value())))[0][0])
)
stg.tmax = (
np.append(stg.tmax,
np.where(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmax.value()) ==
np.nanmin(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmax.value())))[0][0])
)
print(f"freq = {f}")
print(f"tmin {stg.tmin}")
print(f"tmax {stg.tmax}")
pcm = self.axis_BS[f].pcolormesh(
stg.time[np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]],
-stg.r,
stg.BS_raw_data[:, f,
np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]],
stg.time[f, int(stg.tmin[f]):int(stg.tmax[f])], stg.r[f, :],
stg.BS_raw_data[f, :, int(stg.tmin[f]):int(stg.tmax[f])],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
self.axis_BS[f].text(1, .70, stg.freq_text[f],
@ -919,33 +1005,56 @@ class AcousticDataTab(QWidget):
msgBox.exec()
else:
# --- Backscatter acoustic signal is recorded for next tab ---
stg.BS_data = stg.BS_raw_data[:, :, np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]]
print("stg.time shape ", stg.time.shape)
stg.t = stg.time[np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]]
print("stg.t shape ", stg.t.shape)
stg.r_2D = stg.r_2D[:, np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]]
print("stg.r shape ", stg.r_2D.shape)
for f in range(stg.freq.shape[0]):
# --- Backscatter acoustic signal is recorded for next tab ---
stg.BS_data = np.array([[[]]])
stg.t = np.array([[]])
for f, _ in enumerate(stg.freq):
stg.tmin[f] = np.where(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmin.value()) ==
np.nanmin(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmin.value())))[0][0]
stg.tmax[f] = np.where(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmax.value()) ==
np.nanmin(np.abs(np.round(stg.time[f, :], 2) - self.spinbox_tmax.value())))[0][0]
print("stg.tmin[f] ", stg.tmin[f])
print("stg.tmax[f] ", stg.tmax[f])
print("shape of BS_raw_data ", np.array([stg.BS_raw_data[f, :, int(stg.tmin[f]):int(stg.tmax[f])]]).shape)
print("BS_data shape ", stg.BS_data.shape)
if stg.BS_data.shape[2] == 0:
stg.BS_data = np.array([stg.BS_raw_data[f, :, int(stg.tmin[f]):int(stg.tmax[f])]])
else:
stg.BS_data = np.append(stg.BS_data, np.array([stg.BS_raw_data[f, :, int(stg.tmin[f]):int(stg.tmax[f])]]), axis=0)
# stg.BS_data = np.stack(np.array([stg.BS_raw_data[f, :, int(stg.tmin[f]):int(stg.tmax[f])]]), axis=0)
# stg.BS_data = np.append(stg.BS_data, np.array([stg.BS_raw_data[f, :, int(stg.tmin[f]):int(stg.tmax[f])]]), axis=2)
print("stg.BS_data.shape ", stg.BS_data.shape)
print("stg.BS_data.size ", stg.BS_data.size)
print("stg.time shape ", stg.time.shape)
print("stg.t shape ", stg.t.shape)
if stg.t.shape[1] == 0:
stg.t = np.array([stg.time[f, int(stg.tmin[f]):int(stg.tmax[f])]])
else:
stg.t = np.append(stg.t, np.array([stg.time[f, int(stg.tmin[f]):int(stg.tmax[f])]]), axis=0)
# stg.t = np.append(stg.t, np.array([stg.time[f, int(stg.tmin[f]):int(stg.tmax[f])]]), axis=0)
print("stg.t shape ", stg.t.shape)
# stg.r_2D = stg.r_2D[:, np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
# np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]]
# print("stg.r shape ", stg.r_2D.shape)
self.axis_BS[f].cla()
val_min = np.min(stg.BS_raw_data[:, f, :])
val_max = np.max(stg.BS_raw_data[:, f, :])
val_min = np.min(stg.BS_data[f, :, :])
val_max = np.max(stg.BS_data[f, :, :])
if val_min == 0:
val_min = 1e-5
pcm = self.axis_BS[f].pcolormesh(
stg.time[np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]],
-stg.r,
(stg.BS_raw_data[:, f,
np.where(np.round(stg.time, 2) == self.spinbox_tmin.value())[0][0]:
np.where(np.round(stg.time, 2) == self.spinbox_tmax.value())[0][0]]),
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
# print("stg.t[f, :].shape ", stg.t[f])
# print("stg.r[f, :].shape ", stg.r[f, :])
pcm = self.axis_BS[f].pcolormesh(stg.t[f, :], stg.r[f, :], stg.BS_data[f, :, :],
cmap='viridis', norm=LogNorm(vmin=val_min, vmax=val_max))
self.axis_BS[f].text(1, .70, stg.freq_text[f],
fontsize=14, fontweight='bold', fontname="Ubuntu", c="black", alpha=0.5,

View File

@ -44,6 +44,8 @@ snr_reshape = np.array([]) # snr is reshape to be included in table of valu
DataFrame_acoustic = pd.DataFrame()  # table of time / BS / SNR values displayed in the GUI table view

# --- Processed data in Acoustic Data Tab and used in Acoustic processing tab ---
tmin = np.array([])  # per-frequency time index matching the spin box tmin value
tmax = np.array([])  # per-frequency time index matching the spin box tmax value
BS_data = np.array([])  # BS data limited with tmin and tmax values of spin box
BS_data_section = np.array([])  # BS data in the section. Values NaN outside the bottom of the section are deleted
Noise_data = np.array([])  # Noise_data = BS_noise_data[:, :, tmin:tmax]