pep8 src/Solver/AdisTS.py

adists_release
Theophile Terraz 2024-11-28 14:32:45 +01:00
parent 824669f70f
commit da7ef60bef
8 changed files with 247 additions and 145 deletions

View File

@@ -29,6 +29,7 @@ _translate = QCoreApplication.translate
logger = logging.getLogger()
class AdistsOutputRKChecker(AbstractModelChecker):
    def __init__(self):
        super(AdistsOutputRKChecker, self).__init__()

View File

@@ -521,7 +521,7 @@ Last export at: @date."""
        return self._Pollutants
    @property
-    def initial_conditions_adists(self):
+    def ic_adists(self):
        return self._InitialConditionsAdisTS
    @property

View File

@@ -16,9 +16,9 @@
# -*- coding: utf-8 -*-
-import os, glob
+import os
+import glob
import logging
-from http.cookiejar import reach
import numpy as np
@@ -44,6 +44,7 @@ from itertools import chain
logger = logging.getLogger()
def adists_file_open(filepath, mode):
    f = open(filepath, mode)
@@ -60,6 +61,7 @@ def adists_file_open(filepath, mode):
    return f
class AdisTS(CommandLineSolver):
    _type = "adists"
@@ -137,7 +139,9 @@ class AdisTS(CommandLineSolver):
        self._export_REP_additional_lines(study, f)
-        path_mage_net = os.path.join(os.path.abspath(os.path.join(repertory, os.pardir)), f"{mage_rep}/net")
+        path_mage_net = os.path.join(os.path.abspath(
+            os.path.join(repertory, os.pardir)
+        ), f"{mage_rep}/net")
        path_adists_net = os.path.join(repertory, "net")
        if os.path.exists(path_mage_net):
@@ -207,10 +211,10 @@
        """
        n = node.id
-        ##print("node name id: ", n)
-        ##if n in self._nodes_names:
-        ##return self._nodes_names[n]
+        # print("node name id: ", n)
+        # if n in self._nodes_names:
+        # return self._nodes_names[n]
        name = ""
@@ -279,33 +283,49 @@ class AdisTSwc(AdisTS):
        for pollutant in pollutants:
            name = pollutant.name
-            with adists_file_open(os.path.join(repertory, f"{name}.POL"), "w+") as f:
+            with adists_file_open(
+                os.path.join(repertory, f"{name}.POL"), "w+"
+            ) as f:
                files.append(f"{name}.POL")
                f.write(f"name = {name}\n")
-                self._export_POL_Characteristics(study, pollutant._data, f, qlog)
+                self._export_POL_Characteristics(
+                    study, pollutant._data, f, qlog
+                )
-                POL_ICs = next(filter(lambda ic: ic.pollutant == pollutant.id,\
-                               study.river.initial_conditions_adists.Initial_Conditions_List))
+                POL_ICs = next(filter(
+                    lambda ic: ic.pollutant == pollutant.id,
+                    study.river.ic_adists.Initial_Conditions_List
+                ))
-                if POL_ICs.concentration != None:
+                if POL_ICs.concentration is not None:
                    f.write(f"file_ini = {name}.INI\n")
-                    self._export_ICs_AdisTS(study, repertory, POL_ICs, qlog, name)
+                    self._export_ICs_AdisTS(
+                        study, repertory, POL_ICs, qlog, name
+                    )
-                POL_BCs = list(filter(lambda bc: bc.pollutant == pollutant.id,\
-                               study.river.boundary_conditions_adists.BCs_AdisTS_List))
+                POL_BCs = list(filter(
+                    lambda bc: bc.pollutant == pollutant.id,
+                    study.river.boundary_conditions_adists.BCs_AdisTS_List
+                ))
                if len(POL_BCs) != 0:
                    f.write(f"file_cl = {name}.CDT\n")
-                    self._export_BCs_AdisTS(study, repertory, POL_BCs, qlog, name)
+                    self._export_BCs_AdisTS(
+                        study, repertory, POL_BCs, qlog, name
+                    )
-                POL_LAT_Cont = list(filter(lambda lc: lc.pollutant == pollutant.id,\
-                               study.river.lateral_contributions_adists.Lat_Cont_List))
+                POL_LAT_Cont = list(filter(
+                    lambda lc: lc.pollutant == pollutant.id,
+                    study.river.lateral_contributions_adists.Lat_Cont_List
+                ))
                if len(POL_LAT_Cont) != 0:
                    f.write(f"file_ald = {name}.ALD\n")
                    f.write(f"*\n")
-                    self._export_Lat_AdisTS(study, repertory, POL_LAT_Cont, qlog, name)
+                    self._export_Lat_AdisTS(
+                        study, repertory, POL_LAT_Cont, qlog, name
+                    )
        return files
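For reference, the next(filter(...)) calls above return the first element whose pollutant field matches the current pollutant, and raise StopIteration when nothing matches. A minimal standalone sketch of the pattern (the IC record below is an illustrative stand-in, not the project's class):

    from collections import namedtuple

    # Illustrative stand-in for an initial-conditions record.
    IC = namedtuple("IC", ["pollutant", "concentration"])
    ics = [IC(pollutant=1, concentration=None),
           IC(pollutant=2, concentration=0.5)]

    # First match, as in the export code above.
    match = next(filter(lambda ic: ic.pollutant == 2, ics))
    print(match.concentration)  # 0.5

    # next() raises StopIteration when nothing matches; a default avoids that.
    missing = next(filter(lambda ic: ic.pollutant == 99, ics), None)
    print(missing)  # None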
@@ -314,16 +334,22 @@ class AdisTSwc(AdisTS):
        if qlog is not None:
            qlog.put("Export POL LCs files")
-        with adists_file_open(os.path.join(repertory, f"{POL_name}.ALD"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{POL_name}.ALD"), "w+"
+        ) as f:
            for LC in POL_LC:
-                reach = next(filter(lambda edge: edge.id == LC.edge, study.river.edges()))  #.name
+                reach = next(filter(
+                    lambda edge: edge.id == LC.edge, study.river.edges()
+                ))  # .name
                reach_name = self.get_reach_name(self, reach)
                f.write(f"${reach_name} {LC.begin_rk} {LC.end_rk}\n")
                f.write(f"*temps |débit massique (kg/s)\n")
                f.write(f"*---------++++++++++\n")
                for LC_data in LC._data:
-                    f.write(f"{timestamp_to_old_pamhyr_date_adists(int(LC_data[0]))} {LC_data[1]}\n")
+                    tmp = timestamp_to_old_pamhyr_date_adists(int(LC_data[0]))
+                    f.write(" ".join((f"{tmp}",
+                                      f"{LC_data[1]}\n")))
                f.write(f"*\n")
        return True
@@ -333,10 +359,16 @@ class AdisTSwc(AdisTS):
        if qlog is not None:
            qlog.put("Export POL BCs files")
-        with adists_file_open(os.path.join(repertory, f"{POL_name}.CDT"), "w+") as f:
+        with adists_file_open(os.path.join(
+            repertory, f"{POL_name}.CDT"), "w+"
+        ) as f:
            for BC in POL_BC:
-                node = next(filter(lambda x: x.id == BC.node, study.river._nodes))  #.name
-                print("node name in BC:", node, node.name, self.get_node_name(node))
+                node = next(filter(
+                    lambda x: x.id == BC.node, study.river._nodes
+                ))  # .name
+                print("node name in BC:",
+                      node, node.name,
+                      self.get_node_name(node))
                node_name = node.name  # self.get_node_name(node)
                f.write(f"${node_name}\n")
@@ -350,39 +382,59 @@ class AdisTSwc(AdisTS):
                f.write(f"*---------++++++++++\n")
                for BC_data in BC._data:
-                    f.write(f"{timestamp_to_old_pamhyr_date_adists(int(BC_data[0]))} {BC_data[1]}\n")
+                    tmp = timestamp_to_old_pamhyr_date_adists(int(BC_data[0]))
+                    f.write(" ".join((f"{tmp}",
+                                      f"{BC_data[1]}\n")))
                f.write(f"*\n")
        return True
-    def _export_ICs_AdisTS(self, study, repertory, POL_IC_default, qlog, POL_name):
+    def _export_ICs_AdisTS(self, study, repertory,
+                           POL_IC_default, qlog, POL_name):
        if qlog is not None:
            qlog.put("Export POL ICs files")
-        with adists_file_open(os.path.join(repertory, f"{POL_name}.INI"), "w+") as f:
+        with adists_file_open(os.path.join(
+            repertory, f"{POL_name}.INI"
+        ), "w+") as f:
            f.write(f"*État initial pour le polluant {POL_name}\n")
-            f.write(f"DEFAULT = {POL_IC_default.concentration} {POL_IC_default.eg} "+
-                    f"{POL_IC_default.em} {POL_IC_default.ed}\n")
+            f.write(" ".join(("DEFAULT =",
+                              f"{POL_IC_default.concentration}",
+                              f"{POL_IC_default.eg}",
+                              f"{POL_IC_default.em}",
+                              f"{POL_IC_default.ed}\n")))
            if len(POL_IC_default._data) != 0:
-                self._export_ICs_AdisTS_Spec(study, POL_IC_default._data, f, qlog)
+                self._export_ICs_AdisTS_Spec(
+                    study, POL_IC_default._data, f, qlog
+                )
-    def _export_ICs_AdisTS_Spec(self, study, pol_ics_spec_data, f, qlog, name="0"):
+    def _export_ICs_AdisTS_Spec(self, study, pol_ics_spec_data,
+                                f, qlog, name="0"):
        for ic_spec in pol_ics_spec_data:
-            f.write(f"{ic_spec.name} = {ic_spec.reach} {ic_spec.start_rk} {ic_spec.end_rk} " +
-                    f"{ic_spec.concentration} {ic_spec.eg} {ic_spec.em} {ic_spec.ed} {ic_spec.rate}")
+            f.write(" ".join((f"{ic_spec.name}",
+                              "=",
+                              f"{ic_spec.reach}",
+                              f"{ic_spec.start_rk}",
+                              f"{ic_spec.end_rk}",
+                              f"{ic_spec.concentration}",
+                              f"{ic_spec.eg}",
+                              f"{ic_spec.em}",
+                              f"{ic_spec.ed}",
+                              f"{ic_spec.rate}\n")))
        return True
    def _export_POL_Characteristics(self, study, pol_data, f, qlog, name="0"):
-        list_characteristics = ["type", "diametre", "rho", "porosity", "cdc_riv", "cdc_cas", "apd", "ac", "bc"]
+        list_characteristics = ["type", "diametre", "rho", "porosity",
+                                "cdc_riv", "cdc_cas", "apd", "ac", "bc"]
        if len(list_characteristics) == (len(pol_data[0])-1):
-            for l in range(len(list_characteristics)):
-                f.write(f"{list_characteristics[l]} = {pol_data[0][l]}\n")
+            for i in range(len(list_characteristics)):
+                f.write(f"{list_characteristics[i]} = {pol_data[0][i]}\n")
    def _export_D90(self, study, repertory, qlog=None, name="0"):
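The " ".join((...)) rewrites in this hunk are presentation-only: joining one field per tuple element with single spaces rebuilds the same space-separated record the old f-strings produced. A quick equivalence check with made-up values (these are not values from the project):

    name, reach, start_rk, end_rk = "IC1", 3, 0.0, 1500.0
    concentration, eg, em, ed, rate = 0.1, 0.2, 0.3, 0.4, 0.5

    old_style = (f"{name} = {reach} {start_rk} {end_rk} "
                 f"{concentration} {eg} {em} {ed} {rate}\n")
    new_style = " ".join((f"{name}",
                          "=",
                          f"{reach}",
                          f"{start_rk}",
                          f"{end_rk}",
                          f"{concentration}",
                          f"{eg}",
                          f"{em}",
                          f"{ed}",
                          f"{rate}\n"))
    assert old_style == new_style  # same line, just built field by field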
@@ -391,7 +443,9 @@ class AdisTSwc(AdisTS):
        if qlog is not None:
            qlog.put("Export D90 file")
-        with adists_file_open(os.path.join(repertory, f"{name}.D90"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{name}.D90"), "w+"
+        ) as f:
            files.append(f"{name}.D90")
            f.write(f"*Diamètres caractéristiques du fond stable\n")
@@ -407,8 +461,11 @@ class AdisTSwc(AdisTS):
    def _export_d90_spec(self, study, d90_spec_data, f, qlog, name="0"):
        for d90_spec in d90_spec_data:
-            if (d90_spec.name is None) or (d90_spec.reach is None) or (d90_spec.start_rk is None) or \
-               (d90_spec.end_rk is None) or (d90_spec.d90 is None):
+            if (d90_spec.name is None
+                    or d90_spec.reach is None
+                    or d90_spec.start_rk is None
+                    or d90_spec.end_rk is None
+                    or d90_spec.d90 is None):
                return
            edges = study.river.enable_edges()
@@ -420,7 +477,12 @@ class AdisTSwc(AdisTS):
            if id_reach not in id_edges:
                return
-            f.write(f"{d90_spec.name} = {id_reach} {d90_spec.start_rk} {d90_spec.end_rk} {d90_spec.d90}\n")
+            f.write(" ".join((f"{d90_spec.name}",
+                              "=",
+                              f"{id_reach}",
+                              f"{d90_spec.start_rk}",
+                              f"{d90_spec.end_rk}",
+                              f"{d90_spec.d90}\n")))
    def _export_DIF(self, study, repertory, qlog=None, name="0"):
@@ -429,7 +491,9 @@ class AdisTSwc(AdisTS):
        if qlog is not None:
            qlog.put("Export DIF file")
-        with adists_file_open(os.path.join(repertory, f"{name}.DIF"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{name}.DIF"), "w+"
+        ) as f:
            files.append(f"{name}.DIF")
            f.write(f"*Définition des paramètres des fonctions de calcul du\n")
@@ -438,9 +502,15 @@ class AdisTSwc(AdisTS):
            difAdisTS = study.river.dif_adists.DIF_AdisTS_List
            if difAdisTS[0].method != "generique":
-                f.write(f"defaut = {difAdisTS[0].method} {difAdisTS[0].dif }\n")
+                f.write(" ".join((f"defaut = ",
+                                  f"{difAdisTS[0].method}",
+                                  f"{difAdisTS[0].dif}\n")))
            else:
-                f.write(f"defaut = {difAdisTS[0].method} {difAdisTS[0].dif} {difAdisTS[0].b} {difAdisTS[0].c}\n")
+                f.write(" ".join((f"defaut =",
+                                  f"{difAdisTS[0].method}",
+                                  f"{difAdisTS[0].dif}",
+                                  f"{difAdisTS[0].b}",
+                                  f"{difAdisTS[0].c}\n")))
            self._export_dif_spec(study, difAdisTS[0]._data, f, qlog)
@@ -449,8 +519,12 @@ class AdisTSwc(AdisTS):
    def _export_dif_spec(self, study, dif_spec_data, f, qlog, name="0"):
        for dif_spec in dif_spec_data:
-            if (dif_spec.reach is None) or (dif_spec.start_rk is None) or \
-               (dif_spec.end_rk is None) or (dif_spec.dif is None) or (dif_spec.b is None) or (dif_spec.c is None):
+            if (dif_spec.reach is None
+                    or dif_spec.start_rk is None
+                    or dif_spec.end_rk is None
+                    or dif_spec.dif is None
+                    or dif_spec.b is None
+                    or dif_spec.c is None):
                return
            edges = study.river.enable_edges()
@@ -463,10 +537,20 @@ class AdisTSwc(AdisTS):
                return
            if dif_spec.method != "generique":
-                f.write(f"{dif_spec.method} = {id_reach} {dif_spec.start_rk} {dif_spec.end_rk} {dif_spec.dif}\n")
+                f.write(" ".join((f"{dif_spec.method}",
+                                  "=",
+                                  f"{id_reach}",
+                                  f"{dif_spec.start_rk}",
+                                  f"{dif_spec.end_rk}",
+                                  f"{dif_spec.dif}\n")))
            else:
-                f.write(f"{dif_spec.method} = {id_reach} {dif_spec.start_rk} {dif_spec.end_rk} {dif_spec.dif}" +
-                        f"{dif_spec.b} {dif_spec.c}\n")
+                f.write(" ".join((f"{dif_spec.method}",
+                                  "=",
+                                  f"{id_reach}",
+                                  f"{dif_spec.start_rk}",
+                                  f"{dif_spec.end_rk}",
+                                  f"{dif_spec.dif}",
+                                  f"{dif_spec.b}",
+                                  f"{dif_spec.c}\n")))
    def _export_NUM(self, study, repertory, qlog=None, name="0"):
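These join-based writes rely on every field being a separate tuple element: if a comma is dropped between two adjacent literals, Python's implicit string concatenation silently merges them and the separating space is lost. A small illustration with placeholder values:

    # Adjacent string literals are concatenated at compile time, so a
    # missing comma merges two fields into one element (no space between).
    method, reach_id = "constante", 4

    with_comma = " ".join((f"{method}", "=", f"{reach_id}"))
    missing_comma = " ".join((f"{method}", "=" f"{reach_id}"))

    print(with_comma)     # constante = 4
    print(missing_comma)  # constante =4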
@@ -484,7 +568,9 @@ class AdisTSwc(AdisTS):
        if qlog is not None:
            qlog.put("Export NUM file")
-        with adists_file_open(os.path.join(repertory, f"{name}.NUM"), "w+") as f:
+        with adists_file_open(
+            os.path.join(repertory, f"{name}.NUM"), "w+"
+        ) as f:
            files.append(f"{name}.NUM")
            params = study.river.get_params(self.type).parameters
@@ -509,7 +595,9 @@ class AdisTSwc(AdisTS):
        return files
    def _export_outputrk(self, study, outputrk, f, qlog, name="0"):
-        if (outputrk.reach is None) or (outputrk.rk is None) or (outputrk.title is None):
+        if (outputrk.reach is None
+                or outputrk.rk is None
+                or outputrk.title is None):
            return
        edges = study.river.enable_edges()
@@ -529,7 +617,8 @@ class AdisTSwc(AdisTS):
    def read_bin(self, study, repertory, results, qlog=None, name="0"):
        repertory_results = os.path.join(repertory, "resultats")
-        files_bin_names = [el.split("/")[-1] for el in glob.glob(repertory_results+"/*.bin")]
+        files_bin_names = [el.split("/")[-1]
+                           for el in glob.glob(repertory_results+"/*.bin")]
        print("files names resultats: ", files_bin_names)
        ifilename = os.path.join(repertory_results, files_bin_names[0])
@@ -540,18 +629,18 @@ class AdisTSwc(AdisTS):
        with open(ifilename, 'rb') as f:
            # header
            # first line
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
            data = np.fromfile(f, dtype=np.int32, count=3)
            ibmax = data[0]  # number of reaches
            ismax = data[1]  # total number of cross sections
            kbl = data[2] * -1  # block size for .BIN header
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            # second line
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
            ibu = np.fromfile(f, dtype=np.int32, count=ibmax)
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            # third line
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
            data = np.fromfile(f, dtype=np.int32, count=2 * ibmax)
            is1 = np.zeros(ibmax, dtype=np.int32)
            is2 = np.zeros(ibmax, dtype=np.int32)
@@ -582,33 +671,40 @@ class AdisTSwc(AdisTS):
            logger.debug(f"read_bin: iprofiles = {iprofiles}")
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            # fourth line
            pk = np.zeros(ismax, dtype=np.float32)
            for k in range(0, ismax, kbl):
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                pk[k:min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32, count=min(k + kbl, ismax) - k)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                pk[k:min(k + kbl, ismax)] = np.fromfile(f,
+                                                        dtype=np.float32,
+                                                        count=min(
+                                                            k + kbl, ismax
+                                                        ) - k)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            # fifth line (useless)
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
            zmin_OLD = np.fromfile(f, dtype=np.float32, count=1)[0]
-            data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+            data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            # sixth line
            zf = np.zeros(ismax, dtype=np.float32)
            z = np.zeros(ismax * 3, dtype=np.float32)
            for k in range(0, ismax, kbl):
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                z[3 * k:3 * min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32,
-                                                               count=3 * (min(k + kbl, ismax) - k))
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                z[3 * k:3 * min(k + kbl, ismax)] = \
+                    np.fromfile(f,
+                                dtype=np.float32,
+                                count=3 * (min(k + kbl, ismax) - k)
+                                )
                # z[i*3+1] and z[i*3+2] are useless
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            zf = [z[i * 3] for i in range(ismax)]
            # seventh line (useless)
            for k in range(0, ismax, kbl):
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                zero = np.fromfile(f, dtype=np.int32, count=ismax)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
            # end header
        def ip_to_r(i):
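The paired (start)/(end) reads in read_bin look like the 4-byte record-length markers that frame each record of a Fortran unformatted sequential file: length, payload, length again. A self-contained sketch of that framing, assuming this layout and using a throwaway demo.bin file rather than the solver's actual .bin schema:

    import struct
    import numpy as np

    # Write one Fortran-style unformatted sequential record: a 4-byte
    # length marker, the payload, then the same marker again.
    payload = np.arange(5, dtype=np.float32)
    with open("demo.bin", "wb") as out:
        out.write(struct.pack("i", payload.nbytes))
        payload.tofile(out)
        out.write(struct.pack("i", payload.nbytes))

    # Read it back the way read_bin does: consume the (start) marker,
    # read the payload, then consume the (end) marker.
    with open("demo.bin", "rb") as f:
        np.fromfile(f, dtype=np.int32, count=1)             # (start)
        values = np.fromfile(f, dtype=np.float32, count=5)  # payload
        np.fromfile(f, dtype=np.int32, count=1)             # (end)

    print(values)  # [0. 1. 2. 3. 4.]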
@@ -623,7 +719,8 @@ class AdisTSwc(AdisTS):
        def ip_to_ri(r, i): return i - reach_offset[r]
-        path_files = map(lambda file: os.path.join(repertory_results, file), files_bin_names)
+        path_files = map(lambda file: os.path.join(
+            repertory_results, file), files_bin_names)
        data_tmp = {}
@@ -633,57 +730,63 @@ class AdisTSwc(AdisTS):
            with open(file_bin, 'rb') as f:
                # header
                # first line
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                data = np.fromfile(f, dtype=np.int32, count=3)
                ibmax = data[0]  # number of reaches
                ismax = data[1]  # total number of cross sections
                kbl = data[2] * -1  # block size for .BIN header
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                # second line
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                ibu = np.fromfile(f, dtype=np.int32, count=ibmax)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                # third line
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                data = np.fromfile(f, dtype=np.int32, count=2 * ibmax)
                is1 = np.zeros(ibmax, dtype=np.int32)
                is2 = np.zeros(ibmax, dtype=np.int32)
                for i in range(ibmax):
-                    is1[i] = data[2 * i]  # first section of reach i (FORTRAN numbering)
-                    is2[i] = data[2 * i + 1]  # last section of reach i (FORTRAN numbering)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    # first section of reach i (FORTRAN numbering)
+                    is1[i] = data[2 * i]
+                    # last section of reach i (FORTRAN numbering)
+                    is2[i] = data[2 * i + 1]
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                # fourth line
                pk = np.zeros(ismax, dtype=np.float32)
                for k in range(0, ismax, kbl):
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                    pk[k:min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32, count=min(k + kbl, ismax) - k)
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                    pk[k:min(k + kbl, ismax)] = np.fromfile(
+                        f, dtype=np.float32, count=min(k + kbl, ismax) - k
+                    )
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                # fifth line (useless)
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                zmin_OLD = np.fromfile(f, dtype=np.float32, count=1)[0]
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                # sixth line
                zf = np.zeros(ismax, dtype=np.float32)
                z = np.zeros(ismax * 3, dtype=np.float32)
                for k in range(0, ismax, kbl):
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
-                    z[3 * k:3 * min(k + kbl, ismax)] = np.fromfile(f, dtype=np.float32,
-                                                                   count=3 * (min(k + kbl, ismax) - k))
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
+                    z[3 * k:3 * min(k + kbl, ismax)] = np.fromfile(
+                        f, dtype=np.float32,
+                        count=3 * (min(k + kbl, ismax) - k)
+                    )
                    # z[i*3+1] and z[i*3+2] are useless
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                zf = [z[i * 3] for i in range(ismax)]
                # seventh line (useless)
                for k in range(0, ismax, kbl):
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                    zero = np.fromfile(f, dtype=np.int32, count=ismax)
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                # end header
                # data
-                data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                while data.size > 0:
                    ismax = np.fromfile(f, dtype=np.int32, count=1)[0]
                    t = np.fromfile(f, dtype=np.float64, count=1)[0]
-                    if not t in data_tmp[key_pol]:
+                    if t not in data_tmp[key_pol]:
                        data_tmp[key_pol][t] = {}
                    c = np.fromfile(f, dtype=np.byte, count=1)
                    # possible values :
@@ -692,24 +795,22 @@ class AdisTSwc(AdisTS):
                    phys_var = bytearray(c).decode()
                    data_tmp[key_pol][t][phys_var] = {}
                    real_data = np.fromfile(f, dtype=np.float32, count=ismax)
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (end)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (end)
                    data_tmp[key_pol][t][phys_var] = real_data
-                    data = np.fromfile(f, dtype=np.int32, count=1)  # line length (bytes) (start)
+                    data = np.fromfile(f, dtype=np.int32, count=1)  # (start)
                # end data
-        ###print("dta tmp AAA")
-        ###print("-----------")
-        ###print(data_tmp["AAA-silt"])
        pollutants_keys = list(data_tmp.keys())
        timestamps_keys = list(data_tmp[pollutants_keys[0]].keys())
-        phys_data_names = list(data_tmp[pollutants_keys[0]][timestamps_keys[0]].keys())
+        phys_data_names = list(data_tmp[pollutants_keys[0]]
+                               [timestamps_keys[0]].keys())
        type_pol_index = len(phys_data_names)
        # ##print("pol keys: ", pollutants_keys)
        # ##print("t keys: ", timestamps_keys)
        # ##print("phys var: ", phys_data_names)
-        ###print("phys data names mass zero:", list(data_tmp[pollutants_keys[1]][timestamps_keys[0]].keys()))
+        # ##print("phys data names mass zero:",
+        #     list(data_tmp[pollutants_keys[1]][timestamps_keys[0]].keys()))
        # print("set timestamps keys: ", set(timestamps_keys))
        # print("isma")
        # ##print("iprofiles: ", iprofiles)
@@ -726,8 +827,12 @@ class AdisTSwc(AdisTS):
            for t_data in timestamps_keys:
                pol_view = []
                for pol in pollutants_keys:
-                    #print("pol results: ", type(list(data_tmp[pol][t_data].values())))
-                    pol_view.append(tuple( list(map(lambda data_el: data_el[p_i], list(data_tmp[pol][t_data].values()))) ))
+                    # print("pol results: ",
+                    #       type(list(data_tmp[pol][t_data].values())))
+                    pol_view.append(tuple(list(map(
+                        lambda data_el: data_el[p_i],
+                        list(data_tmp[pol][t_data].values())
+                    ))))
                reach.set(p_i, t_data, "pols", pol_view)
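The pol_view construction above picks, for one profile index p_i, the p_i-th value of every physical variable of each pollutant and packs them into a tuple; the map/lambda form is equivalent to a plain generator expression. A sketch with toy data (the dictionary shape is assumed from the surrounding code, the values are made up):

    # Toy stand-in for data_tmp[pol][t_data]: phys_var -> per-section values.
    phys = {
        "C": [1.0, 2.0, 3.0],
        "M": [10.0, 20.0, 30.0],
    }
    p_i = 1

    via_map = tuple(list(map(lambda data_el: data_el[p_i],
                             list(phys.values()))))
    via_genexpr = tuple(values[p_i] for values in phys.values())

    assert via_map == via_genexpr == (2.0, 20.0)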
@@ -739,7 +844,7 @@ class AdisTSwc(AdisTS):
        print("'''''''reading bin files is ok =======")
        results.set("timestamps", set(timestamps_keys))
-        #print("------------------------set timestamps results meta data: ", set(timestamps_keys))
+        # print("set timestamps results meta data: ", set(timestamps_keys))
        # ##print("debug profiles for draw:")
        # ##print("------------------------")
@@ -787,16 +892,11 @@ class AdisTSwc(AdisTS):
                files = files + func(study, repertory, qlog, name=name)
            self.export_additional_files(study, repertory, qlog, name=name)
-            self._export_REP(study, repertory, mage_rep, files, qlog, name=name)
+            self._export_REP(study, repertory, mage_rep,
+                             files, qlog, name=name)
            return True
        except Exception as e:
            logger.error(f"Failed to export study to {self._type}")
            logger_exception(e)
            return False

View File

@@ -89,7 +89,7 @@ class InitialConditionsAdisTSWindow(PamhyrWindow):
        self._hash_data.append(data)
-        self._ics_adists_lst = study.river.initial_conditions_adists
+        self._ics_adists_lst = study.river.ic_adists
        self.setup_table()

View File

@@ -90,7 +90,7 @@ class TableModel(PamhyrTableModel):
        self._undo.push(
            AddCommand(
-                self._lst, row, self._data.initial_conditions_adists
+                self._lst, row, self._data.ic_adists
            )
        )

View File

@@ -200,7 +200,7 @@ class PollutantsWindow(PamhyrWindow):
        pollutant_id = self._pollutants_lst.get(row).id
        ics_adists = next(filter(lambda x: x.pollutant == pollutant_id,
-                                 self._study.river.initial_conditions_adists.lst))
+                                 self._study.river.ic_adists.lst))
        if self.sub_window_exists(
            InitialConditionsAdisTSWindow,

View File

@@ -41,7 +41,6 @@ class Config(SQL):
        self.filename = Config.filename()
        self.set_default_value()
        logging.info(f"Configuration file : {self.filename}")
        super(Config, self).__init__(filename=self.filename)

View File

@@ -299,6 +299,7 @@ def timestamp_to_old_pamhyr_date(time: int):
    return s
def timestamp_to_old_pamhyr_date_adists(time: int):
    t0 = datetime.fromtimestamp(0)
@@ -318,6 +319,7 @@ def timestamp_to_old_pamhyr_date_adists(time: int):
    return s
def get_user_name():
    if with_pwd:
        return pwd.getpwuid(os.getuid()).pw_gecos
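pw_gecos is the full-name field of the password-database entry, which is only available where the pwd module exists (Unix). The fallback branch of get_user_name is not shown in this hunk; a minimal sketch of the usual pattern, with getpass.getuser() as an assumed stand-in for whatever the real code does:

    import os

    try:
        import pwd
        with_pwd = True
    except ImportError:
        # pwd is Unix-only (absent on Windows).
        with_pwd = False

    def get_user_name():
        if with_pwd:
            # GECOS field: typically the user's full name.
            return pwd.getpwuid(os.getuid()).pw_gecos
        # Hypothetical fallback, not taken from the project's code.
        import getpass
        return getpass.getuser()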