Compare commits


No commits in common. "26d1d63edbfbe28d2476a01e65cbe1f4402eacb2" and "3a3c4d9d73ab77fd367140dda9e4d6339ffe6b21" have entirely different histories.

8 changed files with 66 additions and 136 deletions

View File

@ -30,8 +30,6 @@ logger = logging.getLogger()
class Results(SQLSubModel):
_sub_classes = [River]
def __init__(self, id=-1, study=None, solver=None,
repertory="", name="0"):
super(Results, self).__init__(
@ -71,13 +69,6 @@ class Results(SQLSubModel):
def is_valid(self):
return ("timestamps" in self._meta_data)
@property
def solver_name(self):
if self._solver is None:
return self._meta_data["solver_name"]
return self._solver.name
def get(self, key):
return self._meta_data[key]
@ -99,8 +90,7 @@ class Results(SQLSubModel):
execute(f"""
CREATE TABLE results{ext} (
{cls.create_db_add_pamhyr_id()},
solver_name TEXT NOT NULL,
solver_type TEXT NOT NULL,
solver TEXT NOT NULL,
study_revision INTEGER NOT NULL,
creation_data DATE NOT NULL,
nb_timestamps INTEGER NOT NULL,
@ -111,7 +101,7 @@ class Results(SQLSubModel):
)
""")
return True
return cls._create_submodel(execute)
@classmethod
def _db_update(cls, execute, version, data=None):
@ -124,63 +114,61 @@ class Results(SQLSubModel):
@classmethod
def _db_load(cls, execute, data=None):
new = None
new = []
study = data['study']
status = data['status']
scenario = data["scenario"]
loaded = data['loaded_pid']
table = execute(
values = execute(
"SELECT pamhyr_id, solver_name, solver_type, " +
"study_revision, creation_data, nb_timestamps, timestamps, " +
"scenario " +
"FROM results " +
f"WHERE scenario = {scenario.id}"
f"WHERE scenario = {scenario.id} " +
f"AND pamhyr_id NOT IN ({', '.join(map(str, loaded))}) " +
"ORDER BY ind ASC"
)
if len(table) > 1:
logger.warning("Multiple results for this scenario")
for v in table:
for v in values:
it = iter(v)
pid = next(it)
solver_name = next(it)
solver_type = next(it)
solver = next(it)
revision = next(it)
creation_date = next(it)
nb_timestamps = next(it)
timestamps_bytes = next(it)
owner_scenario = next(it)
new_results = cls(study=study)
new_results = cls(
id=pid, status=status,
owner_scenario=owner_scenario
)
new_results.set("solver_name", solver_name)
new_results.set("solver_type", solver_type)
new_results.set("study_revision", revision)
new_results.set("creation_date", creation_date)
sf = ">" + ''.join(itertools.repeat("d", nb_timestamps))
ts = struct.unpack(sf, timestamps_bytes)
sf = ">" + ''.join(itertools.repeat("d", len(nb_timestamps)))
ts = struct.unpack(sf, timestamp_bytes)
new_results.set("timestamps", ts)
data["timestamps"] = ts
new_results._river = River._db_load(execute, data)
new = new_results
loaded.add(pid)
new.append(new_results)
return new
def _db_save(self, execute, data=None):
if self._status.scenario.id != self._owner_scenario:
return
execute(
"DELETE FROM results " +
"DELETED FROM results " +
f"WHERE scenario = {self._owner_scenario}"
)
execute(
"DELETE FROM results_data " +
"DELETED FROM results_data " +
f"WHERE scenario = {self._owner_scenario}"
)
@ -198,12 +186,11 @@ class Results(SQLSubModel):
execute(
"INSERT INTO " +
"results (pamhyr_id, solver_name, solver_type, " +
"study_revision, creation_data, " +
"nb_timestamps, timestamps, " +
"scenario) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
"study_revision, creation_data, nb_timestamps, timestamps, " +
"scenario) VALUES (?, ?, ?, ?, ?, ?, ?)",
self._pamhyr_id, solver_name, solver_type,
self._status.scenario.id, self.get("creation_date"),
len(ts), struct.pack(sf, *ts), self._owner_scenario
self._owner_scenario.revision, self.get("creation_data"),
len(ts), struct.pack(sf, ts), self._owner_scenario
)
data["result"] = self._pamhyr_id

View File

@ -21,7 +21,6 @@ import itertools
from functools import reduce
from datetime import datetime
from Model.Scenario import Scenario
from Model.Tools.PamhyrDB import SQLSubModel
logger = logging.getLogger()
@ -63,10 +62,9 @@ class Profile(SQLSubModel):
return self._data[timestamp]
def get_key(self, key):
res = list(
return list(
map(lambda ts: self._data[ts][key], self._data)
)
return res
def get_ts_key(self, timestamp, key):
if timestamp in self._data:
@ -98,7 +96,10 @@ class Profile(SQLSubModel):
)
""")
return True
if ext == "_tmp":
return True
return cls._create_submodel(execute)
@classmethod
def _db_update(cls, execute, version, data=None):
@ -112,22 +113,19 @@ class Profile(SQLSubModel):
@classmethod
def _db_load(cls, execute, data=None):
new = []
status = data['status']
study = data['study']
reach = data['reach']
profile = data['profile']
status = data['status']
scenario = data["scenario"]
loaded = data['loaded_pid']
timestamps = data['timestamps']
values = execute(
"SELECT pamhyr_id, result, key, " +
"len_data, data, scenario " +
"reach, section, len_data, data, scenario " +
"FROM results_data " +
f"WHERE scenario = {scenario.id} " +
f"AND reach = {reach.pamhyr_id} " +
f"AND section = {profile.pamhyr_id}"
f"AND pamhyr_id NOT IN ({', '.join(map(str, loaded))})"
)
for v in values:
@ -136,11 +134,16 @@ class Profile(SQLSubModel):
pid = next(it)
result = next(it)
key = next(it)
reach = next(it)
section = next(it)
len_data = next(it)
data = next(it)
owner_scenario = next(it)
new_data = cls(profile, study)
new_data = cls(
id=pid, status=status,
owner_scenario=owner_scenario
)
sf = ">" + ''.join(itertools.repeat("f", len_data))
values = struct.unpack(sf, data)
@ -155,38 +158,26 @@ class Profile(SQLSubModel):
def get_keys(self):
return reduce(
lambda acc, ts: acc.union(self._data[ts].keys()),
self._data.keys(), set()
lambda acc, ts: acc.union(d[ts].keys())
)
def _db_save(self, execute, data=None):
logger.debug("Save profile...")
pid = self._pamhyr_id
result = data["result"]
keys = self.get_keys()
logger.debug(f"{keys}...")
for key in keys:
values = self.get_key(key)
if any(filter(lambda x: type(x) in [tuple, list], values)):
logger.debug(f"{key} : {len(values)} {values[0]}")
continue
for key in self.get_keys():
data = self.get_key(key)
values = list(map(float, values))
sf = ">" + ''.join(itertools.repeat("f", len(values)))
data_bytes = struct.pack(sf, *values)
sf = ">" + ''.join(itertools.repeat("f", len(data)))
data_bytes = struct.pack(sf, data)
execute(
"INSERT INTO " +
"results_data (pamhyr_id, result, " +
"reach, section, " +
"key, len_data, data, " +
"scenario) VALUES (?, ?, ?, ?, ?, ?, ?, ?)",
pid, result,
data["reach"].pamhyr_id,
self._profile.pamhyr_id,
key, len(values), data_bytes,
"results_data (pamhyr_id, result, , " +
"study_revision, key, len_data, data, " +
"scenario) VALUES (?, ?, ?, ?, ?, ?, ?)",
pid, result, self._owner_scenario.revision,
key, len(data), data_bytes,
self._owner_scenario
)
@ -194,8 +185,6 @@ class Profile(SQLSubModel):
class Reach(SQLSubModel):
_sub_classes = [Profile]
def __init__(self, reach, study):
super(Reach, self).__init__(
id=-1, status=study.status,
@ -245,30 +234,15 @@ class Reach(SQLSubModel):
@classmethod
def _db_load(cls, execute, data=None):
reach = data["reach"]
new_reach = cls(
data["reach"], data["study"]
)
for profile in reach.profiles:
data["profile"] = profile
new_reach._profiles.append(
Profile._db_load(execute, data)
)
return new_reach
return cls._db_load(execute, data)
def _db_save(self, execute, data=None):
logger.debug("Save reach...")
for profile in self._profiles:
data["profile"] = profile.geometry.pamhyr_id
profile._db_save(execute, data)
class River(SQLSubModel):
_sub_classes = [Reach]
def __init__(self, study):
super(River, self).__init__(
id=-1, status=study.status,
@ -319,19 +293,9 @@ class River(SQLSubModel):
@classmethod
def _db_load(cls, execute, data=None):
study = data["study"]
new_river = cls(study)
for reach in study.river.reachs():
data["reach"] = reach.reach
new_river._reachs.append(
Reach._db_load(execute, data)
)
return new_river
return cls._db_load(execute, data)
def _db_save(self, execute, data=None):
logger.debug("Save river...")
for reach in self._reachs:
data["reach"] = reach.geometry
data["reach"] = reach.geometry.pamhyr_id
reach._db_save(execute, data)
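
The get_keys and _db_save hunks above revolve around collecting the result keys present across all timestamps before packing each series. A rough self-contained sketch of the key-union step, assuming the profile data is a dict mapping timestamp to {key: value}:

from functools import reduce

# Illustrative only: union the result keys seen across all timestamps.
# 'data' stands in for Profile._data, i.e. {timestamp: {key: value}}.
def get_keys(data):
    return reduce(
        lambda acc, ts: acc.union(data[ts].keys()),
        data.keys(),
        set(),  # initial accumulator, so empty data yields an empty set
    )

# Example:
# get_keys({0.0: {"Z": 1.2}, 60.0: {"Z": 1.3, "Q": 5.0}}) == {"Z", "Q"}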

View File

@ -58,7 +58,6 @@ from Model.LateralContributionsAdisTS.LateralContributionsAdisTSList \
import LateralContributionsAdisTSList
from Model.D90AdisTS.D90AdisTSList import D90AdisTSList
from Model.DIFAdisTS.DIFAdisTSList import DIFAdisTSList
from Model.Results.Results import Results
logger = logging.getLogger()
@ -438,7 +437,6 @@ class River(Graph):
LateralContributionsAdisTSList,
D90AdisTSList,
DIFAdisTSList,
Results
]
def __init__(self, status=None):
@ -475,8 +473,6 @@ class River(Graph):
self._D90AdisTS = D90AdisTSList(status=self._status)
self._DIFAdisTS = DIFAdisTSList(status=self._status)
self._results = {}
@classmethod
def _db_create(cls, execute):
cls._create_submodel(execute)
@ -589,9 +585,6 @@ class River(Graph):
return new
def _db_load_results(self, execute, data=None):
self._results = Results._db_load(execute, data)
def _db_save(self, execute, data=None):
self._db_save_delete_artefact(execute, data)
@ -617,9 +610,6 @@ class River(Graph):
objs.append(self._D90AdisTS)
objs.append(self._DIFAdisTS)
if self._results is not None:
objs.append(self._results)
self._save_submodel(execute, objs, data)
return True
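
This file toggles whether Results is listed in River._sub_classes or handled through the dedicated _db_load_results hook. A hedged illustration of that registration pattern (the class names below are placeholders, not the Pamhyr classes):

# Illustrative sketch: a parent model creates its own table, then delegates
# table creation to each registered submodel class.
class SubModel:
    _sub_classes = []

    @classmethod
    def _create_submodel(cls, execute):
        for sub in cls._sub_classes:
            sub._db_create(execute)
        return True

class ResultsLike(SubModel):
    @classmethod
    def _db_create(cls, execute):
        execute("CREATE TABLE IF NOT EXISTS results_like (id INTEGER)")
        return True

class RiverLike(SubModel):
    _sub_classes = [ResultsLike]

    @classmethod
    def _db_create(cls, execute):
        execute("CREATE TABLE IF NOT EXISTS river_like (id INTEGER)")
        return cls._create_submodel(execute)

# RiverLike._db_create(print) would simply print both CREATE statements.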

View File

@ -353,10 +353,7 @@ class Study(SQLModel):
)
)
data = {
"study": new,
"status": new.status
}
data = {"status": new.status}
# Scenarios
new.scenarios = Scenarios._db_load(
@ -384,11 +381,10 @@ class Study(SQLModel):
# Load river data
new._river = River._db_load(
sql_exec, data=data
sql_exec,
data=data
)
new._river._db_load_results(sql_exec, data=data)
return new
def _save(self, progress=None):
@ -446,7 +442,7 @@ class Study(SQLModel):
)
self.commit()
def sql_save_request_count(self, *args, **kargs):
def sql_save_request_count(self):
return self._count()
def _count(self):

View File

@ -95,15 +95,11 @@ class SQLModel(SQL):
def _save_submodel(self, objs, data=None):
progress = data if data is not None else lambda: None
def fn(sql, *args, **kargs):
if "fetch_one" not in kargs:
kargs["fetch_one"] = False
if "commit" not in kargs:
kargs["commit"] = False
def fn(sql):
res = self.execute(
sql, *args, **kargs
sql,
fetch_one=False,
commit=False
)
progress()
return res
@ -121,7 +117,7 @@ class SQLModel(SQL):
def _count(self):
raise NotImplementedMethodeError(self, self._count)
def _save_count(self, objs, *args, data={}, **kargs):
def _save_count(self, objs, data={}):
counter = {
"insert": 0,
"update": 0,
@ -129,7 +125,7 @@ class SQLModel(SQL):
"other": 0,
}
def fn(sql, *args, **kargs):
def fn(sql):
if "insert" in sql.lower():
counter["insert"] = counter["insert"] + 1
elif "update" in sql.lower():

View File

@ -106,12 +106,12 @@ class SQL(object):
return value
@timer
def execute(self, cmd, *args, fetch_one=True, commit=False, **kargs):
logger.debug(f"SQL - {cmd} + {', '.join(map(str, args))}")
def execute(self, cmd, fetch_one=True, commit=False):
logger.debug(f"SQL - {cmd}")
value = None
try:
res = self._cur.execute(cmd, args)
res = self._cur.execute(cmd)
if commit:
self._db.commit()
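
The execute hunk above moves between string-only execution and parameter binding (cmd plus args, matching the '?' placeholders used by the INSERT statements earlier in this comparison). A self-contained sqlite3 sketch of the binding variant; the table and values are purely illustrative:

import sqlite3

db = sqlite3.connect(":memory:")
cur = db.cursor()
cur.execute("CREATE TABLE results (pamhyr_id INTEGER, solver_name TEXT)")

def execute(cmd, *args, fetch_one=True, commit=False):
    # args are bound to the '?' placeholders instead of being formatted
    # into the SQL string, which avoids quoting and injection issues.
    res = cur.execute(cmd, args)
    if commit:
        db.commit()
    return res.fetchone() if fetch_one else res.fetchall()

execute("INSERT INTO results VALUES (?, ?)", 1, "example_solver", commit=True)
print(execute("SELECT solver_name FROM results WHERE pamhyr_id = ?", 1))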

View File

@ -80,9 +80,7 @@ class TableModel(PamhyrTableModel):
return f"{v:.4f}"
elif self._opt_data == "solver":
if self._headers[column] == "solver":
v = self._lst[row]
if v is None:
v = self._data[0].solver_name
v = self._lst[row].name
return str(v)
elif self._opt_data == "raw_data":
p = self._lst[row]
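
The table model hunk above changes how the solver column is rendered: one side reads name directly from the solver object, the other falls back to the solver_name kept in the results metadata (cf. the solver_name property in the first file). A tiny illustrative sketch of that fallback, with hypothetical argument names:

# Illustrative only: prefer the live solver object, otherwise fall back to
# the name recorded with the results metadata.
def solver_display_name(solver, meta_data):
    if solver is None:
        return str(meta_data.get("solver_name", ""))
    return str(solver.name)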

View File

@ -95,11 +95,10 @@ class ResultsWindow(PamhyrWindow):
if trad is None:
trad = ResultsTranslate()
name = (
trad[self._pamhyr_name] + " - "
+ study.name + " - "
+ " - ".join([r.solver_name for r in self._results])
+ " - ".join([s.name for s in self._solvers])
)
super(ResultsWindow, self).__init__(