diff --git a/HSP2/PLANK_Class.py b/HSP2/PLANK_Class.py
index eefccc16..c2c1a56f 100644
--- a/HSP2/PLANK_Class.py
+++ b/HSP2/PLANK_Class.py
@@ -1122,7 +1122,7 @@ def baldth(nsfg,no3,tam,po4,paldh,naldh,aldl,aldh,mbal,dox,anaer,oxald,bal,depco
# use unit death rate to compute death rate; dthbal is expressed
# as umoles of phosphorus per liter per interval
- return (ald * bal) + slof # dthbal
+ return (ald * bal) + slof, bal # dthbal
def balrx(self, ballit,tw,talgrl,talgrh,talgrm,malgr,cmmp, cmmnp,tamfg,amrfg,nsfg,cmmn,cmmlt,delt60,
@@ -1162,7 +1162,7 @@ def balrx(self, ballit,tw,talgrl,talgrh,talgrm,malgr,cmmp, cmmnp,tamfg,amrfg,nsf
grobal = self.grochk (po4,no3,tam,phfg,decfg,baco2,cvbpc,cvbpn,nsfg,nmingr,pmingr,cmingr,i0,grtotn,grobal)
# calculate benthic algae death, baldth only called here
- dthbal = self.baldth(nsfg,no3,tam,po4,paldh,naldh,aldl,aldh,mbal,dox,anaer,oxald,bal,depcor)
+ (dthbal, bal) = self.baldth(nsfg,no3,tam,po4,paldh,naldh,aldl,aldh,mbal,dox,anaer,oxald,bal,depcor)
bal += grobal # determine the new benthic algae population
diff --git a/HSP2tools/HDF5.py b/HSP2tools/HDF5.py
index e4ab929d..6b70e6cb 100644
--- a/HSP2tools/HDF5.py
+++ b/HSP2tools/HDF5.py
@@ -10,6 +10,9 @@
from threading import Lock
class HDF5:
+
+ REQUIRES_MAPPING = ['GQUAL','CONS']
+
def __init__(self, file_name:str) -> None:
self.file_name = file_name
self.aliases = self._read_aliases_csv()
@@ -17,26 +20,37 @@ def __init__(self, file_name:str) -> None:
self.lock = Lock()
self.gqual_prefixes = self._read_gqual_mapping()
+ self.cons_prefixes = self._read_cons_mapping()
+ self.iqual_prefixes = self._read_iqual_mapping()
- def _read_gqual_mapping(self) -> Dict[str,str]:
- """"GQUAL is based on number which corresponds to the parameter
- however which number is assoicated with which parameter changes
- based on the UCI file. Need to read from GQUAL tables
+ def _read_nqual_mapping(self, key:str, target_col:str, nquals:int = 10) -> Dict[str,str]:
+    """Some modules, like GQUAL, allow for a number which corresponds to the constituent
+        being modeled. However, which number is associated with which parameter changes
+        based on the UCI file. Need to read from the specification tables
"""
-
- gqual_prefixes = {}
- for i in range(1,7):
+ dict_mappings = {}
+ for i in range(1,nquals):
try:
with pd.HDFStore(self.file_name,'r') as store:
- df = pd.read_hdf(store,f'RCHRES/GQUAL/GQUAL{i}')
+ df = pd.read_hdf(store,f'{key}{i}')
row = df.iloc[0]
- gqid = row['GQID']
- gqual_prefixes[gqid] = str(i)
+ gqid = row[target_col]
+ dict_mappings[gqid] = str(i)
except KeyError:
- #Mean no gqual number (e.g. QUAL3) for this run
+            #Means no nqual number (e.g. GQUAL3) for this run
pass
- return gqual_prefixes
+ return dict_mappings
+ def _read_gqual_mapping(self) -> Dict[str,str]:
+ return self._read_nqual_mapping(R'RCHRES/GQUAL/GQUAL', 'GQID', 7)
+
+ def _read_cons_mapping(self) -> Dict[str,str]:
+ return self._read_nqual_mapping(R'RCHRES/CONS/CONS','CONID', 7)
+
+ def _read_iqual_mapping(self) -> Dict[str,str]:
+        """Placeholder - to be implemented similarly to GQUAL; for the current test just assume 1"""
+ return {'':'1'}
+
def _read_aliases_csv(self) -> Dict[Tuple[str,str,str],str]:
datapath = os.path.join(HSP2tools.__path__[0], 'data', 'HBNAliases.csv')
df = pd.read_csv(datapath)
@@ -52,11 +66,12 @@ def get_time_series(self, operation:str, id:str, constituent:str, activity:str)
#We still need a special case for IMPLAND/IQUAL and PERLAND/PQUAL
constituent_prefix = ''
- if activity == 'GQUAL':
+ if activity in self.REQUIRES_MAPPING:
constituent_prefix = ''
- for key, value in self.gqual_prefixes.items():
+ prefix_dict = getattr(self, f'{activity.lower()}_prefixes')
+ for key, value in prefix_dict.items():
if constituent.startswith(key):
- constituent_prefix = f'GQUAL{value}_'
+ constituent_prefix = f'{activity}{value}_'
constituent = constituent.replace(key,'')
key = (operation,id,activity)
diff --git a/HSP2tools/data/HBNAliases.csv b/HSP2tools/data/HBNAliases.csv
index 0896d00c..e136566c 100644
--- a/HSP2tools/data/HBNAliases.csv
+++ b/HSP2tools/data/HBNAliases.csv
@@ -127,3 +127,14 @@ RCHRES,PLANK,TP,PTOTCONC
RCHRES,PLANK,ROTORN,NTOTORGOUT
RCHRES,PLANK,ROTORP,PTOTORGOUT
RCHRES,PLANK,ROTORC,CTOTORGOUT
+IMPLND,IQUAL,IQADDR,IQADDRCOD
+IMPLND,IQUAL,IQADEP,IQADEPCOD
+IMPLND,IQUAL,IQADWT,IQADWTCOD
+IMPLND,IQUAL,SOQC,SOQCCOD
+IMPLND,IQUAL,SOQO,SOQOCOD
+IMPLND,IQUAL,SOQOC,SOQOCCOD
+IMPLND,IQUAL,SOQS,SOQSCOD
+IMPLND,IQUAL,SOQSP,SOQSPCOD
+IMPLND,IQUAL,SOQUAL,SOQUALCOD
+IMPLND,IQUAL,SQO,SQOCOD
+RCHRES,CONS,CON,CONC
diff --git a/tests/convert/regression_base.py b/tests/convert/regression_base.py
index 8b4a8978..7396f4a7 100644
--- a/tests/convert/regression_base.py
+++ b/tests/convert/regression_base.py
@@ -1,3 +1,4 @@
+from datetime import time
import os
import inspect
import webbrowser
@@ -6,10 +7,14 @@
import pandas as pd
import numpy as np
-from typing import Dict, List, Tuple
+from typing import Dict, List, Tuple, Union
from concurrent.futures import ThreadPoolExecutor, as_completed, thread
+
+OperationsTuple = Tuple[str,str,str,str,str]
+ResultsTuple = Tuple[bool,bool,bool,float]
+
class RegressTest(object):
def __init__(self, compare_case:str, operations:List[str]=[], activities:List[str]=[],
tcodes:List[str] = ['2'], ids:List[str] = [], threads:int=os.cpu_count() - 1) -> None:
@@ -38,13 +43,22 @@ def _init_files(self):
def _get_hbn_data(self, test_dir: str) -> None:
sub_dir = os.path.join(test_dir, 'HSPFresults')
+ self.hspf_data_collection = {}
for file in os.listdir(sub_dir):
if file.lower().endswith('.hbn'):
- self.hspf_data = HBNOutput(os.path.join(test_dir, sub_dir, file))
- break
- self.hspf_data.read_data()
-
- def _get_hdf5_data(self, test_dir: str) -> List[HDF5]:
+ hspf_data = HBNOutput(os.path.join(test_dir, sub_dir, file))
+ hspf_data.read_data()
+ for key in hspf_data.output_dictionary.keys():
+ self.hspf_data_collection[key] = hspf_data
+
+ def get_hspf_time_series(self, ops:OperationsTuple) -> Union[pd.Series,None]:
+ operation, activity, id, constituent, tcode = ops
+ key = f'{operation}_{activity}_{id}_{tcode}'
+ hspf_data = self.hspf_data_collection[key]
+ series = hspf_data.get_time_series(operation, int(id), constituent, activity, 'hourly')
+ return series
+
+ def _get_hdf5_data(self, test_dir: str) -> None:
sub_dir = os.path.join(test_dir, 'HSP2results')
for file in os.listdir(sub_dir):
if file.lower().endswith('.h5') or file.lower().endswith('.hdf'):
@@ -62,12 +76,12 @@ def should_compare(self, operation:str, activity:str, id:str, tcode:str) -> bool
return False
return True
- def generate_report(self, file:str, results: Dict[Tuple[str,str,str,str,str],Tuple[bool,bool,bool,float]]) -> None:
+ def generate_report(self, file:str, results: Dict[OperationsTuple,ResultsTuple]) -> None:
html = self.make_html_report(results)
self.write_html(file,html)
webbrowser.open_new_tab('file://' + file)
- def make_html_report(self, results_dict:Dict[Tuple[str,str,str,str,str],Tuple[bool,bool,bool,float]]) -> str:
+ def make_html_report(self, results_dict:Dict[OperationsTuple,ResultsTuple]) -> str:
"""populates html table"""
style_th = 'style="text-align:left"'
style_header = 'style="border:1px solid; background-color:#EEEEEE"'
@@ -75,13 +89,14 @@ def make_html_report(self, results_dict:Dict[Tuple[str,str,str,str,str],Tuple[bo
html = f'CONVERSION TEST REPORT
| {key} | ||||
|---|---|---|---|---|
| Constituent | Max Diff | Match | Note | |