Commit 01d22b7f authored by Mathieu Istas's avatar Mathieu Istas
Browse files

Merge branch 'l1_analysis'

parents 04a59ff7 2827ebac
......@@ -117,4 +117,4 @@ pygeodyn/tests/dummy_files/*
# Ignore RST files generated by Sphinx
docs/pygeodyn.*.rst
docs/pygeodyn.rst
docs/modules.rst
\ No newline at end of file
docs/modules.rst
......@@ -178,11 +178,25 @@ List of possible parameters
- *COVOBS*, a stochastic model from [GJF13]_ (extended in [GBF15]_),
- *COVOBS_hdf5*, a more recent version of the stochastic model COVOBS,
- *chaos_hdf5*, an estimation of earth magnetic field using ground observatory and satellite data (SWARM and CHAMP missions)
- *GO_VO*, measurements of Ground Observatories and preprocessed satellite data (CHAMP, Swarm) under the form of Virtual Observatories spread all around the globe ([BHF18]_, [Hammer18]_).
- *GO_VO*, measurements of Ground Observatories and preprocessed satellite data (CHAMP, Swarm, Oersted, Cryosat and composite) under the form of Virtual Observatories spread all around the globe ([BHF18]_, [Hammer18]_).
- **Type**: *str*
- **Default value**: *COVOBS*
- obs_select
Only if obs_type is equal to GO_VO. Dictionary that defines the selection of ground and virtual observatories data used in the algorithm. For instance, to use the secular variation (*SV*) of the ground observatories and both the magnetic field (*MF*) and the *SV* for the CHAMP satellite, define "obs_select dict {'GROUND':'SV', 'CHAMP':('MF', 'SV')}". Possible observatories are *GROUND*, *CHAMP*, *SWARM*, *OERSTED*, *CRYOSAT* and *COMPOSITE*, and the corresponding file names should start with *GO*, *CH*, *SW*, *OR*, *CR* or *CO*.
The default value searches for all available data in the folder, except *COMPOSITE*.
- **Type**: *dict*
- **Default value**: *all*
- discard_high_lat
All observations that are located at a geomagnetic latitude higher than the value of discard_high_lat will not be taken into account. Inclination of the dipole is computed using the first three Gauss coefficients from Chaos 7 data at the closest time from the observations. The default value is 0 which means no observations will be discarded.
- **Type**: *float*
- **Default value**: *0*
- obs_mod_errors_dir
.. warning::
......
......@@ -138,8 +138,7 @@ class AugkfAlgo(GenericAlgo):
del realisations_U
#ensure realistations_U is deleted
locs = locals().items()
for name, var in locs:
for name, var in list(locals().items()):
if name == 'realistations_U':
logging.debug('realistations_U still exist, {0} {1} {2}'.format(name, var.shape, sys.getrefcount(var)))
......
......@@ -35,7 +35,7 @@ class AugkfAnalyser(GenericComputer):
nb_obs_mf, nb_obs_sv = self.extract_observations()
if nb_obs_mf == 0:
logging.error(
"No observation was extracted for MF! Analyses will be completely skipped"
"No observation was extracted for MF! Analyses on b will be completely skipped"
)
elif nb_obs_sv == 0:
logging.error(
......@@ -52,8 +52,7 @@ class AugkfAnalyser(GenericComputer):
# Take full Pzz initially for P^a_zz
self.Pzz_analyse = self.scaled_Pzz
# Same for the initial covariance matrix for b:
self.Pbb_analyse = None # will be equal to self.algo.covariance_matrices["b,b"] in first step
self.Pbb_analyse = None
self.Pbb_forecast = None # Will be computed in the first analysis step
# Initialize DivH(uBr), necessary for first analysis step
......@@ -62,8 +61,16 @@ class AugkfAnalyser(GenericComputer):
# Store inverse of covariance matrices for misfit computations
self.inv_cov_matrices = self.compute_inv_cov_matrices()
def invalid_misfits(self):
return {k: np.nan for k in ["MF", "SV", "ER", "U"]}
def invalid_misfits(self, keys):
    """
    Marks the misfits of the given measures as invalid (np.nan).

    :param keys: iterable of measure names to invalidate; each must be one of
        "MF", "SV", "ER" or "U"
    :return: the dict of current misfits with the given keys set to np.nan.
        If no misfit dict exists yet, a fresh dict containing only the
        requested keys is returned.
    """
    possible_keys = ["MF", "SV", "ER", "U"]
    err = 'invalid key passed in invalid_misfits(), keys: {}, possible keys: {}'
    assert all([k in possible_keys for k in keys]), err.format(keys, possible_keys)
    try:
        for k in keys:
            self.current_misfits[k] = np.nan
        return self.current_misfits
    except AttributeError:
        # self.current_misfits does not exist yet (first analysis step).
        # NOTE: the original caught NameError, which attribute access never
        # raises, so this fallback was dead code.
        return {k: np.nan for k in keys}
def extract_observations(self):
"""
......@@ -98,7 +105,6 @@ class AugkfAnalyser(GenericComputer):
),
measure_type,
)
return (
len(self.measure_observations["MF"].keys()),
len(self.measure_observations["SV"].keys()),
......@@ -154,50 +160,58 @@ class AugkfAnalyser(GenericComputer):
)
)
# Set the core state to analyse to the forecast core state at t
# Note : do a copy to avoid replacing forecast data as ndarrays are mutable
ana_core_state = input_core_state.copy()
try:
mf_observations = self.measure_observations["MF"][t]
ana_b = True
except KeyError:
self.skipped_analysis_b += 1
logging.critical(
"No observation data exists for B "
"at {} !. Analysis was skipped. Observation available at {}".format(t, self.measure_observations["MF"].keys())
)
self.current_misfits = self.invalid_misfits()
return input_core_state
ana_core_state.B = self.analyse_B(input_core_state, mf_observations)
self.skipped_analysis_b = 0 # analysis on b is done, count of skipped analysis goes back to 0
# Compute A(B) by radmats on mean analysed core state
logging.debug("Computing DivH(uBr)...")
self.Ab = self.compute_Ab(ana_core_state.mean(axis=0))
# Start analyse of Z=[U ER] (or Z = [PCA_U ER])
logging.debug("Getting best linear unbiased estimate of Z...")
self.current_misfits = self.invalid_misfits(keys=('MF',))
ana_b = False
# Extract the observations for SV
try:
sv_observations = self.measure_observations["SV"][t]
# Compute A(B) by radmats on mean analysed core state
ana_z_sv = True
except KeyError:
self.skipped_analysis_z += 1
logging.critical(
"No observation data exists for SV "
"at {} ! Analysis was skipped.".format(t)
)
self.current_misfits = self.invalid_misfits()
self.current_misfits = self.invalid_misfits(keys=("SV", "ER", "U"))
ana_z_sv = False
if not ana_b and not ana_z_sv: # no analysis are done
# A(b) is not computed and self.Ab value correspond to the last analysis on b
return input_core_state
ana_core_state = self.analyse_augmented_state(
ana_core_state, sv_observations, self.Ab
)
# Set the core state to analyse to the forecast core state at t
# Note : do a copy to avoid replacing forecast data as ndarrays are mutable
ana_core_state = input_core_state.copy()
if ana_b: # doing analysis on b
ana_core_state.B = self.analyse_B(input_core_state, mf_observations)
self.skipped_analysis_b = 0 # analysis on b is done, count of skipped analysis goes back to 0
# Start analyse of Z=[U ER] (or Z = [PCA_U ER])
logging.debug("Getting best linear unbiased estimate of Z...")
ana_core_state = self.analyse_SV(ana_core_state, sv_observations, self.Ab)
logging.debug("Computing DivH(uBr)...")
# ana_core_state is the input core state if the analysis on b has not been done
self.Ab = self.compute_Ab(ana_core_state.mean(axis=0))
if ana_z_sv:# perform analysis on u, e and sv
ana_core_state = self.analyse_augmented_state(
ana_core_state, sv_observations, self.Ab
)
ana_core_state = self.analyse_SV(ana_core_state, sv_observations, self.Ab)
self.skipped_analysis_z = 0 # analysis on u, e and sv is done, count of skipped analysis goes back to 0
self.skipped_analysis_z = 0 # analysis on u, e and sv is done, count of skipped analysis goes back to 0
# End of analysis : return the new core_state
return ana_core_state
......@@ -242,9 +256,17 @@ class AugkfAnalyser(GenericComputer):
cubic_term *= (self.cfg.decimal_dt_a / N)**3 * (S(N2-1) - S(N1-2))
if self.Pbb_analyse is None:
return self.algo.covariance_matrices["b,b"] + quadratic_term + cubic_term
else:
return np.diag(self.Pbb_analyse) + quadratic_term + cubic_term
# Pbb_analyse is initialized from the input_core_state, which is either
# initialized from an init_file or the priors with noise.
if self.cfg.core_state_init == 'from_file':
self.Pbb_analyse = (1.0 / (self.algo.nb_realisations - 1)
* common.compute_cov_matrix_from_reals(input_core_state.B))
else: # self.core_state_init = 'normal' or 'constant'
self.Pbb_analyse = self.algo.covariance_matrices["b,b"]
P_bb = np.diag(np.diag(self.Pbb_analyse)) + quadratic_term + cubic_term
# enforce symmetry of matrix at all steps.
return (P_bb + P_bb.T) / 2
def analyse_B(self, input_core_state, mf_observations):
"""
......@@ -262,30 +284,38 @@ class AugkfAnalyser(GenericComputer):
Rbb = mf_observations.errors
self.Pbb_forecast = self.cov_mat_b_evolution(input_core_state)
Kbb = common.compute_Kalman_gain_matrix(
self.Pbb_forecast, Hb, Rbb
)
# Updates the B part of the core_state by the result of the Kalman filter for each model
logging.debug("Getting best linear unbiased estimate of B...")
analysed_B = np.zeros_like(input_core_state.B)
# NOTE TO SELF: Can this loop be replaced by NumPy broadcast ?
for i_real in range(self.algo.nb_realisations):
analysed_B[i_real] = common.get_BLUE(
input_core_state.B[i_real],
mf_observations.data[i_real],
self.Pbb_forecast,
Hb,
Rbb,
K=Kbb,
if self.cfg.kalman_norm == 'l2': # for non least square norm, iteration are needed
Kbb = common.compute_Kalman_gain_matrix(
self.Pbb_forecast, Hb, Rbb
)
for i_real in range(self.algo.nb_realisations):
analysed_B[i_real] = common.get_BLUE(
input_core_state.B[i_real],
mf_observations.data[i_real],
self.Pbb_forecast,
Hb,
Rbb,
K=Kbb,
)
elif self.cfg.kalman_norm == 'huber':
# compute inverse of P_bb before loop on reals using its symmetry
P_eig_val, P_eig_vec = np.linalg.eigh(self.Pbb_forecast)
P_eig_val[P_eig_val < 1e-10] = 1e-10 # in case if matrix is not full rank, which should not happen
Pbb_inv = P_eig_vec @ np.diag(1 / P_eig_val) @ P_eig_vec.T
for i_real in range(self.algo.nb_realisations):
analysed_B[i_real] = common.compute_Kalman_huber(input_core_state.B[i_real], mf_observations.data[i_real],
Pbb_inv, Hb, Rbb)
else:
raise ValueError('Invalide value of param kalman_norm, should be equal to huber or l2, got {}'.format(self.cfg.kalman_norm))
# Compute the misfits for B (Y - HX)
HX_b = np.transpose(np.matmul(Hb, np.transpose(analysed_B)))
inv_Rbb = linalg.inv(Rbb)
self.current_misfits["MF"] = common.compute_misfit(
mf_observations.data, HX_b, inv_Rbb
mf_observations.data, HX_b, linalg.inv(Rbb)
)
# Update Pbb_analyse with the resulting covariances of the analyse
......@@ -296,6 +326,7 @@ class AugkfAnalyser(GenericComputer):
return analysed_B
def analyse_augmented_state(self, input_core_state, sv_observations, Ab):
"""
Returns the analysed data for the augmented state Z = [U ER] by a BLUE given the observations.
......@@ -325,25 +356,36 @@ class AugkfAnalyser(GenericComputer):
Pzz_forecast = self.scaled_Pzz + np.diag(np.diag(self.Pzz_analyse))
# Kalman gain matrix can be computed outside the loop
Kzz = common.compute_Kalman_gain_matrix(
Pzz_forecast, complete_H, sv_observations.errors
)
if self.cfg.kalman_norm == 'l2':
Kzz = common.compute_Kalman_gain_matrix(
Pzz_forecast, complete_H, sv_observations.errors
)
else:
# Inverse of covariance matrix can be computed outside the loop
P_eig_val, P_eig_vec = np.linalg.eigh(Pzz_forecast)
P_eig_val[P_eig_val < 1e-10] = 1e-10 # in case if matrix is not full rank, which should not happen
Pzz_inv = P_eig_vec @ np.diag(1 / P_eig_val) @ P_eig_vec.T
for i_real in range(self.algo.nb_realisations):
# Build the observation for Z: HZ=SV
real_core_state = input_core_state[i_real]
# Kalman filter on Z = [U ER]
Z_to_analyse = np.concatenate((real_core_state.U, real_core_state.ER))
real_core_state.Z = common.get_BLUE(
Z_to_analyse,
sv_observations.data[i_real],
Pzz_forecast,
complete_H,
sv_observations.errors,
K=Kzz,
)
if self.cfg.kalman_norm == 'l2': # for non least square norm, iteration are needed
real_core_state.Z = common.get_BLUE(
Z_to_analyse,
sv_observations.data[i_real],
Pzz_forecast,
complete_H,
sv_observations.errors,
K=Kzz,
)
elif self.cfg.kalman_norm == 'huber':
real_core_state.Z = common.compute_Kalman_huber(Z_to_analyse, sv_observations.data[i_real], Pzz_inv,
complete_H, sv_observations.errors)
else:
raise ValueError('Invalide value of param kalman_norm, should be equal to huber or l2, got {}'.format(self.cfg.kalman_norm))
# Update ana_core_state with the result
analysed_core_state[i_real] = real_core_state.copy()
......@@ -386,9 +428,8 @@ class AugkfAnalyser(GenericComputer):
HX_sv = np.transpose(
np.matmul(sv_observations.operator, np.transpose(analysed_core_state.SV))
)
inv_Rzz = linalg.inv(sv_observations.errors)
self.current_misfits["SV"] = common.compute_misfit(
sv_observations.data, HX_sv, inv_Rzz
sv_observations.data, HX_sv, linalg.inv(sv_observations.errors)
)
return analysed_core_state
......@@ -463,14 +504,15 @@ class AugkfAnalyserPCA(AugkfAnalyser):
Class that handles the analyses of the Augmented State Kalman Filter algorithm with DIFF treated as a contribution to ER.
"""
def invalid_misfits(self):
def invalid_misfits(self, keys):
    """
    Adds PCA_U to the dict storing misfits

    :param keys: iterable of measure names to invalidate
    :return: dict
    """
    misfits = super().invalid_misfits(keys)
    if 'U' in keys:
        # The PCA representation of U is invalidated together with U itself.
        misfits["PCA_U"] = np.nan
    return misfits
......@@ -521,10 +563,16 @@ class AugkfAnalyserPCA(AugkfAnalyser):
# Update P^f_zz for AugKF (P^a_zz is diag matrix for now)
Pzz_forecast = self.scaled_Pzz + np.diag(np.diag(self.Pzz_analyse))
# Kalman gain matrix can be computed outside the loop
Kzz = common.compute_Kalman_gain_matrix(
Pzz_forecast, complete_H, sv_observations.errors
)
if self.cfg.kalman_norm == 'l2':
# Kalman gain matrix can be computed outside the loop
Kzz = common.compute_Kalman_gain_matrix(
Pzz_forecast, complete_H, sv_observations.errors
)
else:
# Inverse of covariance matrix can be computed outside the loop
P_eig_val, P_eig_vec = np.linalg.eigh(Pzz_forecast)
P_eig_val[P_eig_val < 1e-10] = 1e-10 # in case if matrix is not full rank, which should not happen
Pzz_inv = P_eig_vec @ np.diag(1 / P_eig_val) @ P_eig_vec.T
for i_real in range(self.algo.nb_realisations):
real_core_state = input_core_state[i_real]
......@@ -536,14 +584,20 @@ class AugkfAnalyserPCA(AugkfAnalyser):
Z_to_analyse = np.concatenate(
(real_core_state.getMeasure("PCA_U"), real_core_state.ER)
)
analysed_Z = common.get_BLUE(
Z_to_analyse,
obs_z,
Pzz_forecast,
complete_H,
sv_observations.errors,
K=Kzz,
)
if self.cfg.kalman_norm == 'l2': # for non least square norm, iteration are needed
analysed_Z = common.get_BLUE(
Z_to_analyse,
obs_z,
Pzz_forecast,
complete_H,
sv_observations.errors,
K=Kzz,
)
elif self.cfg.kalman_norm == 'huber':
analysed_Z = common.compute_Kalman_huber(Z_to_analyse, obs_z, Pzz_inv, complete_H,
sv_observations.errors)
else:
raise ValueError('Invalid value of param kalman_norm, should be equal to huber or l2, got {}'.format(self.cfg.kalman_norm))
# Update real_core_state with the result
analysed_pca = analysed_Z[:Npca]
real_core_state.setMeasure("PCA_U", analysed_pca)
......
......@@ -5,12 +5,12 @@
import math
import numpy as np
from scipy import linalg
import fortran
import geodyn_fortran as fortran
from .constants import r_earth
from . import utilities
def compute_direct_obs_operator(positions, max_degree):
def compute_direct_obs_operator(positions, max_degree, eps=1e-7):
"""
Computes the direct observation operator H of a measure of given max degree for a list of given positions.
......@@ -38,6 +38,11 @@ def compute_direct_obs_operator(positions, max_degree):
# Compute prefactors
r_earth_on_r = r_earth / r
sin_th = math.sin(th)
if abs(sin_th) < eps: # theta is either O or pi
sin_th_0 = True
sign_cos_th = np.sign(math.cos(th))
else:
sin_th_0 = False
if th not in computed_plms:
# Evaluate the Legendre coeffs at a given th using the Fortran function
......@@ -58,19 +63,19 @@ def compute_direct_obs_operator(positions, max_degree):
if coef == "g":
H[i, k] = (l + 1) * r_earth_on_r ** (l + 2) * Plm_at_z * cos_mph
H[i + 1, k] = - r_earth_on_r ** (l + 2) * dPlm_at_z * cos_mph
if sin_th == 0:
if sin_th_0:
assert sin_mph == 0 or Plm_at_z == 0, 'One of these values should be 0, {}, {}'.format(sin_mph , Plm_at_z)
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * dPlm_at_z * sin_mph
continue
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * Plm_at_z * sin_mph / sin_th
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * sign_cos_th * dPlm_at_z * sin_mph
else:
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * Plm_at_z * sin_mph / sin_th
else:
H[i, k] = (l + 1) * r_earth_on_r ** (l + 2) * Plm_at_z * sin_mph
H[i + 1, k] = -r_earth_on_r ** (l + 2) * dPlm_at_z * sin_mph
if sin_th == 0:
if sin_th_0:
assert cos_mph == 0 or Plm_at_z == 0, 'One of these values should be 0, {}, {}'.format(cos_mph , Plm_at_z)
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * dPlm_at_z * cos_mph
continue
H[i + 2, k] = -r_earth_on_r ** (l + 2) * m * Plm_at_z * cos_mph / sin_th
H[i + 2, k] = -r_earth_on_r ** (l + 2) * m * sign_cos_th * dPlm_at_z * cos_mph
else:
H[i + 2, k] = -r_earth_on_r ** (l + 2) * m * Plm_at_z * cos_mph / sin_th
return H
......@@ -306,6 +311,34 @@ def compute_Kalman_gain_matrix(P, H, R, use_cholesky=False):
return np.transpose(linalg.solve(np.transpose(matrix_to_invert), np.transpose(PHT)))
def compute_Kalman_huber(b_f, y, Pbb_inv, H, Rbb, max_steps=50):
    """
    Computes a robust (Huber-norm) Kalman analysis by iteratively reweighted
    least squares (IRLS).

    Residuals are normalised by the diagonal observation errors; any
    normalised residual larger than the Huber threshold c is down-weighted,
    so outlying observations influence the solution less than in the plain
    l2 (BLUE) analysis. With all weights equal to one the update reduces to
    the least-squares solution.

    :param b_f: forecast state (1D array, dimension Nb)
    :param y: observation vector (1D array, dimension No)
    :param Pbb_inv: inverse of the forecast covariance matrix (Nb x Nb)
    :param H: observation operator (No x Nb)
    :param Rbb: observation error covariance matrix (No x No); only its
        diagonal is used for the normalisation and weighting
    :param max_steps: maximum number of IRLS iterations
    :return: analysed state (1D array, dimension Nb)
    """
    sq_R = 1 / np.sqrt(np.diag(Rbb))
    # Pre-scale H by the observation errors once, outside the iteration loop.
    R_H = np.multiply(sq_R[:, None], H)
    H_R = R_H.T
    W = np.ones(Rbb.shape[0])
    ones_min = np.ones(Rbb.shape[0])
    c = 1.5  # Huber threshold, in units of normalised residuals
    cutoff = 1e-8  # just to avoid division by zero
    y_minus_H_b = y - H @ b_f
    b_kplus1 = b_f  # safe return value if max_steps <= 0
    b_k = b_f
    for i in range(max_steps):
        b_kplus1 = b_f + np.linalg.solve(H_R @ np.multiply(W[:, None], R_H) + Pbb_inv,
                                         np.multiply(H_R, (W * sq_R)[None, :])) @ y_minus_H_b
        # Stop when the largest update is negligible relative to the state
        # magnitude. Absolute values are needed so that a large *negative*
        # change also counts (the original compared signed differences).
        if i and np.amax(np.abs(b_kplus1 - b_k)) < 1e-4 * np.amax(np.abs(b_k)):
            break
        b_k = b_kplus1
        epsilon = np.abs(y - H @ b_kplus1) * sq_R
        epsilon[epsilon < cutoff] = cutoff
        # Residuals above c are down-weighted; otherwise weight stays at one.
        W = np.minimum(c / epsilon, ones_min)
        # Conditions to break the loop to make it faster.
        if np.all(W == ones_min):  # nothing will change: LS solution, no outliers
            break
    return b_kplus1
def compute_cov_matrix_from_reals(real_A, real_B=None):
"""
Returns (real_A - <real_A>)^T * (real_B - <real_B>). If real_B is None, replaces it by real_A.
......@@ -325,7 +358,9 @@ def compute_cov_matrix_from_reals(real_A, real_B=None):
mean_B = np.mean(real_B, axis=0)
dev_B = real_B - mean_B
else:
dev_B = dev_A
P = dev_A.T @ dev_A
# enforce symmetry of matrix
return (P + P.T) / 2
return np.matmul(np.transpose(dev_A), dev_B)
......
import numpy as np
from ..corestates import with_core_state_of_dimensions
import fortran
import geodyn_fortran as fortran
from ..inout.config import ComputationConfig
......@@ -64,4 +64,3 @@ class GenericComputer(object):
# Return A(b)
return np.transpose(AbT)
......@@ -24,7 +24,7 @@ def config_from_hdf5_file(hdf5_file):
'Lb', 'Lu', 'Lsv', 'TauU', 'TauE',
'prior_dir', 'prior_type', 'obs_dir', 'obs_type',
'Nth_legendre']
optional_keys = ['pca_norm', 'init_file']
optional_keys = ['pca_norm', 'init_file', 'obs_select', 'discard_high_lat']
for key in optional_keys:
if key in file_attrs:
keys_without_conversion.append(key)
......@@ -60,16 +60,18 @@ def find_obs_type(obs_dir):
for f in os.listdir(obs_dir):
if isfile(join(obs_dir, f)) and "README" not in f:
first_file = f
extension = first_file.split('.')[-1]
if extension in ['pdf', 'jpg', 'png']:
continue
break
# find extension of file
extension = first_file.split('.')[-1]
if extension == 'hdf5':
return 'COVOBS_hdf5'
elif extension == 'dat':
return 'COVOBS'
elif extension == 'obs':
elif extension == 'obs' or extension == 'cdf':
return 'GO_VO'
else:
raise TypeError("obs_type could not be deduced, please set a value in configuration file")
......@@ -126,6 +128,7 @@ class ComputationConfig(object):
self.init_paths()
self.init_times()
self.init_corestate_inits()
self.init_observations()
@property
def Nb(self):
......@@ -151,6 +154,10 @@ class ComputationConfig(object):
logging.warning("Compute ER was not set in the config file! Assuming True.")
self.compute_e = 1
if not hasattr(self, 'kalman_norm'):
logging.warning("Kalman_norm was not set in the config file! Assuming l2 norm.")
self.kalman_norm = 'l2'
if hasattr(self, 'compute_diff'):
logging.warning("Compute DIFF is DEPRECATED and will not be used!")
......@@ -217,6 +224,17 @@ class ComputationConfig(object):
self.obs_type = find_obs_type(self.obs_dir)
logging.warning("obs_type has been set to {0}".format(self.obs_type))
def init_observations(self):
    """
    Initialises the observation-selection parameters.

    For GO_VO observations, sets default values for obs_select (which
    observatories/measures to use) and discard_high_lat (geomagnetic
    latitude threshold above which observations are dropped) when they were
    not given in the configuration file. For any other observation type,
    obs_select is set to None.
    """
    if self.obs_type == 'GO_VO':
        if not hasattr(self, 'obs_select'):
            # NOTE(review): the builtin `all` is used here as a sentinel
            # meaning "take every obs file in the observation directory" —
            # confirm downstream code tests for it (e.g. `obs_select is all`).
            logging.warning('No value of obs_select found, default takes all obs files in observation directory')
            self.obs_select = all
        if not hasattr(self, 'discard_high_lat'):
            logging.warning("Suppression of high latitude data was not set, assuming no suppression")
            self.discard_high_lat = False
    else:
        self.obs_select = None
def init_physical_constants(self):
"""
Builds the physical constants of the computation.
......@@ -278,6 +296,7 @@ class ComputationConfig(object):
raise ValueError("All flows should be taken into account and not only a subset with coupled earth")
if self.fraction_u > 1 or self.fraction_u < 0:
raise ValueError("fraction_u should be a float between 0 and 1, got {} instead".format(self.fraction_u))
logging.info('Taking {} % of available realizations of flows'.format(self.fraction_u * 100))
if "dt_sampling" not in self.__dict__:
logging.warning("No prior sampling time was read ! Using default value 60 months.")
......@@ -333,4 +352,9 @@ class ComputationConfig(object):
try:
hdf5file.attrs[k] = v
except TypeError:
logging.error('Config member {} was not saved as hdf5 attribute ({} type not supported by this format)'.format(k, type(v)))
# most likely a dict, try to convert to str
try:
hdf5file.attrs[k] = str(v)
except TypeError:
err_mess = 'Config member {} was not saved as hdf5 attribute ({} type not supported and cannot be converted to string)'
logging.error(err_mess.format(k, type(v)))
#-*- coding: utf-8 -*-
""" Contains the functions building observations """
import os.path
import os, os.path
import numpy as np
import h5py
import time
import math
import logging
import glob
......@@ -18,7 +17,7 @@ class Observation:
* Observation.operator
* Observation.errors
"""
def __init__(self, obs_data, H, R):
def __init__(self, obs_data, H, R, pos_go_vo=None):
"""
The observation is expected to be used as Y = HX with Y the observation data, X the observed data under spectral form and H the observation operator.
......@@ -40,6 +39,8 @@ class Observation:
if R.shape != (self.nb_obs, self.nb_obs):
raise ValueError('Observation errors\' dimensions must be equal to number of observations ({}).Got {} dims instead.'.format(self.nb_obs, R.shape))
self.errors = R
# for GVOs, it is useful for a few applications to save positions
self.positions = pos_go_vo
def build_go_vo_observations(cfg, nb_realisations, measure_type):
......@@ -59,97 +60,122 @@ def build_go_vo_observations(cfg, nb_realisations, measure_type):
"""
datadir = cfg.obs_dir
obs_types_to_use = ['GO', 'VO_CHAMP', 'VO_SWARM', 'VO_CRYO']
# if adding a new satellite, the first two letters of the cdf file should
# match the first two letters after '_' in the variable obs_types_to_use.