Commit 01d22b7f authored by Mathieu Istas's avatar Mathieu Istas
Browse files

Merge branch 'l1_analysis'

parents 04a59ff7 2827ebac
...@@ -117,4 +117,4 @@ pygeodyn/tests/dummy_files/* ...@@ -117,4 +117,4 @@ pygeodyn/tests/dummy_files/*
# Ignore RST files generated by Sphinx # Ignore RST files generated by Sphinx
docs/pygeodyn.*.rst docs/pygeodyn.*.rst
docs/pygeodyn.rst docs/pygeodyn.rst
docs/modules.rst docs/modules.rst
\ No newline at end of file
...@@ -178,11 +178,25 @@ List of possible parameters ...@@ -178,11 +178,25 @@ List of possible parameters
- *COVOBS*, a stochastic model from [GJF13]_ (extended in [GBF15]_), - *COVOBS*, a stochastic model from [GJF13]_ (extended in [GBF15]_),
- *COVOBS_hdf5*, a more recent version of the stochastic model COVOBS, - *COVOBS_hdf5*, a more recent version of the stochastic model COVOBS,
- *chaos_hdf5*, an estimation of earth magnetic field using ground observatory and satellite data (SWARM and CHAMP missions) - *chaos_hdf5*, an estimation of earth magnetic field using ground observatory and satellite data (SWARM and CHAMP missions)
- *GO_VO*, measurements of Ground Observatories and preprocessed satellite data (CHAMP, Swarm) under the form of Virtual Observatories spread all around the globe ([BHF18]_, [Hammer18]_). - *GO_VO*, measurements of Ground Observatories and preprocessed satellite data (CHAMP, Swarm, Oersted, Cryosat and composite) under the form of Virtual Observatories spread all around the globe ([BHF18]_, [Hammer18]_).
- **Type**: *str* - **Type**: *str*
- **Default value**: *COVOBS* - **Default value**: *COVOBS*
- obs_select
Only if obs_type is equal to GO_VO. Dictionary that defines the selection of ground and virtual observatories data used in the algorithm. For instance, to use the secular variation (*SV*) of the ground observatories and both the magnetic field (*MF*) and the *SV* for the CHAMP satellite, define "obs_select dict {'GROUND':'SV', 'CHAMP':('MF', 'SV')}". Possible observatories are *GROUND*, *CHAMP*, *SWARM*, *OERSTED*, *CRYOSAT* and *COMPOSITE*, and corresponding file names should start with *GO*, *CH*, *SW*, *OR*, *CR* or *CO*.
The default value will search for all available data in the folder except *COMPOSITE*.
- **Type**: *dict*
- **Default value**: *all*
- discard_high_lat
All observations that are located at a geomagnetic latitude higher than the value of discard_high_lat will not be taken into account. Inclination of the dipole is computed using the first three Gauss coefficients from Chaos 7 data at the closest time from the observations. The default value is 0 which means no observations will be discarded.
- **Type**: *float*
- **Default value**: *0*
- obs_mod_errors_dir - obs_mod_errors_dir
.. warning:: .. warning::
......
...@@ -138,8 +138,7 @@ class AugkfAlgo(GenericAlgo): ...@@ -138,8 +138,7 @@ class AugkfAlgo(GenericAlgo):
del realisations_U del realisations_U
#ensure realistations_U is deleted #ensure realistations_U is deleted
locs = locals().items() for name, var in list(locals().items()):
for name, var in locs:
if name == 'realistations_U': if name == 'realistations_U':
logging.debug('realistations_U still exist, {0} {1} {2}'.format(name, var.shape, sys.getrefcount(var))) logging.debug('realistations_U still exist, {0} {1} {2}'.format(name, var.shape, sys.getrefcount(var)))
......
...@@ -35,7 +35,7 @@ class AugkfAnalyser(GenericComputer): ...@@ -35,7 +35,7 @@ class AugkfAnalyser(GenericComputer):
nb_obs_mf, nb_obs_sv = self.extract_observations() nb_obs_mf, nb_obs_sv = self.extract_observations()
if nb_obs_mf == 0: if nb_obs_mf == 0:
logging.error( logging.error(
"No observation was extracted for MF! Analyses will be completely skipped" "No observation was extracted for MF! Analyses on b will be completely skipped"
) )
elif nb_obs_sv == 0: elif nb_obs_sv == 0:
logging.error( logging.error(
...@@ -52,8 +52,7 @@ class AugkfAnalyser(GenericComputer): ...@@ -52,8 +52,7 @@ class AugkfAnalyser(GenericComputer):
# Take full Pzz initially for P^a_zz # Take full Pzz initially for P^a_zz
self.Pzz_analyse = self.scaled_Pzz self.Pzz_analyse = self.scaled_Pzz
# Same for the initial covariance matrix for b: self.Pbb_analyse = None
self.Pbb_analyse = None # will be equal to self.algo.covariance_matrices["b,b"] in first step
self.Pbb_forecast = None # Will be computed in the first analysis step self.Pbb_forecast = None # Will be computed in the first analysis step
# Initialize DivH(uBr), necessary for first analysis step # Initialize DivH(uBr), necessary for first analysis step
...@@ -62,8 +61,16 @@ class AugkfAnalyser(GenericComputer): ...@@ -62,8 +61,16 @@ class AugkfAnalyser(GenericComputer):
# Store inverse of covariance matrices for misfit computations # Store inverse of covariance matrices for misfit computations
self.inv_cov_matrices = self.compute_inv_cov_matrices() self.inv_cov_matrices = self.compute_inv_cov_matrices()
def invalid_misfits(self): def invalid_misfits(self, keys):
return {k: np.nan for k in ["MF", "SV", "ER", "U"]} possible_keys = ["MF", "SV", "ER", "U"]
err = 'invalid key passed in invalid_mifits(), keys: {}, possible keys: {}'
assert all([k in possible_keys for k in keys]), err.format(keys, possible_keys)
try:
for k in keys:
self.current_misfits[k] = np.nan
return self.current_misfits
except NameError:
return {k: np.nan for k in keys}
def extract_observations(self): def extract_observations(self):
""" """
...@@ -98,7 +105,6 @@ class AugkfAnalyser(GenericComputer): ...@@ -98,7 +105,6 @@ class AugkfAnalyser(GenericComputer):
), ),
measure_type, measure_type,
) )
return ( return (
len(self.measure_observations["MF"].keys()), len(self.measure_observations["MF"].keys()),
len(self.measure_observations["SV"].keys()), len(self.measure_observations["SV"].keys()),
...@@ -154,50 +160,58 @@ class AugkfAnalyser(GenericComputer): ...@@ -154,50 +160,58 @@ class AugkfAnalyser(GenericComputer):
) )
) )
# Set the core state to analyse to the forecast core state at t
# Note : do a copy to avoid replacing forecast data as ndarrays are mutable
ana_core_state = input_core_state.copy()
try: try:
mf_observations = self.measure_observations["MF"][t] mf_observations = self.measure_observations["MF"][t]
ana_b = True
except KeyError: except KeyError:
self.skipped_analysis_b += 1 self.skipped_analysis_b += 1
logging.critical( logging.critical(
"No observation data exists for B " "No observation data exists for B "
"at {} !. Analysis was skipped. Observation available at {}".format(t, self.measure_observations["MF"].keys()) "at {} !. Analysis was skipped. Observation available at {}".format(t, self.measure_observations["MF"].keys())
) )
self.current_misfits = self.invalid_misfits() self.current_misfits = self.invalid_misfits(keys=('MF',))
return input_core_state ana_b = False
ana_core_state.B = self.analyse_B(input_core_state, mf_observations)
self.skipped_analysis_b = 0 # analysis on b is done, count of skipped analysis goes back to 0
# Compute A(B) by radmats on mean analysed core state
logging.debug("Computing DivH(uBr)...")
self.Ab = self.compute_Ab(ana_core_state.mean(axis=0))
# Start analyse of Z=[U ER] (or Z = [PCA_U ER])
logging.debug("Getting best linear unbiased estimate of Z...")
# Extract the observations for SV # Extract the observations for SV
try: try:
sv_observations = self.measure_observations["SV"][t] sv_observations = self.measure_observations["SV"][t]
# Compute A(B) by radmats on mean analysed core state
ana_z_sv = True
except KeyError: except KeyError:
self.skipped_analysis_z += 1 self.skipped_analysis_z += 1
logging.critical( logging.critical(
"No observation data exists for SV " "No observation data exists for SV "
"at {} ! Analysis was skipped.".format(t) "at {} ! Analysis was skipped.".format(t)
) )
self.current_misfits = self.invalid_misfits() self.current_misfits = self.invalid_misfits(keys=("SV", "ER", "U"))
ana_z_sv = False
if not ana_b and not ana_z_sv: # no analysis are done
# A(b) is not computed and self.Ab value correspond to the last analysis on b
return input_core_state return input_core_state
ana_core_state = self.analyse_augmented_state( # Set the core state to analyse to the forecast core state at t
ana_core_state, sv_observations, self.Ab # Note : do a copy to avoid replacing forecast data as ndarrays are mutable
) ana_core_state = input_core_state.copy()
if ana_b: # doing analysis on b
ana_core_state.B = self.analyse_B(input_core_state, mf_observations)
self.skipped_analysis_b = 0 # analysis on b is done, count of skipped analysis goes back to 0
# Start analyse of Z=[U ER] (or Z = [PCA_U ER])
logging.debug("Getting best linear unbiased estimate of Z...")
ana_core_state = self.analyse_SV(ana_core_state, sv_observations, self.Ab) logging.debug("Computing DivH(uBr)...")
# ana_core_state is the input core state if the analysis on b has not been done
self.Ab = self.compute_Ab(ana_core_state.mean(axis=0))
if ana_z_sv:# perform analysis on u, e and sv
ana_core_state = self.analyse_augmented_state(
ana_core_state, sv_observations, self.Ab
)
ana_core_state = self.analyse_SV(ana_core_state, sv_observations, self.Ab)
self.skipped_analysis_z = 0 # analysis on u, e and sv is done, count of skipped analysis goes back to 0 self.skipped_analysis_z = 0 # analysis on u, e and sv is done, count of skipped analysis goes back to 0
# End of analysis : return the new core_state # End of analysis : return the new core_state
return ana_core_state return ana_core_state
...@@ -242,9 +256,17 @@ class AugkfAnalyser(GenericComputer): ...@@ -242,9 +256,17 @@ class AugkfAnalyser(GenericComputer):
cubic_term *= (self.cfg.decimal_dt_a / N)**3 * (S(N2-1) - S(N1-2)) cubic_term *= (self.cfg.decimal_dt_a / N)**3 * (S(N2-1) - S(N1-2))
if self.Pbb_analyse is None: if self.Pbb_analyse is None:
return self.algo.covariance_matrices["b,b"] + quadratic_term + cubic_term # Pbb_analyse is initialized from the input_core_state, which is either
else: # initialized from an init_file or the priors with noise.
return np.diag(self.Pbb_analyse) + quadratic_term + cubic_term
if self.cfg.core_state_init == 'from_file':
self.Pbb_analyse = (1.0 / (self.algo.nb_realisations - 1)
* common.compute_cov_matrix_from_reals(input_core_state.B))
else: # self.core_state_init = 'normal' or 'constant'
self.Pbb_analyse = self.algo.covariance_matrices["b,b"]
P_bb = np.diag(np.diag(self.Pbb_analyse)) + quadratic_term + cubic_term
# enforce symmetry of matrix at all steps.
return (P_bb + P_bb.T) / 2
def analyse_B(self, input_core_state, mf_observations): def analyse_B(self, input_core_state, mf_observations):
""" """
...@@ -262,30 +284,38 @@ class AugkfAnalyser(GenericComputer): ...@@ -262,30 +284,38 @@ class AugkfAnalyser(GenericComputer):
Rbb = mf_observations.errors Rbb = mf_observations.errors
self.Pbb_forecast = self.cov_mat_b_evolution(input_core_state) self.Pbb_forecast = self.cov_mat_b_evolution(input_core_state)
Kbb = common.compute_Kalman_gain_matrix(
self.Pbb_forecast, Hb, Rbb
)
# Updates the B part of the core_state by the result of the Kalman filter for each model # Updates the B part of the core_state by the result of the Kalman filter for each model
logging.debug("Getting best linear unbiased estimate of B...") logging.debug("Getting best linear unbiased estimate of B...")
analysed_B = np.zeros_like(input_core_state.B) analysed_B = np.zeros_like(input_core_state.B)
# NOTE TO SELF: Can this loop be replaced by NumPy broadcast ? if self.cfg.kalman_norm == 'l2': # for non least square norm, iteration are needed
for i_real in range(self.algo.nb_realisations): Kbb = common.compute_Kalman_gain_matrix(
analysed_B[i_real] = common.get_BLUE( self.Pbb_forecast, Hb, Rbb
input_core_state.B[i_real],
mf_observations.data[i_real],
self.Pbb_forecast,
Hb,
Rbb,
K=Kbb,
) )
for i_real in range(self.algo.nb_realisations):
analysed_B[i_real] = common.get_BLUE(
input_core_state.B[i_real],
mf_observations.data[i_real],
self.Pbb_forecast,
Hb,
Rbb,
K=Kbb,
)
elif self.cfg.kalman_norm == 'huber':
# compute inverse of P_bb before loop on reals using its symmetry
P_eig_val, P_eig_vec = np.linalg.eigh(self.Pbb_forecast)
P_eig_val[P_eig_val < 1e-10] = 1e-10 # in case if matrix is not full rank, which should not happen
Pbb_inv = P_eig_vec @ np.diag(1 / P_eig_val) @ P_eig_vec.T
for i_real in range(self.algo.nb_realisations):
analysed_B[i_real] = common.compute_Kalman_huber(input_core_state.B[i_real], mf_observations.data[i_real],
Pbb_inv, Hb, Rbb)
else:
raise ValueError('Invalide value of param kalman_norm, should be equal to huber or l2, got {}'.format(self.cfg.kalman_norm))
# Compute the misfits for B (Y - HX) # Compute the misfits for B (Y - HX)
HX_b = np.transpose(np.matmul(Hb, np.transpose(analysed_B))) HX_b = np.transpose(np.matmul(Hb, np.transpose(analysed_B)))
inv_Rbb = linalg.inv(Rbb)
self.current_misfits["MF"] = common.compute_misfit( self.current_misfits["MF"] = common.compute_misfit(
mf_observations.data, HX_b, inv_Rbb mf_observations.data, HX_b, linalg.inv(Rbb)
) )
# Update Pbb_analyse with the resulting covariances of the analyse # Update Pbb_analyse with the resulting covariances of the analyse
...@@ -296,6 +326,7 @@ class AugkfAnalyser(GenericComputer): ...@@ -296,6 +326,7 @@ class AugkfAnalyser(GenericComputer):
return analysed_B return analysed_B
def analyse_augmented_state(self, input_core_state, sv_observations, Ab): def analyse_augmented_state(self, input_core_state, sv_observations, Ab):
""" """
Returns the analysed data for the augmented state Z = [U ER] by a BLUE given the observations. Returns the analysed data for the augmented state Z = [U ER] by a BLUE given the observations.
...@@ -325,25 +356,36 @@ class AugkfAnalyser(GenericComputer): ...@@ -325,25 +356,36 @@ class AugkfAnalyser(GenericComputer):
Pzz_forecast = self.scaled_Pzz + np.diag(np.diag(self.Pzz_analyse)) Pzz_forecast = self.scaled_Pzz + np.diag(np.diag(self.Pzz_analyse))
# Kalman gain matrix can be computed outside the loop # Kalman gain matrix can be computed outside the loop
Kzz = common.compute_Kalman_gain_matrix( if self.cfg.kalman_norm == 'l2':
Pzz_forecast, complete_H, sv_observations.errors Kzz = common.compute_Kalman_gain_matrix(
) Pzz_forecast, complete_H, sv_observations.errors
)
else:
# Inverse of covariance matrix can be computed outside the loop
P_eig_val, P_eig_vec = np.linalg.eigh(Pzz_forecast)
P_eig_val[P_eig_val < 1e-10] = 1e-10 # in case if matrix is not full rank, which should not happen
Pzz_inv = P_eig_vec @ np.diag(1 / P_eig_val) @ P_eig_vec.T
for i_real in range(self.algo.nb_realisations): for i_real in range(self.algo.nb_realisations):
# Build the observation for Z: HZ=SV # Build the observation for Z: HZ=SV
real_core_state = input_core_state[i_real] real_core_state = input_core_state[i_real]
# Kalman filter on Z = [U ER] # Kalman filter on Z = [U ER]
Z_to_analyse = np.concatenate((real_core_state.U, real_core_state.ER)) Z_to_analyse = np.concatenate((real_core_state.U, real_core_state.ER))
real_core_state.Z = common.get_BLUE(
Z_to_analyse,
sv_observations.data[i_real],
Pzz_forecast,
complete_H,
sv_observations.errors,
K=Kzz,
)
if self.cfg.kalman_norm == 'l2': # for non least square norm, iteration are needed
real_core_state.Z = common.get_BLUE(
Z_to_analyse,
sv_observations.data[i_real],
Pzz_forecast,
complete_H,
sv_observations.errors,
K=Kzz,
)
elif self.cfg.kalman_norm == 'huber':
real_core_state.Z = common.compute_Kalman_huber(Z_to_analyse, sv_observations.data[i_real], Pzz_inv,
complete_H, sv_observations.errors)
else:
raise ValueError('Invalide value of param kalman_norm, should be equal to huber or l2, got {}'.format(self.cfg.kalman_norm))
# Update ana_core_state with the result # Update ana_core_state with the result
analysed_core_state[i_real] = real_core_state.copy() analysed_core_state[i_real] = real_core_state.copy()
...@@ -386,9 +428,8 @@ class AugkfAnalyser(GenericComputer): ...@@ -386,9 +428,8 @@ class AugkfAnalyser(GenericComputer):
HX_sv = np.transpose( HX_sv = np.transpose(
np.matmul(sv_observations.operator, np.transpose(analysed_core_state.SV)) np.matmul(sv_observations.operator, np.transpose(analysed_core_state.SV))
) )
inv_Rzz = linalg.inv(sv_observations.errors)
self.current_misfits["SV"] = common.compute_misfit( self.current_misfits["SV"] = common.compute_misfit(
sv_observations.data, HX_sv, inv_Rzz sv_observations.data, HX_sv, linalg.inv(sv_observations.errors)
) )
return analysed_core_state return analysed_core_state
...@@ -463,14 +504,15 @@ class AugkfAnalyserPCA(AugkfAnalyser): ...@@ -463,14 +504,15 @@ class AugkfAnalyserPCA(AugkfAnalyser):
Class that handles the analyses of the Augmented State Kalman Filter algorithm with DIFF treated as a contribution to ER. Class that handles the analyses of the Augmented State Kalman Filter algorithm with DIFF treated as a contribution to ER.
""" """
def invalid_misfits(self): def invalid_misfits(self, keys):
""" """
Adds PCA_U to the dict storing misfits Adds PCA_U to the dict storing misfits
:return: dict :return: dict
""" """
invalid_misfits = super().invalid_misfits() invalid_misfits = super().invalid_misfits(keys)
invalid_misfits["PCA_U"] = np.nan if 'U' in keys:
invalid_misfits["PCA_U"] = np.nan
return invalid_misfits return invalid_misfits
...@@ -521,10 +563,16 @@ class AugkfAnalyserPCA(AugkfAnalyser): ...@@ -521,10 +563,16 @@ class AugkfAnalyserPCA(AugkfAnalyser):
# Update P^f_zz for AugKF (P^a_zz is diag matrix for now) # Update P^f_zz for AugKF (P^a_zz is diag matrix for now)
Pzz_forecast = self.scaled_Pzz + np.diag(np.diag(self.Pzz_analyse)) Pzz_forecast = self.scaled_Pzz + np.diag(np.diag(self.Pzz_analyse))
# Kalman gain matrix can be computed outside the loop if self.cfg.kalman_norm == 'l2':
Kzz = common.compute_Kalman_gain_matrix( # Kalman gain matrix can be computed outside the loop
Pzz_forecast, complete_H, sv_observations.errors Kzz = common.compute_Kalman_gain_matrix(
) Pzz_forecast, complete_H, sv_observations.errors
)
else:
# Inverse of covariance matrix can be computed outside the loop
P_eig_val, P_eig_vec = np.linalg.eigh(Pzz_forecast)
P_eig_val[P_eig_val < 1e-10] = 1e-10 # in case if matrix is not full rank, which should not happen
Pzz_inv = P_eig_vec @ np.diag(1 / P_eig_val) @ P_eig_vec.T
for i_real in range(self.algo.nb_realisations): for i_real in range(self.algo.nb_realisations):
real_core_state = input_core_state[i_real] real_core_state = input_core_state[i_real]
...@@ -536,14 +584,20 @@ class AugkfAnalyserPCA(AugkfAnalyser): ...@@ -536,14 +584,20 @@ class AugkfAnalyserPCA(AugkfAnalyser):
Z_to_analyse = np.concatenate( Z_to_analyse = np.concatenate(
(real_core_state.getMeasure("PCA_U"), real_core_state.ER) (real_core_state.getMeasure("PCA_U"), real_core_state.ER)
) )
analysed_Z = common.get_BLUE( if self.cfg.kalman_norm == 'l2': # for non least square norm, iteration are needed
Z_to_analyse, analysed_Z = common.get_BLUE(
obs_z, Z_to_analyse,
Pzz_forecast, obs_z,
complete_H, Pzz_forecast,
sv_observations.errors, complete_H,
K=Kzz, sv_observations.errors,
) K=Kzz,
)
elif self.cfg.kalman_norm == 'huber':
analysed_Z = common.compute_Kalman_huber(Z_to_analyse, obs_z, Pzz_inv, complete_H,
sv_observations.errors)
else:
raise ValueError('Invalid value of param kalman_norm, should be equal to huber or l2, got {}'.format(self.cfg.kalman_norm))
# Update real_core_state with the result # Update real_core_state with the result
analysed_pca = analysed_Z[:Npca] analysed_pca = analysed_Z[:Npca]
real_core_state.setMeasure("PCA_U", analysed_pca) real_core_state.setMeasure("PCA_U", analysed_pca)
......
...@@ -5,12 +5,12 @@ ...@@ -5,12 +5,12 @@
import math import math
import numpy as np import numpy as np
from scipy import linalg from scipy import linalg
import fortran import geodyn_fortran as fortran
from .constants import r_earth from .constants import r_earth
from . import utilities from . import utilities
def compute_direct_obs_operator(positions, max_degree): def compute_direct_obs_operator(positions, max_degree, eps=1e-7):
""" """
Computes the direct observation operator H of a measure of given max degree for a list of given positions. Computes the direct observation operator H of a measure of given max degree for a list of given positions.
...@@ -38,6 +38,11 @@ def compute_direct_obs_operator(positions, max_degree): ...@@ -38,6 +38,11 @@ def compute_direct_obs_operator(positions, max_degree):
# Compute prefactors # Compute prefactors
r_earth_on_r = r_earth / r r_earth_on_r = r_earth / r
sin_th = math.sin(th) sin_th = math.sin(th)
if abs(sin_th) < eps: # theta is either O or pi
sin_th_0 = True
sign_cos_th = np.sign(math.cos(th))
else:
sin_th_0 = False
if th not in computed_plms: if th not in computed_plms:
# Evaluate the Legendre coeffs at a given th using the Fortran function # Evaluate the Legendre coeffs at a given th using the Fortran function
...@@ -58,19 +63,19 @@ def compute_direct_obs_operator(positions, max_degree): ...@@ -58,19 +63,19 @@ def compute_direct_obs_operator(positions, max_degree):
if coef == "g": if coef == "g":
H[i, k] = (l + 1) * r_earth_on_r ** (l + 2) * Plm_at_z * cos_mph H[i, k] = (l + 1) * r_earth_on_r ** (l + 2) * Plm_at_z * cos_mph
H[i + 1, k] = - r_earth_on_r ** (l + 2) * dPlm_at_z * cos_mph H[i + 1, k] = - r_earth_on_r ** (l + 2) * dPlm_at_z * cos_mph
if sin_th == 0: if sin_th_0:
assert sin_mph == 0 or Plm_at_z == 0, 'One of these values should be 0, {}, {}'.format(sin_mph , Plm_at_z) assert sin_mph == 0 or Plm_at_z == 0, 'One of these values should be 0, {}, {}'.format(sin_mph , Plm_at_z)
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * dPlm_at_z * sin_mph H[i + 2, k] = r_earth_on_r ** (l + 2) * m * sign_cos_th * dPlm_at_z * sin_mph
continue else:
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * Plm_at_z * sin_mph / sin_th H[i + 2, k] = r_earth_on_r ** (l + 2) * m * Plm_at_z * sin_mph / sin_th
else: else:
H[i, k] = (l + 1) * r_earth_on_r ** (l + 2) * Plm_at_z * sin_mph H[i, k] = (l + 1) * r_earth_on_r ** (l + 2) * Plm_at_z * sin_mph
H[i + 1, k] = -r_earth_on_r ** (l + 2) * dPlm_at_z * sin_mph H[i + 1, k] = -r_earth_on_r ** (l + 2) * dPlm_at_z * sin_mph
if sin_th == 0: if sin_th_0:
assert cos_mph == 0 or Plm_at_z == 0, 'One of these values should be 0, {}, {}'.format(cos_mph , Plm_at_z) assert cos_mph == 0 or Plm_at_z == 0, 'One of these values should be 0, {}, {}'.format(cos_mph , Plm_at_z)
H[i + 2, k] = r_earth_on_r ** (l + 2) * m * dPlm_at_z * cos_mph H[i + 2, k] = -r_earth_on_r ** (l + 2) * m * sign_cos_th * dPlm_at_z * cos_mph
continue else:
H[i + 2, k] = -r_earth_on_r ** (l + 2) * m * Plm_at_z * cos_mph / sin_th H[i + 2, k] = -r_earth_on_r ** (l + 2) * m * Plm_at_z * cos_mph / sin_th
return H return H
...@@ -306,6 +311,34 @@ def compute_Kalman_gain_matrix(P, H, R, use_cholesky=False): ...@@ -306,6 +311,34 @@ def compute_Kalman_gain_matrix(P, H, R, use_cholesky=False):
return np.transpose(linalg.solve(np.transpose(matrix_to_invert), np.transpose(PHT))) return np.transpose(linalg.solve(np.transpose(matrix_to_invert), np.transpose(PHT)))
def compute_Kalman_huber(b_f, y, Pbb_inv, H, Rbb, max_steps=50):
    """
    Computes the analysed state by iteratively reweighted least squares (IRLS),
    minimising a Huber norm of the observation residuals instead of the L2 norm
    of the classical Kalman/BLUE update. With all weights equal to one, a single
    iteration reduces to the ordinary least-squares (BLUE) solution.

    :param b_f: forecast state vector
    :type b_f: 1D np.array (dim state)
    :param y: observation vector
    :type y: 1D np.array (dim obs)
    :param Pbb_inv: inverse of the forecast covariance matrix of the state
    :type Pbb_inv: 2D np.array (dim state x state)
    :param H: observation operator such that y ~ H @ b
    :type H: 2D np.array (dim obs x state)
    :param Rbb: observation error covariance matrix (only its diagonal is used)
    :type Rbb: 2D np.array (dim obs x obs)
    :param max_steps: maximum number of IRLS iterations (must be >= 1)
    :type max_steps: int
    :return: analysed state vector
    :rtype: 1D np.array (dim state)
    """
    # Scale the observation operator by R^{-1/2}; only the diagonal of Rbb
    # is used, i.e. observation errors are assumed uncorrelated here.
    sq_R = 1 / np.sqrt(np.diag(Rbb))
    R_H = sq_R[:, None] * H
    H_R = R_H.T
    # Huber weights; all ones means plain least squares (no outlier detected)
    W = np.ones(Rbb.shape[0])
    ones_min = np.ones(Rbb.shape[0])
    c = 1.5  # Huber threshold, in units of observation standard deviations
    cutoff = 1e-8  # avoids division by zero in the weight update below
    y_minus_H_b = y - H @ b_f
    b_k = b_f  # ensures the loop variables are bound even before iteration 1
    b_kplus1 = b_f
    for i in range(max_steps):
        # Weighted BLUE update around the forecast state
        b_kplus1 = b_f + np.linalg.solve(
            H_R @ (W[:, None] * R_H) + Pbb_inv,
            H_R * (W * sq_R)[None, :]) @ y_minus_H_b
        # Stop when the largest component of the update is negligible compared
        # to the largest component of the state (absolute values: a signed
        # comparison would wrongly accept large negative updates as converged).
        if i and np.amax(np.abs(b_kplus1 - b_k)) < 1e-4 * np.amax(np.abs(b_k)):
            break
        b_k = b_kplus1
        # Normalised residuals, clipped away from zero before dividing
        epsilon = np.abs(y - H @ b_kplus1) * sq_R
        epsilon[epsilon < cutoff] = cutoff
        # Huber weights: 1 inside the threshold, c/|residual| beyond it
        W = np.minimum(c / epsilon, ones_min)
        if np.all(W == ones_min):
            # No outlier left: the next iterate would be identical (pure
            # least-squares solution), so the loop can stop early.
            break
    return b_kplus1
def compute_cov_matrix_from_reals(real_A, real_B=None): def compute_cov_matrix_from_reals(real_A, real_B=None):
""" """
Returns (real_A - <real_A>)^T * (real_B - <real_B>). If real_B is None, replaces it by real_A. Returns (real_A - <real_A>)^T * (real_B - <real_B>). If real_B is None, replaces it by real_A.
...@@ -325,7 +358,9 @@ def compute_cov_matrix_from_reals(real_A, real_B=None): ...@@ -325,7 +358,9 @@ def compute_cov_matrix_from_reals(real_A, real_B=None):
mean_B = np.mean(real_B, axis=0) mean_B = np.mean(real_B, axis=0)
dev_B = real_B - mean_B dev_B = real_B - mean_B
else: else:
dev_B = dev_A P = dev_A.T @ dev_A