From 6045e6d9d9ac740d3bbb988422d748de6e11e157 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 28 Dec 2020 12:58:48 +0900 Subject: [PATCH 01/37] Update 0.4.3 to 0.5.0 --- bayeso/__init__.py | 4 ++-- docs/conf.py | 2 +- setup.py | 2 +- tests/common/test_version.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) diff --git a/bayeso/__init__.py b/bayeso/__init__.py index ace0018..ab16c03 100644 --- a/bayeso/__init__.py +++ b/bayeso/__init__.py @@ -1,7 +1,7 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 28, 2020 # """BayesO is a simple, but essential Bayesian optimization package, implemented in Python.""" -__version__ = '0.4.3' +__version__ = '0.5.0' diff --git a/docs/conf.py b/docs/conf.py index 0320f41..0cd495c 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -24,7 +24,7 @@ author = 'Jungtaek Kim and Seungjin Choi' # The short X.Y version -version = '0.4.3' +version = '0.5.0' # The full version, including alpha/beta/rc tags release = '{} alpha'.format(version) diff --git a/setup.py b/setup.py index c768947..4d5b51f 100644 --- a/setup.py +++ b/setup.py @@ -17,7 +17,7 @@ setup( name='bayeso', - version='0.4.3', + version='0.5.0', author='Jungtaek Kim', author_email='jtkim@postech.ac.kr', url='http://bayeso.org', diff --git a/tests/common/test_version.py b/tests/common/test_version.py index e716fc3..4233e81 100644 --- a/tests/common/test_version.py +++ b/tests/common/test_version.py @@ -1,11 +1,11 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 28, 2020 # """test_import""" -STR_VERSION = '0.4.3' +STR_VERSION = '0.5.0' def test_version_bayeso(): import bayeso From 1ceb6a6e632bd69be7c029de3c52e96f7158c66b Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 28 Dec 2020 18:41:49 +0900 Subject: [PATCH 02/37] Replace sobol_seq to qmcpy, and add halton and gaussian --- bayeso/bo.py | 85 ++++++++++++++++++++++++++++++++++------- bayeso/constants.py | 2 +- requirements.txt | 2 +- tests/common/test_bo.py | 43 +++++++++++++++------ 4 files changed, 105 insertions(+), 27 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 39e9609..38a2868 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -15,7 +15,7 @@ import cma except: # pragma: no cover cma = None -import sobol_seq +import qmcpy from bayeso.gp import gp from bayeso.gp import gp_common @@ -150,13 +150,57 @@ def _get_samples_uniform(self, num_samples: int, assert isinstance(seed, (int, type(None))) if seed is not None: - np.random.seed(seed) + state_random = np.random.RandomState(seed) + else: + state_random = np.random.RandomState() + + list_initials = [] + for _ in range(0, num_samples): + list_initial = [] + for elem in self.range_X: + list_initial.append(state_random.uniform(elem[0], elem[1])) + list_initials.append(np.array(list_initial)) + initials = np.array(list_initials) + return initials + + def _get_samples_gaussian(self, num_samples: int, + seed: constants.TYPING_UNION_INT_NONE=None + ) -> np.ndarray: + """ + It returns `num_samples` examples sampled from Gaussian distribution. + + :param num_samples: the number of samples. + :type num_samples: int. + :param seed: None, or random seed. + :type seed: NoneType or int., optional + + :returns: random examples. Shape: (`num_samples`, d). 
+ :rtype: numpy.ndarray + + :raises: AssertionError + + """ + + assert isinstance(num_samples, int) + assert isinstance(seed, (int, type(None))) + + if seed is not None: + state_random = np.random.RandomState(seed) + else: + state_random = np.random.RandomState() list_initials = [] + for _ in range(0, num_samples): list_initial = [] for elem in self.range_X: - list_initial.append(np.random.uniform(elem[0], elem[1])) + new_mean = (elem[1] + elem[0]) / 2.0 + new_std = (elem[1] - elem[0]) / 4.0 + + cur_sample = state_random.randn() * new_std + new_mean + cur_sample = np.clip(cur_sample, elem[0], elem[1]) + + list_initial.append(cur_sample) list_initials.append(np.array(list_initial)) initials = np.array(list_initials) return initials @@ -165,14 +209,14 @@ def _get_samples_sobol(self, num_samples: int, seed: constants.TYPING_UNION_INT_NONE=None ) -> np.ndarray: """ - It returns `num_samples` examples sampled from Sobol sequence. + It returns `num_samples` examples sampled from Sobol' sequence. :param num_samples: the number of samples. :type num_samples: int. :param seed: None, or random seed. :type seed: NoneType or int., optional - :returns: examples sampled from Sobol sequence. Shape: (`num_samples`, d). + :returns: examples sampled from Sobol' sequence. Shape: (`num_samples`, d). :rtype: numpy.ndarray :raises: AssertionError @@ -182,31 +226,40 @@ def _get_samples_sobol(self, num_samples: int, assert isinstance(num_samples, int) assert isinstance(seed, (int, type(None))) - if seed is None: - seed = np.random.randint(0, 100000) - if self.debug: - logger.debug('seed: %d', seed) + sampler = qmcpy.Sobol(self.num_dim, seed=seed, graycode=True) + samples = sampler.gen_samples(num_samples) - samples = sobol_seq.i4_sobol_generate(self.num_dim, num_samples, seed) samples = samples * (self.range_X[:, 1].flatten() - self.range_X[:, 0].flatten()) \ + self.range_X[:, 0].flatten() return samples - def _get_samples_latin(self, num_samples: int) -> np.ndarray: + def _get_samples_halton(self, num_samples: int, + seed: constants.TYPING_UNION_INT_NONE=None + ) -> np.ndarray: """ - It returns `num_samples` examples sampled from Latin hypercube. + It returns `num_samples` examples sampled by Halton algorithm. :param num_samples: the number of samples. :type num_samples: int. + :param seed: None, or random seed. + :type seed: NoneType or int., optional - :returns: examples sampled from Latin hypercube. Shape: (`num_samples`, d). + :returns: examples sampled by Halton algorithm. Shape: (`num_samples`, d). :rtype: numpy.ndarray :raises: AssertionError """ - raise NotImplementedError('_get_samples_latin in bo.py') + assert isinstance(num_samples, int) + assert isinstance(seed, (int, type(None))) + + sampler = qmcpy.Halton(self.num_dim, randomize='OWEN', seed=seed) + samples = sampler.gen_samples(num_samples) + + samples = samples * (self.range_X[:, 1].flatten() - self.range_X[:, 0].flatten()) \ + + self.range_X[:, 0].flatten() + return samples # TODO: num_grids should be able to be input. 
def get_samples(self, str_sampling_method: str, @@ -247,8 +300,12 @@ def get_samples(self, str_sampling_method: str, samples = utils_bo.get_best_acquisition_by_evaluation(samples, fun_objective) elif str_sampling_method == 'uniform': samples = self._get_samples_uniform(num_samples, seed=seed) + elif str_sampling_method == 'gaussian': + samples = self._get_samples_gaussian(num_samples, seed=seed) elif str_sampling_method == 'sobol': samples = self._get_samples_sobol(num_samples, seed=seed) + elif str_sampling_method == 'halton': + samples = self._get_samples_halton(num_samples, seed=seed) elif str_sampling_method == 'latin': raise NotImplementedError('get_samples: latin') else: diff --git a/bayeso/constants.py b/bayeso/constants.py index 917de5e..d33a3f6 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -47,7 +47,7 @@ ALLOWED_GP_COV_SET = ['set_' + str_cov for str_cov in ALLOWED_GP_COV_BASE] ALLOWED_GP_COV = ALLOWED_GP_COV_BASE + ALLOWED_GP_COV_SET ALLOWED_BO_ACQ = ['pi', 'ei', 'ucb', 'aei', 'pure_exploit', 'pure_explore'] -ALLOWED_INITIALIZING_METHOD_BO = ['sobol', 'uniform', 'latin'] +ALLOWED_INITIALIZING_METHOD_BO = ['uniform', 'gaussian', 'sobol', 'halton'] ALLOWED_SAMPLING_METHOD = ALLOWED_INITIALIZING_METHOD_BO + ['grid'] ALLOWED_MLM_METHOD = ['regular', 'converged'] ALLOWED_MODELSELECTION_METHOD = ['ml', 'loocv'] diff --git a/requirements.txt b/requirements.txt index 0b4be11..f6515e3 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ numpy scipy -sobol_seq +qmcpy cma diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py index 388734a..290095f 100644 --- a/tests/common/test_bo.py +++ b/tests/common/test_bo.py @@ -100,10 +100,14 @@ def test_get_samples(): model_bo.get_samples('uniform', num_samples='abc') with pytest.raises(AssertionError) as error: model_bo.get_samples('uniform', seed='abc') + with pytest.raises(AssertionError) as error: + model_bo.get_samples('gaussian', seed='abc') + with pytest.raises(AssertionError) as error: + model_bo.get_samples('sobol', seed='abc') + with pytest.raises(AssertionError) as error: + model_bo.get_samples('halton', seed='abc') with pytest.raises(AssertionError) as error: model_bo.get_samples('abc') - with pytest.raises(NotImplementedError) as error: - model_bo.get_samples('latin') arr_initials = model_bo.get_samples('grid', fun_objective=fun_objective) truth_arr_initials = np.array([ @@ -111,15 +115,25 @@ def test_get_samples(): ]) assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() - arr_initials = model_bo.get_samples('sobol', num_samples=3) + arr_initials_ = model_bo.get_samples('sobol', num_samples=3) arr_initials = model_bo.get_samples('sobol', num_samples=3, seed=42) truth_arr_initials = np.array([ - [4.84375, 1.3125, 0.46875], - [3.59375, -0.1875, -0.78125], - [8.59375, 1.8125, 4.21875], + [6.863512583076954, -0.1525135599076748, -1.732824514620006], + [0.4720448818989098, 1.9830138171091676, 1.8962347391061485], + [3.9235182013362646, -1.8038121052086353, -3.2463264442048967], ]) assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() + arr_initials_ = model_bo.get_samples('halton', num_samples=3) + arr_initials = model_bo.get_samples('halton', num_samples=3, seed=42) + truth_arr_initials = np.array([ + [4.325625705206888, 1.9045673771707823, 1.0981007622257621], + [9.325625705206889, 0.5712340438374492, -2.9018992377742396], + [1.825625705206888, -0.7620992894958845, 3.098100762225762], + ]) + assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() + 
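For reference, a minimal standalone sketch of reaching the new samplers through the public get_samples interface exercised by these tests; the three-dimensional search space and the seed simply mirror the values used in tests/common/test_bo.py, and nothing here is part of the patch itself.

import numpy as np
from bayeso import bo

# hypothetical search space, matching the ranges used in tests/common/test_bo.py
arr_range = np.array([
    [0.0, 10.0],
    [-2.0, 2.0],
    [-5.0, 5.0],
])
model_bo = bo.BO(arr_range)

# each call returns an array of shape (num_samples, 3) lying inside arr_range
samples_sobol = model_bo.get_samples('sobol', num_samples=3, seed=42)
samples_halton = model_bo.get_samples('halton', num_samples=3, seed=42)
samples_gaussian = model_bo.get_samples('gaussian', num_samples=3, seed=42)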
+ arr_initials_ = model_bo.get_samples('uniform', num_samples=3) arr_initials = model_bo.get_samples('uniform', num_samples=3, seed=42) truth_arr_initials = np.array([ [3.74540119, 1.80285723, 2.31993942], @@ -128,6 +142,15 @@ def test_get_samples(): ]) assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() + arr_initials_ = model_bo.get_samples('gaussian', num_samples=3) + arr_initials = model_bo.get_samples('gaussian', num_samples=3, seed=42) + truth_arr_initials = np.array([ + [6.241785382528082, -0.13826430117118466, 1.6192213452517312], + [8.807574641020064, -0.23415337472333597, -0.5853423923729514], + [8.948032038768478, 0.7674347291529088, -1.1736859648373803], + ]) + assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() + def test_get_initials(): np.random.seed(42) arr_range = np.array([ @@ -152,15 +175,13 @@ def test_get_initials(): model_bo.get_initials('uniform', 10, seed='abc') with pytest.raises(AssertionError) as error: model_bo.get_initials('abc', 10) - with pytest.raises(NotImplementedError) as error: - model_bo.get_initials('latin', 10) arr_initials = model_bo.get_initials('sobol', 3) arr_initials = model_bo.get_initials('sobol', 3, seed=42) truth_arr_initials = np.array([ - [4.84375, 1.3125, 0.46875], - [3.59375, -0.1875, -0.78125], - [8.59375, 1.8125, 4.21875], + [6.863512583076954, -0.1525135599076748, -1.732824514620006], + [0.4720448818989098, 1.9830138171091676, 1.8962347391061485], + [3.9235182013362646, -1.8038121052086353, -3.2463264442048967], ]) assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() From 3adc54fbcb0084e870dc0f3fde63422dd0e2d86a Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 28 Dec 2020 18:42:32 +0900 Subject: [PATCH 03/37] Update date modified --- bayeso/bo.py | 2 +- bayeso/constants.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 38a2868..de92199 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 28, 2020 # """It defines a class for Bayesian optimization.""" diff --git a/bayeso/constants.py b/bayeso/constants.py index d33a3f6..a598068 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 28, 2020 # """This file is for declaring various default constants. 
If you would like to see the details, check out the repository.""" From a4cd2889a7645f01b911bb947e2c0ca691621908 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 29 Dec 2020 10:56:30 +0900 Subject: [PATCH 04/37] Test sampling methods including Sobol and Halton --- bayeso/bo.py | 2 -- bayeso/constants.py | 4 ++-- examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py | 2 +- examples/04_benchmarks/example_benchmarks_branin_bo_ei.py | 2 +- .../04_benchmarks/example_benchmarks_eggholder_bo_ei.py | 2 +- examples/05_hpo/example_hpo_ridge_regression_ei.py | 6 +++--- examples/05_hpo/example_hpo_xgboost_ei.py | 8 ++++---- 7 files changed, 12 insertions(+), 14 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index de92199..bc915f5 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -306,8 +306,6 @@ def get_samples(self, str_sampling_method: str, samples = self._get_samples_sobol(num_samples, seed=seed) elif str_sampling_method == 'halton': samples = self._get_samples_halton(num_samples, seed=seed) - elif str_sampling_method == 'latin': - raise NotImplementedError('get_samples: latin') else: raise NotImplementedError('get_samples: allowed str_sampling_method,\ but it is not implemented.') diff --git a/bayeso/constants.py b/bayeso/constants.py index a598068..9856be9 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -15,9 +15,9 @@ STR_OPTIMIZER_METHOD_GP = 'BFGS' STR_GP_COV = 'matern52' STR_BO_ACQ = 'ei' -STR_INITIALIZING_METHOD_BO = 'uniform' +STR_INITIALIZING_METHOD_BO = 'sobol' STR_OPTIMIZER_METHOD_AO = 'L-BFGS-B' -STR_SAMPLING_METHOD_AO = 'uniform' +STR_SAMPLING_METHOD_AO = 'sobol' STR_MLM_METHOD = 'regular' STR_MODELSELECTION_METHOD = 'ml' STR_FRAMEWORK_GP = 'scipy' diff --git a/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py b/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py index 73719bf..c7c6bfd 100644 --- a/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py @@ -32,7 +32,7 @@ def main(): list_time = [] for ind_bo in range(0, num_bo): print('BO Round', ind_bo + 1) - X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, num_iter, str_initial_method_bo='uniform', str_sampling_method_ao='uniform', num_samples_ao=100) + X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, num_iter, str_initial_method_bo='gaussian', str_sampling_method_ao='gaussian', num_samples_ao=100, seed=42 * (ind_bo + 1)) print(X_final) print(Y_final) print(time_final) diff --git a/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py b/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py index b403dab..9311eae 100644 --- a/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py @@ -32,7 +32,7 @@ def main(): list_time = [] for ind_bo in range(0, num_bo): print('BO Round', ind_bo + 1) - X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, num_iter, str_initial_method_bo='uniform', str_sampling_method_ao='uniform', num_samples_ao=100) + X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, num_iter, str_initial_method_bo='sobol', str_sampling_method_ao='sobol', num_samples_ao=100) print(X_final) print(Y_final) print(time_final) diff --git a/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py b/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py index 
7426ac5..788d805 100644 --- a/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py @@ -32,7 +32,7 @@ def main(): list_time = [] for ind_bo in range(0, num_bo): print('BO Round', ind_bo + 1) - X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, num_iter, str_initial_method_bo='uniform', str_sampling_method_ao='uniform', num_samples_ao=100) + X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, num_iter, str_initial_method_bo='halton', str_sampling_method_ao='halton', num_samples_ao=100, seed=42 * (ind_bo + 1)) print(X_final) print(Y_final) print(time_final) diff --git a/examples/05_hpo/example_hpo_ridge_regression_ei.py b/examples/05_hpo/example_hpo_ridge_regression_ei.py index 772bec8..d75479e 100644 --- a/examples/05_hpo/example_hpo_ridge_regression_ei.py +++ b/examples/05_hpo/example_hpo_ridge_regression_ei.py @@ -32,14 +32,14 @@ def fun_target(X): return mse def main(): - # (max_depth, n_estimators) - num_init = 5 + # (alpha, ) + num_init = 1 model_bo = bo.BO(np.array([[0.1, 2]]), debug=True) list_Y = [] list_time = [] for _ in range(0, 10): - X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, 10, str_initial_method_bo='uniform', str_sampling_method_ao='uniform', num_samples_ao=100) + X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, 10, str_initial_method_bo='sobol', str_sampling_method_ao='sobol', num_samples_ao=100) list_Y.append(Y_final) list_time.append(time_final) arr_Y = np.array(list_Y) diff --git a/examples/05_hpo/example_hpo_xgboost_ei.py b/examples/05_hpo/example_hpo_xgboost_ei.py index 8bf2cf9..0fac642 100644 --- a/examples/05_hpo/example_hpo_xgboost_ei.py +++ b/examples/05_hpo/example_hpo_xgboost_ei.py @@ -1,6 +1,6 @@ # example_hpo_xgboost_ei # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: January 06, 2020 +# last updated: December 29, 2020 import numpy as np import os @@ -26,19 +26,19 @@ def fun_target(X): print(X) - xgb_model = xgb.XGBClassifier(max_depth=int(X[0]), n_estimators=int(X[1])).fit(DATA_TRAIN, LABELS_TRAIN) + xgb_model = xgb.XGBClassifier(max_depth=int(X[0]), n_estimators=int(X[1]), use_label_encoder=False).fit(DATA_TRAIN, LABELS_TRAIN, eval_metric='mlogloss') preds = xgb_model.predict(DATA_TEST) return 1.0 - sklearn.metrics.accuracy_score(LABELS_TEST, preds) def main(): # (max_depth, n_estimators) - num_init = 5 + num_init = 1 model_bo = bo.BO(np.array([[1, 10], [100, 500]]), debug=True) list_Y = [] list_time = [] for _ in range(0, 5): - X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, 10, str_initial_method_bo='uniform', str_sampling_method_ao='uniform', num_samples_ao=100) + X_final, Y_final, time_final, _, _ = wrappers_bo.run_single_round(model_bo, fun_target, num_init, 10, str_initial_method_bo='sobol', str_sampling_method_ao='sobol', num_samples_ao=100) list_Y.append(Y_final) list_time.append(time_final) arr_Y = np.array(list_Y) From b0751b2b12bffe81c3b66913b1070fe11a02d721 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 29 Dec 2020 12:53:59 +0900 Subject: [PATCH 05/37] Relocate functions defined in gp_common --- .pylintrc | 5 +- bayeso/bo.py | 11 ++- bayeso/constants.py | 3 +- bayeso/covariance.py | 112 ++++++++++++++++++++- bayeso/gp/gp.py | 16 +-- bayeso/gp/gp_common.py | 123 ----------------------- bayeso/gp/gp_gpytorch.py | 
10 +- bayeso/gp/gp_scipy.py | 18 ++-- bayeso/gp/gp_tensorflow.py | 10 +- bayeso/utils/utils_covariance.py | 45 ++++++++- bayeso/utils/utils_gp.py | 43 +------- tests/common/test_covariance.py | 124 ++++++++++++++++++++++- tests/common/test_gp_common.py | 135 -------------------------- tests/common/test_import.py | 11 +-- tests/common/test_utils_covariance.py | 32 +++++- tests/common/test_utils_gp.py | 32 +----- 16 files changed, 353 insertions(+), 377 deletions(-) delete mode 100644 bayeso/gp/gp_common.py delete mode 100644 tests/common/test_gp_common.py diff --git a/.pylintrc b/.pylintrc index ad50ef9..f5ea5c2 100644 --- a/.pylintrc +++ b/.pylintrc @@ -1,9 +1,12 @@ [MASTER] +# command +# pylint --rcfile=.pylintrc bayeso + # A comma-separated list of package or module names from where C extensions may # be loaded. Extensions are loading into the active Python interpreter and may # run arbitrary code. -extension-pkg-whitelist= +extension-pkg-whitelist=numpy # Specify a score threshold to be exceeded before program exits with error. fail-under=10.0 diff --git a/bayeso/bo.py b/bayeso/bo.py index bc915f5..c1d52a9 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 28, 2020 +# last updated: December 29, 2020 # """It defines a class for Bayesian optimization.""" @@ -17,12 +17,12 @@ cma = None import qmcpy +from bayeso import covariance +from bayeso import constants from bayeso.gp import gp -from bayeso.gp import gp_common from bayeso.utils import utils_bo from bayeso.utils import utils_common from bayeso.utils import utils_logger -from bayeso import constants logger = utils_logger.get_logger('bo') @@ -465,7 +465,8 @@ def g(bx): list_next_point.append(next_point_x) next_points = np.array(list_next_point) - next_point = utils_bo.get_best_acquisition_by_evaluation(next_points, fun_negative_acquisition)[0] + next_point = utils_bo.get_best_acquisition_by_evaluation( + next_points, fun_negative_acquisition)[0] return next_point, next_points def optimize(self, X_train: np.ndarray, Y_train: np.ndarray, @@ -542,7 +543,7 @@ def optimize(self, X_train: np.ndarray, Y_train: np.ndarray, if self.debug: logger.debug('hyps converged.') hyps = self.historical_hyps[-1] - cov_X_X, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, hyps, + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, self.str_cov, fix_noise=fix_noise, debug=self.debug) else: # pragma: no cover raise ValueError('optimize: missing condition for str_mlm_method.') diff --git a/bayeso/constants.py b/bayeso/constants.py index 9856be9..20c35c2 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 28, 2020 # -"""This file is for declaring various default constants. If you would like to see the details, check out the repository.""" +"""This file is for declaring various default constants. 
+If you would like to see the details, check out the repository.""" import typing import numpy as np diff --git a/bayeso/covariance.py b/bayeso/covariance.py index ad69890..fddaabe 100644 --- a/bayeso/covariance.py +++ b/bayeso/covariance.py @@ -1,11 +1,12 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It defines covariance functions and their associated functions.""" import numpy as np import scipy.spatial.distance as scisd +import scipy.linalg from bayeso import constants from bayeso.utils import utils_covariance @@ -54,6 +55,115 @@ def choose_fun_cov(str_cov: str, choose_grad: bool) -> callable: choose_grad conditions, but it is not implemented.') return fun_cov +@utils_common.validate_types +def get_kernel_inverse(X_train: np.ndarray, hyps: dict, str_cov: str, + fix_noise: bool=constants.FIX_GP_NOISE, + use_gradient: bool=False, + debug: bool=False +) -> constants.TYPING_TUPLE_THREE_ARRAYS: + """ + This function computes a kernel inverse without any matrix decomposition techniques. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param hyps: dictionary of hyperparameters for Gaussian process. + :type hyps: dict. + :param str_cov: the name of covariance function. + :type str_cov: str. + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param use_gradient: flag for computing and returning gradients of + negative log marginal likelihood. + :type use_gradient: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of kernel matrix over `X_train`, kernel matrix + inverse, and gradients of kernel matrix. If `use_gradient` is False, + gradients of kernel matrix would be None. + :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(str_cov, str) + assert isinstance(use_gradient, bool) + assert isinstance(fix_noise, bool) + assert isinstance(debug, bool) + utils_covariance.check_str_cov('get_kernel_inverse', str_cov, X_train.shape) + + cov_X_X = cov_main(str_cov, X_train, X_train, hyps, True) \ + + hyps['noise']**2 * np.eye(X_train.shape[0]) + cov_X_X = (cov_X_X + cov_X_X.T) / 2.0 + inv_cov_X_X = np.linalg.inv(cov_X_X) + + if use_gradient: + grad_cov_X_X = grad_cov_main(str_cov, X_train, X_train, + hyps, fix_noise, same_X_Xp=True) + else: + grad_cov_X_X = None + + return cov_X_X, inv_cov_X_X, grad_cov_X_X + +@utils_common.validate_types +def get_kernel_cholesky(X_train: np.ndarray, hyps: dict, str_cov: str, + fix_noise: bool=constants.FIX_GP_NOISE, + use_gradient: bool=False, + debug: bool=False +) -> constants.TYPING_TUPLE_THREE_ARRAYS: + """ + This function computes a kernel inverse with Cholesky decomposition. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param hyps: dictionary of hyperparameters for Gaussian process. + :type hyps: dict. + :param str_cov: the name of covariance function. + :type str_cov: str. + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param use_gradient: flag for computing and returning gradients of + negative log marginal likelihood. + :type use_gradient: bool., optional + :param debug: flag for printing log messages. 
+ :type debug: bool., optional + + :returns: a tuple of kernel matrix over `X_train`, lower matrix computed + by Cholesky decomposition, and gradients of kernel matrix. If + `use_gradient` is False, gradients of kernel matrix would be None. + :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(str_cov, str) + assert isinstance(fix_noise, bool) + assert isinstance(use_gradient, bool) + assert isinstance(debug, bool) + utils_covariance.check_str_cov('get_kernel_cholesky', str_cov, X_train.shape) + + cov_X_X = cov_main(str_cov, X_train, X_train, hyps, True) \ + + hyps['noise']**2 * np.eye(X_train.shape[0]) + cov_X_X = (cov_X_X + cov_X_X.T) / 2.0 + try: + lower = scipy.linalg.cholesky(cov_X_X, lower=True) + except np.linalg.LinAlgError: # pragma: no cover + cov_X_X += 1e-2 * np.eye(X_train.shape[0]) + lower = scipy.linalg.cholesky(cov_X_X, lower=True) + + if use_gradient: + grad_cov_X_X = grad_cov_main(str_cov, X_train, X_train, + hyps, fix_noise, same_X_Xp=True) + else: + grad_cov_X_X = None + return cov_X_X, lower, grad_cov_X_X + @utils_common.validate_types def cov_se(X: np.ndarray, Xp: np.ndarray, lengthscales: constants.TYPING_UNION_ARRAY_FLOAT, signal: float diff --git a/bayeso/gp/gp.py b/bayeso/gp/gp.py index 6d68ad5..4681359 100644 --- a/bayeso/gp/gp.py +++ b/bayeso/gp/gp.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It defines Gaussian process regression.""" @@ -10,9 +10,9 @@ from bayeso import covariance from bayeso import constants -from bayeso.gp import gp_common from bayeso.gp import gp_scipy from bayeso.utils import utils_gp +from bayeso.utils import utils_covariance from bayeso.utils import utils_common from bayeso.utils import utils_logger @@ -102,7 +102,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(debug, bool) assert len(Y_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] - utils_gp.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD assert str_framework in constants.ALLOWED_FRAMEWORK_GP @@ -193,7 +193,8 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra assert len(cov_X_X.shape) == 2 assert len(inv_cov_X_X.shape) == 2 assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all() - utils_gp.check_str_cov('predict_with_cov', str_cov, X_train.shape, shape_X2=X_test.shape) + utils_covariance.check_str_cov('predict_with_cov', str_cov, + X_train.shape, shape_X2=X_test.shape) assert X_train.shape[0] == Y_train.shape[0] assert X_train.shape[1] == X_test.shape[1] @@ -250,11 +251,12 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr assert isinstance(debug, bool) assert callable(prior_mu) or prior_mu is None assert len(Y_train.shape) == 2 - utils_gp.check_str_cov('predict_with_hyps', str_cov, X_train.shape, shape_X2=X_test.shape) + utils_covariance.check_str_cov('predict_with_hyps', str_cov, + X_train.shape, shape_X2=X_test.shape) assert X_train.shape[0] == Y_train.shape[0] assert X_train.shape[1] == X_test.shape[1] - cov_X_X, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, + cov_X_X, 
inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, debug=debug) mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, @@ -310,7 +312,7 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test assert isinstance(debug, bool) assert callable(prior_mu) or prior_mu is None assert len(Y_train.shape) == 2 - utils_gp.check_str_cov('predict_with_optimized_kernel', str_cov, + utils_covariance.check_str_cov('predict_with_optimized_kernel', str_cov, X_train.shape, shape_X2=X_test.shape) assert X_train.shape[0] == Y_train.shape[0] assert X_train.shape[1] == X_test.shape[1] diff --git a/bayeso/gp/gp_common.py b/bayeso/gp/gp_common.py deleted file mode 100644 index 4352f79..0000000 --- a/bayeso/gp/gp_common.py +++ /dev/null @@ -1,123 +0,0 @@ -# -# author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 -# -"""It defines functions for Gaussian process regression.""" - -import numpy as np -import scipy.linalg - -from bayeso import covariance -from bayeso import constants -from bayeso.utils import utils_gp -from bayeso.utils import utils_common - - -@utils_common.validate_types -def get_kernel_inverse(X_train: np.ndarray, hyps: dict, str_cov: str, - fix_noise: bool=constants.FIX_GP_NOISE, - use_gradient: bool=False, - debug: bool=False -) -> constants.TYPING_TUPLE_THREE_ARRAYS: - """ - This function computes a kernel inverse without any matrix decomposition techniques. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param hyps: dictionary of hyperparameters for Gaussian process. - :type hyps: dict. - :param str_cov: the name of covariance function. - :type str_cov: str. - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param use_gradient: flag for computing and returning gradients of - negative log marginal likelihood. - :type use_gradient: bool., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, kernel matrix - inverse, and gradients of kernel matrix. If `use_gradient` is False, - gradients of kernel matrix would be None. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) - - :raises: AssertionError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(use_gradient, bool) - assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - utils_gp.check_str_cov('get_kernel_inverse', str_cov, X_train.shape) - - cov_X_X = covariance.cov_main(str_cov, X_train, X_train, hyps, True) \ - + hyps['noise']**2 * np.eye(X_train.shape[0]) - cov_X_X = (cov_X_X + cov_X_X.T) / 2.0 - inv_cov_X_X = np.linalg.inv(cov_X_X) - - if use_gradient: - grad_cov_X_X = covariance.grad_cov_main(str_cov, X_train, X_train, - hyps, fix_noise, same_X_Xp=True) - else: - grad_cov_X_X = None - - return cov_X_X, inv_cov_X_X, grad_cov_X_X - -@utils_common.validate_types -def get_kernel_cholesky(X_train: np.ndarray, hyps: dict, str_cov: str, - fix_noise: bool=constants.FIX_GP_NOISE, - use_gradient: bool=False, - debug: bool=False -) -> constants.TYPING_TUPLE_THREE_ARRAYS: - """ - This function computes a kernel inverse with Cholesky decomposition. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param hyps: dictionary of hyperparameters for Gaussian process. - :type hyps: dict. 
- :param str_cov: the name of covariance function. - :type str_cov: str. - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param use_gradient: flag for computing and returning gradients of - negative log marginal likelihood. - :type use_gradient: bool., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, lower matrix computed - by Cholesky decomposition, and gradients of kernel matrix. If - `use_gradient` is False, gradients of kernel matrix would be None. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) - - :raises: AssertionError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(fix_noise, bool) - assert isinstance(use_gradient, bool) - assert isinstance(debug, bool) - utils_gp.check_str_cov('get_kernel_cholesky', str_cov, X_train.shape) - - cov_X_X = covariance.cov_main(str_cov, X_train, X_train, hyps, True) \ - + hyps['noise']**2 * np.eye(X_train.shape[0]) - cov_X_X = (cov_X_X + cov_X_X.T) / 2.0 - try: - lower = scipy.linalg.cholesky(cov_X_X, lower=True) - except np.linalg.LinAlgError: # pragma: no cover - cov_X_X += 1e-2 * np.eye(X_train.shape[0]) - lower = scipy.linalg.cholesky(cov_X_X, lower=True) - - if use_gradient: - grad_cov_X_X = covariance.grad_cov_main(str_cov, X_train, X_train, - hyps, fix_noise, same_X_Xp=True) - else: - grad_cov_X_X = None - return cov_X_X, lower, grad_cov_X_X diff --git a/bayeso/gp/gp_gpytorch.py b/bayeso/gp/gp_gpytorch.py index 9e6d125..46686ad 100644 --- a/bayeso/gp/gp_gpytorch.py +++ b/bayeso/gp/gp_gpytorch.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It is Gaussian process regression implementations with GPyTorch.""" @@ -9,9 +9,9 @@ import torch import gpytorch +from bayeso import covariance from bayeso import constants -from bayeso.gp import gp_common -from bayeso.utils import utils_gp +from bayeso.utils import utils_covariance from bayeso.utils import utils_common from bayeso.utils import utils_logger @@ -93,7 +93,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(debug, bool) assert len(Y_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] - utils_gp.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert num_iters >= 10 or num_iters == 0 # TODO: prior_mu and fix_noise are not working now. 
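As a rough usage sketch of the relocated helpers in their new home, bayeso.covariance; the toy inputs mirror tests/common/test_covariance.py later in this patch, and the 'se' hyperparameters come from utils_covariance.get_hyps, so no names here are invented.

import numpy as np
from bayeso import covariance
from bayeso.utils import utils_covariance

X_train = np.reshape(np.arange(0, 9), (3, 3))
hyps = utils_covariance.get_hyps('se', 3)

# kernel matrix, its inverse, and (optional) gradients, now provided by bayeso.covariance
cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, 'se')

# the same kernel, factored by a Cholesky decomposition instead of inverted
cov_X_X, lower, _ = covariance.get_kernel_cholesky(X_train, hyps, 'se')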
@@ -153,7 +153,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, 'noise': np.sqrt(model.likelihood.noise.item()) } - cov_X_X, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, hyps, + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug) time_end = time.time() diff --git a/bayeso/gp/gp_scipy.py b/bayeso/gp/gp_scipy.py index 866c36f..ae714e9 100644 --- a/bayeso/gp/gp_scipy.py +++ b/bayeso/gp/gp_scipy.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It is Gaussian process regression implementations with SciPy.""" @@ -9,8 +9,8 @@ import scipy.linalg import scipy.optimize +from bayeso import covariance from bayeso import constants -from bayeso.gp import gp_common from bayeso.utils import utils_gp from bayeso.utils import utils_covariance from bayeso.utils import utils_common @@ -70,12 +70,12 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, assert len(Y_train.shape) == 2 assert len(prior_mu_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] - utils_gp.check_str_cov('neg_log_ml', str_cov, X_train.shape) + utils_covariance.check_str_cov('neg_log_ml', str_cov, X_train.shape) hyps = utils_covariance.restore_hyps(str_cov, hyps, fix_noise=fix_noise) new_Y_train = Y_train - prior_mu_train if use_cholesky: - cov_X_X, lower, grad_cov_X_X = gp_common.get_kernel_cholesky(X_train, + cov_X_X, lower, grad_cov_X_X = covariance.get_kernel_cholesky(X_train, hyps, str_cov, fix_noise=fix_noise, use_gradient=use_gradient, debug=debug) @@ -95,7 +95,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, else: # TODO: use_gradient is fixed. use_gradient = False - cov_X_X, inv_cov_X_X, grad_cov_X_X = gp_common.get_kernel_inverse(X_train, + cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, use_gradient=use_gradient, debug=debug) @@ -152,12 +152,12 @@ def neg_log_pseudo_l_loocv(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.nd assert len(Y_train.shape) == 2 assert len(prior_mu_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] - utils_gp.check_str_cov('neg_log_pseudo_l_loocv', str_cov, X_train.shape) + utils_covariance.check_str_cov('neg_log_pseudo_l_loocv', str_cov, X_train.shape) num_data = X_train.shape[0] hyps = utils_covariance.restore_hyps(str_cov, hyps, fix_noise=fix_noise) - _, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, hyps, + _, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug) log_pseudo_l_ = 0.0 @@ -232,7 +232,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(debug, bool) assert len(Y_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] - utils_gp.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD # TODO: fix this. 
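To illustrate why neg_log_ml above requests the Cholesky factor rather than an explicit inverse, a short sketch of solving the kernel system with SciPy; the Y_train values are hypothetical and the cho_solve pattern is only a plausible illustration under the same toy inputs as the tests, not a copy of the library internals.

import numpy as np
import scipy.linalg
from bayeso import covariance
from bayeso.utils import utils_covariance

X_train = np.reshape(np.arange(0, 9), (3, 3))
Y_train = np.array([[1.0], [0.5], [-2.0]])
hyps = utils_covariance.get_hyps('se', 3)

cov_X_X, lower, _ = covariance.get_kernel_cholesky(X_train, hyps, 'se')

# solve cov_X_X @ alpha = Y_train via the lower-triangular factor
alpha = scipy.linalg.cho_solve((lower, True), Y_train)
assert np.allclose(cov_X_X @ alpha, Y_train)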
@@ -301,7 +301,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise) hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim) - cov_X_X, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug) time_end = time.time() diff --git a/bayeso/gp/gp_tensorflow.py b/bayeso/gp/gp_tensorflow.py index 8a7034d..826e545 100644 --- a/bayeso/gp/gp_tensorflow.py +++ b/bayeso/gp/gp_tensorflow.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It is Gaussian process regression implementations with TensorFlow.""" @@ -9,9 +9,9 @@ import tensorflow as tf import tensorflow_probability as tfp +from bayeso import covariance from bayeso import constants -from bayeso.gp import gp_common -from bayeso.utils import utils_gp +from bayeso.utils import utils_covariance from bayeso.utils import utils_common from bayeso.utils import utils_logger @@ -71,7 +71,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(debug, bool) assert len(Y_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] - utils_gp.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert num_iters >= 10 or num_iters == 0 # TODO: prior_mu and fix_noise are not working now. @@ -173,7 +173,7 @@ def log_prob_outputs(): # pragma: no cover # 'noise': np.sqrt(var_observation_noise_variance._value().numpy()) } - cov_X_X, inv_cov_X_X, _ = gp_common.get_kernel_inverse(X_train, hyps, + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug) time_end = time.time() diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index de3136b..72940dc 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It is utilities for covariance functions.""" @@ -64,7 +64,7 @@ def get_hyps(str_cov: str, dim: int, if use_ard: hyps['lengthscales'] = np.ones(dim) else: - # TODO: It makes bunch of erros. I should fix it. + # TODO: It makes bunch of errors. I should fix it. hyps['lengthscales'] = 1.0 else: raise NotImplementedError('get_hyps: allowed str_cov, but it is not implemented.') @@ -293,3 +293,44 @@ def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int # is_valid = True raise NotImplementedError('validate_hyps_arr in utils_covariance.py') + +@utils_common.validate_types +def check_str_cov(str_fun: str, str_cov: str, shape_X1: tuple, + shape_X2: tuple=None +) -> constants.TYPE_NONE: + """ + It is for validating the shape of X1 (and optionally the shape of X2). + + :param str_fun: the name of function. + :type str_fun: str. + :param str_cov: the name of covariance function. + :type str_cov: str. + :param shape_X1: the shape of X1. + :type shape_X1: tuple + :param shape_X2: None, or the shape of X2. + :type shape_X2: NoneType or tuple, optional + + :returns: None, if it is valid. Raise an error, otherwise. 
+ :rtype: NoneType + + :raises: AssertionError, ValueError + + """ + + assert isinstance(str_fun, str) + assert isinstance(str_cov, str) + assert isinstance(shape_X1, tuple) + assert shape_X2 is None or isinstance(shape_X2, tuple) + + if str_cov in constants.ALLOWED_GP_COV_BASE: + assert len(shape_X1) == 2 + if shape_X2 is not None: + assert len(shape_X2) == 2 + elif str_cov in constants.ALLOWED_GP_COV_SET: + assert len(shape_X1) == 3 + if shape_X2 is not None: + assert len(shape_X2) == 3 + elif str_cov in constants.ALLOWED_GP_COV: # pragma: no cover + raise ValueError('{}: missing conditions for str_cov.'.format(str_fun)) + else: + raise ValueError('{}: invalid str_cov.'.format(str_fun)) diff --git a/bayeso/utils/utils_gp.py b/bayeso/utils/utils_gp.py index 8e00346..cf37f50 100644 --- a/bayeso/utils/utils_gp.py +++ b/bayeso/utils/utils_gp.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """It is utilities for Gaussian process regression.""" @@ -10,47 +10,6 @@ from bayeso import constants -@utils_common.validate_types -def check_str_cov(str_fun: str, str_cov: str, shape_X1: tuple, - shape_X2: tuple=None -) -> constants.TYPE_NONE: - """ - It is for validating the shape of X1 (and optionally the shape of X2). - - :param str_fun: the name of function. - :type str_fun: str. - :param str_cov: the name of covariance function. - :type str_cov: str. - :param shape_X1: the shape of X1. - :type shape_X1: tuple - :param shape_X2: None, or the shape of X2. - :type shape_X2: NoneType or tuple, optional - - :returns: None, if it is valid. Raise an error, otherwise. - :rtype: NoneType - - :raises: AssertionError, ValueError - - """ - - assert isinstance(str_fun, str) - assert isinstance(str_cov, str) - assert isinstance(shape_X1, tuple) - assert shape_X2 is None or isinstance(shape_X2, tuple) - - if str_cov in constants.ALLOWED_GP_COV_BASE: - assert len(shape_X1) == 2 - if shape_X2 is not None: - assert len(shape_X2) == 2 - elif str_cov in constants.ALLOWED_GP_COV_SET: - assert len(shape_X1) == 3 - if shape_X2 is not None: - assert len(shape_X2) == 3 - elif str_cov in constants.ALLOWED_GP_COV: # pragma: no cover - raise ValueError('{}: missing conditions for str_cov.'.format(str_fun)) - else: - raise ValueError('{}: invalid str_cov.'.format(str_fun)) - @utils_common.validate_types def get_prior_mu(prior_mu: constants.TYPING_UNION_CALLABLE_NONE, X: np.ndarray) -> np.ndarray: """ diff --git a/tests/common/test_covariance.py b/tests/common/test_covariance.py index 1b44401..627ed11 100644 --- a/tests/common/test_covariance.py +++ b/tests/common/test_covariance.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """test_covariance""" @@ -12,7 +12,7 @@ from bayeso.utils import utils_covariance -TEST_EPSILON = 1e-5 +TEST_EPSILON = 1e-7 def test_choose_fun_cov_typing(): annos = covariance.choose_fun_cov.__annotations__ @@ -36,6 +36,126 @@ def test_choose_fun_cov(): assert covariance.choose_fun_cov('matern32', True) == covariance.grad_cov_matern32 assert covariance.choose_fun_cov('matern52', True) == covariance.grad_cov_matern52 +def test_get_kernel_inverse_typing(): + annos = covariance.get_kernel_inverse.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['hyps'] == dict + assert annos['str_cov'] == str + assert annos['fix_noise'] == bool + assert annos['use_gradient'] == bool + assert annos['debug'] == bool + assert 
annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] + +def test_get_kernel_inverse(): + dim_X = 3 + X = np.reshape(np.arange(0, 9), (3, dim_X)) + hyps = utils_covariance.get_hyps('se', dim_X) + + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(1, hyps, 'se') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(np.arange(0, 100), hyps, 'se') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(X, 1, 'se') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(X, hyps, 1) + with pytest.raises(ValueError) as error: + covariance.get_kernel_inverse(X, hyps, 'abc') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(X, hyps, 'se', debug=1) + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(X, hyps, 'se', use_gradient='abc') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_inverse(X, hyps, 'se', fix_noise='abc') + + cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X, hyps, 'se') + print(cov_X_X) + print(inv_cov_X_X) + truth_cov_X_X = np.array([ + [1.00011000e+00, 1.37095909e-06, 3.53262857e-24], + [1.37095909e-06, 1.00011000e+00, 1.37095909e-06], + [3.53262857e-24, 1.37095909e-06, 1.00011000e+00] + ]) + truth_inv_cov_X_X = np.array([ + [9.99890012e-01, -1.37065753e-06, 1.87890871e-12], + [-1.37065753e-06, 9.99890012e-01, -1.37065753e-06], + [1.87890871e-12, -1.37065753e-06, 9.99890012e-01] + ]) + assert (np.abs(cov_X_X - truth_cov_X_X) < TEST_EPSILON).all() + assert (np.abs(inv_cov_X_X - truth_inv_cov_X_X) < TEST_EPSILON).all() + assert cov_X_X.shape == inv_cov_X_X.shape + + cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X, hyps, 'se', use_gradient=True, fix_noise=True) + print(grad_cov_X_X) + print(grad_cov_X_X.shape) + + truth_grad_cov_X_X = np.array([ + [ + [2.00002000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], + [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05], + [7.06525714e-24, 3.81523886e-22, 3.81523886e-22, 3.81523886e-22] + ], [ + [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05], + [2.00002000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], + [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05] + ], [ + [7.06525714e-24, 3.81523886e-22, 3.81523886e-22, 3.81523886e-22], + [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05], + [2.00002000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] + ] + ]) + assert (np.abs(cov_X_X - truth_cov_X_X) < TEST_EPSILON).all() + assert (np.abs(inv_cov_X_X - truth_inv_cov_X_X) < TEST_EPSILON).all() + assert (np.abs(grad_cov_X_X - truth_grad_cov_X_X) < TEST_EPSILON).all() + assert cov_X_X.shape == inv_cov_X_X.shape == grad_cov_X_X.shape[:2] + +def test_get_kernel_cholesky_typing(): + annos = covariance.get_kernel_cholesky.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['hyps'] == dict + assert annos['str_cov'] == str + assert annos['fix_noise'] == bool + assert annos['use_gradient'] == bool + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] + +def test_get_kernel_cholesky(): + dim_X = 3 + X = np.reshape(np.arange(0, 9), (3, dim_X)) + hyps = utils_covariance.get_hyps('se', dim_X) + + with pytest.raises(AssertionError) as error: + covariance.get_kernel_cholesky(1, hyps, 'se') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_cholesky(np.arange(0, 10), 
hyps, 'se') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_cholesky(X, 1, 'se') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_cholesky(X, hyps, 1) + with pytest.raises(ValueError) as error: + covariance.get_kernel_cholesky(X, hyps, 'abc') + with pytest.raises(AssertionError) as error: + covariance.get_kernel_cholesky(X, hyps, 'se', debug=1) + + cov_X_X, lower, _ = covariance.get_kernel_cholesky(X, hyps, 'se') + print(cov_X_X) + print(lower) + truth_cov_X_X = [ + [1.00011000e+00, 1.37095909e-06, 3.53262857e-24], + [1.37095909e-06, 1.00011000e+00, 1.37095909e-06], + [3.53262857e-24, 1.37095909e-06, 1.00011000e+00], + ] + truth_lower = [ + [1.00005500e+00, 0.00000000e+00, 0.00000000e+00], + [1.37088369e-06, 1.00005500e+00, 0.00000000e+00], + [3.53243429e-24, 1.37088369e-06, 1.00005500e+00], + ] + assert (np.abs(cov_X_X - truth_cov_X_X) < TEST_EPSILON).all() + assert (np.abs(lower - truth_lower) < TEST_EPSILON).all() + assert cov_X_X.shape == lower.shape + def test_cov_se_typing(): annos = covariance.cov_se.__annotations__ diff --git a/tests/common/test_gp_common.py b/tests/common/test_gp_common.py deleted file mode 100644 index c32313d..0000000 --- a/tests/common/test_gp_common.py +++ /dev/null @@ -1,135 +0,0 @@ -# -# author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 -# -"""test_gp_common""" - -import typing -import pytest -import numpy as np - -from bayeso.gp import gp_common -from bayeso.utils import utils_covariance - -TEST_EPSILON = 1e-7 - - -def test_get_kernel_inverse_typing(): - annos = gp_common.get_kernel_inverse.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['hyps'] == dict - assert annos['str_cov'] == str - assert annos['fix_noise'] == bool - assert annos['use_gradient'] == bool - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] - -def test_get_kernel_inverse(): - dim_X = 3 - X = np.reshape(np.arange(0, 9), (3, dim_X)) - hyps = utils_covariance.get_hyps('se', dim_X) - - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(1, hyps, 'se') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(np.arange(0, 100), hyps, 'se') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(X, 1, 'se') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(X, hyps, 1) - with pytest.raises(ValueError) as error: - gp_common.get_kernel_inverse(X, hyps, 'abc') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(X, hyps, 'se', debug=1) - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(X, hyps, 'se', use_gradient='abc') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_inverse(X, hyps, 'se', fix_noise='abc') - - cov_X_X, inv_cov_X_X, grad_cov_X_X = gp_common.get_kernel_inverse(X, hyps, 'se') - print(cov_X_X) - print(inv_cov_X_X) - truth_cov_X_X = np.array([ - [1.00011000e+00, 1.37095909e-06, 3.53262857e-24], - [1.37095909e-06, 1.00011000e+00, 1.37095909e-06], - [3.53262857e-24, 1.37095909e-06, 1.00011000e+00] - ]) - truth_inv_cov_X_X = np.array([ - [9.99890012e-01, -1.37065753e-06, 1.87890871e-12], - [-1.37065753e-06, 9.99890012e-01, -1.37065753e-06], - [1.87890871e-12, -1.37065753e-06, 9.99890012e-01] - ]) - assert (np.abs(cov_X_X - truth_cov_X_X) < TEST_EPSILON).all() - assert (np.abs(inv_cov_X_X - truth_inv_cov_X_X) < TEST_EPSILON).all() - assert cov_X_X.shape == 
inv_cov_X_X.shape - - cov_X_X, inv_cov_X_X, grad_cov_X_X = gp_common.get_kernel_inverse(X, hyps, 'se', use_gradient=True, fix_noise=True) - print(grad_cov_X_X) - print(grad_cov_X_X.shape) - - truth_grad_cov_X_X = np.array([ - [ - [2.00002000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05], - [7.06525714e-24, 3.81523886e-22, 3.81523886e-22, 3.81523886e-22] - ], [ - [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05], - [2.00002000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00], - [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05] - ], [ - [7.06525714e-24, 3.81523886e-22, 3.81523886e-22, 3.81523886e-22], - [2.74191817e-06, 3.70158953e-05, 3.70158953e-05, 3.70158953e-05], - [2.00002000e+00, 0.00000000e+00, 0.00000000e+00, 0.00000000e+00] - ] - ]) - assert (np.abs(cov_X_X - truth_cov_X_X) < TEST_EPSILON).all() - assert (np.abs(inv_cov_X_X - truth_inv_cov_X_X) < TEST_EPSILON).all() - assert (np.abs(grad_cov_X_X - truth_grad_cov_X_X) < TEST_EPSILON).all() - assert cov_X_X.shape == inv_cov_X_X.shape == grad_cov_X_X.shape[:2] - -def test_get_kernel_cholesky_typing(): - annos = gp_common.get_kernel_cholesky.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['hyps'] == dict - assert annos['str_cov'] == str - assert annos['fix_noise'] == bool - assert annos['use_gradient'] == bool - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] - -def test_get_kernel_cholesky(): - dim_X = 3 - X = np.reshape(np.arange(0, 9), (3, dim_X)) - hyps = utils_covariance.get_hyps('se', dim_X) - - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_cholesky(1, hyps, 'se') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_cholesky(np.arange(0, 10), hyps, 'se') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_cholesky(X, 1, 'se') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_cholesky(X, hyps, 1) - with pytest.raises(ValueError) as error: - gp_common.get_kernel_cholesky(X, hyps, 'abc') - with pytest.raises(AssertionError) as error: - gp_common.get_kernel_cholesky(X, hyps, 'se', debug=1) - - cov_X_X, lower, _ = gp_common.get_kernel_cholesky(X, hyps, 'se') - print(cov_X_X) - print(lower) - truth_cov_X_X = [ - [1.00011000e+00, 1.37095909e-06, 3.53262857e-24], - [1.37095909e-06, 1.00011000e+00, 1.37095909e-06], - [3.53262857e-24, 1.37095909e-06, 1.00011000e+00], - ] - truth_lower = [ - [1.00005500e+00, 0.00000000e+00, 0.00000000e+00], - [1.37088369e-06, 1.00005500e+00, 0.00000000e+00], - [3.53243429e-24, 1.37088369e-06, 1.00005500e+00], - ] - assert (np.abs(cov_X_X - truth_cov_X_X) < TEST_EPSILON).all() - assert (np.abs(lower - truth_lower) < TEST_EPSILON).all() - assert cov_X_X.shape == lower.shape diff --git a/tests/common/test_import.py b/tests/common/test_import.py index f66f074..ed0b759 100644 --- a/tests/common/test_import.py +++ b/tests/common/test_import.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """test_import""" @@ -16,15 +16,15 @@ def test_import_covariance(): def test_import_acquisition(): import bayeso.acquisition +def test_import_constants(): + import bayeso.constants + def test_import_gp(): import bayeso.gp def test_import_gp_gp(): import bayeso.gp.gp -def test_import_gp_gp_common(): - import bayeso.gp.gp_common - def test_import_gp_gp_scipy(): import 
bayeso.gp.gp_scipy @@ -57,6 +57,3 @@ def test_import_utils_utils_logger(): def test_import_wrappers_wrappers_bo(): import bayeso.wrappers.wrappers_bo - -def test_import_constants(): - import bayeso.constants diff --git a/tests/common/test_utils_covariance.py b/tests/common/test_utils_covariance.py index 626031e..323f016 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """test_utils_covariance""" @@ -248,3 +248,33 @@ def test_validate_hyps_arr(): _, is_valid = utils_covariance.validate_hyps_arr(cur_hyps, 'abc', num_dim) with pytest.raises(AssertionError) as error: _, is_valid = utils_covariance.validate_hyps_arr(cur_hyps, str_cov, 'abc') + +def test_check_str_cov_typing(): + annos = utils_covariance.check_str_cov.__annotations__ + + assert annos['str_fun'] == str + assert annos['str_cov'] == str + assert annos['shape_X1'] == tuple + assert annos['shape_X2'] == tuple + assert annos['return'] == type(None) + +def test_check_str_cov(): + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov(1, 'se', (2, 1)) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 1, (2, 1)) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 'se', 1) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 'se', (2, 100, 100)) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 'se', (2, 100), shape_X2=(2, 100, 100)) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 'set_se', (2, 100), shape_X2=(2, 100, 100)) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 'set_se', (2, 100, 100), shape_X2=(2, 100)) + with pytest.raises(AssertionError) as error: + utils_covariance.check_str_cov('test', 'se', (2, 1), shape_X2=1) + + with pytest.raises(ValueError) as error: + utils_covariance.check_str_cov('test', 'abc', (2, 1)) diff --git a/tests/common/test_utils_gp.py b/tests/common/test_utils_gp.py index cab13fe..44be308 100644 --- a/tests/common/test_utils_gp.py +++ b/tests/common/test_utils_gp.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: December 29, 2020 # """test_utils_gp""" @@ -13,36 +13,6 @@ TEST_EPSILON = 1e-7 -def test_check_str_cov_typing(): - annos = utils_gp.check_str_cov.__annotations__ - - assert annos['str_fun'] == str - assert annos['str_cov'] == str - assert annos['shape_X1'] == tuple - assert annos['shape_X2'] == tuple - assert annos['return'] == type(None) - -def test_check_str_cov(): - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov(1, 'se', (2, 1)) - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 1, (2, 1)) - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 'se', 1) - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 'se', (2, 100, 100)) - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 'se', (2, 100), shape_X2=(2, 100, 100)) - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 'set_se', (2, 100), shape_X2=(2, 100, 100)) - with pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 'set_se', (2, 100, 100), shape_X2=(2, 100)) - with 
pytest.raises(AssertionError) as error: - utils_gp.check_str_cov('test', 'se', (2, 1), shape_X2=1) - - with pytest.raises(ValueError) as error: - utils_gp.check_str_cov('test', 'abc', (2, 1)) - def test_get_prior_mu_typing(): annos = utils_gp.get_prior_mu.__annotations__ From 8c62010a2bb27859382b0fb9f8da3b6527ba8378 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 29 Dec 2020 16:40:26 +0900 Subject: [PATCH 06/37] Define tested package as package_target --- tests/common/test_acquisition.py | 104 ++++---- tests/common/test_bo.py | 70 +++--- tests/common/test_covariance.py | 264 ++++++++++----------- tests/common/test_gp.py | 170 +++++++------- tests/common/test_gp_gpytorch.py | 52 ++-- tests/common/test_gp_scipy.py | 112 ++++----- tests/common/test_gp_tensorflow.py | 52 ++-- tests/common/test_utils_bo.py | 94 ++++---- tests/common/test_utils_common.py | 48 ++-- tests/common/test_utils_covariance.py | 154 ++++++------ tests/common/test_utils_gp.py | 18 +- tests/common/test_utils_logger.py | 84 +++---- tests/common/test_utils_plotting.py | 326 +++++++++++++------------- tests/common/test_wrappers_bo.py | 74 +++--- 14 files changed, 811 insertions(+), 811 deletions(-) diff --git a/tests/common/test_acquisition.py b/tests/common/test_acquisition.py index df34e9a..b4c0171 100644 --- a/tests/common/test_acquisition.py +++ b/tests/common/test_acquisition.py @@ -8,14 +8,14 @@ import pytest import numpy as np -from bayeso import acquisition +from bayeso import acquisition as package_target TEST_EPSILON = 1e-5 def test_pi_typing(): - annos = acquisition.pi.__annotations__ + annos = package_target.pi.__annotations__ assert annos['pred_mean'] == np.ndarray assert annos['pred_std'] == np.ndarray @@ -25,30 +25,30 @@ def test_pi_typing(): def test_pi(): with pytest.raises(AssertionError) as error: - acquisition.pi('abc', np.ones(10), np.zeros((5, 1))) + package_target.pi('abc', np.ones(10), np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones(10), 'abc', np.zeros((5, 1))) + package_target.pi(np.ones(10), 'abc', np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones(10), np.ones(10), 'abc') + package_target.pi(np.ones(10), np.ones(10), 'abc') with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones(10), np.ones(10), np.zeros((5, 1)), 1) + package_target.pi(np.ones(10), np.ones(10), np.zeros((5, 1)), 1) with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones(5), np.ones(10), np.zeros((5, 1))) + package_target.pi(np.ones(5), np.ones(10), np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones(10), np.ones(10), np.zeros(5)) + package_target.pi(np.ones(10), np.ones(10), np.zeros(5)) with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones(10), np.ones((10, 1)), np.zeros((5, 1))) + package_target.pi(np.ones(10), np.ones((10, 1)), np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.pi(np.ones((10, 1)), np.ones(10), np.zeros((5, 1))) + package_target.pi(np.ones((10, 1)), np.ones(10), np.zeros((5, 1))) - val_acq = acquisition.pi(np.arange(0, 10), np.ones(10), np.zeros((5, 1))) + val_acq = package_target.pi(np.arange(0, 10), np.ones(10), np.zeros((5, 1))) truth_val_acq = np.array([5.00000000e-01, 1.58657674e-01, 2.27512118e-02, 1.35003099e-03, 3.16765954e-05, 2.86725916e-07, 9.86952260e-10, 1.28045212e-12, 6.22500364e-16, 1.12951395e-19]) print(val_acq) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() def test_ei_typing(): - annos = 
acquisition.ei.__annotations__ + annos = package_target.ei.__annotations__ assert annos['pred_mean'] == np.ndarray assert annos['pred_std'] == np.ndarray @@ -58,28 +58,28 @@ def test_ei_typing(): def test_ei(): with pytest.raises(AssertionError) as error: - acquisition.ei('abc', np.ones(10), np.zeros((5, 1))) + package_target.ei('abc', np.ones(10), np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones(10), 'abc', np.zeros((5, 1))) + package_target.ei(np.ones(10), 'abc', np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones(10), np.ones(10), 'abc') + package_target.ei(np.ones(10), np.ones(10), 'abc') with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones(10), np.ones(10), np.zeros((5, 1)), 1) + package_target.ei(np.ones(10), np.ones(10), np.zeros((5, 1)), 1) with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones(5), np.ones(10), np.zeros((5, 1))) + package_target.ei(np.ones(5), np.ones(10), np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones(10), np.ones(10), np.zeros(5)) + package_target.ei(np.ones(10), np.ones(10), np.zeros(5)) with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones(10), np.ones((10, 1)), np.zeros((5, 1))) + package_target.ei(np.ones(10), np.ones((10, 1)), np.zeros((5, 1))) with pytest.raises(AssertionError) as error: - acquisition.ei(np.ones((10, 1)), np.ones(10), np.zeros((5, 1))) + package_target.ei(np.ones((10, 1)), np.ones(10), np.zeros((5, 1))) - val_acq = acquisition.ei(np.arange(0, 10), np.ones(10), np.zeros((5, 1))) + val_acq = package_target.ei(np.arange(0, 10), np.ones(10), np.zeros((5, 1))) truth_val_acq = np.array([3.98942280e-01, 8.33154706e-02, 8.49070261e-03, 3.82154315e-04, 7.14525833e-06, 5.34616535e-08, 1.56356969e-10, 1.76032579e-13, 7.55026079e-17, 1.22477876e-20]) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() def test_ucb_typing(): - annos = acquisition.ucb.__annotations__ + annos = package_target.ucb.__annotations__ assert annos['pred_mean'] == np.ndarray assert annos['pred_std'] == np.ndarray @@ -90,34 +90,34 @@ def test_ucb_typing(): def test_ucb(): with pytest.raises(AssertionError) as error: - acquisition.ucb('abc', np.ones(10)) + package_target.ucb('abc', np.ones(10)) with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(10), 'abc') + package_target.ucb(np.ones(10), 'abc') with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(10), np.ones(10), kappa='abc') + package_target.ucb(np.ones(10), np.ones(10), kappa='abc') with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(5), np.ones(10)) + package_target.ucb(np.ones(5), np.ones(10)) with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(10), np.ones(10), Y_train='abc') + package_target.ucb(np.ones(10), np.ones(10), Y_train='abc') with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(10), np.ones(10), Y_train=np.zeros(5)) + package_target.ucb(np.ones(10), np.ones(10), Y_train=np.zeros(5)) with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(10), np.ones((10, 1))) + package_target.ucb(np.ones(10), np.ones((10, 1))) with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones((10, 1)), np.ones(10)) + package_target.ucb(np.ones((10, 1)), np.ones(10)) with pytest.raises(AssertionError) as error: - acquisition.ucb(np.ones(10), np.ones(10), increase_kappa='abc') + package_target.ucb(np.ones(10), np.ones(10), increase_kappa='abc') - 
val_acq = acquisition.ucb(np.arange(0, 10), np.ones(10), Y_train=np.zeros((5, 1))) + val_acq = package_target.ucb(np.arange(0, 10), np.ones(10), Y_train=np.zeros((5, 1))) truth_val_acq = np.array([3.21887582, 2.21887582, 1.21887582, 0.21887582, -0.78112418, -1.78112418, -2.78112418, -3.78112418, -4.78112418, -5.78112418]) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() - val_acq = acquisition.ucb(np.arange(0, 10), np.ones(10)) + val_acq = package_target.ucb(np.arange(0, 10), np.ones(10)) truth_val_acq = np.array([2.0, 1.0, 0.0, -1.0, -2.0, -3.0, -4.0, -5.0, -6.0, -7.0]) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() def test_aei_typing(): - annos = acquisition.aei.__annotations__ + annos = package_target.aei.__annotations__ assert annos['pred_mean'] == np.ndarray assert annos['pred_std'] == np.ndarray @@ -127,56 +127,56 @@ def test_aei_typing(): def test_aei(): with pytest.raises(AssertionError) as error: - acquisition.aei('abc', np.ones(10), np.zeros((5, 1)), 1.0) + package_target.aei('abc', np.ones(10), np.zeros((5, 1)), 1.0) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(10), 'abc', np.zeros((5, 1)), 1.0) + package_target.aei(np.ones(10), 'abc', np.zeros((5, 1)), 1.0) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(10), np.ones(10), 'abc', 1.0) + package_target.aei(np.ones(10), np.ones(10), 'abc', 1.0) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(10), np.ones(10), np.zeros((5, 1)), 'abc') + package_target.aei(np.ones(10), np.ones(10), np.zeros((5, 1)), 'abc') with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(10), np.ones(10), np.zeros((5, 1)), 1.0, jitter=1) + package_target.aei(np.ones(10), np.ones(10), np.zeros((5, 1)), 1.0, jitter=1) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(5), np.ones(10), np.zeros((5, 1)), 1.0) + package_target.aei(np.ones(5), np.ones(10), np.zeros((5, 1)), 1.0) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(10), np.ones(10), np.zeros(5), 1.0) + package_target.aei(np.ones(10), np.ones(10), np.zeros(5), 1.0) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones(10), np.ones((10, 1)), np.zeros((5, 1)), 1.0) + package_target.aei(np.ones(10), np.ones((10, 1)), np.zeros((5, 1)), 1.0) with pytest.raises(AssertionError) as error: - acquisition.aei(np.ones((10, 1)), np.ones(10), np.zeros((5, 1)), 1.0) + package_target.aei(np.ones((10, 1)), np.ones(10), np.zeros((5, 1)), 1.0) - val_acq = acquisition.aei(np.arange(0, 10), np.ones(10), np.zeros((5, 1)), 1.0) + val_acq = package_target.aei(np.arange(0, 10), np.ones(10), np.zeros((5, 1)), 1.0) truth_val_acq = np.array([1.16847489e-01, 2.44025364e-02, 2.48686922e-03, 1.11930407e-04, 2.09279771e-06, 1.56585558e-08, 4.57958958e-11, 5.15587486e-14, 2.21142019e-17, 3.58729395e-21]) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() def test_pure_exploit_typing(): - annos = acquisition.pure_exploit.__annotations__ + annos = package_target.pure_exploit.__annotations__ assert annos['pred_mean'] == np.ndarray assert annos['return'] == np.ndarray def test_pure_exploit(): with pytest.raises(AssertionError) as error: - acquisition.pure_exploit('abc') + package_target.pure_exploit('abc') with pytest.raises(AssertionError) as error: - acquisition.pure_exploit(np.ones((10, 1))) + package_target.pure_exploit(np.ones((10, 1))) - val_acq = acquisition.pure_exploit(np.arange(0, 10)) + val_acq = package_target.pure_exploit(np.arange(0, 10)) 
truth_val_acq = -np.arange(0, 10) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() def test_pure_explore_typing(): - annos = acquisition.pure_explore.__annotations__ + annos = package_target.pure_explore.__annotations__ assert annos['pred_std'] == np.ndarray assert annos['return'] == np.ndarray def test_pure_explore(): with pytest.raises(AssertionError) as error: - acquisition.pure_explore('abc') + package_target.pure_explore('abc') with pytest.raises(AssertionError) as error: - acquisition.pure_explore(np.ones((10, 1))) + package_target.pure_explore(np.ones((10, 1))) - val_acq = acquisition.pure_explore(np.arange(0, 10)) + val_acq = package_target.pure_explore(np.arange(0, 10)) truth_val_acq = np.arange(0, 10) assert (np.abs(val_acq - truth_val_acq) < TEST_EPSILON).all() diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py index 290095f..dff1cfc 100644 --- a/tests/common/test_bo.py +++ b/tests/common/test_bo.py @@ -7,7 +7,7 @@ import pytest import numpy as np -from bayeso import bo +from bayeso import bo as package_target TEST_EPSILON = 1e-5 @@ -37,44 +37,44 @@ def test_load_bo(): ]) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(1) + model_bo = package_target.BO(1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(np.arange(0, 10)) + model_bo = package_target.BO(np.arange(0, 10)) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_3) + model_bo = package_target.BO(arr_range_3) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_4) + model_bo = package_target.BO(arr_range_4) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_cov=1) + model_bo = package_target.BO(arr_range_1, str_cov=1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_cov='abc') + model_bo = package_target.BO(arr_range_1, str_cov='abc') with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_acq=1) + model_bo = package_target.BO(arr_range_1, str_acq=1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_acq='abc') + model_bo = package_target.BO(arr_range_1, str_acq='abc') with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, use_ard='abc') + model_bo = package_target.BO(arr_range_1, use_ard='abc') with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, use_ard=1) + model_bo = package_target.BO(arr_range_1, use_ard=1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, prior_mu=1) + model_bo = package_target.BO(arr_range_1, prior_mu=1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_optimizer_method_gp=1) + model_bo = package_target.BO(arr_range_1, str_optimizer_method_gp=1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_optimizer_method_gp='abc') + model_bo = package_target.BO(arr_range_1, str_optimizer_method_gp='abc') with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_optimizer_method_bo=1) + model_bo = package_target.BO(arr_range_1, str_optimizer_method_bo=1) with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_optimizer_method_bo='abc') + model_bo = package_target.BO(arr_range_1, str_optimizer_method_bo='abc') with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_modelselection_method=1) + model_bo = package_target.BO(arr_range_1, str_modelselection_method=1) with 
pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, str_modelselection_method='abc') + model_bo = package_target.BO(arr_range_1, str_modelselection_method='abc') with pytest.raises(AssertionError) as error: - model_bo = bo.BO(arr_range_1, debug=1) + model_bo = package_target.BO(arr_range_1, debug=1) - model_bo = bo.BO(arr_range_1) - model_bo = bo.BO(arr_range_2) + model_bo = package_target.BO(arr_range_1) + model_bo = package_target.BO(arr_range_2) def test_get_samples(): np.random.seed(42) @@ -88,7 +88,7 @@ def test_get_samples(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) fun_objective = lambda X: np.sum(X) - model_bo = bo.BO(arr_range, debug=True) + model_bo = package_target.BO(arr_range, debug=True) with pytest.raises(AssertionError) as error: model_bo.get_samples(1) @@ -163,7 +163,7 @@ def test_get_initials(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) fun_objective = lambda X: np.sum(X) - model_bo = bo.BO(arr_range) + model_bo = package_target.BO(arr_range) with pytest.raises(AssertionError) as error: model_bo.get_initials(1, 10) @@ -204,7 +204,7 @@ def test_optimize(): num_X = 5 X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) - model_bo = bo.BO(arr_range_1) + model_bo = package_target.BO(arr_range_1) with pytest.raises(AssertionError) as error: model_bo.optimize(1, Y) @@ -269,7 +269,7 @@ def test_optimize_str_acq(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) - model_bo = bo.BO(arr_range_1, str_acq='pi') + model_bo = package_target.BO(arr_range_1, str_acq='pi') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -296,7 +296,7 @@ def test_optimize_str_acq(): assert next_points.shape[1] == dim_X assert next_points.shape[0] == acquisitions.shape[0] - model_bo = bo.BO(arr_range_1, str_acq='ucb') + model_bo = package_target.BO(arr_range_1, str_acq='ucb') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -323,7 +323,7 @@ def test_optimize_str_acq(): assert next_points.shape[1] == dim_X assert next_points.shape[0] == acquisitions.shape[0] - model_bo = bo.BO(arr_range_1, str_acq='aei') + model_bo = package_target.BO(arr_range_1, str_acq='aei') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -350,7 +350,7 @@ def test_optimize_str_acq(): assert next_points.shape[1] == dim_X assert next_points.shape[0] == acquisitions.shape[0] - model_bo = bo.BO(arr_range_1, str_acq='pure_exploit') + model_bo = package_target.BO(arr_range_1, str_acq='pure_exploit') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -377,7 +377,7 @@ def test_optimize_str_acq(): assert next_points.shape[1] == dim_X assert next_points.shape[0] == acquisitions.shape[0] - model_bo = bo.BO(arr_range_1, str_acq='pure_explore') + model_bo = package_target.BO(arr_range_1, str_acq='pure_explore') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -416,7 +416,7 @@ def test_optimize_str_optimize_method_bo(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) - model_bo = bo.BO(arr_range_1, str_optimizer_method_bo='L-BFGS-B') + model_bo = package_target.BO(arr_range_1, str_optimizer_method_bo='L-BFGS-B') next_point, dict_info = 
model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -445,7 +445,7 @@ def test_optimize_str_optimize_method_bo(): # TODO: add DIRECT test, now it causes an error. - model_bo = bo.BO(arr_range_1, str_optimizer_method_bo='CMA-ES') + model_bo = package_target.BO(arr_range_1, str_optimizer_method_bo='CMA-ES') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -484,7 +484,7 @@ def test_optimize_str_mlm_method(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) - model_bo = bo.BO(arr_range_1) + model_bo = package_target.BO(arr_range_1) next_point, dict_info = model_bo.optimize(X, Y, str_mlm_method='converged') next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -523,7 +523,7 @@ def test_optimize_str_modelselection_method(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) - model_bo = bo.BO(arr_range_1, str_modelselection_method='loocv') + model_bo = package_target.BO(arr_range_1, str_modelselection_method='loocv') next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -562,7 +562,7 @@ def test_optimize_normalize_Y(): X = np.random.randn(num_X, dim_X) Y = np.random.randn(num_X, 1) - model_bo = bo.BO(arr_range, str_acq='ei', normalize_Y=True) + model_bo = package_target.BO(arr_range, str_acq='ei', normalize_Y=True) next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] @@ -600,7 +600,7 @@ def test_optimize_normalize_Y(): [100.0], ]) - model_bo = bo.BO(arr_range, str_acq='ei', normalize_Y=True) + model_bo = package_target.BO(arr_range, str_acq='ei', normalize_Y=True) next_point, dict_info = model_bo.optimize(X, Y) next_points = dict_info['next_points'] acquisitions = dict_info['acquisitions'] diff --git a/tests/common/test_covariance.py b/tests/common/test_covariance.py index 627ed11..d28b3ce 100644 --- a/tests/common/test_covariance.py +++ b/tests/common/test_covariance.py @@ -8,14 +8,14 @@ import pytest import numpy as np -from bayeso import covariance +from bayeso import covariance as package_target from bayeso.utils import utils_covariance TEST_EPSILON = 1e-7 def test_choose_fun_cov_typing(): - annos = covariance.choose_fun_cov.__annotations__ + annos = package_target.choose_fun_cov.__annotations__ assert annos['str_cov'] == str assert annos['choose_grad'] == bool @@ -23,21 +23,21 @@ def test_choose_fun_cov_typing(): def test_choose_fun_cov(): with pytest.raises(AssertionError) as error: - covariance.choose_fun_cov(123, False) + package_target.choose_fun_cov(123, False) with pytest.raises(AssertionError) as error: - covariance.choose_fun_cov('se', 'abc') + package_target.choose_fun_cov('se', 'abc') with pytest.raises(NotImplementedError) as error: - covariance.choose_fun_cov('abc', False) + package_target.choose_fun_cov('abc', False) - assert covariance.choose_fun_cov('se', False) == covariance.cov_se - assert covariance.choose_fun_cov('matern32', False) == covariance.cov_matern32 - assert covariance.choose_fun_cov('matern52', False) == covariance.cov_matern52 - assert covariance.choose_fun_cov('se', True) == covariance.grad_cov_se - assert covariance.choose_fun_cov('matern32', True) == covariance.grad_cov_matern32 - assert covariance.choose_fun_cov('matern52', True) == covariance.grad_cov_matern52 + assert package_target.choose_fun_cov('se', False) == 
package_target.cov_se + assert package_target.choose_fun_cov('matern32', False) == package_target.cov_matern32 + assert package_target.choose_fun_cov('matern52', False) == package_target.cov_matern52 + assert package_target.choose_fun_cov('se', True) == package_target.grad_cov_se + assert package_target.choose_fun_cov('matern32', True) == package_target.grad_cov_matern32 + assert package_target.choose_fun_cov('matern52', True) == package_target.grad_cov_matern52 def test_get_kernel_inverse_typing(): - annos = covariance.get_kernel_inverse.__annotations__ + annos = package_target.get_kernel_inverse.__annotations__ assert annos['X_train'] == np.ndarray assert annos['hyps'] == dict @@ -53,23 +53,23 @@ def test_get_kernel_inverse(): hyps = utils_covariance.get_hyps('se', dim_X) with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(1, hyps, 'se') + package_target.get_kernel_inverse(1, hyps, 'se') with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(np.arange(0, 100), hyps, 'se') + package_target.get_kernel_inverse(np.arange(0, 100), hyps, 'se') with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(X, 1, 'se') + package_target.get_kernel_inverse(X, 1, 'se') with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(X, hyps, 1) + package_target.get_kernel_inverse(X, hyps, 1) with pytest.raises(ValueError) as error: - covariance.get_kernel_inverse(X, hyps, 'abc') + package_target.get_kernel_inverse(X, hyps, 'abc') with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(X, hyps, 'se', debug=1) + package_target.get_kernel_inverse(X, hyps, 'se', debug=1) with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(X, hyps, 'se', use_gradient='abc') + package_target.get_kernel_inverse(X, hyps, 'se', use_gradient='abc') with pytest.raises(AssertionError) as error: - covariance.get_kernel_inverse(X, hyps, 'se', fix_noise='abc') + package_target.get_kernel_inverse(X, hyps, 'se', fix_noise='abc') - cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X, hyps, 'se') + cov_X_X, inv_cov_X_X, grad_cov_X_X = package_target.get_kernel_inverse(X, hyps, 'se') print(cov_X_X) print(inv_cov_X_X) truth_cov_X_X = np.array([ @@ -86,7 +86,7 @@ def test_get_kernel_inverse(): assert (np.abs(inv_cov_X_X - truth_inv_cov_X_X) < TEST_EPSILON).all() assert cov_X_X.shape == inv_cov_X_X.shape - cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X, hyps, 'se', use_gradient=True, fix_noise=True) + cov_X_X, inv_cov_X_X, grad_cov_X_X = package_target.get_kernel_inverse(X, hyps, 'se', use_gradient=True, fix_noise=True) print(grad_cov_X_X) print(grad_cov_X_X.shape) @@ -111,7 +111,7 @@ def test_get_kernel_inverse(): assert cov_X_X.shape == inv_cov_X_X.shape == grad_cov_X_X.shape[:2] def test_get_kernel_cholesky_typing(): - annos = covariance.get_kernel_cholesky.__annotations__ + annos = package_target.get_kernel_cholesky.__annotations__ assert annos['X_train'] == np.ndarray assert annos['hyps'] == dict @@ -127,19 +127,19 @@ def test_get_kernel_cholesky(): hyps = utils_covariance.get_hyps('se', dim_X) with pytest.raises(AssertionError) as error: - covariance.get_kernel_cholesky(1, hyps, 'se') + package_target.get_kernel_cholesky(1, hyps, 'se') with pytest.raises(AssertionError) as error: - covariance.get_kernel_cholesky(np.arange(0, 10), hyps, 'se') + package_target.get_kernel_cholesky(np.arange(0, 10), hyps, 'se') with pytest.raises(AssertionError) as error: - 
covariance.get_kernel_cholesky(X, 1, 'se') + package_target.get_kernel_cholesky(X, 1, 'se') with pytest.raises(AssertionError) as error: - covariance.get_kernel_cholesky(X, hyps, 1) + package_target.get_kernel_cholesky(X, hyps, 1) with pytest.raises(ValueError) as error: - covariance.get_kernel_cholesky(X, hyps, 'abc') + package_target.get_kernel_cholesky(X, hyps, 'abc') with pytest.raises(AssertionError) as error: - covariance.get_kernel_cholesky(X, hyps, 'se', debug=1) + package_target.get_kernel_cholesky(X, hyps, 'se', debug=1) - cov_X_X, lower, _ = covariance.get_kernel_cholesky(X, hyps, 'se') + cov_X_X, lower, _ = package_target.get_kernel_cholesky(X, hyps, 'se') print(cov_X_X) print(lower) truth_cov_X_X = [ @@ -157,7 +157,7 @@ def test_get_kernel_cholesky(): assert cov_X_X.shape == lower.shape def test_cov_se_typing(): - annos = covariance.cov_se.__annotations__ + annos = package_target.cov_se.__annotations__ assert annos['X'] == np.ndarray assert annos['Xp'] == np.ndarray @@ -167,19 +167,19 @@ def test_cov_se_typing(): def test_cov_se(): with pytest.raises(AssertionError) as error: - covariance.cov_se(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_se(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_se(np.zeros((1, 2)), np.zeros((1, 3)), np.array([1.0, 1.0]), 0.1) + package_target.cov_se(np.zeros((1, 2)), np.zeros((1, 3)), np.array([1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_se(np.zeros((1, 3)), np.zeros((1, 2)), np.array([1.0, 1.0]), 0.1) + package_target.cov_se(np.zeros((1, 3)), np.zeros((1, 2)), np.array([1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_se(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0]), 1) - assert np.abs(covariance.cov_se(np.zeros((1, 2)), np.zeros((1, 2)), 1.0, 0.1)[0] - 0.01) < TEST_EPSILON + package_target.cov_se(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0]), 1) + assert np.abs(package_target.cov_se(np.zeros((1, 2)), np.zeros((1, 2)), 1.0, 0.1)[0] - 0.01) < TEST_EPSILON X = np.array([[1.0, 2.0, 0.0]]) Xp = np.array([[2.0, 1.0, 1.0]]) cur_hyps = utils_covariance.get_hyps('se', 3) - cov_ = covariance.cov_se(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_se(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = 0.22313016014842987 assert np.abs(cov_[0] - truth_cov_) < TEST_EPSILON @@ -188,13 +188,13 @@ def test_cov_se(): Xp = np.array([[2.0, 1.0, 1.0], [0.0, 0.0, 0.0]]) cur_hyps = utils_covariance.get_hyps('se', 3) cur_hyps['lengthscales'] = 1.0 - cov_ = covariance.cov_se(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_se(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = np.array([[0.22313016, 0.082085]]) assert np.all(np.abs(cov_[0] - truth_cov_) < TEST_EPSILON) def test_grad_cov_se_typing(): - annos = covariance.grad_cov_se.__annotations__ + annos = package_target.grad_cov_se.__annotations__ assert annos['cov_X_Xp'] == np.ndarray assert annos['X'] == np.ndarray @@ -212,24 +212,24 @@ def test_grad_cov_se(): [1.0, 1.0], ]) num_hyps = X_train.shape[1] + 1 - cov_ = covariance.cov_main(str_cov, X_train, X_train, cur_hyps, True) + cov_ = package_target.cov_main(str_cov, X_train, X_train, cur_hyps, True) print(cov_) with pytest.raises(AssertionError) as error: - covariance.grad_cov_se('abc', X_train, X_train, cur_hyps, num_hyps, True) + 
package_target.grad_cov_se('abc', X_train, X_train, cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_se(cov_, 'abc', X_train, cur_hyps, num_hyps, True) + package_target.grad_cov_se(cov_, 'abc', X_train, cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_se(cov_, X_train, 'abc', cur_hyps, num_hyps, True) + package_target.grad_cov_se(cov_, X_train, 'abc', cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_se(cov_, X_train, X_train, 'abc', num_hyps, True) + package_target.grad_cov_se(cov_, X_train, X_train, 'abc', num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_se(cov_, X_train, X_train, cur_hyps, 'abc', True) + package_target.grad_cov_se(cov_, X_train, X_train, cur_hyps, 'abc', True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_se(cov_, X_train, X_train, cur_hyps, num_hyps, 'abc') + package_target.grad_cov_se(cov_, X_train, X_train, cur_hyps, num_hyps, 'abc') num_hyps = X_train.shape[1] + 2 - grad_cov_ = covariance.grad_cov_se(cov_, X_train, X_train, cur_hyps, num_hyps, False) + grad_cov_ = package_target.grad_cov_se(cov_, X_train, X_train, cur_hyps, num_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([ @@ -245,7 +245,7 @@ def test_grad_cov_se(): assert np.all(np.abs(truth_grad_cov_ - grad_cov_) < TEST_EPSILON) def test_cov_matern32_typing(): - annos = covariance.cov_matern32.__annotations__ + annos = package_target.cov_matern32.__annotations__ assert annos['X'] == np.ndarray assert annos['Xp'] == np.ndarray @@ -255,19 +255,19 @@ def test_cov_matern32_typing(): def test_cov_matern32(): with pytest.raises(AssertionError) as error: - covariance.cov_matern32(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_matern32(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_matern32(np.zeros((1, 2)), np.zeros((1, 3)), np.array([1.0, 1.0]), 0.1) + package_target.cov_matern32(np.zeros((1, 2)), np.zeros((1, 3)), np.array([1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_matern32(np.zeros((1, 3)), np.zeros((1, 2)), np.array([1.0, 1.0]), 0.1) + package_target.cov_matern32(np.zeros((1, 3)), np.zeros((1, 2)), np.array([1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_matern32(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0]), 1) - assert np.abs(covariance.cov_matern32(np.zeros((1, 2)), np.zeros((1, 2)), 1.0, 0.1)[0] - 0.01) < TEST_EPSILON + package_target.cov_matern32(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0]), 1) + assert np.abs(package_target.cov_matern32(np.zeros((1, 2)), np.zeros((1, 2)), 1.0, 0.1)[0] - 0.01) < TEST_EPSILON X = np.array([[1.0, 2.0, 0.0]]) Xp = np.array([[2.0, 1.0, 1.0]]) cur_hyps = utils_covariance.get_hyps('matern32', 3) - cov_ = covariance.cov_matern32(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_matern32(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = 0.19914827347145583 assert np.abs(cov_[0] - truth_cov_) < TEST_EPSILON @@ -276,13 +276,13 @@ def test_cov_matern32(): Xp = np.array([[2.0, 1.0, 1.0], [0.0, 0.0, 0.0]]) cur_hyps = utils_covariance.get_hyps('matern32', 3) cur_hyps['lengthscales'] = 1.0 - cov_ = covariance.cov_matern32(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_matern32(X, Xp, 
cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = np.array([[0.19914827, 0.1013397]]) assert np.all(np.abs(cov_[0] - truth_cov_) < TEST_EPSILON) def test_grad_cov_matern32_typing(): - annos = covariance.grad_cov_matern32.__annotations__ + annos = package_target.grad_cov_matern32.__annotations__ assert annos['cov_X_Xp'] == np.ndarray assert annos['X'] == np.ndarray @@ -300,24 +300,24 @@ def test_grad_cov_matern32(): [1.0, 1.0], ]) num_hyps = X_train.shape[1] + 1 - cov_ = covariance.cov_main(str_cov, X_train, X_train, cur_hyps, True) + cov_ = package_target.cov_main(str_cov, X_train, X_train, cur_hyps, True) print(cov_) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern32('abc', X_train, X_train, cur_hyps, num_hyps, True) + package_target.grad_cov_matern32('abc', X_train, X_train, cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern32(cov_, 'abc', X_train, cur_hyps, num_hyps, True) + package_target.grad_cov_matern32(cov_, 'abc', X_train, cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern32(cov_, X_train, 'abc', cur_hyps, num_hyps, True) + package_target.grad_cov_matern32(cov_, X_train, 'abc', cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern32(cov_, X_train, X_train, 'abc', num_hyps, True) + package_target.grad_cov_matern32(cov_, X_train, X_train, 'abc', num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, 'abc', True) + package_target.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, 'abc', True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, num_hyps, 'abc') + package_target.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, num_hyps, 'abc') num_hyps = X_train.shape[1] + 2 - grad_cov_ = covariance.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, num_hyps, False) + grad_cov_ = package_target.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, num_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([ @@ -333,7 +333,7 @@ def test_grad_cov_matern32(): assert np.all(np.abs(truth_grad_cov_ - grad_cov_) < TEST_EPSILON) num_hyps = X_train.shape[1] + 1 - grad_cov_ = covariance.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, num_hyps, True) + grad_cov_ = package_target.grad_cov_matern32(cov_, X_train, X_train, cur_hyps, num_hyps, True) print(grad_cov_) truth_grad_cov_ = np.array([ @@ -349,7 +349,7 @@ def test_grad_cov_matern32(): assert np.all(np.abs(truth_grad_cov_ - grad_cov_) < TEST_EPSILON) def test_cov_matern52_typing(): - annos = covariance.cov_matern52.__annotations__ + annos = package_target.cov_matern52.__annotations__ assert annos['X'] == np.ndarray assert annos['Xp'] == np.ndarray @@ -359,19 +359,19 @@ def test_cov_matern52_typing(): def test_cov_matern52(): with pytest.raises(AssertionError) as error: - covariance.cov_matern52(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_matern52(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_matern52(np.zeros((1, 2)), np.zeros((1, 3)), np.array([1.0, 1.0]), 0.1) + package_target.cov_matern52(np.zeros((1, 2)), np.zeros((1, 3)), np.array([1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_matern52(np.zeros((1, 3)), np.zeros((1, 2)), np.array([1.0, 1.0]), 0.1) + 
package_target.cov_matern52(np.zeros((1, 3)), np.zeros((1, 2)), np.array([1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_matern52(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0]), 1) - assert np.abs(covariance.cov_matern52(np.zeros((1, 2)), np.zeros((1, 2)), 1.0, 0.1)[0] - 0.01) < TEST_EPSILON + package_target.cov_matern52(np.zeros((1, 2)), np.zeros((1, 2)), np.array([1.0, 1.0]), 1) + assert np.abs(package_target.cov_matern52(np.zeros((1, 2)), np.zeros((1, 2)), 1.0, 0.1)[0] - 0.01) < TEST_EPSILON X = np.array([[1.0, 2.0, 0.0]]) Xp = np.array([[2.0, 1.0, 1.0]]) cur_hyps = utils_covariance.get_hyps('matern52', 3) - cov_ = covariance.cov_matern52(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_matern52(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = 0.20532087608359792 assert np.abs(cov_[0] - truth_cov_) < TEST_EPSILON @@ -380,13 +380,13 @@ def test_cov_matern52(): Xp = np.array([[2.0, 1.0, 1.0], [0.0, 0.0, 0.0]]) cur_hyps = utils_covariance.get_hyps('matern52', 3) cur_hyps['lengthscales'] = 1.0 - cov_ = covariance.cov_matern52(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_matern52(X, Xp, cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = np.array([[0.20532088, 0.09657724]]) assert np.all(np.abs(cov_[0] - truth_cov_) < TEST_EPSILON) def test_grad_cov_matern52_typing(): - annos = covariance.grad_cov_matern52.__annotations__ + annos = package_target.grad_cov_matern52.__annotations__ assert annos['cov_X_Xp'] == np.ndarray assert annos['X'] == np.ndarray @@ -404,24 +404,24 @@ def test_grad_cov_matern52(): [1.0, 1.0], ]) num_hyps = X_train.shape[1] + 1 - cov_ = covariance.cov_main(str_cov, X_train, X_train, cur_hyps, True) + cov_ = package_target.cov_main(str_cov, X_train, X_train, cur_hyps, True) print(cov_) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern52('abc', X_train, X_train, cur_hyps, num_hyps, True) + package_target.grad_cov_matern52('abc', X_train, X_train, cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern52(cov_, 'abc', X_train, cur_hyps, num_hyps, True) + package_target.grad_cov_matern52(cov_, 'abc', X_train, cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern52(cov_, X_train, 'abc', cur_hyps, num_hyps, True) + package_target.grad_cov_matern52(cov_, X_train, 'abc', cur_hyps, num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern52(cov_, X_train, X_train, 'abc', num_hyps, True) + package_target.grad_cov_matern52(cov_, X_train, X_train, 'abc', num_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, 'abc', True) + package_target.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, 'abc', True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, num_hyps, 'abc') + package_target.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, num_hyps, 'abc') num_hyps = X_train.shape[1] + 2 - grad_cov_ = covariance.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, num_hyps, False) + grad_cov_ = package_target.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, num_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([ @@ -437,7 +437,7 @@ def test_grad_cov_matern52(): assert np.all(np.abs(truth_grad_cov_ - grad_cov_) < TEST_EPSILON) num_hyps = X_train.shape[1] + 
1 - grad_cov_ = covariance.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, num_hyps, True) + grad_cov_ = package_target.grad_cov_matern52(cov_, X_train, X_train, cur_hyps, num_hyps, True) print(grad_cov_) truth_grad_cov_ = np.array([ @@ -453,7 +453,7 @@ def test_grad_cov_matern52(): assert np.all(np.abs(truth_grad_cov_ - grad_cov_) < TEST_EPSILON) def test_cov_set_typing(): - annos = covariance.cov_set.__annotations__ + annos = package_target.cov_set.__annotations__ assert annos['str_cov'] == str assert annos['X'] == np.ndarray @@ -467,29 +467,29 @@ def test_cov_set(): num_dim = 3 str_cov = 'matern52' with pytest.raises(AssertionError) as error: - covariance.cov_set(1, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_set(1, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_set('abc', np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_set('abc', np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0, 1.0]), 0.1) + package_target.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_set(str_cov, np.zeros((num_instances, num_dim+1)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_set(str_cov, np.zeros((num_instances, num_dim+1)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim+1)), np.array([1.0, 1.0, 1.0]), 0.1) + package_target.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim+1)), np.array([1.0, 1.0, 1.0]), 0.1) with pytest.raises(AssertionError) as error: - covariance.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 1) - assert np.abs(covariance.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), 1.0, 0.1) - 0.01) < TEST_EPSILON + package_target.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), np.array([1.0, 1.0, 1.0]), 1) + assert np.abs(package_target.cov_set(str_cov, np.zeros((num_instances, num_dim)), np.zeros((num_instances, num_dim)), 1.0, 0.1) - 0.01) < TEST_EPSILON bx = np.array([[1.0, 2.0, 0.0], [2.0, 1.0, 0.0]]) bxp = np.array([[2.0, 1.0, 1.0], [2.0, 2.0, 2.0]]) cur_hyps = utils_covariance.get_hyps('matern52', 3) - cov_ = covariance.cov_set(str_cov, bx, bxp, cur_hyps['lengthscales'], cur_hyps['signal']) + cov_ = package_target.cov_set(str_cov, bx, bxp, cur_hyps['lengthscales'], cur_hyps['signal']) print(cov_) truth_cov_ = 0.23061736638896702 assert np.abs(cov_ - truth_cov_) < TEST_EPSILON def test_cov_main_typing(): - annos = covariance.cov_main.__annotations__ + annos = package_target.cov_main.__annotations__ assert annos['str_cov'] == str assert annos['X'] == np.ndarray @@ -502,69 +502,69 @@ def test_cov_main_typing(): def test_cov_main(): cur_hyps = utils_covariance.get_hyps('se', 3) 
with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 2)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + package_target.cov_main('se', np.zeros((10, 2)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 3)), np.zeros((20, 2)), cur_hyps, False, jitter=0.001) + package_target.cov_main('se', np.zeros((10, 3)), np.zeros((20, 2)), cur_hyps, False, jitter=0.001) with pytest.raises(ValueError) as error: - covariance.cov_main('se', np.zeros((10, 2)), np.zeros((20, 2)), cur_hyps, False, jitter=0.001) + package_target.cov_main('se', np.zeros((10, 2)), np.zeros((20, 2)), cur_hyps, False, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', 1.0, np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + package_target.cov_main('se', 1.0, np.zeros((20, 3)), cur_hyps, False, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 2)), 1.0, cur_hyps, False, jitter=0.001) + package_target.cov_main('se', np.zeros((10, 2)), 1.0, cur_hyps, False, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main(1.0, np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + package_target.cov_main(1.0, np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), 2.1, False, jitter=0.001) + package_target.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), 2.1, False, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, 'abc', jitter=0.001) + package_target.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, 'abc', jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 3)), np.zeros((12, 3)), cur_hyps, True, jitter=0.001) + package_target.cov_main('se', np.zeros((10, 3)), np.zeros((12, 3)), cur_hyps, True, jitter=0.001) with pytest.raises(AssertionError) as error: - covariance.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=1) + package_target.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=1) with pytest.raises(AssertionError) as error: - covariance.cov_main('abc', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + package_target.cov_main('abc', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) cur_hyps.pop('signal', None) with pytest.raises(ValueError) as error: - covariance.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False) + package_target.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False) with pytest.raises(ValueError) as error: - covariance.cov_main('set_se', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False) + package_target.cov_main('set_se', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False) cur_hyps = utils_covariance.get_hyps('se', 3) - cov_ = covariance.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('se', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 20))) - cov_ = covariance.cov_main('set_se', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('set_se', np.zeros((10, 5, 3)), 
np.zeros((20, 5, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 20))) cur_hyps = utils_covariance.get_hyps('matern32', 3) - cov_ = covariance.cov_main('matern32', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('matern32', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 20))) - cov_ = covariance.cov_main('set_matern32', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('set_matern32', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 20))) - cov_ = covariance.cov_main('set_matern32', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('set_matern32', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 10))) - cov_ = covariance.cov_main('set_matern32', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, True, jitter=0.001) + cov_ = package_target.cov_main('set_matern32', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, True, jitter=0.001) assert np.all(cov_ == np.ones((10, 10)) + np.eye(10) * 1e-3) cur_hyps = utils_covariance.get_hyps('matern52', 3) - cov_ = covariance.cov_main('matern52', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('matern52', np.zeros((10, 3)), np.zeros((20, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 20))) - cov_ = covariance.cov_main('set_matern52', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('set_matern52', np.zeros((10, 5, 3)), np.zeros((20, 5, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 20))) - cov_ = covariance.cov_main('set_matern52', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, False, jitter=0.001) + cov_ = package_target.cov_main('set_matern52', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, False, jitter=0.001) assert np.all(cov_ == np.ones((10, 10))) - cov_ = covariance.cov_main('set_matern52', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, True, jitter=0.001) + cov_ = package_target.cov_main('set_matern52', np.zeros((10, 5, 3)), np.zeros((10, 5, 3)), cur_hyps, True, jitter=0.001) assert np.all(cov_ == np.ones((10, 10)) + np.eye(10) * 1e-3) def test_grad_cov_main_typing(): - annos = covariance.grad_cov_main.__annotations__ + annos = package_target.grad_cov_main.__annotations__ assert annos['str_cov'] == str assert annos['X'] == np.ndarray @@ -579,56 +579,56 @@ def test_grad_cov_main(): cur_hyps = utils_covariance.get_hyps('se', 2) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main(123, np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True) + package_target.grad_cov_main(123, np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', 123, np.zeros((10, 2)), cur_hyps, True) + package_target.grad_cov_main('se', 123, np.zeros((10, 2)), cur_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', np.zeros((10, 2)), 123, cur_hyps, True) + package_target.grad_cov_main('se', np.zeros((10, 2)), 123, cur_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), 123, True) + package_target.grad_cov_main('se', 
np.zeros((10, 2)), np.zeros((10, 2)), 123, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, 'abc') + package_target.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, 'abc') with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('abc', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True) + package_target.grad_cov_main('abc', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True, same_X_Xp='abc') + package_target.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True, same_X_Xp='abc') with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True, same_X_Xp=False) + package_target.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True, same_X_Xp=False) with pytest.raises(AssertionError) as error: - covariance.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True, jitter='abc') + package_target.grad_cov_main('se', np.zeros((10, 2)), np.zeros((10, 2)), cur_hyps, True, jitter='abc') - grad_cov_ = covariance.grad_cov_main('se', np.ones((1, 2)), np.ones((1, 2)), cur_hyps, True) + grad_cov_ = package_target.grad_cov_main('se', np.ones((1, 2)), np.ones((1, 2)), cur_hyps, True) print(grad_cov_) truth_grad_cov_ = np.array([[[2.00002, 0.0, 0.0]]]) assert np.all(np.abs(grad_cov_ - truth_grad_cov_) < TEST_EPSILON) - grad_cov_ = covariance.grad_cov_main('se', np.ones((1, 2)), np.ones((1, 2)), cur_hyps, False) + grad_cov_ = package_target.grad_cov_main('se', np.ones((1, 2)), np.ones((1, 2)), cur_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([[[0.02, 2.00002, 0.0, 0.0]]]) assert np.all(np.abs(grad_cov_ - truth_grad_cov_) < TEST_EPSILON) cur_hyps['lengthscales'] = 1.0 - grad_cov_ = covariance.grad_cov_main('se', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, False) + grad_cov_ = package_target.grad_cov_main('se', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([[[0.02, 0.73577888, 0.73577888]]]) assert np.all(np.abs(grad_cov_ - truth_grad_cov_) < TEST_EPSILON) - grad_cov_ = covariance.grad_cov_main('matern32', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, False) + grad_cov_ = package_target.grad_cov_main('matern32', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([[[0.02, 0.59566154, 0.51802578]]]) assert np.all(np.abs(grad_cov_ - truth_grad_cov_) < TEST_EPSILON) - grad_cov_ = covariance.grad_cov_main('matern32', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, True) + grad_cov_ = package_target.grad_cov_main('matern32', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, True) print(grad_cov_) truth_grad_cov_ = np.array([[[0.59566154, 0.51802578]]]) assert np.all(np.abs(grad_cov_ - truth_grad_cov_) < TEST_EPSILON) - grad_cov_ = covariance.grad_cov_main('matern52', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, False) + grad_cov_ = package_target.grad_cov_main('matern52', np.ones((1, 2)), np.zeros((1, 2)), cur_hyps, False) print(grad_cov_) truth_grad_cov_ = np.array([[[0.02, 0.63458673, 0.8305486]]]) diff --git a/tests/common/test_gp.py b/tests/common/test_gp.py index 733558b..4e9417b 100644 --- a/tests/common/test_gp.py +++ b/tests/common/test_gp.py @@ -9,7 +9,7 @@ import numpy as np from bayeso import constants -from bayeso.gp import gp +from 
bayeso.gp import gp as package_target try: from bayeso.gp import gp_tensorflow except: # pragma: no cover @@ -24,7 +24,7 @@ TEST_EPSILON = 1e-7 def test_sample_functions_typing(): - annos = gp.sample_functions.__annotations__ + annos = package_target.sample_functions.__annotations__ assert annos['mu'] == np.ndarray assert annos['Sigma'] == np.ndarray @@ -38,29 +38,29 @@ def test_sample_functions(): num_samples = 5 with pytest.raises(AssertionError) as error: - gp.sample_functions(mu, 'abc') + package_target.sample_functions(mu, 'abc') with pytest.raises(AssertionError) as error: - gp.sample_functions('abc', Sigma) + package_target.sample_functions('abc', Sigma) with pytest.raises(AssertionError) as error: - gp.sample_functions(mu, np.eye(20)) + package_target.sample_functions(mu, np.eye(20)) with pytest.raises(AssertionError) as error: - gp.sample_functions(mu, np.ones(num_points)) + package_target.sample_functions(mu, np.ones(num_points)) with pytest.raises(AssertionError) as error: - gp.sample_functions(np.zeros(20), Sigma) + package_target.sample_functions(np.zeros(20), Sigma) with pytest.raises(AssertionError) as error: - gp.sample_functions(np.eye(10), Sigma) + package_target.sample_functions(np.eye(10), Sigma) with pytest.raises(AssertionError) as error: - gp.sample_functions(mu, Sigma, num_samples='abc') + package_target.sample_functions(mu, Sigma, num_samples='abc') with pytest.raises(AssertionError) as error: - gp.sample_functions(mu, Sigma, num_samples=1.2) + package_target.sample_functions(mu, Sigma, num_samples=1.2) - functions = gp.sample_functions(mu, Sigma, num_samples=num_samples) + functions = package_target.sample_functions(mu, Sigma, num_samples=num_samples) assert functions.shape[1] == num_points assert functions.shape[0] == num_samples def test_get_optimized_kernel_typing(): - annos = gp.get_optimized_kernel.__annotations__ + annos = package_target.get_optimized_kernel.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -84,81 +84,81 @@ def test_get_optimized_kernel(): prior_mu = None with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 1) + package_target.get_optimized_kernel(X, Y, prior_mu, 1) with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, 1, 'se') + package_target.get_optimized_kernel(X, Y, 1, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, 1, prior_mu, 'se') + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(1, Y, prior_mu, 'se') + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') with pytest.raises(ValueError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'abc') + package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') 
with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework=1) with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) # INFO: tests for set inputs with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X_set, Y, prior_mu, 'se') + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X, Y, prior_mu, 'set_se') + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') with pytest.raises(AssertionError) as error: - gp.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'eq') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'matern32') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'matern52') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='scipy') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='scipy') print(hyps) if gp_tensorflow is not None: - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', 
str_framework='tensorflow') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='tensorflow') print(hyps) if gp_gpytorch is not None: - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='gpytorch') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='gpytorch') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv') print(hyps) def test_predict_with_cov_typing(): - annos = gp.predict_with_cov.__annotations__ + annos = package_target.predict_with_cov.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -180,43 +180,43 @@ def test_predict_with_cov(): Y = np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu='abc') + package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu='abc') with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov=1, prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov=1, prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, 1, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, 1, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, cov_X_X, 1, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, cov_X_X, 1, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, 1, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, 1, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, 1, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, 1, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, 1, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, 1, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(1, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(1, Y, X_test, cov_X_X, 
inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(np.random.randn(num_X, 1), Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(np.random.randn(num_X, 1), Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(np.random.randn(10, dim_X), Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(np.random.randn(10, dim_X), Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, np.random.randn(10, 1), X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, np.random.randn(10, 1), X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, np.random.randn(3, 3), inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, np.random.randn(3, 3), inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, np.random.randn(10), inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, np.random.randn(10), inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, cov_X_X, np.random.randn(10), hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, cov_X_X, np.random.randn(10), hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_cov(X, Y, X_test, np.random.randn(10), np.random.randn(10), hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_cov(X, Y, X_test, np.random.randn(10), np.random.randn(10), hyps, str_cov='se', prior_mu=prior_mu) def test_predict_with_hyps_typing(): - annos = gp.predict_with_hyps.__annotations__ + annos = package_target.predict_with_hyps.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -236,34 +236,34 @@ def test_predict_with_hyps(): Y = np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu='abc') + package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu='abc') with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(X, Y, X_test, hyps, str_cov=1, prior_mu=prior_mu) + package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov=1, prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(X, Y, X_test, 1, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(X, Y, X_test, 1, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(X, Y, 1, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(X, Y, 1, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(X, 1, X_test, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(X, 1, 
X_test, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(1, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(1, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(np.random.randn(num_X, 1), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(np.random.randn(num_X, 1), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(np.random.randn(10, dim_X), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(np.random.randn(10, dim_X), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_hyps(X, np.random.randn(10, 1), X_test, hyps, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_hyps(X, np.random.randn(10, 1), X_test, hyps, str_cov='se', prior_mu=prior_mu) - mu_test, sigma_test, Sigma_test = gp.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + mu_test, sigma_test, Sigma_test = package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) print(mu_test) print(sigma_test) print(Sigma_test) def test_predict_with_optimized_hyps_typing(): - annos = gp.predict_with_optimized_hyps.__annotations__ + annos = package_target.predict_with_optimized_hyps.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -286,31 +286,31 @@ def test_predict_with_optimized_hyps(): prior_mu = None with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, Y, X_test, str_cov='se', prior_mu='abc') + package_target.predict_with_optimized_hyps(X, Y, X_test, str_cov='se', prior_mu='abc') with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, Y, X_test, str_cov=1, prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(X, Y, X_test, str_cov=1, prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, Y, 1, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(X, Y, 1, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, 1, X_test, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(X, 1, X_test, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(1, Y, X_test, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(1, Y, X_test, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(np.random.randn(num_X, 1), Y, X_test, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(np.random.randn(num_X, 1), Y, X_test, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(np.random.randn(10, dim_X), Y, X_test, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(np.random.randn(10, dim_X), Y, X_test, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, np.random.randn(10, 1), X_test, str_cov='se', prior_mu=prior_mu) + package_target.predict_with_optimized_hyps(X, np.random.randn(10, 1), X_test, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: - 
gp.predict_with_optimized_hyps(X, Y, X_test, str_optimizer_method=1) + package_target.predict_with_optimized_hyps(X, Y, X_test, str_optimizer_method=1) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, Y, X_test, fix_noise=1) + package_target.predict_with_optimized_hyps(X, Y, X_test, fix_noise=1) with pytest.raises(AssertionError) as error: - gp.predict_with_optimized_hyps(X, Y, X_test, debug=1) + package_target.predict_with_optimized_hyps(X, Y, X_test, debug=1) - mu_test, sigma_test, Sigma_test = gp.predict_with_optimized_hyps(X, Y, X_test, debug=True) + mu_test, sigma_test, Sigma_test = package_target.predict_with_optimized_hyps(X, Y, X_test, debug=True) print(mu_test) print(sigma_test) print(Sigma_test) diff --git a/tests/common/test_gp_gpytorch.py b/tests/common/test_gp_gpytorch.py index be5c1b8..45b77bb 100644 --- a/tests/common/test_gp_gpytorch.py +++ b/tests/common/test_gp_gpytorch.py @@ -9,18 +9,18 @@ import numpy as np try: - from bayeso.gp import gp_gpytorch + from bayeso.gp import gp_gpytorch as package_target except: # pragma: no cover - gp_gpytorch = None + package_target = None TEST_EPSILON = 1e-7 def test_get_optimized_kernel_typing(): - if gp_gpytorch is None: # pragma: no cover + if package_target is None: # pragma: no cover pytest.skip('GPyTorch is not installed.') - annos = gp_gpytorch.get_optimized_kernel.__annotations__ + annos = package_target.get_optimized_kernel.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -41,57 +41,57 @@ def test_get_optimized_kernel(): Y = np.random.randn(num_X, 1) prior_mu = None - if gp_gpytorch is None: # pragma: no cover + if package_target is None: # pragma: no cover pytest.skip('GPyTorch is not installed.') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 1) + package_target.get_optimized_kernel(X, Y, prior_mu, 1) with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, Y, 1, 'se') + package_target.get_optimized_kernel(X, Y, 1, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, 1, prior_mu, 'se') + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(1, Y, prior_mu, 'se') + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') with pytest.raises(ValueError) as error: - gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'abc') + package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) with pytest.raises(AssertionError) as error: - 
gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters='abc') + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters='abc') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) # INFO: tests for set inputs with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X_set, Y, prior_mu, 'se') + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'set_se') + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') with pytest.raises(AssertionError) as error: - gp_gpytorch.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'eq', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'matern32', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0, debug=True) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0, debug=True) print(hyps) with pytest.raises(NotImplementedError) as error: - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') print(hyps) diff --git a/tests/common/test_gp_scipy.py b/tests/common/test_gp_scipy.py index 0f97733..45e27bd 100644 --- a/tests/common/test_gp_scipy.py +++ b/tests/common/test_gp_scipy.py @@ -9,14 +9,14 @@ import numpy as np from bayeso import constants -from bayeso.gp import gp_scipy +from bayeso.gp import gp_scipy as package_target from bayeso.utils import utils_covariance TEST_EPSILON = 1e-7 def test_neg_log_ml_typing(): - annos = gp_scipy.neg_log_ml.__annotations__ + annos = package_target.neg_log_ml.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -41,46 +41,46 @@ def test_neg_log_ml(): prior_mu_X = np.zeros((3, 1)) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_ml(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_ml(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, Y, dict_hyps, str_cov, prior_mu_X) + package_target.neg_log_ml(X, Y, dict_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - 
gp_scipy.neg_log_ml(X, Y, arr_hyps, 1, prior_mu_X) + package_target.neg_log_ml(X, Y, arr_hyps, 1, prior_mu_X) with pytest.raises(ValueError) as error: - gp_scipy.neg_log_ml(X, Y, arr_hyps, 'abc', prior_mu_X) + package_target.neg_log_ml(X, Y, arr_hyps, 'abc', prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, np.arange(0, 3)) + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.arange(0, 3)) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_ml(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_ml(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, use_cholesky=1) + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, use_cholesky=1) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) - neg_log_ml_ = gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False, use_cholesky=True) + neg_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False, use_cholesky=True) print(neg_log_ml_) truth_log_ml_ = 21.916650988532854 assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON - neg_log_ml_ = gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False, use_cholesky=False) + neg_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False, use_cholesky=False) print(neg_log_ml_) truth_log_ml_ = 21.91665090519953 assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON - neg_log_ml_ = gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True, use_cholesky=False) + neg_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True, use_cholesky=False) print(neg_log_ml_) truth_log_ml_ = 21.91665090519953 assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON - neg_log_ml_, neg_grad_log_ml_ = gp_scipy.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True, use_cholesky=True) + neg_log_ml_, neg_grad_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True, use_cholesky=True) print(neg_log_ml_) print(neg_grad_log_ml_) @@ -96,7 +96,7 @@ def test_neg_log_ml(): assert np.all(np.abs(neg_grad_log_ml_ - truth_grad_log_ml_) < TEST_EPSILON) def test_neg_log_pseudo_l_loocv_typing(): - annos = gp_scipy.neg_log_pseudo_l_loocv.__annotations__ + annos = package_target.neg_log_pseudo_l_loocv.__annotations__ assert 
annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -117,35 +117,35 @@ def test_neg_log_pseudo_l_loocv(): prior_mu_X = np.zeros((3, 1)) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_pseudo_l_loocv(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_pseudo_l_loocv(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, dict_hyps, str_cov, prior_mu_X) + package_target.neg_log_pseudo_l_loocv(X, Y, dict_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, 1, prior_mu_X) + package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, 1, prior_mu_X) with pytest.raises(ValueError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, 'abc', prior_mu_X) + package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, 'abc', prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, np.arange(0, 3)) + package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, np.arange(0, 3)) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_pseudo_l_loocv(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) + package_target.neg_log_pseudo_l_loocv(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) + package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) + package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) with pytest.raises(AssertionError) as error: - gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) + package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) - neg_log_pseudo_l_ = gp_scipy.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, prior_mu_X) + neg_log_pseudo_l_ = package_target.neg_log_pseudo_l_loocv(X, Y, arr_hyps, str_cov, prior_mu_X) print(neg_log_pseudo_l_) truth_log_pseudo_l_ = 21.916822991658695 assert np.abs(neg_log_pseudo_l_ - truth_log_pseudo_l_) < TEST_EPSILON def test_get_optimized_kernel_typing(): - annos = gp_scipy.get_optimized_kernel.__annotations__ + annos = package_target.get_optimized_kernel.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -168,55 +168,55 @@ def test_get_optimized_kernel(): prior_mu = None with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 1) + package_target.get_optimized_kernel(X, Y, prior_mu, 1) with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, 1, 'se') + package_target.get_optimized_kernel(X, Y, 1, 'se') with pytest.raises(AssertionError) as error: 
- gp_scipy.get_optimized_kernel(X, 1, prior_mu, 'se') + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(1, Y, prior_mu, 'se') + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') with pytest.raises(ValueError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'abc') + package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) # INFO: tests for set inputs with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X_set, Y, prior_mu, 'se') + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'set_se') + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') with pytest.raises(AssertionError) as error: - gp_scipy.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS', debug=True) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS', debug=True) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B', debug=True) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B', debug=True) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead', debug=True) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead', debug=True) print(hyps) - cov_X_X, inv_cov_X_X, hyps = 
gp_scipy.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv', debug=True) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv', debug=True) print(hyps) diff --git a/tests/common/test_gp_tensorflow.py b/tests/common/test_gp_tensorflow.py index 388ace4..4c5e851 100644 --- a/tests/common/test_gp_tensorflow.py +++ b/tests/common/test_gp_tensorflow.py @@ -9,18 +9,18 @@ import numpy as np try: - from bayeso.gp import gp_tensorflow + from bayeso.gp import gp_tensorflow as package_target except: # pragma: no cover - gp_tensorflow = None + package_target = None TEST_EPSILON = 1e-7 def test_get_optimized_kernel_typing(): - if gp_tensorflow is None: # pragma: no cover + if package_target is None: # pragma: no cover pytest.skip('TensorFlow is not installed.') - annos = gp_tensorflow.get_optimized_kernel.__annotations__ + annos = package_target.get_optimized_kernel.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -41,57 +41,57 @@ def test_get_optimized_kernel(): Y = np.random.randn(num_X, 1) prior_mu = None - if gp_tensorflow is None: # pragma: no cover + if package_target is None: # pragma: no cover pytest.skip('TensorFlow is not installed.') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 1) + package_target.get_optimized_kernel(X, Y, prior_mu, 1) with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, 1, 'se') + package_target.get_optimized_kernel(X, Y, 1, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, 1, prior_mu, 'se') + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(1, Y, prior_mu, 'se') + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') with pytest.raises(ValueError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'abc') + 
package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters='abc') + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters='abc') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) # INFO: tests for set inputs with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X_set, Y, prior_mu, 'se') + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'set_se') + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') with pytest.raises(AssertionError) as error: - gp_tensorflow.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'eq', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'matern32', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0) print(hyps) - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0, debug=True) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0, debug=True) print(hyps) with pytest.raises(NotImplementedError) as error: - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') print(hyps) diff --git a/tests/common/test_utils_bo.py b/tests/common/test_utils_bo.py index 66019d4..f4497f9 100644 --- a/tests/common/test_utils_bo.py +++ b/tests/common/test_utils_bo.py @@ -8,11 +8,11 @@ import numpy as np from bayeso import constants -from bayeso.utils import utils_bo +from bayeso.utils import utils_bo as package_target def test_get_best_acquisition_by_evaluation_typing(): - annos = utils_bo.get_best_acquisition_by_evaluation.__annotations__ + annos = package_target.get_best_acquisition_by_evaluation.__annotations__ assert annos['initials'] == np.ndarray assert annos['fun_objective'] == callable @@ -23,13 +23,13 @@ def test_get_best_acquisition_by_evaluation(): arr_initials = np.expand_dims(np.arange(-5, 5), axis=1) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_evaluation(1, fun_objective) + package_target.get_best_acquisition_by_evaluation(1, fun_objective) with pytest.raises(AssertionError) as error: - 
utils_bo.get_best_acquisition_by_evaluation(arr_initials, None) + package_target.get_best_acquisition_by_evaluation(arr_initials, None) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_evaluation(np.arange(-5, 5), fun_objective) + package_target.get_best_acquisition_by_evaluation(np.arange(-5, 5), fun_objective) - best_initial = utils_bo.get_best_acquisition_by_evaluation(arr_initials, fun_objective) + best_initial = package_target.get_best_acquisition_by_evaluation(arr_initials, fun_objective) assert len(best_initial.shape) == 2 assert best_initial.shape[0] == 1 assert best_initial.shape[1] == arr_initials.shape[1] @@ -39,20 +39,20 @@ def test_get_best_acquisition_by_evaluation(): arr_initials = np.reshape(np.arange(-10, 10), (5, 4)) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_evaluation(1, fun_objective) + package_target.get_best_acquisition_by_evaluation(1, fun_objective) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_evaluation(arr_initials, None) + package_target.get_best_acquisition_by_evaluation(arr_initials, None) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_evaluation(np.arange(-5, 5), fun_objective) + package_target.get_best_acquisition_by_evaluation(np.arange(-5, 5), fun_objective) - best_initial = utils_bo.get_best_acquisition_by_evaluation(arr_initials, fun_objective) + best_initial = package_target.get_best_acquisition_by_evaluation(arr_initials, fun_objective) assert len(best_initial.shape) == 2 assert best_initial.shape[0] == 1 assert best_initial.shape[1] == arr_initials.shape[1] assert np.all(best_initial == np.array([[-2, -1, 0, 1]])) def test_check_optimizer_method_bo_typing(): - annos = utils_bo.check_optimizer_method_bo.__annotations__ + annos = package_target.check_optimizer_method_bo.__annotations__ assert annos['str_optimizer_method_bo'] == str assert annos['dim'] == int @@ -64,20 +64,20 @@ def test_check_optimizer_method_bo(): cma = None with pytest.raises(AssertionError) as error: - utils_bo.check_optimizer_method_bo(2, 2, True) + package_target.check_optimizer_method_bo(2, 2, True) with pytest.raises(AssertionError) as error: - utils_bo.check_optimizer_method_bo('DIRECT', 'abc', True) + package_target.check_optimizer_method_bo('DIRECT', 'abc', True) with pytest.raises(AssertionError) as error: - utils_bo.check_optimizer_method_bo('DIRECT', 2, 'abc') + package_target.check_optimizer_method_bo('DIRECT', 2, 'abc') with pytest.raises(AssertionError) as error: - utils_bo.check_optimizer_method_bo('ABC', 2, True) + package_target.check_optimizer_method_bo('ABC', 2, True) - utils_bo.check_optimizer_method_bo('L-BFGS-B', 2, False) - utils_bo.check_optimizer_method_bo('DIRECT', 2, False) - utils_bo.check_optimizer_method_bo('CMA-ES', 2, False) + package_target.check_optimizer_method_bo('L-BFGS-B', 2, False) + package_target.check_optimizer_method_bo('DIRECT', 2, False) + package_target.check_optimizer_method_bo('CMA-ES', 2, False) def test_choose_fun_acquisition_typing(): - annos = utils_bo.choose_fun_acquisition.__annotations__ + annos = package_target.choose_fun_acquisition.__annotations__ assert annos['str_acq'] == str assert annos['hyps'] == dict @@ -86,14 +86,14 @@ def test_choose_fun_acquisition_typing(): def test_choose_fun_acquisition(): dict_hyps = {'lengthscales': np.array([1.0, 1.0]), 'signal': 1.0, 'noise': 0.01} with pytest.raises(AssertionError) as error: - utils_bo.choose_fun_acquisition(1, dict_hyps) + 
package_target.choose_fun_acquisition(1, dict_hyps) with pytest.raises(AssertionError) as error: - utils_bo.choose_fun_acquisition('abc', dict_hyps) + package_target.choose_fun_acquisition('abc', dict_hyps) with pytest.raises(AssertionError) as error: - utils_bo.choose_fun_acquisition('pi', 1) + package_target.choose_fun_acquisition('pi', 1) def test_check_hyps_convergence_typing(): - annos = utils_bo.check_hyps_convergence.__annotations__ + annos = package_target.check_hyps_convergence.__annotations__ assert annos['list_hyps'] == list assert annos['hyps'] == dict @@ -107,19 +107,19 @@ def test_check_hyps_convergence(): dict_hyps_2 = {'lengthscales': np.array([2.0, 1.0]), 'signal': 1.0, 'noise': 0.01} with pytest.raises(AssertionError) as error: - utils_bo.check_hyps_convergence(1, dict_hyps_1, 'se', True) + package_target.check_hyps_convergence(1, dict_hyps_1, 'se', True) with pytest.raises(AssertionError) as error: - utils_bo.check_hyps_convergence([dict_hyps_1], 1, 'se', True) + package_target.check_hyps_convergence([dict_hyps_1], 1, 'se', True) with pytest.raises(AssertionError) as error: - utils_bo.check_hyps_convergence([dict_hyps_1], dict_hyps_1, 1, True) + package_target.check_hyps_convergence([dict_hyps_1], dict_hyps_1, 1, True) with pytest.raises(AssertionError) as error: - utils_bo.check_hyps_convergence([dict_hyps_1], dict_hyps_1, 1, 'abc') + package_target.check_hyps_convergence([dict_hyps_1], dict_hyps_1, 1, 'abc') - assert utils_bo.check_hyps_convergence([dict_hyps_1], dict_hyps_1, 'se', False) - assert not utils_bo.check_hyps_convergence([dict_hyps_2], dict_hyps_1, 'se', False) + assert package_target.check_hyps_convergence([dict_hyps_1], dict_hyps_1, 'se', False) + assert not package_target.check_hyps_convergence([dict_hyps_2], dict_hyps_1, 'se', False) def test_get_next_best_acquisition_typing(): - annos = utils_bo.get_next_best_acquisition.__annotations__ + annos = package_target.get_next_best_acquisition.__annotations__ assert annos['points'] == np.ndarray assert annos['acquisitions'] == np.ndarray @@ -141,23 +141,23 @@ def test_get_next_best_acquisition(): ]) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(1, arr_acquisitions, cur_points) + package_target.get_next_best_acquisition(1, arr_acquisitions, cur_points) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(np.arange(0, 4), arr_acquisitions, cur_points) + package_target.get_next_best_acquisition(np.arange(0, 4), arr_acquisitions, cur_points) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(arr_points, 1, cur_points) + package_target.get_next_best_acquisition(arr_points, 1, cur_points) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(arr_points, np.ones((4, 2)), cur_points) + package_target.get_next_best_acquisition(arr_points, np.ones((4, 2)), cur_points) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(arr_points, arr_acquisitions, 1) + package_target.get_next_best_acquisition(arr_points, arr_acquisitions, 1) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(arr_points, arr_acquisitions, np.arange(0, 3)) + package_target.get_next_best_acquisition(arr_points, arr_acquisitions, np.arange(0, 3)) with pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(arr_points, np.arange(0, 10), cur_points) + package_target.get_next_best_acquisition(arr_points, np.arange(0, 10), cur_points) with 
pytest.raises(AssertionError) as error: - utils_bo.get_next_best_acquisition(arr_points, arr_acquisitions, np.ones((3, 5))) + package_target.get_next_best_acquisition(arr_points, arr_acquisitions, np.ones((3, 5))) - next_point = utils_bo.get_next_best_acquisition(arr_points, arr_acquisitions, cur_points) + next_point = package_target.get_next_best_acquisition(arr_points, arr_acquisitions, cur_points) assert (next_point == np.array([-2.0, -4.0])).all() cur_points = np.array([ @@ -169,11 +169,11 @@ def test_get_next_best_acquisition(): [-2.0, -4.0], [1.0, 3.0], ]) - next_point = utils_bo.get_next_best_acquisition(arr_points, arr_acquisitions, cur_points) + next_point = package_target.get_next_best_acquisition(arr_points, arr_acquisitions, cur_points) assert (next_point == np.array([1.0, 3.0])).all() def test_get_best_acquisition_by_history_typing(): - annos = utils_bo.get_best_acquisition_by_history.__annotations__ + annos = package_target.get_best_acquisition_by_history.__annotations__ assert annos['X'] == np.ndarray assert annos['Y'] == np.ndarray @@ -196,17 +196,17 @@ def test_get_best_acquisition_by_history(): ]) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_history(1, Y) + package_target.get_best_acquisition_by_history(1, Y) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_history(X, 1) + package_target.get_best_acquisition_by_history(X, 1) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_history(X[:4], Y) + package_target.get_best_acquisition_by_history(X[:4], Y) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_history(X, Y[:3]) + package_target.get_best_acquisition_by_history(X, Y[:3]) with pytest.raises(AssertionError) as error: - utils_bo.get_best_acquisition_by_history(X, Y[:, 0]) + package_target.get_best_acquisition_by_history(X, Y[:, 0]) - bx_best, y_best = utils_bo.get_best_acquisition_by_history(X, Y) + bx_best, y_best = package_target.get_best_acquisition_by_history(X, Y) assert np.all(bx_best == np.array([2.0, 3.1, 2.2, 5.1])) assert y_best == 0.0 diff --git a/tests/common/test_utils_common.py b/tests/common/test_utils_common.py index d42183b..c76d89c 100644 --- a/tests/common/test_utils_common.py +++ b/tests/common/test_utils_common.py @@ -8,13 +8,13 @@ import pytest import numpy as np -from bayeso.utils import utils_common +from bayeso.utils import utils_common as package_target TEST_EPSILON = 1e-5 def test_get_grids_typing(): - annos = utils_common.get_grids.__annotations__ + annos = package_target.get_grids.__annotations__ assert annos['ranges'] == np.ndarray assert annos['num_grids'] == int @@ -93,24 +93,24 @@ def test_get_grids(): ]) with pytest.raises(AssertionError) as error: - utils_common.get_grids('abc', 3) + package_target.get_grids('abc', 3) with pytest.raises(AssertionError) as error: - utils_common.get_grids(arr_range_1, 'abc') + package_target.get_grids(arr_range_1, 'abc') with pytest.raises(AssertionError) as error: - utils_common.get_grids(np.arange(0, 10), 3) + package_target.get_grids(np.arange(0, 10), 3) with pytest.raises(AssertionError) as error: - utils_common.get_grids(np.ones((3, 3)), 3) + package_target.get_grids(np.ones((3, 3)), 3) with pytest.raises(AssertionError) as error: - utils_common.get_grids(np.array([[0.0, -2.0], [10.0, 20.0]]), 3) + package_target.get_grids(np.array([[0.0, -2.0], [10.0, 20.0]]), 3) - arr_grid_1 = utils_common.get_grids(arr_range_1, 3) - arr_grid_2 = utils_common.get_grids(arr_range_2, 3) + 
arr_grid_1 = package_target.get_grids(arr_range_1, 3) + arr_grid_2 = package_target.get_grids(arr_range_2, 3) assert (arr_grid_1 == truth_arr_grid_1).all() assert (arr_grid_2 == truth_arr_grid_2).all() def test_get_minimum_typing(): - annos = utils_common.get_minimum.__annotations__ + annos = package_target.get_minimum.__annotations__ assert annos['Y_all'] == np.ndarray assert annos['num_init'] == int @@ -118,17 +118,17 @@ def test_get_minimum_typing(): def test_get_minimum(): with pytest.raises(AssertionError) as error: - utils_common.get_minimum(1.2, 2.1) + package_target.get_minimum(1.2, 2.1) with pytest.raises(AssertionError) as error: - utils_common.get_minimum(1.2, 3) + package_target.get_minimum(1.2, 3) num_init = 3 num_exp = 3 num_data = 10 all_data = np.zeros((num_exp, num_init + num_data)) with pytest.raises(AssertionError) as error: - utils_common.get_minimum(all_data, 2.1) - cur_minimum, cur_mean, cur_std = utils_common.get_minimum(all_data, num_init) + package_target.get_minimum(all_data, 2.1) + cur_minimum, cur_mean, cur_std = package_target.get_minimum(all_data, num_init) assert len(cur_minimum.shape) == 2 assert cur_minimum.shape == (num_exp, 1 + num_data) assert len(cur_mean.shape) == 1 @@ -141,7 +141,7 @@ def test_get_minimum(): num_data = -2 all_data = np.zeros((num_exp, num_init + num_data)) with pytest.raises(AssertionError) as error: - utils_common.get_minimum(all_data, num_init) + package_target.get_minimum(all_data, num_init) num_init = 3 all_data = np.array([ @@ -154,13 +154,13 @@ def test_get_minimum(): [2.3, 2.3, 2.3, 2.3, 2.3], [0.8, 0.8, 0.3, 0.3, 0.3], ]) - cur_minimum, cur_mean, cur_std = utils_common.get_minimum(all_data, num_init) + cur_minimum, cur_mean, cur_std = package_target.get_minimum(all_data, num_init) assert (cur_minimum == truth_all_data).all() assert (cur_mean == np.mean(truth_all_data, axis=0)).all() assert (cur_std == np.std(truth_all_data, axis=0)).all() def test_get_time_typing(): - annos = utils_common.get_time.__annotations__ + annos = package_target.get_time.__annotations__ assert annos['time_all'] == np.ndarray assert annos['num_init'] == int @@ -176,20 +176,20 @@ def test_get_time(): int_init = 2 is_initial = True with pytest.raises(AssertionError) as error: - utils_common.get_time(arr_time, int_init, 1) + package_target.get_time(arr_time, int_init, 1) with pytest.raises(AssertionError) as error: - utils_common.get_time(arr_time, 'abc', is_initial) + package_target.get_time(arr_time, 'abc', is_initial) with pytest.raises(AssertionError) as error: - utils_common.get_time('abc', int_init, is_initial) + package_target.get_time('abc', int_init, is_initial) with pytest.raises(AssertionError) as error: - utils_common.get_time(np.arange(0, 10), int_init, is_initial) + package_target.get_time(np.arange(0, 10), int_init, is_initial) with pytest.raises(AssertionError) as error: - utils_common.get_time(arr_time, 10, is_initial) + package_target.get_time(arr_time, 10, is_initial) - cur_time = utils_common.get_time(arr_time, int_init, is_initial) + cur_time = package_target.get_time(arr_time, int_init, is_initial) truth_cur_time = np.array([0.0, 0.8, 1.2, 2.6]) assert (np.abs(cur_time - truth_cur_time) < TEST_EPSILON).all() - cur_time = utils_common.get_time(arr_time, int_init, False) + cur_time = package_target.get_time(arr_time, int_init, False) truth_cur_time = np.array([0.0, 1.06666667, 1.5, 2.3, 2.7, 4.1]) assert (np.abs(cur_time - truth_cur_time) < TEST_EPSILON).all() diff --git a/tests/common/test_utils_covariance.py 
b/tests/common/test_utils_covariance.py index 323f016..6a2c633 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -8,17 +8,17 @@ import pytest import numpy as np -from bayeso.utils import utils_covariance from bayeso import constants +from bayeso.utils import utils_covariance as package_target def test_get_list_first_typing(): - annos = utils_covariance._get_list_first.__annotations__ + annos = package_target._get_list_first.__annotations__ assert annos['return'] == list def test_get_hyps_typing(): - annos = utils_covariance.get_hyps.__annotations__ + annos = package_target.get_hyps.__annotations__ assert annos['str_cov'] == str assert annos['dim'] == int @@ -27,39 +27,39 @@ def test_get_hyps_typing(): def test_get_hyps(): with pytest.raises(AssertionError) as error: - utils_covariance.get_hyps(1.2, 2.1) + package_target.get_hyps(1.2, 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.get_hyps(1.2, 2) + package_target.get_hyps(1.2, 2) with pytest.raises(AssertionError) as error: - utils_covariance.get_hyps('se', 2.1) + package_target.get_hyps('se', 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.get_hyps('abc', 2) + package_target.get_hyps('abc', 2) with pytest.raises(AssertionError) as error: - utils_covariance.get_hyps('se', 2, use_ard='abc') + package_target.get_hyps('se', 2, use_ard='abc') - cur_hyps = utils_covariance.get_hyps('se', 2) + cur_hyps = package_target.get_hyps('se', 2) assert cur_hyps['noise'] == constants.GP_NOISE assert cur_hyps['signal'] == 1.0 assert len(cur_hyps['lengthscales'].shape) == 1 assert (cur_hyps['lengthscales'] == np.array([1.0, 1.0])).all() - cur_hyps = utils_covariance.get_hyps('se', 2, use_ard=False) + cur_hyps = package_target.get_hyps('se', 2, use_ard=False) assert cur_hyps['noise'] == constants.GP_NOISE assert cur_hyps['signal'] == 1.0 assert cur_hyps['lengthscales'] == 1.0 - cur_hyps = utils_covariance.get_hyps('matern32', 2, use_ard=False) + cur_hyps = package_target.get_hyps('matern32', 2, use_ard=False) assert cur_hyps['noise'] == constants.GP_NOISE assert cur_hyps['signal'] == 1.0 assert cur_hyps['lengthscales'] == 1.0 - cur_hyps = utils_covariance.get_hyps('matern52', 2, use_ard=False) + cur_hyps = package_target.get_hyps('matern52', 2, use_ard=False) assert cur_hyps['noise'] == constants.GP_NOISE assert cur_hyps['signal'] == 1.0 assert cur_hyps['lengthscales'] == 1.0 def test_get_range_hyps_typing(): - annos = utils_covariance.get_range_hyps.__annotations__ + annos = package_target.get_range_hyps.__annotations__ assert annos['str_cov'] == str assert annos['dim'] == int @@ -69,28 +69,28 @@ def test_get_range_hyps_typing(): def test_get_range_hyps(): with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps(1.0, 2) + package_target.get_range_hyps(1.0, 2) with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps('abc', 2) + package_target.get_range_hyps('abc', 2) with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps('se', 1.2) + package_target.get_range_hyps('se', 1.2) with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps('se', 2, use_ard='abc') + package_target.get_range_hyps('se', 2, use_ard='abc') with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps('se', 2, use_ard=1) + package_target.get_range_hyps('se', 2, use_ard=1) with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps('se', 2, fix_noise=1) + 
package_target.get_range_hyps('se', 2, fix_noise=1) with pytest.raises(AssertionError) as error: - utils_covariance.get_range_hyps('se', 2, fix_noise='abc') + package_target.get_range_hyps('se', 2, fix_noise='abc') - cur_range = utils_covariance.get_range_hyps('se', 2, use_ard=False, fix_noise=False) + cur_range = package_target.get_range_hyps('se', 2, use_ard=False, fix_noise=False) print(type(cur_range)) print(cur_range) assert isinstance(cur_range, list) assert cur_range == [[0.001, 10.0], [0.01, 1000.0], [0.01, 1000.0]] def test_convert_hyps_typing(): - annos = utils_covariance.convert_hyps.__annotations__ + annos = package_target.convert_hyps.__annotations__ assert annos['str_cov'] == str assert annos['hyps'] == dict @@ -101,33 +101,33 @@ def test_convert_hyps(): cur_hyps = {'noise': 0.1, 'signal': 1.0, 'lengthscales': np.array([2.0, 2.0])} with pytest.raises(AssertionError) as error: - utils_covariance.convert_hyps(1.2, 2.1) + package_target.convert_hyps(1.2, 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.convert_hyps(1.2, dict()) + package_target.convert_hyps(1.2, dict()) with pytest.raises(AssertionError) as error: - utils_covariance.convert_hyps('se', 2.1) + package_target.convert_hyps('se', 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.convert_hyps('abc', 2.1) + package_target.convert_hyps('abc', 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.convert_hyps('abc', cur_hyps) + package_target.convert_hyps('abc', cur_hyps) with pytest.raises(AssertionError) as error: - utils_covariance.convert_hyps('se', dict(), fix_noise=1) + package_target.convert_hyps('se', dict(), fix_noise=1) - converted_hyps = utils_covariance.convert_hyps('se', cur_hyps, fix_noise=False) + converted_hyps = package_target.convert_hyps('se', cur_hyps, fix_noise=False) assert len(converted_hyps.shape) == 1 assert converted_hyps.shape[0] == 4 assert converted_hyps[0] == cur_hyps['noise'] assert converted_hyps[1] == cur_hyps['signal'] assert (converted_hyps[2:] == cur_hyps['lengthscales']).all() - converted_hyps = utils_covariance.convert_hyps('se', cur_hyps, fix_noise=True) + converted_hyps = package_target.convert_hyps('se', cur_hyps, fix_noise=True) assert len(converted_hyps.shape) == 1 assert converted_hyps.shape[0] == 3 assert converted_hyps[0] == cur_hyps['signal'] assert (converted_hyps[1:] == cur_hyps['lengthscales']).all() def test_restore_hyps_typing(): - annos = utils_covariance.restore_hyps.__annotations__ + annos = package_target.restore_hyps.__annotations__ assert annos['str_cov'] == str assert annos['hyps'] == np.ndarray @@ -137,33 +137,33 @@ def test_restore_hyps_typing(): def test_restore_hyps(): with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps(1.2, 2.1) + package_target.restore_hyps(1.2, 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps(1.2, np.array([1.0, 1.0])) + package_target.restore_hyps(1.2, np.array([1.0, 1.0])) with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps('se', 2.1) + package_target.restore_hyps('se', 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps('abc', 2.1) + package_target.restore_hyps('abc', 2.1) with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps('se', np.array([[1.0, 1.0], [1.0, 1.0]])) + package_target.restore_hyps('se', np.array([[1.0, 1.0], [1.0, 1.0]])) with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps('se', np.array([1.0, 1.0, 1.0]), 
fix_noise=1) + package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), fix_noise=1) with pytest.raises(AssertionError) as error: - utils_covariance.restore_hyps('se', np.array([1.0, 1.0, 1.0]), noise='abc') + package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), noise='abc') cur_hyps = np.array([0.1, 1.0, 1.0, 1.0, 1.0]) - restored_hyps = utils_covariance.restore_hyps('se', cur_hyps, fix_noise=False) + restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=False) assert restored_hyps['noise'] == cur_hyps[0] assert restored_hyps['signal'] == cur_hyps[1] assert (restored_hyps['lengthscales'] == cur_hyps[2:]).all() - restored_hyps = utils_covariance.restore_hyps('se', cur_hyps, fix_noise=True) + restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=True) assert restored_hyps['noise'] == constants.GP_NOISE assert restored_hyps['signal'] == cur_hyps[0] assert (restored_hyps['lengthscales'] == cur_hyps[1:]).all() def test_validate_hyps_dict_typing(): - annos = utils_covariance.validate_hyps_dict.__annotations__ + annos = package_target.validate_hyps_dict.__annotations__ assert annos['hyps'] == dict assert annos['str_cov'] == str @@ -174,62 +174,62 @@ def test_validate_hyps_dict(): num_dim = 2 str_cov = 'matern32' - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(123, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(123, str_cov, num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, 'abc', num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, 'abc', num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, 'abc') + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, 'abc') - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('noise') with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('lengthscales') with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('signal') with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['noise'] = 'abc' with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) 
cur_hyps['noise'] = np.inf - cur_hyps, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + cur_hyps, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert cur_hyps['noise'] == constants.BOUND_UPPER_GP_NOISE - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, 123) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, 123) assert is_valid == True - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['lengthscales'] = 'abc' with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) + cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['signal'] = 'abc' with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_dict(cur_hyps, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True def test_validate_hyps_arr_typing(): - annos = utils_covariance.validate_hyps_arr.__annotations__ + annos = package_target.validate_hyps_arr.__annotations__ assert annos['hyps'] == np.ndarray assert annos['str_cov'] == str @@ -240,17 +240,17 @@ def test_validate_hyps_arr(): num_dim = 2 str_cov = 'matern32' - cur_hyps = utils_covariance.get_hyps(str_cov, num_dim) - cur_hyps = utils_covariance.convert_hyps(str_cov, cur_hyps, fix_noise=False) + cur_hyps = package_target.get_hyps(str_cov, num_dim) + cur_hyps = package_target.convert_hyps(str_cov, cur_hyps, fix_noise=False) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_arr(123, str_cov, num_dim) + _, is_valid = package_target.validate_hyps_arr(123, str_cov, num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_arr(cur_hyps, 'abc', num_dim) + _, is_valid = package_target.validate_hyps_arr(cur_hyps, 'abc', num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = utils_covariance.validate_hyps_arr(cur_hyps, str_cov, 'abc') + _, is_valid = package_target.validate_hyps_arr(cur_hyps, str_cov, 'abc') def test_check_str_cov_typing(): - annos = utils_covariance.check_str_cov.__annotations__ + annos = package_target.check_str_cov.__annotations__ assert annos['str_fun'] == str assert annos['str_cov'] == str @@ -260,21 +260,21 @@ def test_check_str_cov_typing(): def test_check_str_cov(): with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov(1, 'se', (2, 1)) + package_target.check_str_cov(1, 'se', (2, 1)) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 1, (2, 1)) + package_target.check_str_cov('test', 1, (2, 1)) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 'se', 1) + package_target.check_str_cov('test', 'se', 1) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 'se', (2, 100, 100)) + package_target.check_str_cov('test', 'se', (2, 100, 100)) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 'se', (2, 100), shape_X2=(2, 100, 100)) + package_target.check_str_cov('test', 'se', (2, 
100), shape_X2=(2, 100, 100)) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 'set_se', (2, 100), shape_X2=(2, 100, 100)) + package_target.check_str_cov('test', 'set_se', (2, 100), shape_X2=(2, 100, 100)) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 'set_se', (2, 100, 100), shape_X2=(2, 100)) + package_target.check_str_cov('test', 'set_se', (2, 100, 100), shape_X2=(2, 100)) with pytest.raises(AssertionError) as error: - utils_covariance.check_str_cov('test', 'se', (2, 1), shape_X2=1) + package_target.check_str_cov('test', 'se', (2, 1), shape_X2=1) with pytest.raises(ValueError) as error: - utils_covariance.check_str_cov('test', 'abc', (2, 1)) + package_target.check_str_cov('test', 'abc', (2, 1)) diff --git a/tests/common/test_utils_gp.py b/tests/common/test_utils_gp.py index 44be308..ffb3c9e 100644 --- a/tests/common/test_utils_gp.py +++ b/tests/common/test_utils_gp.py @@ -8,13 +8,13 @@ import pytest import numpy as np -from bayeso.utils import utils_gp +from bayeso.utils import utils_gp as package_target TEST_EPSILON = 1e-7 def test_get_prior_mu_typing(): - annos = utils_gp.get_prior_mu.__annotations__ + annos = package_target.get_prior_mu.__annotations__ assert annos['prior_mu'] == typing.Union[callable, type(None)] assert annos['X'] == np.ndarray @@ -26,15 +26,15 @@ def test_get_prior_mu(): X = np.reshape(np.arange(0, 90), (30, 3)) with pytest.raises(AssertionError) as error: - utils_gp.get_prior_mu(1, X) + package_target.get_prior_mu(1, X) with pytest.raises(AssertionError) as error: - utils_gp.get_prior_mu(fun_prior, 1) + package_target.get_prior_mu(fun_prior, 1) with pytest.raises(AssertionError) as error: - utils_gp.get_prior_mu(fun_prior, np.arange(0, 100)) + package_target.get_prior_mu(fun_prior, np.arange(0, 100)) with pytest.raises(AssertionError) as error: - utils_gp.get_prior_mu(None, np.arange(0, 100)) + package_target.get_prior_mu(None, np.arange(0, 100)) with pytest.raises(AssertionError) as error: - utils_gp.get_prior_mu(fun_prior_1d, X) + package_target.get_prior_mu(fun_prior_1d, X) - assert (utils_gp.get_prior_mu(None, X) == np.zeros((X.shape[0], 1))).all() - assert (utils_gp.get_prior_mu(fun_prior, X) == fun_prior(X)).all() + assert (package_target.get_prior_mu(None, X) == np.zeros((X.shape[0], 1))).all() + assert (package_target.get_prior_mu(fun_prior, X) == fun_prior(X)).all() diff --git a/tests/common/test_utils_logger.py b/tests/common/test_utils_logger.py index c6dd17f..ec51c65 100644 --- a/tests/common/test_utils_logger.py +++ b/tests/common/test_utils_logger.py @@ -8,157 +8,157 @@ import pytest import numpy as np -from bayeso.utils import utils_logger +from bayeso.utils import utils_logger as package_target def test_get_logger_typing(): - annos = utils_logger.get_logger.__annotations__ + annos = package_target.get_logger.__annotations__ assert annos['str_name'] == str assert annos['return'] == logging.Logger def test_get_logger(): with pytest.raises(AssertionError) as error: - utils_logger.get_logger(123) + package_target.get_logger(123) with pytest.raises(AssertionError) as error: - utils_logger.get_logger(12.3) + package_target.get_logger(12.3) - logger = utils_logger.get_logger('abc') + logger = package_target.get_logger('abc') assert type(logger) == logging.Logger def test_get_str_array_1d_typing(): - annos = utils_logger.get_str_array_1d.__annotations__ + annos = package_target.get_str_array_1d.__annotations__ assert annos['arr'] == np.ndarray assert annos['return'] == str def 
test_get_str_array_1d(): with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_1d(123) + package_target.get_str_array_1d(123) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_1d(12.3) + package_target.get_str_array_1d(12.3) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_1d(np.zeros((10, 2))) + package_target.get_str_array_1d(np.zeros((10, 2))) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_1d(np.zeros((10, 2, 2))) + package_target.get_str_array_1d(np.zeros((10, 2, 2))) - str_ = utils_logger.get_str_array_1d(np.array([1, 2, 3])) + str_ = package_target.get_str_array_1d(np.array([1, 2, 3])) print(str_) assert str_ == '[1, 2, 3]' - str_ = utils_logger.get_str_array_1d(np.array([1.1, 2.5, 3.0])) + str_ = package_target.get_str_array_1d(np.array([1.1, 2.5, 3.0])) print(str_) assert str_ == '[1.100, 2.500, 3.000]' def test_get_str_array_2d_typing(): - annos = utils_logger.get_str_array_2d.__annotations__ + annos = package_target.get_str_array_2d.__annotations__ assert annos['arr'] == np.ndarray assert annos['return'] == str def test_get_str_array_2d(): with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_2d(123) + package_target.get_str_array_2d(123) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_2d(12.3) + package_target.get_str_array_2d(12.3) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_2d(np.zeros(10)) + package_target.get_str_array_2d(np.zeros(10)) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_2d(np.zeros((10, 2, 2))) + package_target.get_str_array_2d(np.zeros((10, 2, 2))) - str_ = utils_logger.get_str_array_2d(np.array([[1, 2, 3], [2, 2, 2]])) + str_ = package_target.get_str_array_2d(np.array([[1, 2, 3], [2, 2, 2]])) print(str_) assert str_ == '[[1, 2, 3],\n[2, 2, 2]]' - str_ = utils_logger.get_str_array_2d(np.array([[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]])) + str_ = package_target.get_str_array_2d(np.array([[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]])) print(str_) assert str_ == '[[1.100, 2.200, 3.330],\n[2.200, 2.400, 2.900]]' def test_get_str_array_3d_typing(): - annos = utils_logger.get_str_array_3d.__annotations__ + annos = package_target.get_str_array_3d.__annotations__ assert annos['arr'] == np.ndarray assert annos['return'] == str def test_get_str_array_3d(): with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_3d(123) + package_target.get_str_array_3d(123) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_3d(12.3) + package_target.get_str_array_3d(12.3) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_3d(np.zeros(10)) + package_target.get_str_array_3d(np.zeros(10)) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array_3d(np.zeros((10, 2))) + package_target.get_str_array_3d(np.zeros((10, 2))) - str_ = utils_logger.get_str_array_3d(np.array([[[1, 2, 3], [2, 2, 2]], [[1, 2, 3], [2, 2, 2]]])) + str_ = package_target.get_str_array_3d(np.array([[[1, 2, 3], [2, 2, 2]], [[1, 2, 3], [2, 2, 2]]])) print(str_) assert str_ == '[[[1, 2, 3],\n[2, 2, 2]],\n[[1, 2, 3],\n[2, 2, 2]]]' - str_ = utils_logger.get_str_array_3d(np.array([[[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]], [[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]]])) + str_ = package_target.get_str_array_3d(np.array([[[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]], [[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]]])) print(str_) assert str_ == '[[[1.100, 2.200, 3.330],\n[2.200, 2.400, 2.900]],\n[[1.100, 2.200, 
3.330],\n[2.200, 2.400, 2.900]]]' def test_get_str_array_typing(): - annos = utils_logger.get_str_array.__annotations__ + annos = package_target.get_str_array.__annotations__ assert annos['arr'] == np.ndarray assert annos['return'] == str def test_get_str_array(): with pytest.raises(AssertionError) as error: - utils_logger.get_str_array(123) + package_target.get_str_array(123) with pytest.raises(AssertionError) as error: - utils_logger.get_str_array(12.3) + package_target.get_str_array(12.3) - str_ = utils_logger.get_str_array(np.array([1, 2, 3])) + str_ = package_target.get_str_array(np.array([1, 2, 3])) print(str_) assert str_ == '[1, 2, 3]' - str_ = utils_logger.get_str_array(np.array([1.1, 2.5, 3.0])) + str_ = package_target.get_str_array(np.array([1.1, 2.5, 3.0])) print(str_) assert str_ == '[1.100, 2.500, 3.000]' - str_ = utils_logger.get_str_array(np.array([[1, 2, 3], [2, 2, 2]])) + str_ = package_target.get_str_array(np.array([[1, 2, 3], [2, 2, 2]])) print(str_) assert str_ == '[[1, 2, 3],\n[2, 2, 2]]' - str_ = utils_logger.get_str_array(np.array([[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]])) + str_ = package_target.get_str_array(np.array([[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]])) print(str_) assert str_ == '[[1.100, 2.200, 3.330],\n[2.200, 2.400, 2.900]]' - str_ = utils_logger.get_str_array(np.array([[[1, 2, 3], [2, 2, 2]], [[1, 2, 3], [2, 2, 2]]])) + str_ = package_target.get_str_array(np.array([[[1, 2, 3], [2, 2, 2]], [[1, 2, 3], [2, 2, 2]]])) print(str_) assert str_ == '[[[1, 2, 3],\n[2, 2, 2]],\n[[1, 2, 3],\n[2, 2, 2]]]' - str_ = utils_logger.get_str_array(np.array([[[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]], [[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]]])) + str_ = package_target.get_str_array(np.array([[[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]], [[1.1, 2.2, 3.33], [2.2, 2.4, 2.9]]])) print(str_) assert str_ == '[[[1.100, 2.200, 3.330],\n[2.200, 2.400, 2.900]],\n[[1.100, 2.200, 3.330],\n[2.200, 2.400, 2.900]]]' def test_get_str_hyps_typing(): - annos = utils_logger.get_str_hyps.__annotations__ + annos = package_target.get_str_hyps.__annotations__ assert annos['hyps'] == dict assert annos['return'] == str def test_get_str_hyps(): with pytest.raises(AssertionError) as error: - utils_logger.get_str_hyps(123) + package_target.get_str_hyps(123) with pytest.raises(AssertionError) as error: - utils_logger.get_str_hyps(12.3) + package_target.get_str_hyps(12.3) with pytest.raises(AssertionError) as error: - utils_logger.get_str_hyps('abc') + package_target.get_str_hyps('abc') with pytest.raises(AssertionError) as error: - utils_logger.get_str_hyps(np.zeros(3)) + package_target.get_str_hyps(np.zeros(3)) hyps = {'signal': 1.0, 'noise': 1e-4, 'lengthscales': np.array([1.0, 2.0])} - str_ = utils_logger.get_str_hyps(hyps) + str_ = package_target.get_str_hyps(hyps) print(str_) list_truths = [ "{'signal': 1.000, 'noise': 0.000, 'lengthscales': [1.000, 2.000]}", @@ -171,7 +171,7 @@ def test_get_str_hyps(): assert str_ in list_truths hyps = {'signal': 1, 'noise': 1e-3, 'lengthscales': np.array([1.0, 2.0])} - str_ = utils_logger.get_str_hyps(hyps) + str_ = package_target.get_str_hyps(hyps) print(str_) list_truths = [ "{'signal': 1, 'noise': 0.001, 'lengthscales': [1.000, 2.000]}", diff --git a/tests/common/test_utils_plotting.py b/tests/common/test_utils_plotting.py index 2133e0c..21b1b4e 100644 --- a/tests/common/test_utils_plotting.py +++ b/tests/common/test_utils_plotting.py @@ -8,13 +8,13 @@ import pytest import numpy as np -from bayeso.utils import utils_plotting +from bayeso.utils import utils_plotting as package_target 
TEST_EPSILON = 1e-5 def test_plot_gp_via_sample_typing(): - annos = utils_plotting.plot_gp_via_sample.__annotations__ + annos = package_target.plot_gp_via_sample.__annotations__ assert annos['X'] == np.ndarray assert annos['Ys'] == np.ndarray @@ -38,38 +38,38 @@ def test_plot_gp_via_sample(): Ys = np.ones((num_train, num_Ys)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample('abc', Ys) + package_target.plot_gp_via_sample('abc', Ys) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, 'abc') + package_target.plot_gp_via_sample(X, 'abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(np.zeros(num_train), Ys) + package_target.plot_gp_via_sample(np.zeros(num_train), Ys) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, np.ones(num_train)) + package_target.plot_gp_via_sample(X, np.ones(num_train)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, np.ones((10, num_Ys))) + package_target.plot_gp_via_sample(X, np.ones((10, num_Ys))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, path_save=123) + package_target.plot_gp_via_sample(X, Ys, path_save=123) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, str_postfix=123) + package_target.plot_gp_via_sample(X, Ys, str_postfix=123) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, str_x_axis=123) + package_target.plot_gp_via_sample(X, Ys, str_x_axis=123) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, str_y_axis=123) + package_target.plot_gp_via_sample(X, Ys, str_y_axis=123) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, use_tex='abc') + package_target.plot_gp_via_sample(X, Ys, use_tex='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, draw_zero_axis='abc') + package_target.plot_gp_via_sample(X, Ys, draw_zero_axis='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, pause_figure='abc') + package_target.plot_gp_via_sample(X, Ys, pause_figure='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, time_pause='abc') + package_target.plot_gp_via_sample(X, Ys, time_pause='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_sample(X, Ys, colors='abc') + package_target.plot_gp_via_sample(X, Ys, colors='abc') def test_plot_gp_via_distribution_typing(): - annos = utils_plotting.plot_gp_via_distribution.__annotations__ + annos = package_target.plot_gp_via_distribution.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -101,78 +101,78 @@ def test_plot_gp_via_distribution(): mu = np.zeros((num_test, dim_Y)) sigma = np.zeros((num_test, dim_Y)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, 1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, 1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, np.arange(0, num_test)) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, np.arange(0, num_test)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, 1, sigma) + 
package_target.plot_gp_via_distribution(X_train, Y_train, X_test, 1, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, np.arange(0, num_test), sigma) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, np.arange(0, num_test), sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, 1, mu, sigma) + package_target.plot_gp_via_distribution(X_train, Y_train, 1, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, np.arange(0, num_test), mu, sigma) + package_target.plot_gp_via_distribution(X_train, Y_train, np.arange(0, num_test), mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, 1, X_test, mu, sigma) + package_target.plot_gp_via_distribution(X_train, 1, X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, np.arange(0, num_train), X_test, mu, sigma) + package_target.plot_gp_via_distribution(X_train, np.arange(0, num_train), X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(1, Y_train, X_test, mu, sigma) + package_target.plot_gp_via_distribution(1, Y_train, X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(np.arange(0, num_test), Y_train, X_test, mu, sigma) + package_target.plot_gp_via_distribution(np.arange(0, num_test), Y_train, X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(np.zeros((num_train, 2)), Y_train, X_test, mu, sigma) + package_target.plot_gp_via_distribution(np.zeros((num_train, 2)), Y_train, X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, np.zeros((num_train, 2)), mu, sigma) + package_target.plot_gp_via_distribution(X_train, Y_train, np.zeros((num_train, 2)), mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, np.ones((num_train, 2)), X_test, mu, sigma) + package_target.plot_gp_via_distribution(X_train, np.ones((num_train, 2)), X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, np.ones((10, 1)), X_test, mu, sigma) + package_target.plot_gp_via_distribution(X_train, np.ones((10, 1)), X_test, mu, sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, np.zeros((num_test, 2)), sigma) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, np.zeros((num_test, 2)), sigma) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, np.zeros((num_test, 2))) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, np.zeros((num_test, 2))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, np.zeros((11, 1))) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, np.zeros((11, 1))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, np.zeros((11, 1)), sigma) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, np.zeros((11, 1)), sigma) with pytest.raises(AssertionError) as error: - 
utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=np.arange(0, num_test)) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=np.arange(0, num_test)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=np.zeros((num_test, 2))) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=np.zeros((num_test, 2))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=np.zeros((20, 1))) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=np.zeros((20, 1))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, Y_test=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, path_save=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, path_save=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, str_postfix=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, str_postfix=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, str_x_axis=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, str_x_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, str_y_axis=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, str_y_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, use_tex=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, use_tex=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, draw_zero_axis=1) + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, draw_zero_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, pause_figure='abc') + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, pause_figure='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, time_pause='abc') + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, time_pause='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, range_shade='abc') + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, range_shade='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, colors='abc') + package_target.plot_gp_via_distribution(X_train, Y_train, X_test, mu, sigma, colors='abc') def test_plot_minimum_vs_iter_typing(): - annos = utils_plotting.plot_minimum_vs_iter.__annotations__ + annos = package_target.plot_minimum_vs_iter.__annotations__ assert annos['minima'] == np.ndarray assert annos['list_str_label'] == list @@ 
-202,51 +202,51 @@ def test_plot_minimum_vs_iter(): draw_std = True with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, 1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, 1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, 'abc', draw_std) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, 'abc', draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, 1, num_init, draw_std) + package_target.plot_minimum_vs_iter(arr_minima, 1, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(1, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_iter(1, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(np.ones((10, 2)), list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_iter(np.ones((10, 2)), list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(np.ones(2), list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_iter(np.ones(2), list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, ['abc', 'def', 'ghi'], num_init, draw_std) + package_target.plot_minimum_vs_iter(arr_minima, ['abc', 'def', 'ghi'], num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, 12, draw_std) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, 12, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, include_marker=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, include_marker=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, include_legend=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, include_legend=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, use_tex=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, use_tex=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, path_save=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, path_save=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, str_postfix=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, str_postfix=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, str_x_axis=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, str_x_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, str_y_axis=1) + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, str_y_axis=1) with pytest.raises(AssertionError) as error: - 
utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, pause_figure='abc') + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, pause_figure='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, time_pause='abc') + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, time_pause='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, range_shade='abc') + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, range_shade='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, markers='abc') + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, markers='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, colors='abc') + package_target.plot_minimum_vs_iter(arr_minima, list_str_label, num_init, draw_std, colors='abc') def test_plot_minimum_vs_time_typing(): - annos = utils_plotting.plot_minimum_vs_time.__annotations__ + annos = package_target.plot_minimum_vs_time.__annotations__ assert annos['times'] == np.ndarray assert annos['minima'] == np.ndarray @@ -278,65 +278,65 @@ def test_plot_minimum_vs_time(): draw_std = True with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, 1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, 1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, 'abc', draw_std) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, 'abc', draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, 1, num_init, draw_std) + package_target.plot_minimum_vs_time(arr_times, arr_minima, 1, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, 1, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(arr_times, 1, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(1, arr_minima, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(1, arr_minima, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(np.ones((4, num_bo, num_iter)), arr_minima, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(np.ones((4, num_bo, num_iter)), arr_minima, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(np.ones((num_model, 4, num_iter)), arr_minima, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(np.ones((num_model, 4, num_iter)), arr_minima, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(np.ones((num_model, num_bo, 25)), arr_minima, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(np.ones((num_model, num_bo, 25)), arr_minima, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - 
utils_plotting.plot_minimum_vs_time(np.ones((num_bo, num_iter)), arr_minima, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(np.ones((num_bo, num_iter)), arr_minima, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(np.ones(num_iter), arr_minima, list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(np.ones(num_iter), arr_minima, list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, np.ones((10, 2)), list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(arr_times, np.ones((10, 2)), list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, np.ones(2), list_str_label, num_init, draw_std) + package_target.plot_minimum_vs_time(arr_times, np.ones(2), list_str_label, num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, ['abc', 'def', 'ghi'], num_init, draw_std) + package_target.plot_minimum_vs_time(arr_times, arr_minima, ['abc', 'def', 'ghi'], num_init, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, 12, draw_std) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, 12, draw_std) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, include_marker=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, include_marker=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, include_legend=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, include_legend=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, use_tex=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, use_tex=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, path_save=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, path_save=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, str_postfix=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, str_postfix=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, str_x_axis=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, str_x_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, str_y_axis=1) + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, str_y_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, pause_figure='abc') + package_target.plot_minimum_vs_time(arr_times, arr_minima, 
list_str_label, num_init, draw_std, pause_figure='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, time_pause='abc') + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, time_pause='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, range_shade='abc') + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, range_shade='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, markers='abc') + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, markers='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, colors='abc') + package_target.plot_minimum_vs_time(arr_times, arr_minima, list_str_label, num_init, draw_std, colors='abc') def test_plot_bo_step_typing(): - annos = utils_plotting.plot_bo_step.__annotations__ + annos = package_target.plot_bo_step.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -369,76 +369,76 @@ def test_plot_bo_step(): std_test = np.ones((num_test, num_dim_Y)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, 1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, 1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, 1, std_test) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, 1, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, 1, mean_test, std_test) + package_target.plot_bo_step(X_train, Y_train, X_test, 1, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, 1, Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, Y_train, 1, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, 1, X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, 1, X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(1, Y_train, X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(1, Y_train, X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(np.arange(0, 10), Y_train, X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(np.arange(0, 10), Y_train, X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, np.arange(0, 10), X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, np.arange(0, 10), X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, np.arange(0, 10), Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, Y_train, np.arange(0, 10), Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, np.arange(0, 10), mean_test, std_test) + package_target.plot_bo_step(X_train, Y_train, 
X_test, np.arange(0, 10), mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, np.arange(0, 10), std_test) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, np.arange(0, 10), std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, np.arange(0, 10)) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, np.arange(0, 10)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(np.ones((num_train, 2)), Y_train, X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(np.ones((num_train, 2)), Y_train, X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, np.ones((num_test, 2)), Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, Y_train, np.ones((num_test, 2)), Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, np.ones((num_train, 2)), X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, np.ones((num_train, 2)), X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(np.ones((30, num_dim_X)), Y_train, X_test, Y_test, mean_test, std_test) + package_target.plot_bo_step(np.ones((30, num_dim_X)), Y_train, X_test, Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, np.ones((num_test, 2)), std_test) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, np.ones((num_test, 2)), std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, np.ones((num_test, 2))) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, np.ones((num_test, 2))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, np.ones((30, num_dim_X)), Y_test, mean_test, std_test) + package_target.plot_bo_step(X_train, Y_train, np.ones((30, num_dim_X)), Y_test, mean_test, std_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, np.ones((30, num_dim_Y))) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, np.ones((30, num_dim_Y))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, num_init=20) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, num_init=20) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, path_save=1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, path_save=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, str_postfix=1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, str_postfix=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, str_x_axis=1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, str_x_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, 
Y_test, mean_test, std_test, str_y_axis=1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, str_y_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, num_init='abc') + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, num_init='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, use_tex=1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, use_tex=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, draw_zero_axis=1) + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, draw_zero_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, pause_figure='abc') + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, pause_figure='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, time_pause='abc') + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, time_pause='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, range_shade='abc') + package_target.plot_bo_step(X_train, Y_train, X_test, Y_test, mean_test, std_test, range_shade='abc') def test_plot_bo_step_with_acq_typing(): - annos = utils_plotting.plot_bo_step_with_acq.__annotations__ + annos = package_target.plot_bo_step_with_acq.__annotations__ assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray @@ -474,74 +474,74 @@ def test_plot_bo_step_with_acq(): acq_test = np.ones((num_test, num_dim_Y)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(np.arange(0, 10), Y_train, X_test, Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(np.arange(0, 10), Y_train, X_test, Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, np.arange(0, 10), X_test, Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, np.arange(0, 10), X_test, Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, np.arange(0, 10), Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, np.arange(0, 10), Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, np.arange(0, 10), mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, np.arange(0, 10), mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, np.arange(0, 10), std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, np.arange(0, 10), std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, np.arange(0, 10), acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, 
Y_test, mean_test, np.arange(0, 10), acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, np.arange(0, 10)) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, np.arange(0, 10)) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(np.ones((num_train, 2)), Y_train, X_test, Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(np.ones((num_train, 2)), Y_train, X_test, Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, np.ones((num_test, 2)), Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, np.ones((num_test, 2)), Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, np.ones((num_train, 2)), X_test, Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, np.ones((num_train, 2)), X_test, Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, np.ones((num_test, 2)), mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, np.ones((num_test, 2)), mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, np.ones((num_test, 2)), Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, np.ones((num_test, 2)), Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(np.ones((30, num_dim_X)), Y_train, X_test, Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(np.ones((30, num_dim_X)), Y_train, X_test, Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, np.ones((30, num_dim_Y)), X_test, Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, np.ones((30, num_dim_Y)), X_test, Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, np.ones((num_test, 2)), std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, np.ones((num_test, 2)), std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, np.ones((num_test, 2)), acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, np.ones((num_test, 2)), acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, np.ones((num_test, 2))) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, np.ones((num_test, 2))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, np.ones((30, num_dim_X)), Y_test, mean_test, std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, np.ones((30, num_dim_X)), Y_test, mean_test, std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, 
Y_test, mean_test, std_test, np.ones((30, num_dim_Y))) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, np.ones((30, num_dim_Y))) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, np.ones((30, num_dim_Y)), acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, np.ones((30, num_dim_Y)), acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, np.ones((30, num_dim_Y)), std_test, acq_test) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, np.ones((30, num_dim_Y)), std_test, acq_test) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, num_init=30) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, num_init=30) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, path_save=1) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, path_save=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_postfix=1) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_postfix=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_x_axis=1) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_x_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_y_axis=1) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_y_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_acq_axis=1) + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, str_acq_axis=1) with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, num_init='abc') + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, num_init='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, use_tex='abc') + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, use_tex='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, draw_zero_axis='abc') + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, draw_zero_axis='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, pause_figure='abc') + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, 
std_test, acq_test, pause_figure='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, time_pause='abc') + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, time_pause='abc') with pytest.raises(AssertionError) as error: - utils_plotting.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, range_shade='abc') + package_target.plot_bo_step_with_acq(X_train, Y_train, X_test, Y_test, mean_test, std_test, acq_test, range_shade='abc') diff --git a/tests/common/test_wrappers_bo.py b/tests/common/test_wrappers_bo.py index 9248b30..abe7dba 100644 --- a/tests/common/test_wrappers_bo.py +++ b/tests/common/test_wrappers_bo.py @@ -9,11 +9,11 @@ import numpy as np from bayeso import bo -from bayeso.wrappers import wrappers_bo +from bayeso.wrappers import wrappers_bo as package_target def test_run_single_round_with_all_initial_information_typing(): - annos = wrappers_bo.run_single_round_with_all_initial_information.__annotations__ + annos = package_target.run_single_round_with_all_initial_information.__annotations__ assert annos['model_bo'] == bo.BO assert annos['fun_target'] == callable @@ -39,31 +39,31 @@ def test_run_single_round_with_all_initial_information(): model_bo = bo.BO(arr_range) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(1, fun_target, X, Y, num_iter) + package_target.run_single_round_with_all_initial_information(1, fun_target, X, Y, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, 1, X, Y, num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, 1, X, Y, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, 1, Y, num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, 1, Y, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, 1, num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, 1, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, 'abc') + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, 'abc') with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, np.random.randn(num_X), Y, num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, np.random.randn(num_X), Y, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, np.random.randn(num_X), num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, np.random.randn(num_X), num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, np.random.randn(2, dim_X), Y, num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, np.random.randn(2, dim_X), Y, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, 
fun_target, X, np.random.randn(num_X, 2), num_iter) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, np.random.randn(num_X, 2), num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter, str_sampling_method_ao=1) + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter, str_sampling_method_ao=1) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter, str_sampling_method_ao='abc') + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter, str_sampling_method_ao='abc') with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter, num_samples_ao='abc') + package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter, num_samples_ao='abc') - X_final, Y_final, time_all_final, time_gp_final, time_acq_final = wrappers_bo.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter) + X_final, Y_final, time_all_final, time_gp_final, time_acq_final = package_target.run_single_round_with_all_initial_information(model_bo, fun_target, X, Y, num_iter) assert len(X_final.shape) == 2 assert len(Y_final.shape) == 2 assert len(time_all_final.shape) == 1 @@ -76,7 +76,7 @@ def test_run_single_round_with_all_initial_information(): assert time_gp_final.shape[0] == time_acq_final.shape[0] def test_run_single_round_with_initial_inputs_typing(): - annos = wrappers_bo.run_single_round_with_initial_inputs.__annotations__ + annos = package_target.run_single_round_with_initial_inputs.__annotations__ assert annos['model_bo'] == bo.BO assert annos['fun_target'] == callable @@ -100,23 +100,23 @@ def test_run_single_round_with_initial_inputs(): model_bo = bo.BO(arr_range) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(1, fun_target, X, num_iter) + package_target.run_single_round_with_initial_inputs(1, fun_target, X, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, 1, X, num_iter) + package_target.run_single_round_with_initial_inputs(model_bo, 1, X, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, 1, num_iter) + package_target.run_single_round_with_initial_inputs(model_bo, fun_target, 1, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, X, 1.2) + package_target.run_single_round_with_initial_inputs(model_bo, fun_target, X, 1.2) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, np.random.randn(num_X), num_iter) + package_target.run_single_round_with_initial_inputs(model_bo, fun_target, np.random.randn(num_X), num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter, str_sampling_method_ao=1) + package_target.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter, str_sampling_method_ao=1) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter, str_sampling_method_ao='abc') + 
package_target.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter, str_sampling_method_ao='abc') with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter, num_samples_ao='abc') + package_target.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter, num_samples_ao='abc') - X_final, Y_final, time_all_final, time_gp_final, time_acq_final = wrappers_bo.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter) + X_final, Y_final, time_all_final, time_gp_final, time_acq_final = package_target.run_single_round_with_initial_inputs(model_bo, fun_target, X, num_iter) assert len(X_final.shape) == 2 assert len(Y_final.shape) == 2 assert len(time_all_final.shape) == 1 @@ -128,7 +128,7 @@ def test_run_single_round_with_initial_inputs(): assert time_gp_final.shape[0] == time_acq_final.shape[0] def test_run_single_round_typing(): - annos = wrappers_bo.run_single_round.__annotations__ + annos = package_target.run_single_round.__annotations__ assert annos['model_bo'] == bo.BO assert annos['fun_target'] == callable @@ -152,27 +152,27 @@ def test_run_single_round(): model_bo = bo.BO(arr_range, debug=True) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(1, fun_target, num_X, num_iter) + package_target.run_single_round(1, fun_target, num_X, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, 1, num_X, num_iter) + package_target.run_single_round(model_bo, 1, num_X, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, 1.2, num_iter) + package_target.run_single_round(model_bo, fun_target, 1.2, num_iter) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, 1.2) + package_target.run_single_round(model_bo, fun_target, num_X, 1.2) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo=1) + package_target.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo=1) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo='abc') + package_target.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo='abc') with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo='grid') + package_target.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo='grid') with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, str_sampling_method_ao=1) + package_target.run_single_round(model_bo, fun_target, num_X, num_iter, str_sampling_method_ao=1) with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, str_sampling_method_ao='abc') + package_target.run_single_round(model_bo, fun_target, num_X, num_iter, str_sampling_method_ao='abc') with pytest.raises(AssertionError) as error: - wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, seed=1.2) + package_target.run_single_round(model_bo, fun_target, num_X, num_iter, seed=1.2) - X_final, Y_final, time_all_final, time_gp_final, time_acq_final = wrappers_bo.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo='uniform') + X_final, Y_final, 
time_all_final, time_gp_final, time_acq_final = package_target.run_single_round(model_bo, fun_target, num_X, num_iter, str_initial_method_bo='uniform') assert len(X_final.shape) == 2 assert len(Y_final.shape) == 2 assert len(time_all_final.shape) == 1 From f99b9e3f721ea3fa05a911bcbc6358e847d73274 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Wed, 30 Dec 2020 18:05:07 +0900 Subject: [PATCH 07/37] Add student t process --- bayeso/constants.py | 3 + bayeso/gp/gp_scipy.py | 5 +- bayeso/tp/__init__.py | 5 + bayeso/tp/tp.py | 476 +++++++++++++++++++++++++ bayeso/utils/utils_covariance.py | 47 ++- examples/99_notebooks/example_tp.ipynb | 267 ++++++++++++++ 6 files changed, 798 insertions(+), 5 deletions(-) create mode 100644 bayeso/tp/__init__.py create mode 100644 bayeso/tp/tp.py create mode 100644 examples/99_notebooks/example_tp.ipynb diff --git a/bayeso/constants.py b/bayeso/constants.py index 20c35c2..fcdc711 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -42,6 +42,7 @@ RANGE_SHADE = 1.96 ALLOWED_OPTIMIZER_METHOD_GP = ['BFGS', 'L-BFGS-B', 'Nelder-Mead', 'DIRECT'] +ALLOWED_OPTIMIZER_METHOD_TP = ['L-BFGS-B'] ALLOWED_OPTIMIZER_METHOD_BO = ['L-BFGS-B', 'DIRECT', 'CMA-ES'] # INFO: Do not use _ (underscore) in base str_cov. ALLOWED_GP_COV_BASE = ['eq', 'se', 'matern32', 'matern52'] @@ -101,6 +102,8 @@ TYPING_TUPLE_TWO_ARRAYS_DICT = typing.Tuple[np.ndarray, np.ndarray, dict] TYPING_TUPLE_THREE_ARRAYS = typing.Tuple[np.ndarray, np.ndarray, np.ndarray] TYPING_TUPLE_FIVE_ARRAYS = typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] +TYPING_TUPLE_FLOAT_THREE_ARRAYS = typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] +TYPING_TUPLE_FLOAT_ARRAY = typing.Tuple[float, np.ndarray] TYPING_UNION_INT_NONE = typing.Union[int, TYPE_NONE] TYPING_UNION_INT_FLOAT = typing.Union[int, float] diff --git a/bayeso/gp/gp_scipy.py b/bayeso/gp/gp_scipy.py index ae714e9..e5d131f 100644 --- a/bayeso/gp/gp_scipy.py +++ b/bayeso/gp/gp_scipy.py @@ -8,6 +8,7 @@ import numpy as np import scipy.linalg import scipy.optimize +import typing from bayeso import covariance from bayeso import constants @@ -26,7 +27,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, use_cholesky: bool=True, use_gradient: bool=True, debug: bool=False -) -> constants.TYPING_UNION_FLOAT_TWO_FLOATS: +) -> typing.Union[float, constants.TYPING_TUPLE_FLOAT_ARRAY]: """ This function computes a negative log marginal likelihood. @@ -52,7 +53,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, :returns: negative log marginal likelihood, or (negative log marginal likelihood, gradients of the likelihood). 
- :rtype: float, or tuple of (float, float) + :rtype: float, or tuple of (float, np.ndarray) :raises: AssertionError diff --git a/bayeso/tp/__init__.py b/bayeso/tp/__init__.py new file mode 100644 index 0000000..dd83ce5 --- /dev/null +++ b/bayeso/tp/__init__.py @@ -0,0 +1,5 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: December 29, 2020 +# +"""These files are for implementing Student-$t$ process regression.""" diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py new file mode 100644 index 0000000..909856d --- /dev/null +++ b/bayeso/tp/tp.py @@ -0,0 +1,476 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: December 30, 2020 +# +"""It defines Student-$t$ process regression.""" + +import time +import numpy as np +import scipy.stats +import scipy.linalg +import scipy.optimize +import scipy.special +import typing + +from bayeso import covariance +from bayeso import constants +from bayeso.utils import utils_gp +from bayeso.utils import utils_covariance +from bayeso.utils import utils_common +from bayeso.utils import utils_logger + +logger = utils_logger.get_logger('tp') + + +@utils_common.validate_types +def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, + str_cov: str, prior_mu_train: np.ndarray, + fix_noise: bool=constants.FIX_GP_NOISE, + use_gradient: bool=True, + debug: bool=False +) -> typing.Union[float, constants.TYPING_TUPLE_FLOAT_ARRAY]: + """ + This function computes a negative log marginal likelihood. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param hyps: hyperparameters for Gaussian process. Shape: (h, ). + :type hyps: numpy.ndarray + :param str_cov: the name of covariance function. + :type str_cov: str. + :param prior_mu_train: the prior values computed by get_prior_mu(). Shape: (n, 1). + :type prior_mu_train: numpy.ndarray + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param use_gradient: flag for computing and returning gradients of + negative log marginal likelihood. + :type use_gradient: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: negative log marginal likelihood, or (negative log marginal + likelihood, gradients of the likelihood). 
+ :rtype: float, or tuple of (float, np.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(hyps, np.ndarray) + assert isinstance(str_cov, str) + assert isinstance(prior_mu_train, np.ndarray) + assert isinstance(fix_noise, bool) + assert isinstance(use_gradient, bool) + assert isinstance(debug, bool) + assert len(Y_train.shape) == 2 + assert len(prior_mu_train.shape) == 2 + assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] + utils_covariance.check_str_cov('neg_log_ml', str_cov, X_train.shape) + + num_X = float(X_train.shape[0]) + hyps = utils_covariance.restore_hyps(str_cov, hyps, + fix_noise=fix_noise, use_gp=False) + new_Y_train = Y_train - prior_mu_train + nu = hyps['dof'] + + cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X_train, + hyps, str_cov, fix_noise=fix_noise, use_gradient=use_gradient, + debug=debug) + + alpha = np.dot(inv_cov_X_X, new_Y_train) + beta = np.squeeze(np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train)) + + first_term = -0.5 * num_X * np.log((nu - 2.0) * np.pi) + second_term = -0.5 * np.log(np.linalg.det(cov_X_X) + constants.JITTER_LOG) + third_term = np.log(scipy.special.gamma((nu + num_X) / 2.0) / scipy.special.gamma(nu / 2.0)) + fourth_term = -0.5 * (nu + num_X) * np.log(1.0 + beta / (nu - 2.0)) + + log_ml_ = np.squeeze(first_term + second_term + third_term + fourth_term) + log_ml_ /= num_X + + if use_gradient: + assert grad_cov_X_X is not None + grad_log_ml_ = np.zeros(grad_cov_X_X.shape[2] + 1) + + first_term_grad = ((nu + num_X) / (nu + beta - 2.0) * np.dot(alpha, alpha.T) - inv_cov_X_X) + nu_grad = -num_X / (2.0 * (nu - 2.0)) + scipy.special.digamma((nu + num_X) / 2.0) - scipy.special.digamma(nu / 2.0) - 0.5 * np.log(1.0 + beta / (nu - 2.0)) + (nu + num_X) * beta / (2.0 * (nu - 2.0)**2 + 2.0 * beta * (nu - 2.0)) + + if fix_noise: + grad_log_ml_[0] = nu_grad + else: + grad_log_ml_[1] = nu_grad + + for ind in range(0, grad_cov_X_X.shape[2]): + cur_grad = 0.5 * np.trace(np.dot(first_term_grad, grad_cov_X_X[:, :, ind])) + if fix_noise: + grad_log_ml_[ind + 1] = cur_grad + else: + if ind == 0: + cur_ind = 0 + else: + cur_ind = ind + 1 + + grad_log_ml_[cur_ind] = cur_grad + + if use_gradient: + return -1.0 * log_ml_, -1.0 * grad_log_ml_ / num_X + + return -1.0 * log_ml_ + +@utils_common.validate_types +def sample_functions(nu: float, mu: np.ndarray, Sigma: np.ndarray, + num_samples: int=1 +) -> np.ndarray: + """ + It samples `num_samples` functions from multivariate Student-$t$ distribution (nu, mu, Sigma). + + :param mu: mean vector. Shape: (n, ). + :type mu: numpy.ndarray + :param Sigma: covariance matrix. Shape: (n, n). + :type Sigma: numpy.ndarray + :param num_samples: the number of sampled functions + :type num_samples: int., optional + + :returns: sampled functions. Shape: (num_samples, n). 
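+        Each sample is drawn as mu + z / sqrt(w), where z ~ N(0, Sigma) and
+        w ~ chi-square(`nu`) / `nu`; w is fixed to 1 when `nu` is infinite.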
+ :rtype: numpy.ndarray + + :raises: AssertionError + + """ + + assert isinstance(nu, float) + assert isinstance(mu, np.ndarray) + assert isinstance(Sigma, np.ndarray) + assert isinstance(num_samples, int) + assert len(mu.shape) == 1 + assert len(Sigma.shape) == 2 + assert mu.shape[0] == Sigma.shape[0] == Sigma.shape[1] + + if nu == np.inf: + x = np.array([1.0] * num_samples) + else: + x = np.random.chisquare(nu, num_samples) / nu + + rv = scipy.stats.multivariate_normal(mean=np.zeros(mu.shape[0]), cov=Sigma) + list_samples = [rv.rvs() for _ in range(0, num_samples)] + + samples = np.array(list_samples) + samples = mu[np.newaxis, ...] + samples / np.sqrt(x)[..., np.newaxis] + assert samples.shape == (num_samples, mu.shape[0]) + + return samples + +@utils_common.validate_types +def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, + str_optimizer_method: str='L-BFGS-B', + fix_noise: bool=constants.FIX_GP_NOISE, + debug: bool=False +) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: + """ + This function computes the kernel matrix optimized by optimization + method specified, its inverse matrix, and the optimized hyperparameters. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param prior_mu: prior mean function or None. + :type prior_mu: function or NoneType + :param str_cov: the name of covariance function. + :type str_cov: str. + :param str_optimizer_method: the name of optimization method. + :type str_optimizer_method: str., optional + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of kernel matrix over `X_train`, kernel matrix + inverse, and dictionary of hyperparameters. + :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) + + :raises: AssertionError, ValueError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert callable(prior_mu) or prior_mu is None + assert isinstance(str_cov, str) + assert isinstance(str_optimizer_method, str) + assert isinstance(fix_noise, bool) + assert isinstance(debug, bool) + assert len(Y_train.shape) == 2 + assert X_train.shape[0] == Y_train.shape[0] + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_TP + + # TODO: Fix it later. 
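+    # Analytic gradients of the negative log marginal likelihood are used by default;
+    # use_gradient is switched off below when a set-based covariance
+    # (constants.ALLOWED_GP_COV_SET) is chosen.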
+ use_gradient = True + + time_start = time.time() + + if debug: + logger.debug('str_optimizer_method: %s', str_optimizer_method) + + prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) + if str_cov in constants.ALLOWED_GP_COV_BASE: + num_dim = X_train.shape[1] + elif str_cov in constants.ALLOWED_GP_COV_SET: + num_dim = X_train.shape[2] + use_gradient = False + + neg_log_ml_ = lambda hyps: neg_log_ml(X_train, Y_train, hyps, str_cov, + prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, + debug=debug) + + hyps_converted = utils_covariance.convert_hyps( + str_cov, + utils_covariance.get_hyps(str_cov, num_dim, use_gp=False), + fix_noise=fix_noise, + use_gp=False + ) + + if str_optimizer_method == 'L-BFGS-B': + bounds = utils_covariance.get_range_hyps(str_cov, num_dim, + fix_noise=fix_noise, use_gp=False) + result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, + method=str_optimizer_method, bounds=bounds, jac=use_gradient, + options={'disp': False}) + if debug: + logger.debug('scipy message: %s', result_optimized.message) + + result_optimized = result_optimized.x + else: # pragma: no cover + raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') + + hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise, use_gp=False) + + hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, + hyps, str_cov, fix_noise=fix_noise, debug=debug) + + time_end = time.time() + + if debug: + logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) + logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + return cov_X_X, inv_cov_X_X, hyps + +@utils_common.validate_types +def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, + cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, + str_cov: str=constants.STR_GP_COV, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + debug: bool=False +) -> constants.TYPING_TUPLE_FLOAT_THREE_ARRAYS: + """ + This function returns degree of freedom, posterior mean, + posterior standard variance, and + posterior covariance functions over `X_test`, + computed by Student-$t$ process regression with + `X_train`, `Y_train`, `cov_X_X`, `inv_cov_X_X`, and `hyps`. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param X_test: inputs. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray + :param cov_X_X: kernel matrix over `X_train`. Shape: (n, n). + :type cov_X_X: numpy.ndarray + :param inv_cov_X_X: kernel matrix inverse over `X_train`. Shape: (n, n). + :type inv_cov_X_X: numpy.ndarray + :param hyps: dictionary of hyperparameters for Student-$t$ process. + :type hyps: dict. + :param str_cov: the name of covariance function. + :type str_cov: str., optional + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function, optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of degree of freedom, + posterior mean function over `X_test`, + posterior standrad variance function over `X_test`, and + posterior covariance matrix over `X_test`. + Shape: ((), (l, 1), (l, 1), (l, l)). 
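+        The returned degrees of freedom equal `hyps['dof']` + n, where n is the
+        number of training examples.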
+ :rtype: tuple of (float, numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(X_test, np.ndarray) + assert isinstance(cov_X_X, np.ndarray) + assert isinstance(inv_cov_X_X, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(str_cov, str) + assert isinstance(debug, bool) + assert callable(prior_mu) or prior_mu is None + assert len(Y_train.shape) == 2 + assert len(cov_X_X.shape) == 2 + assert len(inv_cov_X_X.shape) == 2 + assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all() + utils_covariance.check_str_cov('predict_with_cov', str_cov, + X_train.shape, shape_X2=X_test.shape) + assert X_train.shape[0] == Y_train.shape[0] + assert X_train.shape[1] == X_test.shape[1] + + prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) + prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test) + cov_X_Xs = covariance.cov_main(str_cov, X_train, X_test, hyps, False) + cov_Xs_Xs = covariance.cov_main(str_cov, X_test, X_test, hyps, True) + cov_Xs_Xs = (cov_Xs_Xs + cov_Xs_Xs.T) / 2.0 + + num_X = X_train.shape[0] + new_Y_train = Y_train - prior_mu_train + nu = hyps['dof'] + beta = np.squeeze(np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train)) + + nu_Xs = nu + float(num_X) + mu_Xs = np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), new_Y_train) + prior_mu_test + Sigma_Xs = cov_Xs_Xs - np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), cov_X_Xs) + Sigma_Xs = (nu + beta - 2.0) / (nu + num_X - 2.0) * Sigma_Xs + + sigma_Xs = np.expand_dims(np.sqrt(np.maximum(np.diag(Sigma_Xs), 0.0)), axis=1) + + return nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs + +@utils_common.validate_types +def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, hyps: dict, + str_cov: str=constants.STR_GP_COV, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + debug: bool=False +) -> constants.TYPING_TUPLE_FLOAT_THREE_ARRAYS: + """ + This function returns degree of freedom, posterior mean, + posterior standard variance, and + posterior covariance functions over `X_test`, + computed by Student-$t$ process regression with + `X_train`, `Y_train`, and `hyps`. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param X_test: inputs. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray + :param hyps: dictionary of hyperparameters for Student-$t$ process. + :type hyps: dict. + :param str_cov: the name of covariance function. + :type str_cov: str., optional + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function, optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of degree of freedom, + posterior mean function over `X_test`, + posterior standrad variance function over `X_test`, and + posterior covariance matrix over `X_test`. + Shape: ((), (l, 1), (l, 1), (l, l)). 
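+        The kernel matrix and its inverse are computed from `hyps`, and the
+        prediction itself is delegated to predict_with_cov.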
+ :rtype: tuple of (float, numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(X_test, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(str_cov, str) + assert isinstance(debug, bool) + assert callable(prior_mu) or prior_mu is None + assert len(Y_train.shape) == 2 + utils_covariance.check_str_cov('predict_with_hyps', str_cov, X_train.shape, shape_X2=X_test.shape) + assert X_train.shape[0] == Y_train.shape[0] + assert X_train.shape[1] == X_test.shape[1] + + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, + hyps, str_cov, debug=debug) + nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, + cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, + prior_mu=prior_mu, debug=debug) + + return nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs + +@utils_common.validate_types +def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, + str_cov: str=constants.STR_GP_COV, + str_optimizer_method: str='L-BFGS-B', + prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + fix_noise: float=constants.FIX_GP_NOISE, + debug: bool=False +) -> constants.TYPING_TUPLE_FLOAT_THREE_ARRAYS: + """ + This function returns degree of freedom, posterior mean, + posterior standard variance, and + posterior covariance functions over `X_test`, + computed by the Student-$t$ process regression + optimized with `X_train` and `Y_train`. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param X_test: inputs. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray + :param str_cov: the name of covariance function. + :type str_cov: str., optional + :param str_optimizer_method: the name of optimization method. + :type str_optimizer_method: str., optional + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function, optional + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of degree of freedom, + posterior mean function over `X_test`, + posterior standrad variance function over `X_test`, and + posterior covariance matrix over `X_test`. + Shape: ((), (l, 1), (l, 1), (l, l)). 
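+        The hyperparameters are first optimized by get_optimized_kernel, and the
+        prediction itself is delegated to predict_with_cov.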
+ :rtype: tuple of (float, numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(X_test, np.ndarray) + assert isinstance(str_cov, str) + assert isinstance(str_optimizer_method, str) + assert isinstance(fix_noise, bool) + assert isinstance(debug, bool) + assert callable(prior_mu) or prior_mu is None + assert len(Y_train.shape) == 2 + utils_covariance.check_str_cov('predict_with_optimized_kernel', str_cov, + X_train.shape, shape_X2=X_test.shape) + assert X_train.shape[0] == Y_train.shape[0] + assert X_train.shape[1] == X_test.shape[1] + assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP + + time_start = time.time() + + cov_X_X, inv_cov_X_X, hyps = get_optimized_kernel(X_train, Y_train, + prior_mu, str_cov, str_optimizer_method=str_optimizer_method, + fix_noise=fix_noise, debug=debug) + nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, + cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, prior_mu=prior_mu, + debug=debug) + + time_end = time.time() + if debug: + logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + return nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index 72940dc..22ca239 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -28,7 +28,8 @@ def _get_list_first() -> list: @utils_common.validate_types def get_hyps(str_cov: str, dim: int, - use_ard: bool=True + use_gp: bool=True, + use_ard: bool=True, ) -> dict: """ It returns a dictionary of default hyperparameters for covariance @@ -39,6 +40,8 @@ def get_hyps(str_cov: str, dim: int, :type str_cov: str. :param dim: dimensionality of the problem we are solving. :type dim: int. + :param use_gp: flag for Gaussian process or Student-$t$ process. + :type use_gp: bool., optional :param use_ard: flag for automatic relevance determination. :type use_ard: bool., optional @@ -51,12 +54,16 @@ def get_hyps(str_cov: str, dim: int, assert isinstance(str_cov, str) assert isinstance(dim, int) + assert isinstance(use_gp, bool) assert isinstance(use_ard, bool) assert str_cov in constants.ALLOWED_GP_COV hyps = dict() hyps['noise'] = constants.GP_NOISE + if not use_gp: + hyps['dof'] = 5.0 + list_first = _get_list_first() if str_cov in list_first: @@ -72,6 +79,7 @@ def get_hyps(str_cov: str, dim: int, @utils_common.validate_types def get_range_hyps(str_cov: str, dim: int, + use_gp: bool=True, use_ard: bool=True, fix_noise: bool=False ) -> list: @@ -82,6 +90,8 @@ def get_range_hyps(str_cov: str, dim: int, :type str_cov: str. :param dim: dimensionality of the problem we are solving. :type dim: int. + :param use_gp: flag for Gaussian process or Student-$t$ process. + :type use_gp: bool., optional :param use_ard: flag for automatic relevance determination. :type use_ard: bool., optional :param fix_noise: flag for fixing a noise. 
@@ -96,6 +106,7 @@ def get_range_hyps(str_cov: str, dim: int, assert isinstance(str_cov, str) assert isinstance(dim, int) + assert isinstance(use_gp, bool) assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) assert str_cov in constants.ALLOWED_GP_COV @@ -107,6 +118,9 @@ def get_range_hyps(str_cov: str, dim: int, if not fix_noise: range_hyps += constants.RANGE_NOISE + if not use_gp: + range_hyps += [[2.00001, 200.0]] + if str_cov in list_first: range_hyps += constants.RANGE_SIGNAL # for signal scale if use_ard: # for lengthscales @@ -121,6 +135,7 @@ def get_range_hyps(str_cov: str, dim: int, @utils_common.validate_types def convert_hyps(str_cov: str, hyps: dict, + use_gp: bool=True, fix_noise: bool=False ) -> np.ndarray: """ @@ -130,6 +145,8 @@ def convert_hyps(str_cov: str, hyps: dict, :type str_cov: str. :param hyps: dictionary of hyperparameters for covariance function. :type hyps: dict. + :param use_gp: flag for Gaussian process or Student-$t$ process. + :type use_gp: bool., optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional @@ -142,6 +159,7 @@ def convert_hyps(str_cov: str, hyps: dict, assert isinstance(str_cov, str) assert isinstance(hyps, dict) + assert isinstance(use_gp, bool) assert isinstance(fix_noise, bool) assert str_cov in constants.ALLOWED_GP_COV @@ -149,6 +167,9 @@ def convert_hyps(str_cov: str, hyps: dict, if not fix_noise: list_hyps.append(hyps['noise']) + if not use_gp: + list_hyps.append(hyps['dof']) + list_first = _get_list_first() if str_cov in list_first: @@ -161,6 +182,7 @@ def convert_hyps(str_cov: str, hyps: dict, @utils_common.validate_types def restore_hyps(str_cov: str, hyps: np.ndarray, + use_gp: bool=True, fix_noise: bool=False, noise: float=constants.GP_NOISE ) -> dict: @@ -171,6 +193,8 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, :type str_cov: str. :param hyps: array of hyperparameters for covariance function. :type hyps: numpy.ndarray + :param use_gp: flag for Gaussian process or Student-$t$ process. + :type use_gp: bool., optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param noise: fixed noise value. @@ -185,6 +209,7 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, assert isinstance(str_cov, str) assert isinstance(hyps, np.ndarray) + assert isinstance(use_gp, bool) assert isinstance(fix_noise, bool) assert isinstance(noise, float) assert len(hyps.shape) == 1 @@ -198,12 +223,16 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, dict_hyps['noise'] = noise ind_start = 0 + if not use_gp: + dict_hyps['dof'] = hyps[ind_start] + ind_start += 1 + list_first = _get_list_first() if str_cov in list_first: dict_hyps['signal'] = hyps[ind_start] list_lengthscales = [] - for ind_elem in range(ind_start+1, len(hyps)): + for ind_elem in range(ind_start + 1, len(hyps)): list_lengthscales.append(hyps[ind_elem]) dict_hyps['lengthscales'] = np.array(list_lengthscales) else: @@ -211,7 +240,9 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, return dict_hyps @utils_common.validate_types -def validate_hyps_dict(hyps: dict, str_cov: str, dim: int) -> constants.TYPING_TUPLE_DICT_BOOL: +def validate_hyps_dict(hyps: dict, str_cov: str, dim: int, + use_gp: bool=True +) -> constants.TYPING_TUPLE_DICT_BOOL: """ It validates hyperparameters dictionary, `hyps`. @@ -221,6 +252,8 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int) -> constants.TYPING_T :type str_cov: str. :param dim: dimensionality of the problem we are solving. :type dim: int. 
+ :param use_gp: flag for Gaussian process or Student-$t$ process. + :type use_gp: bool., optional :returns: a tuple of valid hyperparameters and validity flag. :rtype: (dict., bool.) @@ -232,6 +265,7 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int) -> constants.TYPING_T assert isinstance(hyps, dict) assert isinstance(str_cov, str) assert isinstance(dim, int) + assert isinstance(use_gp, bool) assert str_cov in constants.ALLOWED_GP_COV is_valid = True @@ -245,6 +279,13 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int) -> constants.TYPING_T if np.abs(hyps['noise']) >= constants.BOUND_UPPER_GP_NOISE: hyps['noise'] = constants.BOUND_UPPER_GP_NOISE + if not use_gp: + if 'dof' not in hyps: + is_valid = False + else: + if hyps['dof'] <= 2.0: + hyps['dof'] = 2.00001 + if str_cov in ('eq', 'se', 'matern32', 'matern52'): if 'lengthscales' not in hyps: is_valid = False diff --git a/examples/99_notebooks/example_tp.ipynb b/examples/99_notebooks/example_tp.ipynb new file mode 100644 index 0000000..cc915c4 --- /dev/null +++ b/examples/99_notebooks/example_tp.ipynb @@ -0,0 +1,267 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "%matplotlib inline\n", + "\n", + "import numpy as np\n", + "\n", + "from bayeso import covariance\n", + "from bayeso.tp import tp\n", + "from bayeso.gp import gp\n", + "from bayeso.utils import utils_covariance\n", + "from bayeso.utils import utils_plotting" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "use_tex = False\n", + "num_test = 200\n", + "str_cov = 'matern52'" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = np.array([\n", + " [-3.0],\n", + " [-2.0],\n", + " [-1.0],\n", + " [1.1],\n", + " [1.2],\n", + "])\n", + "Y_train = np.cos(X_train) + 10.0 + np.random.randn(*X_train.shape)\n", + "X_test = np.linspace(-3, 3, num_test)\n", + "X_test = X_test.reshape((num_test, 1))\n", + "Y_test = np.cos(X_test) + 10.0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "mu = np.zeros(num_test)\n", + "hyps = utils_covariance.get_hyps(str_cov, 1, use_gp=False)\n", + "Sigma = covariance.cov_main(str_cov, X_test, X_test, hyps, True)\n", + "\n", + "Ys = tp.sample_functions(hyps['dof'], mu, Sigma, num_samples=200)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')\n", + "\n", + "hyps = utils_covariance.get_hyps(str_cov, 1)\n", + "Sigma = covariance.cov_main(str_cov, X_test, X_test, hyps, True)\n", + "\n", + "Ys = gp.sample_functions(mu, Sigma, num_samples=200)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "hyps = utils_covariance.get_hyps(str_cov, 1, use_gp=False)\n", + "nu, mu, sigma, Sigma = tp.predict_with_hyps(X_train, Y_train, X_test, hyps, str_cov=str_cov)\n", + "utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')\n", + "\n", + 
"hyps = utils_covariance.get_hyps(str_cov, 1)\n", + "mu, sigma, Sigma = gp.predict_with_hyps(X_train, Y_train, X_test, hyps, str_cov=str_cov)\n", + "utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = gp.sample_functions(mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": false + }, + "outputs": [], + "source": [ + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test, str_cov=str_cov, fix_noise=True,\n", + " debug=True)\n", + "utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')\n", + "\n", + "mu, sigma, Sigma = gp.predict_with_optimized_hyps(X_train, Y_train, X_test, str_cov=str_cov, fix_noise=True,\n", + " debug=True)\n", + "utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = gp.sample_functions(mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def cosine(X):\n", + " return np.cos(X)\n", + "\n", + "def linear_down(X):\n", + " list_up = []\n", + " for elem_X in X:\n", + " list_up.append([-0.5 * np.sum(elem_X)])\n", + " return np.array(list_up)\n", + "\n", + "def linear_up(X):\n", + " list_up = []\n", + " for elem_X in X:\n", + " list_up.append([0.5 * np.sum(elem_X)])\n", + " return np.array(list_up)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "X_train = np.array([\n", + " [-3.0],\n", + " [-2.0],\n", + " [-1.0],\n", + "])\n", + "Y_train = np.cos(X_train) + 2.0 + np.random.randn(*X_train.shape)\n", + "X_test = np.linspace(-3, 6, num_test)\n", + "X_test = X_test.reshape((num_test, 1))\n", + "Y_test = np.cos(X_test) + 2.0" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prior_mu = cosine\n", + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test,\n", + " str_cov=str_cov, prior_mu=prior_mu,\n", + " debug=True)\n", + "utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prior_mu = linear_down\n", + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test,\n", + " str_cov=str_cov, prior_mu=prior_mu,\n", + " debug=True)\n", + 
"utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "prior_mu = linear_up\n", + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test,\n", + " str_cov=str_cov, prior_mu=prior_mu,\n", + " debug=True)\n", + "utils_plotting.plot_gp_via_distribution(\n", + " X_train, Y_train, X_test, mu, sigma,\n", + " Y_test=Y_test, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$'\n", + ")\n", + "\n", + "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", + "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + " str_x_axis='$x$', str_y_axis='$y$')" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "kernelspec": { + "display_name": "Python 3", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.6.9" + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} From d4997167a09136fe64f6aa5df9d2fdc297f39a26 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Wed, 30 Dec 2020 18:43:11 +0900 Subject: [PATCH 08/37] Fix an error and add a condition for slogdet --- bayeso/constants.py | 1 + bayeso/gp/gp_scipy.py | 9 ++++++--- bayeso/tp/tp.py | 12 ++++++++---- tests/common/test_gp_scipy.py | 2 +- 4 files changed, 16 insertions(+), 8 deletions(-) diff --git a/bayeso/constants.py b/bayeso/constants.py index fcdc711..ea77cca 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -109,6 +109,7 @@ TYPING_UNION_INT_FLOAT = typing.Union[int, float] TYPING_UNION_FLOAT_NONE = typing.Union[float, TYPE_NONE] TYPING_UNION_FLOAT_TWO_FLOATS = typing.Union[float, typing.Tuple[float, float]] +TYPING_UNION_FLOAT_FA = typing.Union[float, TYPING_TUPLE_FLOAT_ARRAY] TYPING_UNION_ARRAY_NONE = typing.Union[np.ndarray, TYPE_NONE] TYPING_UNION_ARRAY_FLOAT = typing.Union[np.ndarray, float] TYPING_UNION_CALLABLE_NONE = typing.Union[callable, TYPE_NONE] diff --git a/bayeso/gp/gp_scipy.py b/bayeso/gp/gp_scipy.py index e5d131f..624a47f 100644 --- a/bayeso/gp/gp_scipy.py +++ b/bayeso/gp/gp_scipy.py @@ -8,7 +8,6 @@ import numpy as np import scipy.linalg import scipy.optimize -import typing from bayeso import covariance from bayeso import constants @@ -27,7 +26,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, use_cholesky: bool=True, use_gradient: bool=True, debug: bool=False -) -> typing.Union[float, constants.TYPING_TUPLE_FLOAT_ARRAY]: +) -> constants.TYPING_UNION_FLOAT_FA: """ This function computes a negative log marginal likelihood. @@ -101,7 +100,11 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, debug=debug) first_term = -0.5 * np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train) - second_term = -0.5 * np.log(np.linalg.det(cov_X_X) + constants.JITTER_LOG) + sign_second_term, second_term = np.linalg.slogdet(cov_X_X) + # TODO: let me think. 
+ if sign_second_term <= 0: + second_term = 0.0 + second_term = -0.5 * second_term third_term = -float(X_train.shape[0]) / 2.0 * np.log(2.0 * np.pi) log_ml_ = np.squeeze(first_term + second_term + third_term) diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index 909856d..7ad5ad5 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -10,7 +10,6 @@ import scipy.linalg import scipy.optimize import scipy.special -import typing from bayeso import covariance from bayeso import constants @@ -28,7 +27,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, fix_noise: bool=constants.FIX_GP_NOISE, use_gradient: bool=True, debug: bool=False -) -> typing.Union[float, constants.TYPING_TUPLE_FLOAT_ARRAY]: +) -> constants.TYPING_UNION_FLOAT_FA: """ This function computes a negative log marginal likelihood. @@ -85,7 +84,12 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, beta = np.squeeze(np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train)) first_term = -0.5 * num_X * np.log((nu - 2.0) * np.pi) - second_term = -0.5 * np.log(np.linalg.det(cov_X_X) + constants.JITTER_LOG) + sign_second_term, second_term = np.linalg.slogdet(cov_X_X) + # TODO: let me think. + if sign_second_term <= 0: + second_term = 0.0 + second_term = -0.5 * second_term + third_term = np.log(scipy.special.gamma((nu + num_X) / 2.0) / scipy.special.gamma(nu / 2.0)) fourth_term = -0.5 * (nu + num_X) * np.log(1.0 + beta / (nu - 2.0)) @@ -397,7 +401,7 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, debug=debug) - nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, + nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, prior_mu=prior_mu, debug=debug) diff --git a/tests/common/test_gp_scipy.py b/tests/common/test_gp_scipy.py index 45e27bd..4453e3e 100644 --- a/tests/common/test_gp_scipy.py +++ b/tests/common/test_gp_scipy.py @@ -27,7 +27,7 @@ def test_neg_log_ml_typing(): assert annos['use_cholesky'] == bool assert annos['use_gradient'] == bool assert annos['debug'] == bool - assert annos['return'] == typing.Union[float, typing.Tuple[float, float]] + assert annos['return'] == typing.Union[float, typing.Tuple[float, np.ndarray]] def test_neg_log_ml(): dim_X = 3 From d3bd9407f990fd0b426d244fd24aef32348c319a Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Thu, 31 Dec 2020 14:46:04 +0900 Subject: [PATCH 09/37] Add tests for student-t process and add SLSQP optimizer --- bayeso/constants.py | 6 +- bayeso/gp/gp_scipy.py | 14 +- bayeso/tp/tp.py | 9 +- bayeso/utils/utils_covariance.py | 2 +- examples/99_notebooks/example_tp.ipynb | 36 +-- tests/common/test_gp.py | 10 +- tests/common/test_gp_scipy.py | 8 + tests/common/test_tp.py | 367 +++++++++++++++++++++++++ 8 files changed, 419 insertions(+), 33 deletions(-) create mode 100644 tests/common/test_tp.py diff --git a/bayeso/constants.py b/bayeso/constants.py index ea77cca..595363f 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -14,6 +14,7 @@ JITTER_LOG = 1e-7 STR_OPTIMIZER_METHOD_GP = 'BFGS' +STR_OPTIMIZER_METHOD_TP = 'SLSQP' STR_GP_COV = 'matern52' STR_BO_ACQ = 'ei' STR_INITIALIZING_METHOD_BO = 'sobol' @@ -37,12 +38,13 @@ RANGE_SIGNAL = [[1e-2, 1e3]] RANGE_LENGTHSCALES = [[1e-2, 1e3]] RANGE_NOISE = [[1e-3, 1e1]] +RANGE_DOF = [[2.00001, 200.0]] TIME_PAUSE = 2.0 RANGE_SHADE = 1.96 -ALLOWED_OPTIMIZER_METHOD_GP = ['BFGS', 
'L-BFGS-B', 'Nelder-Mead', 'DIRECT'] -ALLOWED_OPTIMIZER_METHOD_TP = ['L-BFGS-B'] +ALLOWED_OPTIMIZER_METHOD_GP = ['BFGS', 'L-BFGS-B', 'Nelder-Mead', 'DIRECT', 'SLSQP', 'SLSQP-Bounded'] +ALLOWED_OPTIMIZER_METHOD_TP = ['L-BFGS-B', 'SLSQP'] ALLOWED_OPTIMIZER_METHOD_BO = ['L-BFGS-B', 'DIRECT', 'CMA-ES'] # INFO: Do not use _ (underscore) in base str_cov. ALLOWED_GP_COV_BASE = ['eq', 'se', 'matern32', 'matern52'] diff --git a/bayeso/gp/gp_scipy.py b/bayeso/gp/gp_scipy.py index 624a47f..56d5ef1 100644 --- a/bayeso/gp/gp_scipy.py +++ b/bayeso/gp/gp_scipy.py @@ -102,7 +102,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, first_term = -0.5 * np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train) sign_second_term, second_term = np.linalg.slogdet(cov_X_X) # TODO: let me think. - if sign_second_term <= 0: + if sign_second_term <= 0: # pragma: no cover second_term = 0.0 second_term = -0.5 * second_term @@ -272,25 +272,31 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, fix_noise=fix_noise, ) - if str_optimizer_method == 'BFGS': + if str_optimizer_method in ['BFGS', 'SLSQP']: result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, jac=use_gradient, options={'disp': False}) + if debug: logger.debug('scipy message: %s', result_optimized.message) result_optimized = result_optimized.x - elif str_optimizer_method == 'L-BFGS-B': + elif str_optimizer_method in ['L-BFGS-B', 'SLSQP-Bounded']: + if str_optimizer_method == 'SLSQP-Bounded': + str_optimizer_method = 'SLSQP' + bounds = utils_covariance.get_range_hyps(str_cov, num_dim, fix_noise=fix_noise) result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, bounds=bounds, jac=use_gradient, options={'disp': False}) + if debug: logger.debug('scipy message: %s', result_optimized.message) result_optimized = result_optimized.x - elif str_optimizer_method == 'Nelder-Mead': + elif str_optimizer_method in ['Nelder-Mead']: result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, options={'disp': False}) + if debug: logger.debug('scipy message: %s', result_optimized.message) diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index 7ad5ad5..4cd0cd5 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -86,7 +86,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, first_term = -0.5 * num_X * np.log((nu - 2.0) * np.pi) sign_second_term, second_term = np.linalg.slogdet(cov_X_X) # TODO: let me think. 
- if sign_second_term <= 0: + if sign_second_term <= 0: # pragma: no cover second_term = 0.0 second_term = -0.5 * second_term @@ -171,7 +171,7 @@ def sample_functions(nu: float, mu: np.ndarray, Sigma: np.ndarray, @utils_common.validate_types def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - str_optimizer_method: str='L-BFGS-B', + str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_TP, fix_noise: bool=constants.FIX_GP_NOISE, debug: bool=False ) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: @@ -240,12 +240,13 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, use_gp=False ) - if str_optimizer_method == 'L-BFGS-B': + if str_optimizer_method in ['L-BFGS-B', 'SLSQP']: bounds = utils_covariance.get_range_hyps(str_cov, num_dim, fix_noise=fix_noise, use_gp=False) result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, bounds=bounds, jac=use_gradient, options={'disp': False}) + if debug: logger.debug('scipy message: %s', result_optimized.message) @@ -410,7 +411,7 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr @utils_common.validate_types def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, str_cov: str=constants.STR_GP_COV, - str_optimizer_method: str='L-BFGS-B', + str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_TP, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, fix_noise: float=constants.FIX_GP_NOISE, debug: bool=False diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index 22ca239..24df4cc 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -119,7 +119,7 @@ def get_range_hyps(str_cov: str, dim: int, range_hyps += constants.RANGE_NOISE if not use_gp: - range_hyps += [[2.00001, 200.0]] + range_hyps += constants.RANGE_DOF if str_cov in list_first: range_hyps += constants.RANGE_SIGNAL # for signal scale diff --git a/examples/99_notebooks/example_tp.ipynb b/examples/99_notebooks/example_tp.ipynb index cc915c4..9f7647b 100644 --- a/examples/99_notebooks/example_tp.ipynb +++ b/examples/99_notebooks/example_tp.ipynb @@ -40,8 +40,10 @@ " [-1.0],\n", " [1.1],\n", " [1.2],\n", + " [2.5],\n", "])\n", "Y_train = np.cos(X_train) + 10.0 + np.random.randn(*X_train.shape)\n", + "\n", "X_test = np.linspace(-3, 3, num_test)\n", "X_test = X_test.reshape((num_test, 1))\n", "Y_test = np.cos(X_test) + 10.0" @@ -161,15 +163,15 @@ "metadata": {}, "outputs": [], "source": [ - "X_train = np.array([\n", + "X_train_ = np.array([\n", " [-3.0],\n", " [-2.0],\n", " [-1.0],\n", "])\n", - "Y_train = np.cos(X_train) + 2.0 + np.random.randn(*X_train.shape)\n", - "X_test = np.linspace(-3, 6, num_test)\n", - "X_test = X_test.reshape((num_test, 1))\n", - "Y_test = np.cos(X_test) + 2.0" + "Y_train_ = np.cos(X_train_) + 2.0 + np.random.randn(*X_train_.shape)\n", + "X_test_ = np.linspace(-3, 6, num_test)\n", + "X_test_ = X_test_.reshape((num_test, 1))\n", + "Y_test_ = np.cos(X_test_) + 2.0" ] }, { @@ -179,17 +181,17 @@ "outputs": [], "source": [ "prior_mu = cosine\n", - "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test,\n", + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train_, Y_train_, X_test_,\n", " str_cov=str_cov, prior_mu=prior_mu,\n", " debug=True)\n", "utils_plotting.plot_gp_via_distribution(\n", - " X_train, Y_train, X_test, mu, sigma,\n", - " Y_test=Y_test, use_tex=use_tex,\n", + " 
X_train_, Y_train_, X_test_, mu, sigma,\n", + " Y_test=Y_test_, use_tex=use_tex,\n", " str_x_axis='$x$', str_y_axis='$y$'\n", ")\n", "\n", "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", - "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + "utils_plotting.plot_gp_via_sample(X_test_, Ys, use_tex=use_tex,\n", " str_x_axis='$x$', str_y_axis='$y$')" ] }, @@ -200,17 +202,17 @@ "outputs": [], "source": [ "prior_mu = linear_down\n", - "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test,\n", + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train_, Y_train_, X_test_,\n", " str_cov=str_cov, prior_mu=prior_mu,\n", " debug=True)\n", "utils_plotting.plot_gp_via_distribution(\n", - " X_train, Y_train, X_test, mu, sigma,\n", - " Y_test=Y_test, use_tex=use_tex,\n", + " X_train_, Y_train_, X_test_, mu, sigma,\n", + " Y_test=Y_test_, use_tex=use_tex,\n", " str_x_axis='$x$', str_y_axis='$y$'\n", ")\n", "\n", "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", - "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + "utils_plotting.plot_gp_via_sample(X_test_, Ys, use_tex=use_tex,\n", " str_x_axis='$x$', str_y_axis='$y$')" ] }, @@ -221,17 +223,17 @@ "outputs": [], "source": [ "prior_mu = linear_up\n", - "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train, Y_train, X_test,\n", + "nu, mu, sigma, Sigma = tp.predict_with_optimized_hyps(X_train_, Y_train_, X_test_,\n", " str_cov=str_cov, prior_mu=prior_mu,\n", " debug=True)\n", "utils_plotting.plot_gp_via_distribution(\n", - " X_train, Y_train, X_test, mu, sigma,\n", - " Y_test=Y_test, use_tex=use_tex,\n", + " X_train_, Y_train_, X_test_, mu, sigma,\n", + " Y_test=Y_test_, use_tex=use_tex,\n", " str_x_axis='$x$', str_y_axis='$y$'\n", ")\n", "\n", "Ys = tp.sample_functions(nu, mu.flatten(), Sigma, num_samples=5)\n", - "utils_plotting.plot_gp_via_sample(X_test, Ys, use_tex=use_tex,\n", + "utils_plotting.plot_gp_via_sample(X_test_, Ys, use_tex=use_tex,\n", " str_x_axis='$x$', str_y_axis='$y$')" ] }, diff --git a/tests/common/test_gp.py b/tests/common/test_gp.py index 4e9417b..0921373 100644 --- a/tests/common/test_gp.py +++ b/tests/common/test_gp.py @@ -181,7 +181,7 @@ def test_predict_with_cov(): X_test = np.random.randn(num_X_test, dim_X) prior_mu = None cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') - + with pytest.raises(AssertionError) as error: package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu='abc') with pytest.raises(AssertionError) as error: @@ -237,7 +237,7 @@ def test_predict_with_hyps(): X_test = np.random.randn(num_X_test, dim_X) prior_mu = None cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') - + with pytest.raises(AssertionError) as error: package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu='abc') with pytest.raises(AssertionError) as error: @@ -256,7 +256,7 @@ def test_predict_with_hyps(): package_target.predict_with_hyps(np.random.randn(10, dim_X), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) with pytest.raises(AssertionError) as error: package_target.predict_with_hyps(X, np.random.randn(10, 1), X_test, hyps, str_cov='se', prior_mu=prior_mu) - + mu_test, sigma_test, Sigma_test = package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) print(mu_test) print(sigma_test) @@ -284,7 +284,7 @@ def test_predict_with_optimized_hyps(): Y = 
np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - + with pytest.raises(AssertionError) as error: package_target.predict_with_optimized_hyps(X, Y, X_test, str_cov='se', prior_mu='abc') with pytest.raises(AssertionError) as error: @@ -309,7 +309,7 @@ def test_predict_with_optimized_hyps(): with pytest.raises(AssertionError) as error: package_target.predict_with_optimized_hyps(X, Y, X_test, debug=1) - + mu_test, sigma_test, Sigma_test = package_target.predict_with_optimized_hyps(X, Y, X_test, debug=True) print(mu_test) print(sigma_test) diff --git a/tests/common/test_gp_scipy.py b/tests/common/test_gp_scipy.py index 4453e3e..8e5107d 100644 --- a/tests/common/test_gp_scipy.py +++ b/tests/common/test_gp_scipy.py @@ -188,6 +188,8 @@ def test_get_optimized_kernel(): package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') with pytest.raises(AssertionError) as error: package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='abc') with pytest.raises(AssertionError) as error: package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) @@ -212,11 +214,17 @@ def test_get_optimized_kernel(): print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead', debug=True) print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP', debug=True) + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP-Bounded', debug=True) + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv', debug=True) print(hyps) diff --git a/tests/common/test_tp.py b/tests/common/test_tp.py new file mode 100644 index 0000000..a7f6134 --- /dev/null +++ b/tests/common/test_tp.py @@ -0,0 +1,367 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: December 31, 2020 +# +"""test_tp""" + +import typing +import pytest +import numpy as np + +from bayeso import constants +from bayeso.tp import tp as package_target +from bayeso.utils import utils_covariance + + +TEST_EPSILON = 1e-7 + +def test_neg_log_ml_typing(): + annos = package_target.neg_log_ml.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['hyps'] == np.ndarray + assert annos['str_cov'] == str + assert annos['prior_mu_train'] == np.ndarray + assert annos['fix_noise'] == bool + assert annos['use_gradient'] == bool + assert annos['debug'] == bool + assert annos['return'] == typing.Union[float, typing.Tuple[float, np.ndarray]] + +def test_neg_log_ml(): + dim_X = 3 + str_cov = 'se' + X = np.reshape(np.arange(0, 9), (3, dim_X)) + Y = np.expand_dims(np.arange(3, 10, 3), axis=1) + fix_noise = False + use_gp = False + + dict_hyps = utils_covariance.get_hyps(str_cov, dim_X, use_gp=use_gp) + arr_hyps = 
utils_covariance.convert_hyps(str_cov, dict_hyps, fix_noise=fix_noise, use_gp=use_gp) + prior_mu_X = np.zeros((3, 1)) + + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, dict_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, 1, prior_mu_X) + with pytest.raises(ValueError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, 'abc', prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.arange(0, 3)) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) + + neg_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False) + print(neg_log_ml_) + truth_log_ml_ = 5.634155417555853 + assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON + + neg_log_ml_, neg_grad_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True) + print(neg_log_ml_) + print(neg_grad_log_ml_) + + truth_log_ml_ = 5.634155417555853 + truth_grad_log_ml_ = np.array([ + -1.60446383e-02, + 1.75087448e-01, + -1.60448396e+00, + -5.50871167e-05, + -5.50871167e-05, + -5.50871167e-05, + ]) + assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON + assert np.all(np.abs(neg_grad_log_ml_ - truth_grad_log_ml_) < TEST_EPSILON) + +def test_sample_functions_typing(): + annos = package_target.sample_functions.__annotations__ + + assert annos['nu'] == float + assert annos['mu'] == np.ndarray + assert annos['Sigma'] == np.ndarray + assert annos['num_samples'] == int + assert annos['return'] == np.ndarray + +def test_sample_functions(): + num_points = 10 + nu = 4.0 + mu = np.zeros(num_points) + Sigma = np.eye(num_points) + num_samples = 20 + + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, mu, 'abc') + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, 'abc', Sigma) + with pytest.raises(AssertionError) as error: + package_target.sample_functions('abc', mu, Sigma) + with pytest.raises(AssertionError) as error: + package_target.sample_functions(4, mu, Sigma) + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, mu, np.eye(20)) + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, mu, np.ones(num_points)) + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, np.zeros(20), Sigma) + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, np.eye(10), Sigma) + with pytest.raises(AssertionError) as error: + 
package_target.sample_functions(nu, mu, Sigma, num_samples='abc') + with pytest.raises(AssertionError) as error: + package_target.sample_functions(nu, mu, Sigma, num_samples=1.2) + + + functions = package_target.sample_functions(nu, mu, Sigma, num_samples=num_samples) + assert functions.shape[1] == num_points + assert functions.shape[0] == num_samples + +def test_get_optimized_kernel_typing(): + annos = package_target.get_optimized_kernel.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['str_cov'] == str + assert annos['str_optimizer_method'] == str + assert annos['fix_noise'] == bool + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] + +def test_get_optimized_kernel(): + np.random.seed(42) + dim_X = 3 + num_X = 10 + num_instances = 5 + X = np.random.randn(num_X, dim_X) + X_set = np.random.randn(num_X, num_instances, dim_X) + Y = np.random.randn(num_X, 1) + prior_mu = None + + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, 1, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + with pytest.raises(ValueError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + + # INFO: tests for set inputs + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP') + print(hyps) + +# cov_X_X, inv_cov_X_X, hyps = 
package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') +# print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') + print(hyps) + +def test_predict_with_cov_typing(): + annos = package_target.predict_with_cov.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['X_test'] == np.ndarray + assert annos['cov_X_X'] == np.ndarray + assert annos['inv_cov_X_X'] == np.ndarray + assert annos['hyps'] == dict + assert annos['str_cov'] == str + assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] + +def test_predict_with_cov(): + np.random.seed(42) + dim_X = 2 + num_X = 5 + num_X_test = 20 + X = np.random.randn(num_X, dim_X) + Y = np.random.randn(num_X, 1) + X_test = np.random.randn(num_X_test, dim_X) + prior_mu = None + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu='abc') + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov=1, prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, 1, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, cov_X_X, 1, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, 1, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, 1, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, 1, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(1, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(np.random.randn(num_X, 1), Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(np.random.randn(10, dim_X), Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, np.random.randn(10, 1), X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, np.random.randn(3, 3), inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, np.random.randn(10), inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, cov_X_X, np.random.randn(10), hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_cov(X, Y, X_test, np.random.randn(10), np.random.randn(10), hyps, str_cov='se', prior_mu=prior_mu) + + nu_test, mu_test, 
sigma_test, Sigma_test = package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu=prior_mu) + print(nu_test) + print(mu_test) + print(sigma_test) + print(Sigma_test) + +def test_predict_with_hyps_typing(): + annos = package_target.predict_with_hyps.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['X_test'] == np.ndarray + assert annos['hyps'] == dict + assert annos['str_cov'] == str + assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] + +def test_predict_with_hyps(): + np.random.seed(42) + dim_X = 2 + num_X = 5 + num_X_test = 20 + X = np.random.randn(num_X, dim_X) + Y = np.random.randn(num_X, 1) + X_test = np.random.randn(num_X_test, dim_X) + prior_mu = None + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu='abc') + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov=1, prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(X, Y, X_test, 1, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(X, Y, 1, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(X, 1, X_test, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(1, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(np.random.randn(num_X, 1), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(np.random.randn(10, dim_X), Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_hyps(X, np.random.randn(10, 1), X_test, hyps, str_cov='se', prior_mu=prior_mu) + + nu_test, mu_test, sigma_test, Sigma_test = package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu=prior_mu) + print(nu_test) + print(mu_test) + print(sigma_test) + print(Sigma_test) + +def test_predict_with_optimized_hyps_typing(): + annos = package_target.predict_with_optimized_hyps.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['X_test'] == np.ndarray + assert annos['str_cov'] == str + assert annos['str_optimizer_method'] == str + assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['fix_noise'] == float + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] + +def test_predict_with_optimized_hyps(): + np.random.seed(42) + dim_X = 2 + num_X = 5 + num_X_test = 20 + X = np.random.randn(num_X, dim_X) + Y = np.random.randn(num_X, 1) + X_test = np.random.randn(num_X_test, dim_X) + prior_mu = None + + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, Y, X_test, str_cov='se', prior_mu='abc') + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, Y, X_test, str_cov=1, prior_mu=prior_mu) + with pytest.raises(AssertionError) 
as error: + package_target.predict_with_optimized_hyps(X, Y, 1, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, 1, X_test, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(1, Y, X_test, str_cov='se', prior_mu=prior_mu) + + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(np.random.randn(num_X, 1), Y, X_test, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(np.random.randn(10, dim_X), Y, X_test, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, np.random.randn(10, 1), X_test, str_cov='se', prior_mu=prior_mu) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, Y, X_test, str_optimizer_method=1) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, Y, X_test, fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.predict_with_optimized_hyps(X, Y, X_test, debug=1) + + nu_test, mu_test, sigma_test, Sigma_test = package_target.predict_with_optimized_hyps(X, Y, X_test, debug=True) + print(nu_test) + print(mu_test) + print(sigma_test) + print(Sigma_test) From 48492341e91e5bc642338456d05b5ec4f89fa578 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Thu, 31 Dec 2020 15:26:02 +0900 Subject: [PATCH 10/37] Improve coverage --- bayeso/utils/utils_covariance.py | 10 ++- tests/common/test_tp.py | 4 ++ tests/common/test_utils_covariance.py | 93 +++++++++++++++++++++++++++ 3 files changed, 105 insertions(+), 2 deletions(-) diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index 24df4cc..b1f00bc 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -283,7 +283,9 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int, if 'dof' not in hyps: is_valid = False else: - if hyps['dof'] <= 2.0: + if not isinstance(hyps['dof'], float): + is_valid = False + if isinstance(hyps['dof'], float) and hyps['dof'] <= 2.0: hyps['dof'] = 2.00001 if str_cov in ('eq', 'se', 'matern32', 'matern52'): @@ -307,7 +309,8 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int, return hyps, is_valid @utils_common.validate_types -def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int +def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int, + use_gp: bool=True ) -> constants.TYPING_TUPLE_ARRAY_BOOL: """ It validates hyperparameters array, `hyps`. @@ -318,6 +321,8 @@ def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int :type str_cov: str. :param dim: dimensionality of the problem we are solving. :type dim: int. + :param use_gp: flag for Gaussian process or Student-$t$ process. + :type use_gp: bool., optional :returns: a tuple of valid hyperparameters and validity flag. :rtype: (numpy.ndarray, bool.) 
@@ -329,6 +334,7 @@ def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int assert isinstance(hyps, np.ndarray) assert isinstance(str_cov, str) assert isinstance(dim, int) + assert isinstance(use_gp, bool) assert str_cov in constants.ALLOWED_GP_COV # is_valid = True diff --git a/tests/common/test_tp.py b/tests/common/test_tp.py index a7f6134..fba5095 100644 --- a/tests/common/test_tp.py +++ b/tests/common/test_tp.py @@ -126,6 +126,10 @@ def test_sample_functions(): assert functions.shape[1] == num_points assert functions.shape[0] == num_samples + functions = package_target.sample_functions(np.inf, mu, Sigma, num_samples=num_samples) + assert functions.shape[1] == num_points + assert functions.shape[0] == num_samples + def test_get_optimized_kernel_typing(): annos = package_target.get_optimized_kernel.__annotations__ diff --git a/tests/common/test_utils_covariance.py b/tests/common/test_utils_covariance.py index 6a2c633..401ab71 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -23,6 +23,7 @@ def test_get_hyps_typing(): assert annos['str_cov'] == str assert annos['dim'] == int assert annos['use_ard'] == bool + assert annos['use_gp'] == bool assert annos['return'] == dict def test_get_hyps(): @@ -36,6 +37,8 @@ def test_get_hyps(): package_target.get_hyps('abc', 2) with pytest.raises(AssertionError) as error: package_target.get_hyps('se', 2, use_ard='abc') + with pytest.raises(AssertionError) as error: + package_target.get_hyps('se', 2, use_gp='abc') cur_hyps = package_target.get_hyps('se', 2) assert cur_hyps['noise'] == constants.GP_NOISE @@ -58,12 +61,25 @@ def test_get_hyps(): assert cur_hyps['signal'] == 1.0 assert cur_hyps['lengthscales'] == 1.0 + cur_hyps = package_target.get_hyps('matern32', 2, use_ard=False, use_gp=False) + assert cur_hyps['noise'] == constants.GP_NOISE + assert cur_hyps['signal'] == 1.0 + assert cur_hyps['lengthscales'] == 1.0 + assert cur_hyps['dof'] == 5.0 + + cur_hyps = package_target.get_hyps('matern32', 2, use_ard=True, use_gp=False) + assert cur_hyps['noise'] == constants.GP_NOISE + assert cur_hyps['signal'] == 1.0 + assert np.all(cur_hyps['lengthscales'] == np.array([1.0, 1.0])) + assert cur_hyps['dof'] == 5.0 + def test_get_range_hyps_typing(): annos = package_target.get_range_hyps.__annotations__ assert annos['str_cov'] == str assert annos['dim'] == int assert annos['use_ard'] == bool + assert annos['use_gp'] == bool assert annos['fix_noise'] == bool assert annos['return'] == list @@ -78,6 +94,10 @@ def test_get_range_hyps(): package_target.get_range_hyps('se', 2, use_ard='abc') with pytest.raises(AssertionError) as error: package_target.get_range_hyps('se', 2, use_ard=1) + with pytest.raises(AssertionError) as error: + package_target.get_range_hyps('se', 2, use_gp='abc') + with pytest.raises(AssertionError) as error: + package_target.get_range_hyps('se', 2, use_gp=1) with pytest.raises(AssertionError) as error: package_target.get_range_hyps('se', 2, fix_noise=1) with pytest.raises(AssertionError) as error: @@ -89,12 +109,19 @@ def test_get_range_hyps(): assert isinstance(cur_range, list) assert cur_range == [[0.001, 10.0], [0.01, 1000.0], [0.01, 1000.0]] + cur_range = package_target.get_range_hyps('se', 2, use_ard=False, fix_noise=False, use_gp=False) + print(type(cur_range)) + print(cur_range) + assert isinstance(cur_range, list) + assert cur_range == [[0.001, 10.0], [2.00001, 200.0], [0.01, 1000.0], [0.01, 1000.0]] + def test_convert_hyps_typing(): annos = package_target.convert_hyps.__annotations__ 
assert annos['str_cov'] == str assert annos['hyps'] == dict assert annos['fix_noise'] == bool + assert annos['use_gp'] == bool assert annos['return'] == np.ndarray def test_convert_hyps(): @@ -112,6 +139,10 @@ def test_convert_hyps(): package_target.convert_hyps('abc', cur_hyps) with pytest.raises(AssertionError) as error: package_target.convert_hyps('se', dict(), fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.convert_hyps('se', cur_hyps, use_gp=1) + with pytest.raises(AssertionError) as error: + package_target.convert_hyps('se', cur_hyps, use_gp='abc') converted_hyps = package_target.convert_hyps('se', cur_hyps, fix_noise=False) assert len(converted_hyps.shape) == 1 @@ -126,11 +157,29 @@ def test_convert_hyps(): assert converted_hyps[0] == cur_hyps['signal'] assert (converted_hyps[1:] == cur_hyps['lengthscales']).all() + cur_hyps = {'noise': 0.1, 'signal': 1.0, 'lengthscales': np.array([2.0, 2.0]), 'dof': 100.0} + converted_hyps = package_target.convert_hyps('se', cur_hyps, fix_noise=False, use_gp=False) + + assert len(converted_hyps.shape) == 1 + assert converted_hyps.shape[0] == 5 + assert converted_hyps[0] == cur_hyps['noise'] + assert converted_hyps[1] == cur_hyps['dof'] + assert converted_hyps[2] == cur_hyps['signal'] + assert (converted_hyps[3:] == cur_hyps['lengthscales']).all() + + converted_hyps = package_target.convert_hyps('se', cur_hyps, fix_noise=True, use_gp=False) + assert len(converted_hyps.shape) == 1 + assert converted_hyps.shape[0] == 4 + assert converted_hyps[0] == cur_hyps['dof'] + assert converted_hyps[1] == cur_hyps['signal'] + assert (converted_hyps[2:] == cur_hyps['lengthscales']).all() + def test_restore_hyps_typing(): annos = package_target.restore_hyps.__annotations__ assert annos['str_cov'] == str assert annos['hyps'] == np.ndarray + assert annos['use_gp'] == bool assert annos['fix_noise'] == bool assert annos['noise'] == float assert annos['return'] == dict @@ -150,6 +199,10 @@ def test_restore_hyps(): package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), fix_noise=1) with pytest.raises(AssertionError) as error: package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), noise='abc') + with pytest.raises(AssertionError) as error: + package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), use_gp=1) + with pytest.raises(AssertionError) as error: + package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), use_gp='abc') cur_hyps = np.array([0.1, 1.0, 1.0, 1.0, 1.0]) restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=False) @@ -162,12 +215,27 @@ def test_restore_hyps(): assert restored_hyps['signal'] == cur_hyps[0] assert (restored_hyps['lengthscales'] == cur_hyps[1:]).all() + cur_hyps = np.array([0.1, 100.0, 20.0, 1.0, 1.0, 1.0]) + restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=False, use_gp=False) + assert restored_hyps['noise'] == cur_hyps[0] + assert restored_hyps['dof'] == cur_hyps[1] + assert restored_hyps['signal'] == cur_hyps[2] + assert (restored_hyps['lengthscales'] == cur_hyps[3:]).all() + + cur_hyps = np.array([100.0, 20.0, 1.0, 1.0, 1.0]) + restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=True, use_gp=False) + assert restored_hyps['noise'] == constants.GP_NOISE + assert restored_hyps['dof'] == cur_hyps[0] + assert restored_hyps['signal'] == cur_hyps[1] + assert (restored_hyps['lengthscales'] == cur_hyps[2:]).all() + def test_validate_hyps_dict_typing(): annos = package_target.validate_hyps_dict.__annotations__ assert annos['hyps'] == dict assert 
annos['str_cov'] == str assert annos['dim'] == int + assert annos['use_gp'] == bool assert annos['return'] == typing.Tuple[dict, bool] def test_validate_hyps_dict(): @@ -181,6 +249,10 @@ def test_validate_hyps_dict(): _, is_valid = package_target.validate_hyps_dict(cur_hyps, 'abc', num_dim) with pytest.raises(AssertionError) as error: _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, 'abc') + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=1) + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp='abc') cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('noise') @@ -228,12 +300,31 @@ def test_validate_hyps_dict(): _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert is_valid == True + cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) + cur_hyps['signal'] = 'abc' + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) + assert is_valid == True + + cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) + cur_hyps['dof'] = 'abc' + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) + assert is_valid == True + + cur_hyps = package_target.get_hyps(str_cov, num_dim, use_ard=False, use_gp=False) + cur_hyps['lengthscales'] = 'abc' + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) + assert is_valid == True + def test_validate_hyps_arr_typing(): annos = package_target.validate_hyps_arr.__annotations__ assert annos['hyps'] == np.ndarray assert annos['str_cov'] == str assert annos['dim'] == int + assert annos['use_gp'] == bool assert annos['return'] == typing.Tuple[np.ndarray, bool] def test_validate_hyps_arr(): @@ -248,6 +339,8 @@ def test_validate_hyps_arr(): _, is_valid = package_target.validate_hyps_arr(cur_hyps, 'abc', num_dim) with pytest.raises(AssertionError) as error: _, is_valid = package_target.validate_hyps_arr(cur_hyps, str_cov, 'abc') + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_arr(cur_hyps, str_cov, num_dim, use_gp='abc') def test_check_str_cov_typing(): annos = package_target.check_str_cov.__annotations__ From c3acee02e83df33183c20a3b4bf9cbfcac519971 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Thu, 31 Dec 2020 15:37:47 +0900 Subject: [PATCH 11/37] Improve coverage --- tests/common/test_utils_covariance.py | 14 ++++++++++++++ 1 file changed, 14 insertions(+) diff --git a/tests/common/test_utils_covariance.py b/tests/common/test_utils_covariance.py index 401ab71..f05330b 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -312,6 +312,20 @@ def test_validate_hyps_dict(): _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) assert is_valid == True + cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) + cur_hyps.pop('dof') + with pytest.raises(AssertionError) as error: + _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) + assert is_valid == True + + cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) + cur_hyps['dof'] = 1.5 + with pytest.raises(AssertionError) as error: + _, is_valid = 
package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) + if cur_hyps['dof'] == 2.00001: + assert False + assert cur_hyps['dof'] == 1.5 + cur_hyps = package_target.get_hyps(str_cov, num_dim, use_ard=False, use_gp=False) cur_hyps['lengthscales'] = 'abc' with pytest.raises(AssertionError) as error: From 7d25e9b5d0d122b43ea1434ff0e36b64fb6fc50b Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Thu, 31 Dec 2020 15:46:12 +0900 Subject: [PATCH 12/37] Improve coverage --- tests/common/test_utils_covariance.py | 1 + 1 file changed, 1 insertion(+) diff --git a/tests/common/test_utils_covariance.py b/tests/common/test_utils_covariance.py index f05330b..1c78083 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -324,6 +324,7 @@ def test_validate_hyps_dict(): _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) if cur_hyps['dof'] == 2.00001: assert False + with pytest.raises(AssertionError) as error: assert cur_hyps['dof'] == 1.5 cur_hyps = package_target.get_hyps(str_cov, num_dim, use_ard=False, use_gp=False) From 72b7ea58b7c22c4f65101d9bf2e68d855c737389 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Sat, 2 Jan 2021 00:47:02 +0900 Subject: [PATCH 13/37] Change 2020 to 2021 --- LICENSE | 2 +- docs/conf.py | 2 +- setup.py | 8 +++++++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/LICENSE b/LICENSE index 128f8e4..da6e1d4 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2017-2020 Jungtaek Kim +Copyright (c) 2017-2021 Jungtaek Kim Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/docs/conf.py b/docs/conf.py index 0cd495c..ba4ae11 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -20,7 +20,7 @@ # -- Project information ----------------------------------------------------- project = 'BayesO' -copyright = '2017-2020, Jungtaek Kim and Seungjin Choi' +copyright = '2017-2021, Jungtaek Kim and Seungjin Choi' author = 'Jungtaek Kim and Seungjin Choi' # The short X.Y version diff --git a/setup.py b/setup.py index 4d5b51f..85cfe6f 100644 --- a/setup.py +++ b/setup.py @@ -10,7 +10,13 @@ sys.exit('[ERROR] bayeso does not support Python {}.{} version in this system.'.format(sys.version_info.major, sys.version_info.minor)) path_requirements = 'requirements.txt' -list_packages = ['bayeso', 'bayeso.gp', 'bayeso.wrappers', 'bayeso.utils'] +list_packages = [ + 'bayeso', + 'bayeso.gp', + 'bayeso.tp', + 'bayeso.wrappers', + 'bayeso.utils' +] with open(path_requirements) as f: required = f.read().splitlines() From 875ffb960f1181908934a3ebd1152d3fea000e32 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 12 Jan 2021 13:18:04 +0900 Subject: [PATCH 14/37] Add str_surrogate for gp and tp, and its tests --- bayeso/bo.py | 2 ++ bayeso/constants.py | 2 ++ tests/common/test_bo.py | 4 ++++ tests/common/test_import.py | 6 ++++++ 4 files changed, 14 insertions(+) diff --git a/bayeso/bo.py b/bayeso/bo.py index c1d52a9..2a23336 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -63,6 +63,7 @@ def __init__(self, range_X: np.ndarray, normalize_Y: bool=constants.NORMALIZE_RESPONSE, use_ard: bool=True, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + str_surrogate: str=constants.STR_SURROGATE, str_optimizer_method_gp: str=constants.STR_OPTIMIZER_METHOD_GP, str_optimizer_method_bo: str=constants.STR_OPTIMIZER_METHOD_AO, str_modelselection_method: 
str=constants.STR_MODELSELECTION_METHOD, @@ -92,6 +93,7 @@ def __init__(self, range_X: np.ndarray, assert str_optimizer_method_gp in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_optimizer_method_bo in constants.ALLOWED_OPTIMIZER_METHOD_BO assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD + assert str_surrogate in constants.ALLOWED_SURROGATE self.range_X = range_X self.num_dim = range_X.shape[0] diff --git a/bayeso/constants.py b/bayeso/constants.py index 595363f..06cba90 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -13,6 +13,7 @@ JITTER_COV = 1e-5 JITTER_LOG = 1e-7 +STR_SURROGATE = 'gp' STR_OPTIMIZER_METHOD_GP = 'BFGS' STR_OPTIMIZER_METHOD_TP = 'SLSQP' STR_GP_COV = 'matern52' @@ -56,6 +57,7 @@ ALLOWED_MLM_METHOD = ['regular', 'converged'] ALLOWED_MODELSELECTION_METHOD = ['ml', 'loocv'] ALLOWED_FRAMEWORK_GP = ['scipy', 'tensorflow', 'gpytorch'] +ALLOWED_SURROGATE = ['gp', 'tp'] KEYS_INFO_BENCHMARK = ['dim_fun', 'bounds', 'global_minimum_X', 'global_minimum_y'] diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py index dff1cfc..97b7f48 100644 --- a/tests/common/test_bo.py +++ b/tests/common/test_bo.py @@ -70,6 +70,10 @@ def test_load_bo(): model_bo = package_target.BO(arr_range_1, str_modelselection_method=1) with pytest.raises(AssertionError) as error: model_bo = package_target.BO(arr_range_1, str_modelselection_method='abc') + with pytest.raises(AssertionError) as error: + model_bo = package_target.BO(arr_range_1, str_surrogate='abc') + with pytest.raises(AssertionError) as error: + model_bo = package_target.BO(arr_range_1, str_surrogate=1) with pytest.raises(AssertionError) as error: model_bo = package_target.BO(arr_range_1, debug=1) diff --git a/tests/common/test_import.py b/tests/common/test_import.py index ed0b759..f9260f7 100644 --- a/tests/common/test_import.py +++ b/tests/common/test_import.py @@ -34,6 +34,12 @@ def test_import_gp_gp_tensorflow(): def test_import_gp_gp_gpytorch(): import bayeso.gp.gp_gpytorch +def test_import_tp(): + import bayeso.tp + +def test_import_tp_tp(): + import bayeso.tp.tp + def test_import_utils(): import bayeso.utils From f2e9b4512d758fde8efd91f9dd3459290604261a Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Wed, 17 Feb 2021 01:25:32 +0900 Subject: [PATCH 15/37] Update a minor thing for README and installation doc --- README.md | 2 +- docs/getting_started/installation.rst | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 0d727f4..26d22cc 100644 --- a/README.md +++ b/README.md @@ -15,7 +15,7 @@ Simple, but essential Bayesian optimization package. * [Online documentation](http://bayeso.readthedocs.io) ## Installation -We recommend it should be installed in `virtualenv`. +We recommend installing it with `virtualenv`. You can choose one of three installation options. * Using PyPI repository (for user installation) diff --git a/docs/getting_started/installation.rst b/docs/getting_started/installation.rst index fe588b2..9f7b274 100644 --- a/docs/getting_started/installation.rst +++ b/docs/getting_started/installation.rst @@ -1,7 +1,7 @@ Installing BayesO ################# -We recommend it should be installed in **virtualenv**. +We recommend installing it with **virtualenv**. You can choose one of three installation options. 
Installing from PyPI From 52af375016e714b2bbef165190b5452c0c00f335 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Wed, 17 Feb 2021 02:05:15 +0900 Subject: [PATCH 16/37] Fix an error related to qmcpy --- tests/common/test_bo.py | 13 +++++++------ 1 file changed, 7 insertions(+), 6 deletions(-) diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py index 97b7f48..d4b205d 100644 --- a/tests/common/test_bo.py +++ b/tests/common/test_bo.py @@ -122,10 +122,11 @@ def test_get_samples(): arr_initials_ = model_bo.get_samples('sobol', num_samples=3) arr_initials = model_bo.get_samples('sobol', num_samples=3, seed=42) truth_arr_initials = np.array([ - [6.863512583076954, -0.1525135599076748, -1.732824514620006], - [0.4720448818989098, 1.9830138171091676, 1.8962347391061485], - [3.9235182013362646, -1.8038121052086353, -3.2463264442048967], + [5.051551531068981, 0.8090446023270488, -1.0847891168668866], + [1.4649059670045972, -1.925125477835536, 1.4882571692578495], + [3.202530408743769, 1.6943757990375161, -3.383688726462424], ]) + assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() arr_initials_ = model_bo.get_samples('halton', num_samples=3) @@ -183,9 +184,9 @@ def test_get_initials(): arr_initials = model_bo.get_initials('sobol', 3) arr_initials = model_bo.get_initials('sobol', 3, seed=42) truth_arr_initials = np.array([ - [6.863512583076954, -0.1525135599076748, -1.732824514620006], - [0.4720448818989098, 1.9830138171091676, 1.8962347391061485], - [3.9235182013362646, -1.8038121052086353, -3.2463264442048967], + [5.051551531068981, 0.8090446023270488, -1.0847891168668866], + [1.4649059670045972, -1.925125477835536, 1.4882571692578495], + [3.202530408743769, 1.6943757990375161, -3.383688726462424], ]) assert (np.abs(arr_initials - truth_arr_initials) < TEST_EPSILON).all() From 6c6bcacfb10021f566a4c1b53150c4d73abace17 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Sun, 14 Mar 2021 14:25:19 +0900 Subject: [PATCH 17/37] Update docstring --- bayeso/__init__.py | 5 +++-- bayeso/acquisition.py | 5 +++-- bayeso/constants.py | 5 +++-- 3 files changed, 9 insertions(+), 6 deletions(-) diff --git a/bayeso/__init__.py b/bayeso/__init__.py index ab16c03..e6cb7da 100644 --- a/bayeso/__init__.py +++ b/bayeso/__init__.py @@ -1,7 +1,8 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 28, 2020 +# last updated: March 14, 2021 # -"""BayesO is a simple, but essential Bayesian optimization package, implemented in Python.""" +"""BayesO is a simple, but essential Bayesian optimization +package, implemented in Python.""" __version__ = '0.5.0' diff --git a/bayeso/acquisition.py b/bayeso/acquisition.py index 2853da2..989a45c 100644 --- a/bayeso/acquisition.py +++ b/bayeso/acquisition.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: September 24, 2020 # -"""It defines acquisition functions.""" +"""It defines acquisition functions, each of which +is employed to determine where next to evaluate.""" import numpy as np import scipy.stats @@ -16,7 +17,7 @@ def pi(pred_mean: np.ndarray, pred_std: np.ndarray, Y_train: np.ndarray, jitter: float=constants.JITTER_ACQ ) -> np.ndarray: """ - It is a probability improvement criterion. + It is a probability of improvement criterion. :param pred_mean: posterior predictive mean function over `X_test`. Shape: (l, ). 
diff --git a/bayeso/constants.py b/bayeso/constants.py index 06cba90..0f06c2a 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -2,8 +2,9 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 28, 2020 # -"""This file is for declaring various default constants. -If you would like to see the details, check out the repository.""" +"""This file declares various default constants. +If you would like to see the details, check out +the Python script in the repository directly.""" import typing import numpy as np From 05b4862a10a0c98fdaf26fcf1dfaae9d4fe268ef Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 22 Mar 2021 10:13:00 +0900 Subject: [PATCH 18/37] Add docs for tp --- docs/index.rst | 1 + docs/python_api/bayeso.tp.rst | 12 ++++++++++++ docs/python_api/bayeso.tp.tp.rst | 8 ++++++++ 3 files changed, 21 insertions(+) create mode 100644 docs/python_api/bayeso.tp.rst create mode 100644 docs/python_api/bayeso.tp.tp.rst diff --git a/docs/index.rst b/docs/index.rst index aa2e667..ae9b965 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -43,6 +43,7 @@ The code can be found in `our GitHub repository Date: Mon, 22 Mar 2021 10:29:56 +0900 Subject: [PATCH 19/37] Change to math mode --- bayeso/tp/__init__.py | 2 +- bayeso/tp/tp.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/bayeso/tp/__init__.py b/bayeso/tp/__init__.py index dd83ce5..ff0b794 100644 --- a/bayeso/tp/__init__.py +++ b/bayeso/tp/__init__.py @@ -2,4 +2,4 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 29, 2020 # -"""These files are for implementing Student-$t$ process regression.""" +"""These files are for implementing Student-:math:`t` process regression.""" diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index 4cd0cd5..c475de5 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -2,7 +2,7 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 30, 2020 # -"""It defines Student-$t$ process regression.""" +"""It defines Student-:math:`t` process regression.""" import time import numpy as np From d53ebaa74b8aff6e2329aed9bb42a8b47ba887a2 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 22 Mar 2021 17:17:07 +0900 Subject: [PATCH 20/37] Remove tensorflow and gpytorch --- bayeso/bo.py | 4 +- bayeso/constants.py | 4 +- bayeso/gp/gp.py | 44 +----- bayeso/gp/gp_gpytorch.py | 166 --------------------- bayeso/gp/gp_tensorflow.py | 186 ------------------------ docs/python_api/bayeso.gp.gp_common.rst | 8 - docs/python_api/bayeso.gp.rst | 1 - requirements-optional.txt | 4 - tests/common/test_gp.py | 21 +-- tests/common/test_gp_gpytorch.py | 97 ------------ tests/common/test_gp_tensorflow.py | 97 ------------ tests/common/test_import.py | 8 +- 12 files changed, 14 insertions(+), 626 deletions(-) delete mode 100644 bayeso/gp/gp_gpytorch.py delete mode 100644 bayeso/gp/gp_tensorflow.py delete mode 100644 docs/python_api/bayeso.gp.gp_common.rst delete mode 100644 tests/common/test_gp_gpytorch.py delete mode 100644 tests/common/test_gp_tensorflow.py diff --git a/bayeso/bo.py b/bayeso/bo.py index 2a23336..63bcc1e 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -43,6 +43,8 @@ class BO: :type use_ard: bool., optional :param prior_mu: None, or prior mean function. :type prior_mu: NoneType, or function, optional + :param str_surrogate: the name of surrogate model. + :type str_surrogate: str., optional :param str_optimizer_method_gp: the name of optimization method for Gaussian process regression. 
:type str_optimizer_method_gp: str., optional @@ -326,7 +328,7 @@ def get_initials(self, str_initial_method: str, num_initials: int, :param str_initial_method: the name of sampling method. :type str_initial_method: str. :param num_initials: the number of samples. - :type num_initials: int., optional + :type num_initials: int. :param seed: None, or random seed. :type seed: NoneType or int., optional diff --git a/bayeso/constants.py b/bayeso/constants.py index 0f06c2a..877ecbd 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 28, 2020 +# last updated: March 22, 2021 # """This file declares various default constants. If you would like to see the details, check out @@ -24,7 +24,6 @@ STR_SAMPLING_METHOD_AO = 'sobol' STR_MLM_METHOD = 'regular' STR_MODELSELECTION_METHOD = 'ml' -STR_FRAMEWORK_GP = 'scipy' NUM_GRIDS_AO = 50 NUM_SAMPLES_AO = 100 @@ -57,7 +56,6 @@ ALLOWED_SAMPLING_METHOD = ALLOWED_INITIALIZING_METHOD_BO + ['grid'] ALLOWED_MLM_METHOD = ['regular', 'converged'] ALLOWED_MODELSELECTION_METHOD = ['ml', 'loocv'] -ALLOWED_FRAMEWORK_GP = ['scipy', 'tensorflow', 'gpytorch'] ALLOWED_SURROGATE = ['gp', 'tp'] KEYS_INFO_BENCHMARK = ['dim_fun', 'bounds', 'global_minimum_X', 'global_minimum_y'] diff --git a/bayeso/gp/gp.py b/bayeso/gp/gp.py index 4681359..bbf77f0 100644 --- a/bayeso/gp/gp.py +++ b/bayeso/gp/gp.py @@ -54,7 +54,6 @@ def sample_functions(mu: np.ndarray, Sigma: np.ndarray, @utils_common.validate_types def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - str_framework: str=constants.STR_FRAMEWORK_GP, str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, str_modelselection_method: str=constants.STR_MODELSELECTION_METHOD, fix_noise: bool=constants.FIX_GP_NOISE, @@ -72,8 +71,6 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, :type prior_mu: function or NoneType :param str_cov: the name of covariance function. :type str_cov: str. - :param str_framework: the name of framework for optimizing kernel hyperparameters. - :type str_framework: str. :param str_optimizer_method: the name of optimization method. :type str_optimizer_method: str., optional :param str_modelselection_method: the name of model selection method. 
@@ -95,7 +92,6 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(Y_train, np.ndarray) assert callable(prior_mu) or prior_mu is None assert isinstance(str_cov, str) - assert isinstance(str_framework, str) assert isinstance(str_optimizer_method, str) assert isinstance(str_modelselection_method, str) assert isinstance(fix_noise, bool) @@ -105,38 +101,14 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD - assert str_framework in constants.ALLOWED_FRAMEWORK_GP - - try: - if str_framework == 'tensorflow': - from bayeso.gp import gp_tensorflow - elif str_framework == 'gpytorch': - from bayeso.gp import gp_gpytorch - except: # pragma: no cover - str_framework = 'scipy' - - if str_framework == 'scipy': - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel( - X_train, Y_train, prior_mu, str_cov, - str_optimizer_method=str_optimizer_method, - str_modelselection_method=str_modelselection_method, - fix_noise=fix_noise, - debug=debug - ) - elif str_framework == 'tensorflow': - cov_X_X, inv_cov_X_X, hyps = gp_tensorflow.get_optimized_kernel( - X_train, Y_train, prior_mu, str_cov, - fix_noise=fix_noise, - debug=debug - ) - elif str_framework == 'gpytorch': - cov_X_X, inv_cov_X_X, hyps = gp_gpytorch.get_optimized_kernel( - X_train, Y_train, prior_mu, str_cov, - fix_noise=fix_noise, - debug=debug - ) - else: # pragma: no cover - raise ValueError('{}: invalid str_framework.'.format(str_framework)) + + cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel( + X_train, Y_train, prior_mu, str_cov, + str_optimizer_method=str_optimizer_method, + str_modelselection_method=str_modelselection_method, + fix_noise=fix_noise, + debug=debug + ) return cov_X_X, inv_cov_X_X, hyps diff --git a/bayeso/gp/gp_gpytorch.py b/bayeso/gp/gp_gpytorch.py deleted file mode 100644 index 46686ad..0000000 --- a/bayeso/gp/gp_gpytorch.py +++ /dev/null @@ -1,166 +0,0 @@ -# -# author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 29, 2020 -# -"""It is Gaussian process regression implementations with GPyTorch.""" - -import time -import numpy as np -import torch -import gpytorch - -from bayeso import covariance -from bayeso import constants -from bayeso.utils import utils_covariance -from bayeso.utils import utils_common -from bayeso.utils import utils_logger - -logger = utils_logger.get_logger('gp_gpytorch') - - -class ExactGPModel(gpytorch.models.ExactGP): - """ExactGPModel""" - def __init__(self, str_cov, prior_mu, X_train, Y_train, likelihood): - super().__init__(X_train, Y_train, likelihood) - - self.dim_X = X_train.shape[1] - - if prior_mu is None: - self.mean_module = gpytorch.means.ConstantMean() - else: - raise NotImplementedError() - - if str_cov in ('eq', 'se'): - self.covar_module = gpytorch.kernels.ScaleKernel( - gpytorch.kernels.RBFKernel(ard_num_dims=self.dim_X)) - elif str_cov == 'matern32': - self.covar_module = gpytorch.kernels.ScaleKernel( - gpytorch.kernels.MaternKernel(nu=1.5, ard_num_dims=self.dim_X)) - elif str_cov == 'matern52': - self.covar_module = gpytorch.kernels.ScaleKernel( - gpytorch.kernels.MaternKernel(nu=2.5, ard_num_dims=self.dim_X)) - else: - raise NotImplementedError('allowed str_cov conditions, but it is not implemented.') - - def forward(self, X): - mean = self.mean_module(X) - cov = 
self.covar_module(X) - - return gpytorch.distributions.MultivariateNormal(mean, cov) - -@utils_common.validate_types -def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - fix_noise: bool=constants.FIX_GP_NOISE, - num_iters: int=1000, - debug: bool=False -) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: - """ - This function computes the kernel matrix optimized by optimization - method specified, its inverse matrix, and the optimized hyperparameters, - using GPyTorch. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType - :param str_cov: the name of covariance function. - :type str_cov: str. - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param num_iters: the number of iterations for optimizing negative log likelihood. - :type num_iters: int., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, kernel matrix - inverse, and dictionary of hyperparameters. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) - - :raises: AssertionError, ValueError - - """ - - # TODO: check to input same fix_noise to convert_hyps and restore_hyps - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) - assert isinstance(fix_noise, bool) - assert isinstance(num_iters, int) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] - utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) - assert num_iters >= 10 or num_iters == 0 - - # TODO: prior_mu and fix_noise are not working now. 
- prior_mu = None - fix_noise = False - - time_start = time.time() - - if str_cov in constants.ALLOWED_GP_COV_BASE: -# num_dim = X_train.shape[1] - pass - elif str_cov in constants.ALLOWED_GP_COV_SET: -# num_dim = X_train.shape[2] - raise NotImplementedError('It is not implemented yet.') - else: - raise NotImplementedError - - X_train_ = torch.from_numpy(X_train).double() - Y_train_ = torch.from_numpy(Y_train.flatten()).double() - - likelihood = gpytorch.likelihoods.GaussianLikelihood() - model = ExactGPModel(str_cov, prior_mu, X_train_, Y_train_, likelihood) - - model.train() - likelihood.train() - - optimizer = torch.optim.Adam([ - {'params': model.parameters()}, - ], lr=1e-2) - - mll = gpytorch.mlls.ExactMarginalLogLikelihood(likelihood, model) - list_neg_log_likelihoods = [] - ind_iter = 0 - - while num_iters >= 10: - optimizer.zero_grad() - outputs = model(X_train_) - loss = -1.0 * mll(outputs, Y_train_) - loss.backward() - optimizer.step() - list_neg_log_likelihoods.append(loss.item()) - - if ind_iter > num_iters and np.abs(np.mean( - list_neg_log_likelihoods[-6:-1]) - loss.item()) < 5e-2: - break - if ind_iter > 10 * num_iters: # pragma: no cover - break - - ind_iter += 1 - - model.eval() - likelihood.eval() - - hyps = { - 'signal': np.sqrt(model.covar_module.outputscale.item()), - 'lengthscales': model.covar_module.base_kernel.lengthscale.detach().numpy()[0], - 'noise': np.sqrt(model.likelihood.noise.item()) - } - - cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, - str_cov, fix_noise=fix_noise, debug=debug) - - time_end = time.time() - - if debug: - logger.debug('iterations to be converged: %d', ind_iter) - logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) - - return cov_X_X, inv_cov_X_X, hyps diff --git a/bayeso/gp/gp_tensorflow.py b/bayeso/gp/gp_tensorflow.py deleted file mode 100644 index 826e545..0000000 --- a/bayeso/gp/gp_tensorflow.py +++ /dev/null @@ -1,186 +0,0 @@ -# -# author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 29, 2020 -# -"""It is Gaussian process regression implementations with TensorFlow.""" - -import time -import numpy as np -import tensorflow as tf -import tensorflow_probability as tfp - -from bayeso import covariance -from bayeso import constants -from bayeso.utils import utils_covariance -from bayeso.utils import utils_common -from bayeso.utils import utils_logger - -logger = utils_logger.get_logger('gp_tensorflow') -gpus = tf.config.experimental.list_physical_devices('GPU') -if gpus: # pragma: no cover - try: - for gpu in gpus: - tf.config.experimental.set_memory_growth(gpu, True) - except RuntimeError as e: - print(e) - - -@utils_common.validate_types -def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - fix_noise: bool=constants.FIX_GP_NOISE, - num_iters: int=1000, - debug: bool=False -) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: - """ - This function computes the kernel matrix optimized by optimization - method specified, its inverse matrix, and the optimized hyperparameters, - using TensorFlow and TensorFlow probability. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType - :param str_cov: the name of covariance function. - :type str_cov: str. 
- :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param num_iters: the number of iterations for optimizing negative log - likelihood. - :type num_iters: int., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, kernel matrix inverse, - and dictionary of hyperparameters. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) - - :raises: AssertionError, ValueError - - """ - - # TODO: check to input same fix_noise to convert_hyps and restore_hyps - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) - assert isinstance(fix_noise, bool) - assert isinstance(num_iters, int) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] - utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) - assert num_iters >= 10 or num_iters == 0 - - # TODO: prior_mu and fix_noise are not working now. - prior_mu = None - fix_noise = False - - time_start = time.time() - - if str_cov in constants.ALLOWED_GP_COV_BASE: - num_dim = X_train.shape[1] - elif str_cov in constants.ALLOWED_GP_COV_SET: - num_dim = X_train.shape[2] - raise NotImplementedError('It is not implemented yet.') - - constraint_positive = tfp.bijectors.Shift(np.finfo(np.float64).tiny)(tfp.bijectors.Exp()) - - var_amplitude = tfp.util.TransformedVariable( - initial_value=1.0, - bijector=constraint_positive, - dtype=np.float64 - ) - - var_length_scale = tfp.util.TransformedVariable( - initial_value=[1.0] * num_dim, - bijector=constraint_positive, - dtype=np.float64 - ) - - var_observation_noise_variance = tfp.util.TransformedVariable( - initial_value=1.0, - bijector=constraint_positive, - dtype=np.float64 - ) - - def create_kernel(str_cov): - if str_cov in ('eq', 'se'): - kernel_main = tfp.math.psd_kernels.ExponentiatedQuadratic( - amplitude=var_amplitude, length_scale=None) - elif str_cov == 'matern32': - kernel_main = tfp.math.psd_kernels.MaternThreeHalves( - amplitude=var_amplitude, length_scale=None) - elif str_cov == 'matern52': - kernel_main = tfp.math.psd_kernels.MaternFiveHalves( - amplitude=var_amplitude, length_scale=None) - else: - raise NotImplementedError('allowed str_cov conditions, but it is not implemented.') - - kernel = tfp.math.psd_kernels.FeatureScaled( - kernel_main, - var_length_scale - ) - - return kernel - - model_gp = tfp.distributions.GaussianProcess( - kernel=create_kernel(str_cov), - index_points=X_train, - observation_noise_variance=var_observation_noise_variance, - mean_fn=prior_mu - ) - - @tf.function() - def log_prob_outputs(): # pragma: no cover - return model_gp.log_prob(np.ravel(Y_train)) - - optimizer = tf.optimizers.Adam(learning_rate=1e-2) - trainable_variables = [ - var_.trainable_variables[0] for var_ in [ - var_amplitude, - var_length_scale, - var_observation_noise_variance - ] - ] - - list_neg_log_likelihoods = [] - ind_iter = 0 - - while num_iters >= 10: - with tf.GradientTape() as tape: - loss = -1.0 * log_prob_outputs() - - grads = tape.gradient(loss, trainable_variables) - optimizer.apply_gradients(zip(grads, trainable_variables)) - list_neg_log_likelihoods.append(loss) - - if ind_iter > num_iters and np.abs(np.mean(list_neg_log_likelihoods[-6:-1]) - loss) < 5e-2: - break - if ind_iter > 10 * num_iters: # pragma: no cover - break - - ind_iter += 1 - - hyps = { - 'signal': 
tf.convert_to_tensor(var_amplitude).numpy(), - 'lengthscales': tf.convert_to_tensor(var_length_scale).numpy(), - 'noise': np.sqrt(tf.convert_to_tensor(var_observation_noise_variance).numpy()) -# 'signal': var_amplitude._value().numpy(), -# 'lengthscales': var_length_scale._value().numpy(), -# 'noise': np.sqrt(var_observation_noise_variance._value().numpy()) - } - - cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, - str_cov, fix_noise=fix_noise, debug=debug) - - time_end = time.time() - - if debug: - logger.debug('iterations to be converged: %d', ind_iter) - logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) - - return cov_X_X, inv_cov_X_X, hyps diff --git a/docs/python_api/bayeso.gp.gp_common.rst b/docs/python_api/bayeso.gp.gp_common.rst deleted file mode 100644 index 3f029ca..0000000 --- a/docs/python_api/bayeso.gp.gp_common.rst +++ /dev/null @@ -1,8 +0,0 @@ -bayeso.gp.gp\_common -==================== - -.. automodule:: bayeso.gp.gp_common - :members: - :private-members: - :undoc-members: - :show-inheritance: diff --git a/docs/python_api/bayeso.gp.rst b/docs/python_api/bayeso.gp.rst index e0b1269..a13592c 100644 --- a/docs/python_api/bayeso.gp.rst +++ b/docs/python_api/bayeso.gp.rst @@ -10,7 +10,6 @@ bayeso.gp .. toctree:: bayeso.gp.gp - bayeso.gp.gp_common bayeso.gp.gp_gpytorch bayeso.gp.gp_scipy bayeso.gp.gp_tensorflow diff --git a/requirements-optional.txt b/requirements-optional.txt index cef4f7b..aef7cb7 100644 --- a/requirements-optional.txt +++ b/requirements-optional.txt @@ -1,7 +1,3 @@ scipydirect matplotlib -tensorflow -tensorflow-probability -torch -gpytorch bayeso-benchmarks diff --git a/tests/common/test_gp.py b/tests/common/test_gp.py index 0921373..b0486d8 100644 --- a/tests/common/test_gp.py +++ b/tests/common/test_gp.py @@ -10,14 +10,6 @@ from bayeso import constants from bayeso.gp import gp as package_target -try: - from bayeso.gp import gp_tensorflow -except: # pragma: no cover - gp_tensorflow = None -try: - from bayeso.gp import gp_gpytorch -except: # pragma: no cover - gp_gpytorch = None from bayeso.utils import utils_covariance @@ -66,7 +58,6 @@ def test_get_optimized_kernel_typing(): assert annos['Y_train'] == np.ndarray assert annos['prior_mu'] == typing.Union[callable, type(None)] assert annos['str_cov'] == str - assert annos['str_framework'] == str assert annos['str_optimizer_method'] == str assert annos['str_modelselection_method'] == str assert annos['fix_noise'] == bool @@ -101,8 +92,6 @@ def test_get_optimized_kernel(): package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') with pytest.raises(ValueError) as error: package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework=1) with pytest.raises(AssertionError) as error: package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) with pytest.raises(AssertionError) as error: @@ -139,17 +128,9 @@ def test_get_optimized_kernel(): cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='scipy') + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') print(hyps) - if gp_tensorflow is not None: - cov_X_X, inv_cov_X_X, hyps = 
package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='tensorflow') - print(hyps) - - if gp_gpytorch is not None: - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_framework='gpytorch') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') diff --git a/tests/common/test_gp_gpytorch.py b/tests/common/test_gp_gpytorch.py deleted file mode 100644 index 45b77bb..0000000 --- a/tests/common/test_gp_gpytorch.py +++ /dev/null @@ -1,97 +0,0 @@ -# -# author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 -# -"""test_gp_gpytorch""" - -import typing -import pytest -import numpy as np - -try: - from bayeso.gp import gp_gpytorch as package_target -except: # pragma: no cover - package_target = None - -TEST_EPSILON = 1e-7 - - -def test_get_optimized_kernel_typing(): - if package_target is None: # pragma: no cover - pytest.skip('GPyTorch is not installed.') - - annos = package_target.get_optimized_kernel.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] - assert annos['str_cov'] == str - assert annos['fix_noise'] == bool - assert annos['num_iters'] == int - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] - -def test_get_optimized_kernel(): - np.random.seed(42) - dim_X = 3 - num_X = 10 - num_instances = 5 - X = np.random.randn(num_X, dim_X) - X_set = np.random.randn(num_X, num_instances, dim_X) - Y = np.random.randn(num_X, 1) - prior_mu = None - - if package_target is None: # pragma: no cover - pytest.skip('GPyTorch is not installed.') - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, 1, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, 1, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(1, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') - with pytest.raises(ValueError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters='abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) - - # INFO: tests for set inputs - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') - with pytest.raises(AssertionError) as 
error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0, debug=True) - print(hyps) - - with pytest.raises(NotImplementedError) as error: - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') - print(hyps) diff --git a/tests/common/test_gp_tensorflow.py b/tests/common/test_gp_tensorflow.py deleted file mode 100644 index 4c5e851..0000000 --- a/tests/common/test_gp_tensorflow.py +++ /dev/null @@ -1,97 +0,0 @@ -# -# author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 -# -"""test_gp_tensorflow""" - -import typing -import pytest -import numpy as np - -try: - from bayeso.gp import gp_tensorflow as package_target -except: # pragma: no cover - package_target = None - -TEST_EPSILON = 1e-7 - - -def test_get_optimized_kernel_typing(): - if package_target is None: # pragma: no cover - pytest.skip('TensorFlow is not installed.') - - annos = package_target.get_optimized_kernel.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] - assert annos['str_cov'] == str - assert annos['fix_noise'] == bool - assert annos['num_iters'] == int - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] - -def test_get_optimized_kernel(): - np.random.seed(42) - dim_X = 3 - num_X = 10 - num_instances = 5 - X = np.random.randn(num_X, dim_X) - X_set = np.random.randn(num_X, num_instances, dim_X) - Y = np.random.randn(num_X, 1) - prior_mu = None - - if package_target is None: # pragma: no cover - pytest.skip('TensorFlow is not installed.') - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, 1, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, 1, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(1, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') - with pytest.raises(ValueError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters='abc') - with 
pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) - - # INFO: tests for set inputs - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52', num_iters=0, debug=True) - print(hyps) - - with pytest.raises(NotImplementedError) as error: - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') - print(hyps) diff --git a/tests/common/test_import.py b/tests/common/test_import.py index f9260f7..f6c5d4c 100644 --- a/tests/common/test_import.py +++ b/tests/common/test_import.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 29, 2020 +# last updated: March 22, 2021 # """test_import""" @@ -28,12 +28,6 @@ def test_import_gp_gp(): def test_import_gp_gp_scipy(): import bayeso.gp.gp_scipy -def test_import_gp_gp_tensorflow(): - import bayeso.gp.gp_tensorflow - -def test_import_gp_gp_gpytorch(): - import bayeso.gp.gp_gpytorch - def test_import_tp(): import bayeso.tp From efc731f6ac1703138f5c15d302b7ac14e2f68ae3 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 22 Mar 2021 18:23:02 +0900 Subject: [PATCH 21/37] Merge gp and gp_scipy, and split new gp to subdivisions --- bayeso/bo.py | 5 +- bayeso/gp/__init__.py | 2 +- bayeso/gp/gp.py | 67 +--- bayeso/gp/gp_kernel.py | 345 ++++++++++++++++++ bayeso/gp/{gp_scipy.py => gp_likelihood.py} | 141 +------ tests/common/test_gp.py | 94 +---- tests/common/test_gp_kernel.py | 103 ++++++ ...test_gp_scipy.py => test_gp_likelihood.py} | 93 +---- tests/common/test_import.py | 3 - 9 files changed, 466 insertions(+), 387 deletions(-) create mode 100644 bayeso/gp/gp_kernel.py rename bayeso/gp/{gp_scipy.py => gp_likelihood.py} (54%) create mode 100644 tests/common/test_gp_kernel.py rename tests/common/{test_gp_scipy.py => test_gp_likelihood.py} (59%) diff --git a/bayeso/bo.py b/bayeso/bo.py index 63bcc1e..1833633 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -20,6 +20,7 @@ from bayeso import covariance from bayeso import constants from bayeso.gp import gp +from bayeso.gp import gp_kernel from bayeso.utils import utils_bo from bayeso.utils import utils_common from bayeso.utils import utils_logger @@ -526,7 +527,7 @@ def optimize(self, X_train: np.ndarray, Y_train: np.ndarray, time_start_gp = time.time() if str_mlm_method == 'regular': - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X_train, Y_train, + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X_train, Y_train, self.prior_mu, self.str_cov, str_optimizer_method=self.str_optimizer_method_gp, str_modelselection_method=self.str_modelselection_method, @@ -535,7 +536,7 @@ def optimize(self, X_train: np.ndarray, 
Y_train: np.ndarray, fix_noise = constants.FIX_GP_NOISE if self.is_optimize_hyps: - cov_X_X, inv_cov_X_X, hyps = gp.get_optimized_kernel(X_train, Y_train, + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X_train, Y_train, self.prior_mu, self.str_cov, str_optimizer_method=self.str_optimizer_method_gp, str_modelselection_method=self.str_modelselection_method, diff --git a/bayeso/gp/__init__.py b/bayeso/gp/__init__.py index d15cacb..cb304b0 100644 --- a/bayeso/gp/__init__.py +++ b/bayeso/gp/__init__.py @@ -1,5 +1,5 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: March 22, 2021 # """These files are for implementing Gaussian process regression.""" diff --git a/bayeso/gp/gp.py b/bayeso/gp/gp.py index bbf77f0..b64d5cd 100644 --- a/bayeso/gp/gp.py +++ b/bayeso/gp/gp.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 29, 2020 +# last updated: March 22, 2021 # """It defines Gaussian process regression.""" @@ -10,7 +10,7 @@ from bayeso import covariance from bayeso import constants -from bayeso.gp import gp_scipy +from bayeso.gp import gp_kernel from bayeso.utils import utils_gp from bayeso.utils import utils_covariance from bayeso.utils import utils_common @@ -51,67 +51,6 @@ def sample_functions(mu: np.ndarray, Sigma: np.ndarray, list_rvs = [rv.rvs() for _ in range(0, num_samples)] return np.array(list_rvs) -@utils_common.validate_types -def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, - str_modelselection_method: str=constants.STR_MODELSELECTION_METHOD, - fix_noise: bool=constants.FIX_GP_NOISE, - debug: bool=False -) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: - """ - This function computes the kernel matrix optimized by optimization - method specified, its inverse matrix, and the optimized hyperparameters. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType - :param str_cov: the name of covariance function. - :type str_cov: str. - :param str_optimizer_method: the name of optimization method. - :type str_optimizer_method: str., optional - :param str_modelselection_method: the name of model selection method. - :type str_modelselection_method: str., optional - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, kernel matrix - inverse, and dictionary of hyperparameters. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) 
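[Editor's note, not part of the patch] After this change, kernel optimization lives in the new bayeso.gp.gp_kernel module, while the predictive functions keep their existing signatures in bayeso.gp.gp. A minimal sketch of the post-refactor call flow, assuming this patch series is applied; the toy data below are illustrative only:

    import numpy as np
    from bayeso.gp import gp, gp_kernel

    X_train = np.random.randn(10, 3)   # inputs, shape (n, d)
    Y_train = np.random.randn(10, 1)   # outputs, shape (n, 1)
    X_test = np.random.randn(5, 3)

    # optimize kernel hyperparameters under the squared exponential covariance
    cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(
        X_train, Y_train, None, 'se')

    # posterior mean, standard deviation, and covariance over X_test
    mu, sigma, Sigma = gp.predict_with_cov(
        X_train, Y_train, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se')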
- - :raises: AssertionError, ValueError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) - assert isinstance(str_optimizer_method, str) - assert isinstance(str_modelselection_method, str) - assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] - utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) - assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP - assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD - - cov_X_X, inv_cov_X_X, hyps = gp_scipy.get_optimized_kernel( - X_train, Y_train, prior_mu, str_cov, - str_optimizer_method=str_optimizer_method, - str_modelselection_method=str_modelselection_method, - fix_noise=fix_noise, - debug=debug - ) - - return cov_X_X, inv_cov_X_X, hyps - @utils_common.validate_types def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, @@ -292,7 +231,7 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test time_start = time.time() - cov_X_X, inv_cov_X_X, hyps = get_optimized_kernel(X_train, Y_train, + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X_train, Y_train, prior_mu, str_cov, str_optimizer_method=str_optimizer_method, fix_noise=fix_noise, debug=debug) mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py new file mode 100644 index 0000000..c00f85e --- /dev/null +++ b/bayeso/gp/gp_kernel.py @@ -0,0 +1,345 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: March 22, 2021 +# +"""It defines Gaussian process regression.""" + +import time +import numpy as np +import scipy.optimize + +from bayeso import covariance +from bayeso import constants +from bayeso.gp import gp_likelihood +from bayeso.utils import utils_gp +from bayeso.utils import utils_covariance +from bayeso.utils import utils_common +from bayeso.utils import utils_logger + +logger = utils_logger.get_logger('gp_kernel') + + +@utils_common.validate_types +def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, + str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, + str_modelselection_method: str=constants.STR_MODELSELECTION_METHOD, + fix_noise: bool=constants.FIX_GP_NOISE, + debug: bool=False +) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: + """ + This function computes the kernel matrix optimized by optimization + method specified, its inverse matrix, and the optimized hyperparameters. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param prior_mu: prior mean function or None. + :type prior_mu: function or NoneType + :param str_cov: the name of covariance function. + :type str_cov: str. + :param str_optimizer_method: the name of optimization method. + :type str_optimizer_method: str., optional + :param str_modelselection_method: the name of model selection method. + :type str_modelselection_method: str., optional + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param debug: flag for printing log messages. 
+ :type debug: bool., optional + + :returns: a tuple of kernel matrix over `X_train`, kernel matrix + inverse, and dictionary of hyperparameters. + :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) + + :raises: AssertionError, ValueError + + """ + + # TODO: check to input same fix_noise to convert_hyps and restore_hyps + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert callable(prior_mu) or prior_mu is None + assert isinstance(str_cov, str) + assert isinstance(str_optimizer_method, str) + assert isinstance(str_modelselection_method, str) + assert isinstance(fix_noise, bool) + assert isinstance(debug, bool) + assert len(Y_train.shape) == 2 + assert X_train.shape[0] == Y_train.shape[0] + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP + assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD + # TODO: fix this. + use_gradient = bool(str_optimizer_method != 'Nelder-Mead') + + time_start = time.time() + + if debug: + logger.debug('str_optimizer_method: %s', str_optimizer_method) + logger.debug('str_modelselection_method: %s', str_modelselection_method) + + prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) + if str_cov in constants.ALLOWED_GP_COV_BASE: + num_dim = X_train.shape[1] + elif str_cov in constants.ALLOWED_GP_COV_SET: + num_dim = X_train.shape[2] + use_gradient = False + + if str_modelselection_method == 'ml': + neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_ml(X_train, Y_train, hyps, str_cov, + prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, + debug=debug) + elif str_modelselection_method == 'loocv': + neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_pseudo_l_loocv(X_train, Y_train, + hyps, str_cov, prior_mu_train, fix_noise=fix_noise, debug=debug) + use_gradient = False + else: # pragma: no cover + raise ValueError('get_optimized_kernel: missing conditions for str_modelselection_method.') + + hyps_converted = utils_covariance.convert_hyps( + str_cov, + utils_covariance.get_hyps(str_cov, num_dim), + fix_noise=fix_noise, + ) + + if str_optimizer_method in ['BFGS', 'SLSQP']: + result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, + method=str_optimizer_method, jac=use_gradient, options={'disp': False}) + + if debug: + logger.debug('scipy message: %s', result_optimized.message) + + result_optimized = result_optimized.x + elif str_optimizer_method in ['L-BFGS-B', 'SLSQP-Bounded']: + if str_optimizer_method == 'SLSQP-Bounded': + str_optimizer_method = 'SLSQP' + + bounds = utils_covariance.get_range_hyps(str_cov, num_dim, fix_noise=fix_noise) + result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, + method=str_optimizer_method, bounds=bounds, jac=use_gradient, + options={'disp': False}) + + if debug: + logger.debug('scipy message: %s', result_optimized.message) + + result_optimized = result_optimized.x + elif str_optimizer_method in ['Nelder-Mead']: + result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, + method=str_optimizer_method, options={'disp': False}) + + if debug: + logger.debug('scipy message: %s', result_optimized.message) + + result_optimized = result_optimized.x + # TODO: Fill this conditions + elif str_optimizer_method == 'DIRECT': # pragma: no cover + raise NotImplementedError('get_optimized_kernel: allowed str_optimizer_method,\ + but it is not implemented.') + else: # pragma: no cover + raise ValueError('get_optimized_kernel: 
missing conditions for str_optimizer_method') + + hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise) + + hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim) + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, + hyps, str_cov, fix_noise=fix_noise, debug=debug) + + time_end = time.time() + + if debug: + logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) + logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + return cov_X_X, inv_cov_X_X, hyps + +@utils_common.validate_types +def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, + cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, + str_cov: str=constants.STR_GP_COV, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + debug: bool=False +) -> constants.TYPING_TUPLE_THREE_ARRAYS: + """ + This function returns posterior mean and posterior standard deviation + functions over `X_test`, computed by Gaussian process regression with + `X_train`, `Y_train`, `cov_X_X`, `inv_cov_X_X`, and `hyps`. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param X_test: inputs. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray + :param cov_X_X: kernel matrix over `X_train`. Shape: (n, n). + :type cov_X_X: numpy.ndarray + :param inv_cov_X_X: kernel matrix inverse over `X_train`. Shape: (n, n). + :type inv_cov_X_X: numpy.ndarray + :param hyps: dictionary of hyperparameters for Gaussian process. + :type hyps: dict. + :param str_cov: the name of covariance function. + :type str_cov: str., optional + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function, optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of posterior mean function over `X_test`, posterior + standard deviation function over `X_test`, and posterior covariance + matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)). 
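[Editor's note, not part of the patch] In the notation of the implementation below, with prior mean m and kernel K, the returned quantities follow the standard Gaussian process posterior,

    mu_* = m(X_test) + K(X_train, X_test)^T K(X_train, X_train)^{-1} (Y_train - m(X_train))
    Sigma_* = K(X_test, X_test) - K(X_train, X_test)^T K(X_train, X_train)^{-1} K(X_train, X_test)

and the per-point standard deviation is the square root of the diagonal of Sigma_*, clipped at zero for numerical safety.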
+ :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(X_test, np.ndarray) + assert isinstance(cov_X_X, np.ndarray) + assert isinstance(inv_cov_X_X, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(str_cov, str) + assert isinstance(debug, bool) + assert callable(prior_mu) or prior_mu is None + assert len(Y_train.shape) == 2 + assert len(cov_X_X.shape) == 2 + assert len(inv_cov_X_X.shape) == 2 + assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all() + utils_covariance.check_str_cov('predict_with_cov', str_cov, + X_train.shape, shape_X2=X_test.shape) + assert X_train.shape[0] == Y_train.shape[0] + assert X_train.shape[1] == X_test.shape[1] + + prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) + prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test) + cov_X_Xs = covariance.cov_main(str_cov, X_train, X_test, hyps, False) + cov_Xs_Xs = covariance.cov_main(str_cov, X_test, X_test, hyps, True) + cov_Xs_Xs = (cov_Xs_Xs + cov_Xs_Xs.T) / 2.0 + + mu_Xs = np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), Y_train - prior_mu_train) + prior_mu_test + Sigma_Xs = cov_Xs_Xs - np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), cov_X_Xs) + return mu_Xs, np.expand_dims(np.sqrt(np.maximum(np.diag(Sigma_Xs), 0.0)), axis=1), Sigma_Xs + +@utils_common.validate_types +def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, hyps: dict, + str_cov: str=constants.STR_GP_COV, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + debug: bool=False +) -> constants.TYPING_TUPLE_THREE_ARRAYS: + """ + This function returns posterior mean and posterior standard deviation + functions over `X_test`, computed by Gaussian process regression with + `X_train`, `Y_train`, and `hyps`. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param X_test: inputs. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray + :param hyps: dictionary of hyperparameters for Gaussian process. + :type hyps: dict. + :param str_cov: the name of covariance function. + :type str_cov: str., optional + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function, optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of posterior mean function over `X_test`, posterior + standard deviation function over `X_test`, and posterior covariance + matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)). 
+ :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(X_test, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(str_cov, str) + assert isinstance(debug, bool) + assert callable(prior_mu) or prior_mu is None + assert len(Y_train.shape) == 2 + utils_covariance.check_str_cov('predict_with_hyps', str_cov, + X_train.shape, shape_X2=X_test.shape) + assert X_train.shape[0] == Y_train.shape[0] + assert X_train.shape[1] == X_test.shape[1] + + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, + hyps, str_cov, debug=debug) + mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, + cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, + prior_mu=prior_mu, debug=debug) + + return mu_Xs, sigma_Xs, Sigma_Xs + +@utils_common.validate_types +def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, + str_cov: str=constants.STR_GP_COV, + str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, + fix_noise: float=constants.FIX_GP_NOISE, + debug: bool=False +) -> constants.TYPING_TUPLE_THREE_ARRAYS: + """ + This function returns posterior mean and posterior standard deviation + functions over `X_test`, computed by the Gaussian process regression + optimized with `X_train` and `Y_train`. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param X_test: inputs. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray + :param str_cov: the name of covariance function. + :type str_cov: str., optional + :param str_optimizer_method: the name of optimization method. + :type str_optimizer_method: str., optional + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function, optional + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of posterior mean function over `X_test`, posterior + standard deviation function over `X_test`, and posterior covariance + matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)). 
+ :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(X_test, np.ndarray) + assert isinstance(str_cov, str) + assert isinstance(str_optimizer_method, str) + assert isinstance(fix_noise, bool) + assert isinstance(debug, bool) + assert callable(prior_mu) or prior_mu is None + assert len(Y_train.shape) == 2 + utils_covariance.check_str_cov('predict_with_optimized_kernel', str_cov, + X_train.shape, shape_X2=X_test.shape) + assert X_train.shape[0] == Y_train.shape[0] + assert X_train.shape[1] == X_test.shape[1] + assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP + + time_start = time.time() + + cov_X_X, inv_cov_X_X, hyps = get_optimized_kernel(X_train, Y_train, + prior_mu, str_cov, str_optimizer_method=str_optimizer_method, + fix_noise=fix_noise, debug=debug) + mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, + cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, prior_mu=prior_mu, + debug=debug) + + time_end = time.time() + if debug: + logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + return mu_Xs, sigma_Xs, Sigma_Xs diff --git a/bayeso/gp/gp_scipy.py b/bayeso/gp/gp_likelihood.py similarity index 54% rename from bayeso/gp/gp_scipy.py rename to bayeso/gp/gp_likelihood.py index 56d5ef1..cb2ed2d 100644 --- a/bayeso/gp/gp_scipy.py +++ b/bayeso/gp/gp_likelihood.py @@ -1,22 +1,20 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 29, 2020 +# last updated: March 22, 2021 # -"""It is Gaussian process regression implementations with SciPy.""" +"""It defines Gaussian process regression.""" import time import numpy as np import scipy.linalg -import scipy.optimize from bayeso import covariance from bayeso import constants -from bayeso.utils import utils_gp from bayeso.utils import utils_covariance from bayeso.utils import utils_common from bayeso.utils import utils_logger -logger = utils_logger.get_logger('gp_scipy') +logger = utils_logger.get_logger('gp_likelihood') @utils_common.validate_types @@ -187,136 +185,3 @@ def neg_log_pseudo_l_loocv(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.nd log_pseudo_l_ *= -1.0 return log_pseudo_l_ - -@utils_common.validate_types -def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, - str_modelselection_method: str=constants.STR_MODELSELECTION_METHOD, - fix_noise: bool=constants.FIX_GP_NOISE, - debug: bool=False -) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: - """ - This function computes the kernel matrix optimized by optimization - method specified, its inverse matrix, and the optimized hyperparameters. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType - :param str_cov: the name of covariance function. - :type str_cov: str. - :param str_optimizer_method: the name of optimization method. - :type str_optimizer_method: str., optional - :param str_modelselection_method: the name of model selection method. - :type str_modelselection_method: str., optional - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param debug: flag for printing log messages. 
- :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, kernel matrix - inverse, and dictionary of hyperparameters. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) - - :raises: AssertionError, ValueError - - """ - - # TODO: check to input same fix_noise to convert_hyps and restore_hyps - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) - assert isinstance(str_optimizer_method, str) - assert isinstance(str_modelselection_method, str) - assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] - utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) - assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP - assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD - # TODO: fix this. - use_gradient = bool(str_optimizer_method != 'Nelder-Mead') - - time_start = time.time() - - if debug: - logger.debug('str_optimizer_method: %s', str_optimizer_method) - logger.debug('str_modelselection_method: %s', str_modelselection_method) - - prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) - if str_cov in constants.ALLOWED_GP_COV_BASE: - num_dim = X_train.shape[1] - elif str_cov in constants.ALLOWED_GP_COV_SET: - num_dim = X_train.shape[2] - use_gradient = False - - if str_modelselection_method == 'ml': - neg_log_ml_ = lambda hyps: neg_log_ml(X_train, Y_train, hyps, str_cov, - prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, - debug=debug) - elif str_modelselection_method == 'loocv': - neg_log_ml_ = lambda hyps: neg_log_pseudo_l_loocv(X_train, Y_train, - hyps, str_cov, prior_mu_train, fix_noise=fix_noise, debug=debug) - use_gradient = False - else: # pragma: no cover - raise ValueError('get_optimized_kernel: missing conditions for str_modelselection_method.') - - hyps_converted = utils_covariance.convert_hyps( - str_cov, - utils_covariance.get_hyps(str_cov, num_dim), - fix_noise=fix_noise, - ) - - if str_optimizer_method in ['BFGS', 'SLSQP']: - result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, - method=str_optimizer_method, jac=use_gradient, options={'disp': False}) - - if debug: - logger.debug('scipy message: %s', result_optimized.message) - - result_optimized = result_optimized.x - elif str_optimizer_method in ['L-BFGS-B', 'SLSQP-Bounded']: - if str_optimizer_method == 'SLSQP-Bounded': - str_optimizer_method = 'SLSQP' - - bounds = utils_covariance.get_range_hyps(str_cov, num_dim, fix_noise=fix_noise) - result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, - method=str_optimizer_method, bounds=bounds, jac=use_gradient, - options={'disp': False}) - - if debug: - logger.debug('scipy message: %s', result_optimized.message) - - result_optimized = result_optimized.x - elif str_optimizer_method in ['Nelder-Mead']: - result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, - method=str_optimizer_method, options={'disp': False}) - - if debug: - logger.debug('scipy message: %s', result_optimized.message) - - result_optimized = result_optimized.x - # TODO: Fill this conditions - elif str_optimizer_method == 'DIRECT': # pragma: no cover - raise NotImplementedError('get_optimized_kernel: allowed str_optimizer_method,\ - but it is not implemented.') - else: # pragma: no cover - raise ValueError('get_optimized_kernel: missing conditions for 
str_optimizer_method') - - hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise) - - hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim) - cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, - hyps, str_cov, fix_noise=fix_noise, debug=debug) - - time_end = time.time() - - if debug: - logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) - return cov_X_X, inv_cov_X_X, hyps diff --git a/tests/common/test_gp.py b/tests/common/test_gp.py index b0486d8..5a15dd8 100644 --- a/tests/common/test_gp.py +++ b/tests/common/test_gp.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: March 22, 2021 # """test_gp""" @@ -10,6 +10,7 @@ from bayeso import constants from bayeso.gp import gp as package_target +from bayeso.gp import gp_kernel from bayeso.utils import utils_covariance @@ -51,93 +52,6 @@ def test_sample_functions(): assert functions.shape[1] == num_points assert functions.shape[0] == num_samples -def test_get_optimized_kernel_typing(): - annos = package_target.get_optimized_kernel.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] - assert annos['str_cov'] == str - assert annos['str_optimizer_method'] == str - assert annos['str_modelselection_method'] == str - assert annos['fix_noise'] == bool - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] - -def test_get_optimized_kernel(): - np.random.seed(42) - dim_X = 3 - num_X = 10 - num_instances = 5 - X = np.random.randn(num_X, dim_X) - X_set = np.random.randn(num_X, num_instances, dim_X) - Y = np.random.randn(num_X, 1) - prior_mu = None - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, 1, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, 1, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(1, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') - with pytest.raises(ValueError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) - - # INFO: tests for set inputs - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') - with 
pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv') - print(hyps) - def test_predict_with_cov_typing(): annos = package_target.predict_with_cov.__annotations__ @@ -161,7 +75,7 @@ def test_predict_with_cov(): Y = np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu='abc') @@ -217,7 +131,7 @@ def test_predict_with_hyps(): Y = np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu='abc') diff --git a/tests/common/test_gp_kernel.py b/tests/common/test_gp_kernel.py new file mode 100644 index 0000000..918dcfd --- /dev/null +++ b/tests/common/test_gp_kernel.py @@ -0,0 +1,103 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: March 22, 2021 +# +"""test_gp_kernel""" + +import typing +import pytest +import numpy as np + +from bayeso import constants +from bayeso.gp import gp_kernel as package_target +from bayeso.utils import utils_covariance + + +TEST_EPSILON = 1e-7 + +def test_get_optimized_kernel_typing(): + annos = package_target.get_optimized_kernel.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['str_cov'] == str + assert annos['str_optimizer_method'] == str + assert 
annos['str_modelselection_method'] == str + assert annos['fix_noise'] == bool + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] + +def test_get_optimized_kernel(): + np.random.seed(42) + dim_X = 3 + num_X = 10 + num_instances = 5 + X = np.random.randn(num_X, dim_X) + X_set = np.random.randn(num_X, num_instances, dim_X) + Y = np.random.randn(num_X, 1) + prior_mu = None + + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, 1, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + with pytest.raises(ValueError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + + # INFO: tests for set inputs + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = 
package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv') + print(hyps) diff --git a/tests/common/test_gp_scipy.py b/tests/common/test_gp_likelihood.py similarity index 59% rename from tests/common/test_gp_scipy.py rename to tests/common/test_gp_likelihood.py index 8e5107d..41da9e5 100644 --- a/tests/common/test_gp_scipy.py +++ b/tests/common/test_gp_likelihood.py @@ -1,19 +1,19 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: September 24, 2020 +# last updated: March 22, 2021 # -"""test_gp_scipy""" +"""test_gp_likelihood""" import typing import pytest import numpy as np from bayeso import constants -from bayeso.gp import gp_scipy as package_target +from bayeso.gp import gp_likelihood as package_target from bayeso.utils import utils_covariance -TEST_EPSILON = 1e-7 +TEST_EPSILON = 1e-7 def test_neg_log_ml_typing(): annos = package_target.neg_log_ml.__annotations__ @@ -143,88 +143,3 @@ def test_neg_log_pseudo_l_loocv(): print(neg_log_pseudo_l_) truth_log_pseudo_l_ = 21.916822991658695 assert np.abs(neg_log_pseudo_l_ - truth_log_pseudo_l_) < TEST_EPSILON - -def test_get_optimized_kernel_typing(): - annos = package_target.get_optimized_kernel.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] - assert annos['str_cov'] == str - assert annos['str_optimizer_method'] == str - assert annos['str_modelselection_method'] == str - assert annos['fix_noise'] == bool - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] - -def test_get_optimized_kernel(): - np.random.seed(42) - dim_X = 3 - num_X = 10 - num_instances = 5 - X = np.random.randn(num_X, dim_X) - X_set = np.random.randn(num_X, num_instances, dim_X) - Y = np.random.randn(num_X, 1) - prior_mu = None - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, 1, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, 1, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(1, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') - with pytest.raises(ValueError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='abc') - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 
'se', fix_noise=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) - - # INFO: tests for set inputs - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='BFGS', debug=True) - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B', debug=True) - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead', debug=True) - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP', debug=True) - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP-Bounded', debug=True) - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv', debug=True) - print(hyps) diff --git a/tests/common/test_import.py b/tests/common/test_import.py index f6c5d4c..e2d4478 100644 --- a/tests/common/test_import.py +++ b/tests/common/test_import.py @@ -25,9 +25,6 @@ def test_import_gp(): def test_import_gp_gp(): import bayeso.gp.gp -def test_import_gp_gp_scipy(): - import bayeso.gp.gp_scipy - def test_import_tp(): import bayeso.tp From 2fabfb200e6733e6b87fc18c727d2e57d6f9f9ec Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Mon, 22 Mar 2021 18:46:25 +0900 Subject: [PATCH 22/37] Split tp and update docs --- bayeso/tp/tp.py | 211 +----------------- bayeso/tp/tp_kernel.py | 119 ++++++++++ bayeso/tp/tp_likelihood.py | 121 ++++++++++ docs/python_api/bayeso.gp.gp_gpytorch.rst | 8 - docs/python_api/bayeso.gp.gp_kernel.rst | 8 + ...orflow.rst => bayeso.gp.gp_likelihood.rst} | 4 +- docs/python_api/bayeso.gp.gp_scipy.rst | 8 - docs/python_api/bayeso.gp.rst | 5 +- docs/python_api/bayeso.tp.rst | 2 + docs/python_api/bayeso.tp.tp_kernel.rst | 8 + docs/python_api/bayeso.tp.tp_likelihood.rst | 8 + tests/common/test_import.py | 12 + tests/common/test_tp.py | 151 +------------ tests/common/test_tp_kernel.py | 91 ++++++++ tests/common/test_tp_likelihood.py | 85 +++++++ 15 files changed, 465 insertions(+), 376 deletions(-) create mode 100644 bayeso/tp/tp_kernel.py create mode 100644 bayeso/tp/tp_likelihood.py delete mode 100644 docs/python_api/bayeso.gp.gp_gpytorch.rst create mode 100644 docs/python_api/bayeso.gp.gp_kernel.rst rename docs/python_api/{bayeso.gp.gp_tensorflow.rst => bayeso.gp.gp_likelihood.rst} (60%) delete mode 100644 docs/python_api/bayeso.gp.gp_scipy.rst create mode 100644 
docs/python_api/bayeso.tp.tp_kernel.rst create mode 100644 docs/python_api/bayeso.tp.tp_likelihood.rst create mode 100644 tests/common/test_tp_kernel.py create mode 100644 tests/common/test_tp_likelihood.py diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index c475de5..d09e789 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -1,18 +1,16 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 30, 2020 +# last updated: March 22, 2021 # """It defines Student-:math:`t` process regression.""" import time import numpy as np import scipy.stats -import scipy.linalg -import scipy.optimize -import scipy.special from bayeso import covariance from bayeso import constants +from bayeso.tp import tp_kernel from bayeso.utils import utils_gp from bayeso.utils import utils_covariance from bayeso.utils import utils_common @@ -21,110 +19,6 @@ logger = utils_logger.get_logger('tp') -@utils_common.validate_types -def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, - str_cov: str, prior_mu_train: np.ndarray, - fix_noise: bool=constants.FIX_GP_NOISE, - use_gradient: bool=True, - debug: bool=False -) -> constants.TYPING_UNION_FLOAT_FA: - """ - This function computes a negative log marginal likelihood. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param hyps: hyperparameters for Gaussian process. Shape: (h, ). - :type hyps: numpy.ndarray - :param str_cov: the name of covariance function. - :type str_cov: str. - :param prior_mu_train: the prior values computed by get_prior_mu(). Shape: (n, 1). - :type prior_mu_train: numpy.ndarray - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param use_gradient: flag for computing and returning gradients of - negative log marginal likelihood. - :type use_gradient: bool., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: negative log marginal likelihood, or (negative log marginal - likelihood, gradients of the likelihood). - :rtype: float, or tuple of (float, np.ndarray) - - :raises: AssertionError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(hyps, np.ndarray) - assert isinstance(str_cov, str) - assert isinstance(prior_mu_train, np.ndarray) - assert isinstance(fix_noise, bool) - assert isinstance(use_gradient, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert len(prior_mu_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] - utils_covariance.check_str_cov('neg_log_ml', str_cov, X_train.shape) - - num_X = float(X_train.shape[0]) - hyps = utils_covariance.restore_hyps(str_cov, hyps, - fix_noise=fix_noise, use_gp=False) - new_Y_train = Y_train - prior_mu_train - nu = hyps['dof'] - - cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X_train, - hyps, str_cov, fix_noise=fix_noise, use_gradient=use_gradient, - debug=debug) - - alpha = np.dot(inv_cov_X_X, new_Y_train) - beta = np.squeeze(np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train)) - - first_term = -0.5 * num_X * np.log((nu - 2.0) * np.pi) - sign_second_term, second_term = np.linalg.slogdet(cov_X_X) - # TODO: let me think. 
- if sign_second_term <= 0: # pragma: no cover - second_term = 0.0 - second_term = -0.5 * second_term - - third_term = np.log(scipy.special.gamma((nu + num_X) / 2.0) / scipy.special.gamma(nu / 2.0)) - fourth_term = -0.5 * (nu + num_X) * np.log(1.0 + beta / (nu - 2.0)) - - log_ml_ = np.squeeze(first_term + second_term + third_term + fourth_term) - log_ml_ /= num_X - - if use_gradient: - assert grad_cov_X_X is not None - grad_log_ml_ = np.zeros(grad_cov_X_X.shape[2] + 1) - - first_term_grad = ((nu + num_X) / (nu + beta - 2.0) * np.dot(alpha, alpha.T) - inv_cov_X_X) - nu_grad = -num_X / (2.0 * (nu - 2.0)) + scipy.special.digamma((nu + num_X) / 2.0) - scipy.special.digamma(nu / 2.0) - 0.5 * np.log(1.0 + beta / (nu - 2.0)) + (nu + num_X) * beta / (2.0 * (nu - 2.0)**2 + 2.0 * beta * (nu - 2.0)) - - if fix_noise: - grad_log_ml_[0] = nu_grad - else: - grad_log_ml_[1] = nu_grad - - for ind in range(0, grad_cov_X_X.shape[2]): - cur_grad = 0.5 * np.trace(np.dot(first_term_grad, grad_cov_X_X[:, :, ind])) - if fix_noise: - grad_log_ml_[ind + 1] = cur_grad - else: - if ind == 0: - cur_ind = 0 - else: - cur_ind = ind + 1 - - grad_log_ml_[cur_ind] = cur_grad - - if use_gradient: - return -1.0 * log_ml_, -1.0 * grad_log_ml_ / num_X - - return -1.0 * log_ml_ - @utils_common.validate_types def sample_functions(nu: float, mu: np.ndarray, Sigma: np.ndarray, num_samples: int=1 @@ -168,105 +62,6 @@ def sample_functions(nu: float, mu: np.ndarray, Sigma: np.ndarray, return samples -@utils_common.validate_types -def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, - str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_TP, - fix_noise: bool=constants.FIX_GP_NOISE, - debug: bool=False -) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: - """ - This function computes the kernel matrix optimized by optimization - method specified, its inverse matrix, and the optimized hyperparameters. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType - :param str_cov: the name of covariance function. - :type str_cov: str. - :param str_optimizer_method: the name of optimization method. - :type str_optimizer_method: str., optional - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of kernel matrix over `X_train`, kernel matrix - inverse, and dictionary of hyperparameters. - :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) - - :raises: AssertionError, ValueError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) - assert isinstance(str_optimizer_method, str) - assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] - utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) - assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_TP - - # TODO: Fix it later. 
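# [Editor's note, not part of the patch] use_gradient chooses whether the
# negative log marginal likelihood also returns analytic gradients; it is
# forwarded to scipy.optimize.minimize as jac=use_gradient and switched off
# for the set-based covariances in ALLOWED_GP_COV_SET.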
- use_gradient = True - - time_start = time.time() - - if debug: - logger.debug('str_optimizer_method: %s', str_optimizer_method) - - prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) - if str_cov in constants.ALLOWED_GP_COV_BASE: - num_dim = X_train.shape[1] - elif str_cov in constants.ALLOWED_GP_COV_SET: - num_dim = X_train.shape[2] - use_gradient = False - - neg_log_ml_ = lambda hyps: neg_log_ml(X_train, Y_train, hyps, str_cov, - prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, - debug=debug) - - hyps_converted = utils_covariance.convert_hyps( - str_cov, - utils_covariance.get_hyps(str_cov, num_dim, use_gp=False), - fix_noise=fix_noise, - use_gp=False - ) - - if str_optimizer_method in ['L-BFGS-B', 'SLSQP']: - bounds = utils_covariance.get_range_hyps(str_cov, num_dim, - fix_noise=fix_noise, use_gp=False) - result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, - method=str_optimizer_method, bounds=bounds, jac=use_gradient, - options={'disp': False}) - - if debug: - logger.debug('scipy message: %s', result_optimized.message) - - result_optimized = result_optimized.x - else: # pragma: no cover - raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') - - hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise, use_gp=False) - - hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) - cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, - hyps, str_cov, fix_noise=fix_noise, debug=debug) - - time_end = time.time() - - if debug: - logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) - return cov_X_X, inv_cov_X_X, hyps - @utils_common.validate_types def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, @@ -468,7 +263,7 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test time_start = time.time() - cov_X_X, inv_cov_X_X, hyps = get_optimized_kernel(X_train, Y_train, + cov_X_X, inv_cov_X_X, hyps = tp_kernel.get_optimized_kernel(X_train, Y_train, prior_mu, str_cov, str_optimizer_method=str_optimizer_method, fix_noise=fix_noise, debug=debug) nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py new file mode 100644 index 0000000..6176ba2 --- /dev/null +++ b/bayeso/tp/tp_kernel.py @@ -0,0 +1,119 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: March 22, 2021 +# +"""It defines Student-:math:`t` process regression.""" + +import time +import numpy as np +import scipy.optimize + +from bayeso import covariance +from bayeso import constants +from bayeso.tp import tp_likelihood +from bayeso.utils import utils_gp +from bayeso.utils import utils_covariance +from bayeso.utils import utils_common +from bayeso.utils import utils_logger + +logger = utils_logger.get_logger('tp_kernel') + + +@utils_common.validate_types +def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, + prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, + str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_TP, + fix_noise: bool=constants.FIX_GP_NOISE, + debug: bool=False +) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: + """ + This function computes the kernel matrix optimized by optimization + method specified, its inverse matrix, and the optimized 
hyperparameters. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param prior_mu: prior mean function or None. + :type prior_mu: function or NoneType + :param str_cov: the name of covariance function. + :type str_cov: str. + :param str_optimizer_method: the name of optimization method. + :type str_optimizer_method: str., optional + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: a tuple of kernel matrix over `X_train`, kernel matrix + inverse, and dictionary of hyperparameters. + :rtype: tuple of (numpy.ndarray, numpy.ndarray, dict.) + + :raises: AssertionError, ValueError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert callable(prior_mu) or prior_mu is None + assert isinstance(str_cov, str) + assert isinstance(str_optimizer_method, str) + assert isinstance(fix_noise, bool) + assert isinstance(debug, bool) + assert len(Y_train.shape) == 2 + assert X_train.shape[0] == Y_train.shape[0] + utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) + assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_TP + + # TODO: Fix it later. + use_gradient = True + + time_start = time.time() + + if debug: + logger.debug('str_optimizer_method: %s', str_optimizer_method) + + prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) + if str_cov in constants.ALLOWED_GP_COV_BASE: + num_dim = X_train.shape[1] + elif str_cov in constants.ALLOWED_GP_COV_SET: + num_dim = X_train.shape[2] + use_gradient = False + + neg_log_ml_ = lambda hyps: tp_likelihood.neg_log_ml(X_train, Y_train, hyps, str_cov, + prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, + debug=debug) + + hyps_converted = utils_covariance.convert_hyps( + str_cov, + utils_covariance.get_hyps(str_cov, num_dim, use_gp=False), + fix_noise=fix_noise, + use_gp=False + ) + + if str_optimizer_method in ['L-BFGS-B', 'SLSQP']: + bounds = utils_covariance.get_range_hyps(str_cov, num_dim, + fix_noise=fix_noise, use_gp=False) + result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, + method=str_optimizer_method, bounds=bounds, jac=use_gradient, + options={'disp': False}) + + if debug: + logger.debug('scipy message: %s', result_optimized.message) + + result_optimized = result_optimized.x + else: # pragma: no cover + raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') + + hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise, use_gp=False) + + hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) + cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, + hyps, str_cov, fix_noise=fix_noise, debug=debug) + + time_end = time.time() + + if debug: + logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) + logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + return cov_X_X, inv_cov_X_X, hyps diff --git a/bayeso/tp/tp_likelihood.py b/bayeso/tp/tp_likelihood.py new file mode 100644 index 0000000..ef97418 --- /dev/null +++ b/bayeso/tp/tp_likelihood.py @@ -0,0 +1,121 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: March 22, 2021 +# +"""It defines Student-:math:`t` process regression.""" + +import numpy as np +import scipy.special + +from bayeso import covariance 
+from bayeso import constants +from bayeso.utils import utils_covariance +from bayeso.utils import utils_common +from bayeso.utils import utils_logger + +logger = utils_logger.get_logger('tp_likelihood') + + +@utils_common.validate_types +def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, + str_cov: str, prior_mu_train: np.ndarray, + fix_noise: bool=constants.FIX_GP_NOISE, + use_gradient: bool=True, + debug: bool=False +) -> constants.TYPING_UNION_FLOAT_FA: + """ + This function computes a negative log marginal likelihood. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param hyps: hyperparameters for Gaussian process. Shape: (h, ). + :type hyps: numpy.ndarray + :param str_cov: the name of covariance function. + :type str_cov: str. + :param prior_mu_train: the prior values computed by get_prior_mu(). Shape: (n, 1). + :type prior_mu_train: numpy.ndarray + :param fix_noise: flag for fixing a noise. + :type fix_noise: bool., optional + :param use_gradient: flag for computing and returning gradients of + negative log marginal likelihood. + :type use_gradient: bool., optional + :param debug: flag for printing log messages. + :type debug: bool., optional + + :returns: negative log marginal likelihood, or (negative log marginal + likelihood, gradients of the likelihood). + :rtype: float, or tuple of (float, np.ndarray) + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(hyps, np.ndarray) + assert isinstance(str_cov, str) + assert isinstance(prior_mu_train, np.ndarray) + assert isinstance(fix_noise, bool) + assert isinstance(use_gradient, bool) + assert isinstance(debug, bool) + assert len(Y_train.shape) == 2 + assert len(prior_mu_train.shape) == 2 + assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] + utils_covariance.check_str_cov('neg_log_ml', str_cov, X_train.shape) + + num_X = float(X_train.shape[0]) + hyps = utils_covariance.restore_hyps(str_cov, hyps, + fix_noise=fix_noise, use_gp=False) + new_Y_train = Y_train - prior_mu_train + nu = hyps['dof'] + + cov_X_X, inv_cov_X_X, grad_cov_X_X = covariance.get_kernel_inverse(X_train, + hyps, str_cov, fix_noise=fix_noise, use_gradient=use_gradient, + debug=debug) + + alpha = np.dot(inv_cov_X_X, new_Y_train) + beta = np.squeeze(np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train)) + + first_term = -0.5 * num_X * np.log((nu - 2.0) * np.pi) + sign_second_term, second_term = np.linalg.slogdet(cov_X_X) + # TODO: let me think. 
+ if sign_second_term <= 0: # pragma: no cover + second_term = 0.0 + second_term = -0.5 * second_term + + third_term = np.log(scipy.special.gamma((nu + num_X) / 2.0) / scipy.special.gamma(nu / 2.0)) + fourth_term = -0.5 * (nu + num_X) * np.log(1.0 + beta / (nu - 2.0)) + + log_ml_ = np.squeeze(first_term + second_term + third_term + fourth_term) + log_ml_ /= num_X + + if use_gradient: + assert grad_cov_X_X is not None + grad_log_ml_ = np.zeros(grad_cov_X_X.shape[2] + 1) + + first_term_grad = ((nu + num_X) / (nu + beta - 2.0) * np.dot(alpha, alpha.T) - inv_cov_X_X) + nu_grad = -num_X / (2.0 * (nu - 2.0)) + scipy.special.digamma((nu + num_X) / 2.0) - scipy.special.digamma(nu / 2.0) - 0.5 * np.log(1.0 + beta / (nu - 2.0)) + (nu + num_X) * beta / (2.0 * (nu - 2.0)**2 + 2.0 * beta * (nu - 2.0)) + + if fix_noise: + grad_log_ml_[0] = nu_grad + else: + grad_log_ml_[1] = nu_grad + + for ind in range(0, grad_cov_X_X.shape[2]): + cur_grad = 0.5 * np.trace(np.dot(first_term_grad, grad_cov_X_X[:, :, ind])) + if fix_noise: + grad_log_ml_[ind + 1] = cur_grad + else: + if ind == 0: + cur_ind = 0 + else: + cur_ind = ind + 1 + + grad_log_ml_[cur_ind] = cur_grad + + if use_gradient: + return -1.0 * log_ml_, -1.0 * grad_log_ml_ / num_X + + return -1.0 * log_ml_ diff --git a/docs/python_api/bayeso.gp.gp_gpytorch.rst b/docs/python_api/bayeso.gp.gp_gpytorch.rst deleted file mode 100644 index e5d492a..0000000 --- a/docs/python_api/bayeso.gp.gp_gpytorch.rst +++ /dev/null @@ -1,8 +0,0 @@ -bayeso.gp.gp\_gpytorch -====================== - -.. automodule:: bayeso.gp.gp_gpytorch - :members: - :private-members: - :undoc-members: - :show-inheritance: diff --git a/docs/python_api/bayeso.gp.gp_kernel.rst b/docs/python_api/bayeso.gp.gp_kernel.rst new file mode 100644 index 0000000..02e8492 --- /dev/null +++ b/docs/python_api/bayeso.gp.gp_kernel.rst @@ -0,0 +1,8 @@ +bayeso.gp.gp\_kernel +==================== + +.. automodule:: bayeso.gp.gp_kernel + :members: + :private-members: + :undoc-members: + :show-inheritance: diff --git a/docs/python_api/bayeso.gp.gp_tensorflow.rst b/docs/python_api/bayeso.gp.gp_likelihood.rst similarity index 60% rename from docs/python_api/bayeso.gp.gp_tensorflow.rst rename to docs/python_api/bayeso.gp.gp_likelihood.rst index 45a6f11..ca56421 100644 --- a/docs/python_api/bayeso.gp.gp_tensorflow.rst +++ b/docs/python_api/bayeso.gp.gp_likelihood.rst @@ -1,7 +1,7 @@ -bayeso.gp.gp\_tensorflow +bayeso.gp.gp\_likelihood ======================== -.. automodule:: bayeso.gp.gp_tensorflow +.. automodule:: bayeso.gp.gp_likelihood :members: :private-members: :undoc-members: diff --git a/docs/python_api/bayeso.gp.gp_scipy.rst b/docs/python_api/bayeso.gp.gp_scipy.rst deleted file mode 100644 index 63725e9..0000000 --- a/docs/python_api/bayeso.gp.gp_scipy.rst +++ /dev/null @@ -1,8 +0,0 @@ -bayeso.gp.gp\_scipy -=================== - -.. automodule:: bayeso.gp.gp_scipy - :members: - :private-members: - :undoc-members: - :show-inheritance: diff --git a/docs/python_api/bayeso.gp.rst b/docs/python_api/bayeso.gp.rst index a13592c..7924bfa 100644 --- a/docs/python_api/bayeso.gp.rst +++ b/docs/python_api/bayeso.gp.rst @@ -10,6 +10,5 @@ bayeso.gp .. toctree:: bayeso.gp.gp - bayeso.gp.gp_gpytorch - bayeso.gp.gp_scipy - bayeso.gp.gp_tensorflow + bayeso.gp.gp_likelihood + bayeso.gp.gp_kernel diff --git a/docs/python_api/bayeso.tp.rst b/docs/python_api/bayeso.tp.rst index ae31d79..5695c89 100644 --- a/docs/python_api/bayeso.tp.rst +++ b/docs/python_api/bayeso.tp.rst @@ -10,3 +10,5 @@ bayeso.tp .. 
toctree:: bayeso.tp.tp + bayeso.tp.tp_likelihood + bayeso.tp.tp_kernel diff --git a/docs/python_api/bayeso.tp.tp_kernel.rst b/docs/python_api/bayeso.tp.tp_kernel.rst new file mode 100644 index 0000000..79ee1fa --- /dev/null +++ b/docs/python_api/bayeso.tp.tp_kernel.rst @@ -0,0 +1,8 @@ +bayeso.tp.tp\_kernel +==================== + +.. automodule:: bayeso.tp.tp_kernel + :members: + :private-members: + :undoc-members: + :show-inheritance: diff --git a/docs/python_api/bayeso.tp.tp_likelihood.rst b/docs/python_api/bayeso.tp.tp_likelihood.rst new file mode 100644 index 0000000..616a596 --- /dev/null +++ b/docs/python_api/bayeso.tp.tp_likelihood.rst @@ -0,0 +1,8 @@ +bayeso.tp.tp\_likelihood +======================== + +.. automodule:: bayeso.tp.tp_likelihood + :members: + :private-members: + :undoc-members: + :show-inheritance: diff --git a/tests/common/test_import.py b/tests/common/test_import.py index e2d4478..dc6689f 100644 --- a/tests/common/test_import.py +++ b/tests/common/test_import.py @@ -25,12 +25,24 @@ def test_import_gp(): def test_import_gp_gp(): import bayeso.gp.gp +def test_import_gp_gp_kernel(): + import bayeso.gp.gp_kernel + +def test_import_gp_gp_likelihood(): + import bayeso.gp.gp_likelihood + def test_import_tp(): import bayeso.tp def test_import_tp_tp(): import bayeso.tp.tp +def test_import_tp_tp_kernel(): + import bayeso.tp.tp_kernel + +def test_import_tp_tp_likelihood(): + import bayeso.tp.tp_likelihood + def test_import_utils(): import bayeso.utils diff --git a/tests/common/test_tp.py b/tests/common/test_tp.py index fba5095..ca19b7b 100644 --- a/tests/common/test_tp.py +++ b/tests/common/test_tp.py @@ -1,6 +1,6 @@ # # author: Jungtaek Kim (jtkim@postech.ac.kr) -# last updated: December 31, 2020 +# last updated: March 22, 2021 # """test_tp""" @@ -10,80 +10,12 @@ from bayeso import constants from bayeso.tp import tp as package_target +from bayeso.tp import tp_kernel from bayeso.utils import utils_covariance TEST_EPSILON = 1e-7 -def test_neg_log_ml_typing(): - annos = package_target.neg_log_ml.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['Y_train'] == np.ndarray - assert annos['hyps'] == np.ndarray - assert annos['str_cov'] == str - assert annos['prior_mu_train'] == np.ndarray - assert annos['fix_noise'] == bool - assert annos['use_gradient'] == bool - assert annos['debug'] == bool - assert annos['return'] == typing.Union[float, typing.Tuple[float, np.ndarray]] - -def test_neg_log_ml(): - dim_X = 3 - str_cov = 'se' - X = np.reshape(np.arange(0, 9), (3, dim_X)) - Y = np.expand_dims(np.arange(3, 10, 3), axis=1) - fix_noise = False - use_gp = False - - dict_hyps = utils_covariance.get_hyps(str_cov, dim_X, use_gp=use_gp) - arr_hyps = utils_covariance.convert_hyps(str_cov, dict_hyps, fix_noise=fix_noise, use_gp=use_gp) - prior_mu_X = np.zeros((3, 1)) - - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, Y, dict_hyps, str_cov, prior_mu_X) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, Y, arr_hyps, 1, prior_mu_X) - with pytest.raises(ValueError) as error: - package_target.neg_log_ml(X, Y, arr_hyps, 'abc', prior_mu_X) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.arange(0, 3)) - with 
pytest.raises(AssertionError) as error: - package_target.neg_log_ml(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) - with pytest.raises(AssertionError) as error: - package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) - - neg_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False) - print(neg_log_ml_) - truth_log_ml_ = 5.634155417555853 - assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON - - neg_log_ml_, neg_grad_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True) - print(neg_log_ml_) - print(neg_grad_log_ml_) - - truth_log_ml_ = 5.634155417555853 - truth_grad_log_ml_ = np.array([ - -1.60446383e-02, - 1.75087448e-01, - -1.60448396e+00, - -5.50871167e-05, - -5.50871167e-05, - -5.50871167e-05, - ]) - assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON - assert np.all(np.abs(neg_grad_log_ml_ - truth_grad_log_ml_) < TEST_EPSILON) - def test_sample_functions_typing(): annos = package_target.sample_functions.__annotations__ @@ -130,81 +62,6 @@ def test_sample_functions(): assert functions.shape[1] == num_points assert functions.shape[0] == num_samples -def test_get_optimized_kernel_typing(): - annos = package_target.get_optimized_kernel.__annotations__ - - assert annos['X_train'] == np.ndarray - assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] - assert annos['str_cov'] == str - assert annos['str_optimizer_method'] == str - assert annos['fix_noise'] == bool - assert annos['debug'] == bool - assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] - -def test_get_optimized_kernel(): - np.random.seed(42) - dim_X = 3 - num_X = 10 - num_instances = 5 - X = np.random.randn(num_X, dim_X) - X_set = np.random.randn(num_X, num_instances, dim_X) - Y = np.random.randn(num_X, 1) - prior_mu = None - - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, 1, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, 1, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(1, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') - with pytest.raises(ValueError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) - with pytest.raises(AssertionError) as error: 
- package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) - - # INFO: tests for set inputs - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') - with pytest.raises(AssertionError) as error: - package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32') - print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') - print(hyps) - - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP') - print(hyps) - -# cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') -# print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') - print(hyps) - def test_predict_with_cov_typing(): annos = package_target.predict_with_cov.__annotations__ @@ -228,7 +85,7 @@ def test_predict_with_cov(): Y = np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = tp_kernel.get_optimized_kernel(X, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: package_target.predict_with_cov(X, Y, X_test, cov_X_X, inv_cov_X_X, hyps, str_cov='se', prior_mu='abc') @@ -290,7 +147,7 @@ def test_predict_with_hyps(): Y = np.random.randn(num_X, 1) X_test = np.random.randn(num_X_test, dim_X) prior_mu = None - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + cov_X_X, inv_cov_X_X, hyps = tp_kernel.get_optimized_kernel(X, Y, prior_mu, 'se') with pytest.raises(AssertionError) as error: package_target.predict_with_hyps(X, Y, X_test, hyps, str_cov='se', prior_mu='abc') diff --git a/tests/common/test_tp_kernel.py b/tests/common/test_tp_kernel.py new file mode 100644 index 0000000..6c86035 --- /dev/null +++ b/tests/common/test_tp_kernel.py @@ -0,0 +1,91 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: March 22, 2021 +# +"""test_tp_kernel""" + +import typing +import pytest +import numpy as np + +from bayeso import constants +from bayeso.tp import tp_kernel as package_target +from bayeso.utils import utils_covariance + + +TEST_EPSILON = 1e-7 + +def test_get_optimized_kernel_typing(): + annos = package_target.get_optimized_kernel.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['str_cov'] == str + assert annos['str_optimizer_method'] == str + assert annos['fix_noise'] == bool + assert annos['debug'] == bool + assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] + +def test_get_optimized_kernel(): + np.random.seed(42) + dim_X 
= 3 + num_X = 10 + num_instances = 5 + X = np.random.randn(num_X, dim_X) + X_set = np.random.randn(num_X, num_instances, dim_X) + Y = np.random.randn(num_X, 1) + prior_mu = None + + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, 1, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, 1, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(1, Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(np.ones(num_X), Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, np.ones(num_X), prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(np.ones((50, 3)), Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, np.ones((50, 1)), prior_mu, 'se') + with pytest.raises(ValueError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'abc') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'se', debug=1) + + # INFO: tests for set inputs + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X, Y, prior_mu, 'set_se') + with pytest.raises(AssertionError) as error: + package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', debug=1) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'eq') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern32') + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'matern52') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') + print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP') + print(hyps) + +# cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se') +# print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_optimizer_method='L-BFGS-B') + print(hyps) diff --git a/tests/common/test_tp_likelihood.py b/tests/common/test_tp_likelihood.py new file mode 100644 index 0000000..924f14d --- /dev/null +++ b/tests/common/test_tp_likelihood.py @@ -0,0 +1,85 @@ +# +# author: Jungtaek Kim (jtkim@postech.ac.kr) +# last updated: March 22, 2021 +# +"""test_tp_likelihood""" + +import typing +import pytest +import numpy as np + +from bayeso import constants +from bayeso.tp import tp_likelihood as package_target +from bayeso.utils import utils_covariance + + +TEST_EPSILON = 1e-7 + +def test_neg_log_ml_typing(): + annos = package_target.neg_log_ml.__annotations__ + + assert annos['X_train'] == np.ndarray + assert 
annos['Y_train'] == np.ndarray + assert annos['hyps'] == np.ndarray + assert annos['str_cov'] == str + assert annos['prior_mu_train'] == np.ndarray + assert annos['fix_noise'] == bool + assert annos['use_gradient'] == bool + assert annos['debug'] == bool + assert annos['return'] == typing.Union[float, typing.Tuple[float, np.ndarray]] + +def test_neg_log_ml(): + dim_X = 3 + str_cov = 'se' + X = np.reshape(np.arange(0, 9), (3, dim_X)) + Y = np.expand_dims(np.arange(3, 10, 3), axis=1) + fix_noise = False + use_gp = False + + dict_hyps = utils_covariance.get_hyps(str_cov, dim_X, use_gp=use_gp) + arr_hyps = utils_covariance.convert_hyps(str_cov, dict_hyps, fix_noise=fix_noise, use_gp=use_gp) + prior_mu_X = np.zeros((3, 1)) + + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(np.arange(0, 3), Y, arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, np.arange(0, 3), arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, dict_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, 1, prior_mu_X) + with pytest.raises(ValueError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, 'abc', prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.arange(0, 3)) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(np.reshape(np.arange(0, 12), (4, dim_X)), Y, arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, np.expand_dims(np.arange(0, 4), axis=1), arr_hyps, str_cov, prior_mu_X) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, np.expand_dims(np.arange(0, 4), axis=1)) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=1) + with pytest.raises(AssertionError) as error: + package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, debug=1) + + neg_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=False) + print(neg_log_ml_) + truth_log_ml_ = 5.634155417555853 + assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON + + neg_log_ml_, neg_grad_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=fix_noise, use_gradient=True) + print(neg_log_ml_) + print(neg_grad_log_ml_) + + truth_log_ml_ = 5.634155417555853 + truth_grad_log_ml_ = np.array([ + -1.60446383e-02, + 1.75087448e-01, + -1.60448396e+00, + -5.50871167e-05, + -5.50871167e-05, + -5.50871167e-05, + ]) + assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON + assert np.all(np.abs(neg_grad_log_ml_ - truth_grad_log_ml_) < TEST_EPSILON) From e55a4c45636bb42742280b52052fa785aec5c7a8 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 23 Mar 2021 10:11:55 +0900 Subject: [PATCH 23/37] Pass pylint tests more --- .pylintrc | 2 +- bayeso/constants.py | 19 +++- bayeso/covariance.py | 68 ++++++----- bayeso/gp/gp_kernel.py | 192 -------------------------------- bayeso/gp/gp_likelihood.py | 1 - bayeso/tp/tp.py | 3 +- bayeso/tp/tp_kernel.py | 3 +- bayeso/tp/tp_likelihood.py | 8 +- bayeso/utils/utils_bo.py | 3 +- tests/common/test_covariance.py | 32 ++++-- 10 files changed, 90 insertions(+), 241 deletions(-) diff --git a/.pylintrc b/.pylintrc index f5ea5c2..c566f3d 100644 --- a/.pylintrc +++ b/.pylintrc @@ -555,7 +555,7 
@@ max-attributes=15 max-bool-expr=5 # Maximum number of branch for function / method body. -max-branches=15 +max-branches=16 # Maximum number of locals for function / method body. max-locals=35 diff --git a/bayeso/constants.py b/bayeso/constants.py index 877ecbd..7699e83 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -3,7 +3,7 @@ # last updated: March 22, 2021 # """This file declares various default constants. -If you would like to see the details, check out +If you would like to see the details, check out the Python script in the repository directly.""" import typing @@ -44,7 +44,14 @@ TIME_PAUSE = 2.0 RANGE_SHADE = 1.96 -ALLOWED_OPTIMIZER_METHOD_GP = ['BFGS', 'L-BFGS-B', 'Nelder-Mead', 'DIRECT', 'SLSQP', 'SLSQP-Bounded'] +ALLOWED_OPTIMIZER_METHOD_GP = [ + 'BFGS', + 'L-BFGS-B', + 'Nelder-Mead', + 'DIRECT', + 'SLSQP', + 'SLSQP-Bounded', +] ALLOWED_OPTIMIZER_METHOD_TP = ['L-BFGS-B', 'SLSQP'] ALLOWED_OPTIMIZER_METHOD_BO = ['L-BFGS-B', 'DIRECT', 'CMA-ES'] # INFO: Do not use _ (underscore) in base str_cov. @@ -60,7 +67,7 @@ KEYS_INFO_BENCHMARK = ['dim_fun', 'bounds', 'global_minimum_X', 'global_minimum_y'] -COLORS = [ +COLORS = np.array([ 'red', 'green', 'blue', @@ -76,9 +83,9 @@ 'rosybrown', 'darkkhaki', 'darkslategray', -] +]) -MARKERS = [ +MARKERS = np.array([ '.', 'x', '*', @@ -94,7 +101,7 @@ '1', '2', '3', -] +]) TYPE_NONE = type(None) TYPING_TUPLE_DICT_BOOL = typing.Tuple[dict, bool] diff --git a/bayeso/covariance.py b/bayeso/covariance.py index fddaabe..c628890 100644 --- a/bayeso/covariance.py +++ b/bayeso/covariance.py @@ -14,18 +14,14 @@ @utils_common.validate_types -def choose_fun_cov(str_cov: str, choose_grad: bool) -> callable: +def choose_fun_cov(str_cov: str) -> callable: """ - It is for choosing a covariance function or a function for computing - gradients of covariance function. + It is for choosing a covariance function. :param str_cov: the name of covariance function. :type str_cov: str. - :param choose_grad: flag for returning a function for the gradients - :type choose_grad: bool. - :returns: covariance function, or function for computing gradients of - covariance function. + :returns: covariance function. :rtype: function :raises: AssertionError @@ -33,26 +29,44 @@ def choose_fun_cov(str_cov: str, choose_grad: bool) -> callable: """ assert isinstance(str_cov, str) - assert isinstance(choose_grad, bool) if str_cov in ('eq', 'se'): - if choose_grad: - fun_cov = grad_cov_se - else: - fun_cov = cov_se + fun_cov = cov_se elif str_cov == 'matern32': - if choose_grad: - fun_cov = grad_cov_matern32 - else: - fun_cov = cov_matern32 + fun_cov = cov_matern32 elif str_cov == 'matern52': - if choose_grad: - fun_cov = grad_cov_matern52 - else: - fun_cov = cov_matern52 + fun_cov = cov_matern52 else: - raise NotImplementedError('choose_fun_cov: allowed str_cov and \ - choose_grad conditions, but it is not implemented.') + raise NotImplementedError('choose_fun_cov: allowed str_cov condition,\ + but it is not implemented.') + return fun_cov + +@utils_common.validate_types +def choose_fun_grad_cov(str_cov: str) -> callable: + """ + It is for choosing a function for computing gradients of covariance function. + + :param str_cov: the name of covariance function. + :type str_cov: str. + + :returns: function for computing gradients of covariance function. 
+ :rtype: function + + :raises: AssertionError + + """ + + assert isinstance(str_cov, str) + + if str_cov in ('eq', 'se'): + fun_cov = grad_cov_se + elif str_cov == 'matern32': + fun_cov = grad_cov_matern32 + elif str_cov == 'matern52': + fun_cov = grad_cov_matern52 + else: + raise NotImplementedError('choose_fun_grad_cov: allowed str_cov condition,\ + but it is not implemented.') return fun_cov @utils_common.validate_types @@ -496,7 +510,7 @@ def cov_set(str_cov: str, X: np.ndarray, Xp: np.ndarray, num_X = X.shape[0] num_Xp = Xp.shape[0] - fun_cov = choose_fun_cov(str_cov, False) + fun_cov = choose_fun_cov(str_cov) cov_X_Xp = fun_cov(X, Xp, lengthscales, signal) cov_X_Xp = np.sum(cov_X_Xp) @@ -559,16 +573,20 @@ def cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, same_X_Xp: if not is_valid: raise ValueError('cov_main: invalid hyperparameters.') - fun_cov = choose_fun_cov(str_cov, False) + fun_cov = choose_fun_cov(str_cov) cov_X_Xp += fun_cov(X, Xp, hyps['lengthscales'], hyps['signal']) + assert cov_X_Xp.shape == (num_X, num_Xp) elif str_cov in constants.ALLOWED_GP_COV_SET: list_str_cov = str_cov.split('_') str_cov = list_str_cov[1] + assert len(X.shape) == 3 assert len(Xp.shape) == 3 + dim_X = X.shape[2] dim_Xp = Xp.shape[2] + assert dim_X == dim_Xp hyps, is_valid = utils_covariance.validate_hyps_dict(hyps, str_cov, dim_X) @@ -647,7 +665,7 @@ def grad_cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, fix_n cov_X_Xp = cov_main(str_cov, X, Xp, hyps, same_X_Xp, jitter=jitter) - fun_grad_cov = choose_fun_cov(str_cov, True) + fun_grad_cov = choose_fun_grad_cov(str_cov) grad_cov_X_Xp = fun_grad_cov(cov_X_Xp, X, Xp, hyps, num_hyps, fix_noise) return grad_cov_X_Xp diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index c00f85e..b9c52f3 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -151,195 +151,3 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) return cov_X_X, inv_cov_X_X, hyps - -@utils_common.validate_types -def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, - cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, - str_cov: str=constants.STR_GP_COV, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, - debug: bool=False -) -> constants.TYPING_TUPLE_THREE_ARRAYS: - """ - This function returns posterior mean and posterior standard deviation - functions over `X_test`, computed by Gaussian process regression with - `X_train`, `Y_train`, `cov_X_X`, `inv_cov_X_X`, and `hyps`. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param X_test: inputs. Shape: (l, d) or (l, m, d). - :type X_test: numpy.ndarray - :param cov_X_X: kernel matrix over `X_train`. Shape: (n, n). - :type cov_X_X: numpy.ndarray - :param inv_cov_X_X: kernel matrix inverse over `X_train`. Shape: (n, n). - :type inv_cov_X_X: numpy.ndarray - :param hyps: dictionary of hyperparameters for Gaussian process. - :type hyps: dict. - :param str_cov: the name of covariance function. - :type str_cov: str., optional - :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional - :param debug: flag for printing log messages. 
- :type debug: bool., optional - - :returns: a tuple of posterior mean function over `X_test`, posterior - standard deviation function over `X_test`, and posterior covariance - matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)). - :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) - - :raises: AssertionError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) - assert isinstance(cov_X_X, np.ndarray) - assert isinstance(inv_cov_X_X, np.ndarray) - assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 - assert len(cov_X_X.shape) == 2 - assert len(inv_cov_X_X.shape) == 2 - assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all() - utils_covariance.check_str_cov('predict_with_cov', str_cov, - X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] - - prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) - prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test) - cov_X_Xs = covariance.cov_main(str_cov, X_train, X_test, hyps, False) - cov_Xs_Xs = covariance.cov_main(str_cov, X_test, X_test, hyps, True) - cov_Xs_Xs = (cov_Xs_Xs + cov_Xs_Xs.T) / 2.0 - - mu_Xs = np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), Y_train - prior_mu_train) + prior_mu_test - Sigma_Xs = cov_Xs_Xs - np.dot(np.dot(cov_X_Xs.T, inv_cov_X_X), cov_X_Xs) - return mu_Xs, np.expand_dims(np.sqrt(np.maximum(np.diag(Sigma_Xs), 0.0)), axis=1), Sigma_Xs - -@utils_common.validate_types -def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, hyps: dict, - str_cov: str=constants.STR_GP_COV, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, - debug: bool=False -) -> constants.TYPING_TUPLE_THREE_ARRAYS: - """ - This function returns posterior mean and posterior standard deviation - functions over `X_test`, computed by Gaussian process regression with - `X_train`, `Y_train`, and `hyps`. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param X_test: inputs. Shape: (l, d) or (l, m, d). - :type X_test: numpy.ndarray - :param hyps: dictionary of hyperparameters for Gaussian process. - :type hyps: dict. - :param str_cov: the name of covariance function. - :type str_cov: str., optional - :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of posterior mean function over `X_test`, posterior - standard deviation function over `X_test`, and posterior covariance - matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)). 
- :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) - - :raises: AssertionError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) - assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 - utils_covariance.check_str_cov('predict_with_hyps', str_cov, - X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] - - cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, - hyps, str_cov, debug=debug) - mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, - cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, - prior_mu=prior_mu, debug=debug) - - return mu_Xs, sigma_Xs, Sigma_Xs - -@utils_common.validate_types -def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, - str_cov: str=constants.STR_GP_COV, - str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, - prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, - fix_noise: float=constants.FIX_GP_NOISE, - debug: bool=False -) -> constants.TYPING_TUPLE_THREE_ARRAYS: - """ - This function returns posterior mean and posterior standard deviation - functions over `X_test`, computed by the Gaussian process regression - optimized with `X_train` and `Y_train`. - - :param X_train: inputs. Shape: (n, d) or (n, m, d). - :type X_train: numpy.ndarray - :param Y_train: outputs. Shape: (n, 1). - :type Y_train: numpy.ndarray - :param X_test: inputs. Shape: (l, d) or (l, m, d). - :type X_test: numpy.ndarray - :param str_cov: the name of covariance function. - :type str_cov: str., optional - :param str_optimizer_method: the name of optimization method. - :type str_optimizer_method: str., optional - :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional - :param fix_noise: flag for fixing a noise. - :type fix_noise: bool., optional - :param debug: flag for printing log messages. - :type debug: bool., optional - - :returns: a tuple of posterior mean function over `X_test`, posterior - standard deviation function over `X_test`, and posterior covariance - matrix over `X_test`. Shape: ((l, 1), (l, 1), (l, l)). 
- :rtype: tuple of (numpy.ndarray, numpy.ndarray, numpy.ndarray) - - :raises: AssertionError - - """ - - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) - assert isinstance(str_cov, str) - assert isinstance(str_optimizer_method, str) - assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 - utils_covariance.check_str_cov('predict_with_optimized_kernel', str_cov, - X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] - assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP - - time_start = time.time() - - cov_X_X, inv_cov_X_X, hyps = get_optimized_kernel(X_train, Y_train, - prior_mu, str_cov, str_optimizer_method=str_optimizer_method, - fix_noise=fix_noise, debug=debug) - mu_Xs, sigma_Xs, Sigma_Xs = predict_with_cov(X_train, Y_train, X_test, - cov_X_X, inv_cov_X_X, hyps, str_cov=str_cov, prior_mu=prior_mu, - debug=debug) - - time_end = time.time() - if debug: - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) - return mu_Xs, sigma_Xs, Sigma_Xs diff --git a/bayeso/gp/gp_likelihood.py b/bayeso/gp/gp_likelihood.py index cb2ed2d..5269f84 100644 --- a/bayeso/gp/gp_likelihood.py +++ b/bayeso/gp/gp_likelihood.py @@ -4,7 +4,6 @@ # """It defines Gaussian process regression.""" -import time import numpy as np import scipy.linalg diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index d09e789..6a5b582 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -191,7 +191,8 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr assert isinstance(debug, bool) assert callable(prior_mu) or prior_mu is None assert len(Y_train.shape) == 2 - utils_covariance.check_str_cov('predict_with_hyps', str_cov, X_train.shape, shape_X2=X_test.shape) + utils_covariance.check_str_cov('predict_with_hyps', str_cov, X_train.shape, + shape_X2=X_test.shape) assert X_train.shape[0] == Y_train.shape[0] assert X_train.shape[1] == X_test.shape[1] diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py index 6176ba2..3e2e6ef 100644 --- a/bayeso/tp/tp_kernel.py +++ b/bayeso/tp/tp_kernel.py @@ -105,7 +105,8 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, else: # pragma: no cover raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') - hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise, use_gp=False) + hyps = utils_covariance.restore_hyps(str_cov, result_optimized, + fix_noise=fix_noise, use_gp=False) hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, diff --git a/bayeso/tp/tp_likelihood.py b/bayeso/tp/tp_likelihood.py index ef97418..4aa79d1 100644 --- a/bayeso/tp/tp_likelihood.py +++ b/bayeso/tp/tp_likelihood.py @@ -96,7 +96,11 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, grad_log_ml_ = np.zeros(grad_cov_X_X.shape[2] + 1) first_term_grad = ((nu + num_X) / (nu + beta - 2.0) * np.dot(alpha, alpha.T) - inv_cov_X_X) - nu_grad = -num_X / (2.0 * (nu - 2.0)) + scipy.special.digamma((nu + num_X) / 2.0) - scipy.special.digamma(nu / 2.0) - 0.5 * np.log(1.0 + beta / (nu - 2.0)) + (nu + num_X) * beta / (2.0 * (nu - 2.0)**2 + 2.0 * beta * (nu - 2.0)) + nu_grad = -num_X / (2.0 * (nu - 2.0))\ + + scipy.special.digamma((nu + 
num_X) / 2.0)\ + - scipy.special.digamma(nu / 2.0)\ + - 0.5 * np.log(1.0 + beta / (nu - 2.0))\ + + (nu + num_X) * beta / (2.0 * (nu - 2.0)**2 + 2.0 * beta * (nu - 2.0)) if fix_noise: grad_log_ml_[0] = nu_grad @@ -112,7 +116,7 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, cur_ind = 0 else: cur_ind = ind + 1 - + grad_log_ml_[cur_ind] = cur_grad if use_gradient: diff --git a/bayeso/utils/utils_bo.py b/bayeso/utils/utils_bo.py index a847309..6bb5e7a 100644 --- a/bayeso/utils/utils_bo.py +++ b/bayeso/utils/utils_bo.py @@ -56,7 +56,8 @@ def get_best_acquisition_by_evaluation(initials: np.ndarray, fun_objective: call return initial_best @utils_common.validate_types -def get_best_acquisition_by_history(X: np.ndarray, Y: np.ndarray) -> constants.TYPING_TUPLE_ARRAY_FLOAT: +def get_best_acquisition_by_history(X: np.ndarray, Y: np.ndarray +) -> constants.TYPING_TUPLE_ARRAY_FLOAT: """ It returns the best acquisition that has shown minimum result, and its minimum result. diff --git a/tests/common/test_covariance.py b/tests/common/test_covariance.py index d28b3ce..145946d 100644 --- a/tests/common/test_covariance.py +++ b/tests/common/test_covariance.py @@ -18,23 +18,33 @@ def test_choose_fun_cov_typing(): annos = package_target.choose_fun_cov.__annotations__ assert annos['str_cov'] == str - assert annos['choose_grad'] == bool assert annos['return'] == callable def test_choose_fun_cov(): with pytest.raises(AssertionError) as error: - package_target.choose_fun_cov(123, False) + package_target.choose_fun_cov(123) + with pytest.raises(NotImplementedError) as error: + package_target.choose_fun_cov('abc') + + assert package_target.choose_fun_cov('se') == package_target.cov_se + assert package_target.choose_fun_cov('matern32') == package_target.cov_matern32 + assert package_target.choose_fun_cov('matern52') == package_target.cov_matern52 + +def test_choose_fun_grad_cov_typing(): + annos = package_target.choose_fun_grad_cov.__annotations__ + + assert annos['str_cov'] == str + assert annos['return'] == callable + +def test_choose_fun_grad_cov(): with pytest.raises(AssertionError) as error: - package_target.choose_fun_cov('se', 'abc') + package_target.choose_fun_grad_cov(123) with pytest.raises(NotImplementedError) as error: - package_target.choose_fun_cov('abc', False) - - assert package_target.choose_fun_cov('se', False) == package_target.cov_se - assert package_target.choose_fun_cov('matern32', False) == package_target.cov_matern32 - assert package_target.choose_fun_cov('matern52', False) == package_target.cov_matern52 - assert package_target.choose_fun_cov('se', True) == package_target.grad_cov_se - assert package_target.choose_fun_cov('matern32', True) == package_target.grad_cov_matern32 - assert package_target.choose_fun_cov('matern52', True) == package_target.grad_cov_matern52 + package_target.choose_fun_grad_cov('abc') + + assert package_target.choose_fun_grad_cov('se') == package_target.grad_cov_se + assert package_target.choose_fun_grad_cov('matern32') == package_target.grad_cov_matern32 + assert package_target.choose_fun_grad_cov('matern52') == package_target.grad_cov_matern52 def test_get_kernel_inverse_typing(): annos = package_target.get_kernel_inverse.__annotations__ From 18db7b082c4898e7e5d6254727ec8cdcccc28eda Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 23 Mar 2021 11:56:13 +0900 Subject: [PATCH 24/37] Update docstring --- bayeso/bo.py | 2 +- bayeso/covariance.py | 12 ++++++------ bayeso/gp/gp_kernel.py | 3 ++- bayeso/gp/gp_likelihood.py | 3 ++- 
bayeso/tp/tp_kernel.py | 3 ++- bayeso/tp/tp_likelihood.py | 3 ++- 6 files changed, 15 insertions(+), 11 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 1833633..2f966df 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -2,7 +2,7 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 29, 2020 # -"""It defines a class for Bayesian optimization.""" +"""It defines a class of Bayesian optimization.""" import time import numpy as np diff --git a/bayeso/covariance.py b/bayeso/covariance.py index c628890..78d10cf 100644 --- a/bayeso/covariance.py +++ b/bayeso/covariance.py @@ -16,7 +16,7 @@ @utils_common.validate_types def choose_fun_cov(str_cov: str) -> callable: """ - It is for choosing a covariance function. + It chooses a covariance function. :param str_cov: the name of covariance function. :type str_cov: str. @@ -44,7 +44,7 @@ def choose_fun_cov(str_cov: str) -> callable: @utils_common.validate_types def choose_fun_grad_cov(str_cov: str) -> callable: """ - It is for choosing a function for computing gradients of covariance function. + It chooses a function for computing gradients of covariance function. :param str_cov: the name of covariance function. :type str_cov: str. @@ -59,15 +59,15 @@ def choose_fun_grad_cov(str_cov: str) -> callable: assert isinstance(str_cov, str) if str_cov in ('eq', 'se'): - fun_cov = grad_cov_se + fun_grad_cov = grad_cov_se elif str_cov == 'matern32': - fun_cov = grad_cov_matern32 + fun_grad_cov = grad_cov_matern32 elif str_cov == 'matern52': - fun_cov = grad_cov_matern52 + fun_grad_cov = grad_cov_matern52 else: raise NotImplementedError('choose_fun_grad_cov: allowed str_cov condition,\ but it is not implemented.') - return fun_cov + return fun_grad_cov @utils_common.validate_types def get_kernel_inverse(X_train: np.ndarray, hyps: dict, str_cov: str, diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index b9c52f3..dfda27a 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: March 22, 2021 # -"""It defines Gaussian process regression.""" +"""It defines the functions related to kernels for +Gaussian process regression.""" import time import numpy as np diff --git a/bayeso/gp/gp_likelihood.py b/bayeso/gp/gp_likelihood.py index 5269f84..078dda1 100644 --- a/bayeso/gp/gp_likelihood.py +++ b/bayeso/gp/gp_likelihood.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: March 22, 2021 # -"""It defines Gaussian process regression.""" +"""It defines the functions related to likelihood for +Gaussian process regression.""" import numpy as np import scipy.linalg diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py index 3e2e6ef..e0da6f3 100644 --- a/bayeso/tp/tp_kernel.py +++ b/bayeso/tp/tp_kernel.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: March 22, 2021 # -"""It defines Student-:math:`t` process regression.""" +"""It defines the functions related to kernels for +Student-:math:`t` process regression.""" import time import numpy as np diff --git a/bayeso/tp/tp_likelihood.py b/bayeso/tp/tp_likelihood.py index 4aa79d1..d6bc993 100644 --- a/bayeso/tp/tp_likelihood.py +++ b/bayeso/tp/tp_likelihood.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: March 22, 2021 # -"""It defines Student-:math:`t` process regression.""" +"""It defines the functions related to likelihood for +Student-:math:`t` process regression.""" import numpy as np import scipy.special From 
734089bbb2af115fa09aa1c3baf6ec817c3eb662 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 23 Mar 2021 14:44:02 +0900 Subject: [PATCH 25/37] Add python 3.9 and pypy3 --- .travis.yml | 6 ++---- tests/common/test_gp_kernel.py | 7 ++++++- 2 files changed, 8 insertions(+), 5 deletions(-) diff --git a/.travis.yml b/.travis.yml index da8e8aa..df0b72b 100644 --- a/.travis.yml +++ b/.travis.yml @@ -4,12 +4,10 @@ python: - '3.6' - '3.7' - '3.8' + - '3.9' + - 'pypy3' install: - pip install . - - pip install tensorflow - - pip install tensorflow-probability - - pip install torch - - pip install gpytorch script: - pip install coveralls - pip install pytest-timeout diff --git a/tests/common/test_gp_kernel.py b/tests/common/test_gp_kernel.py index 918dcfd..3f6622e 100644 --- a/tests/common/test_gp_kernel.py +++ b/tests/common/test_gp_kernel.py @@ -86,7 +86,12 @@ def test_get_optimized_kernel(): print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B') print(hyps) - cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead') + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='Nelder-Mead', debug=True) + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP', debug=True) + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP-Bounded', debug=True) print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_modelselection_method='loocv') From b6b513a8bea6d2a28720ae9c9d59ad0859f5ffe3 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 23 Mar 2021 14:53:45 +0900 Subject: [PATCH 26/37] Add 3.9 in setup.py and remove pypy3 --- .gitignore | 4 ++-- .travis.yml | 1 - setup.py | 4 +++- 3 files changed, 5 insertions(+), 4 deletions(-) diff --git a/.gitignore b/.gitignore index b930963..364b20e 100644 --- a/.gitignore +++ b/.gitignore @@ -102,9 +102,9 @@ ENV/ # Jungtaek *.swp +*.dat .DS_Store +.pytest_cache/ __MACOSX/ results/ figures/ -.pytest_cache/ -*.dat diff --git a/.travis.yml b/.travis.yml index df0b72b..2218a9f 100644 --- a/.travis.yml +++ b/.travis.yml @@ -5,7 +5,6 @@ python: - '3.7' - '3.8' - '3.9' - - 'pypy3' install: - pip install . 
script: diff --git a/setup.py b/setup.py index 85cfe6f..99845a4 100644 --- a/setup.py +++ b/setup.py @@ -4,7 +4,8 @@ if (sys.version_info.major == 3 and sys.version_info.minor == 6) or\ (sys.version_info.major == 3 and sys.version_info.minor == 7) or\ - (sys.version_info.major == 3 and sys.version_info.minor == 8): + (sys.version_info.major == 3 and sys.version_info.minor == 8) or\ + (sys.version_info.major == 3 and sys.version_info.minor == 9): print('[SETUP] bayeso supports Python {}.{} version in this system.'.format(sys.version_info.major, sys.version_info.minor)) else: sys.exit('[ERROR] bayeso does not support Python {}.{} version in this system.'.format(sys.version_info.major, sys.version_info.minor)) @@ -36,6 +37,7 @@ 'Programming Language :: Python :: 3.6', 'Programming Language :: Python :: 3.7', 'Programming Language :: Python :: 3.8', + 'Programming Language :: Python :: 3.9', 'License :: OSI Approved :: MIT License', 'Topic :: Scientific/Engineering', 'Topic :: Scientific/Engineering :: Artificial Intelligence', From 6733d710e6754b201755e1ad827f1050e62dc2b2 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 23 Mar 2021 15:03:57 +0900 Subject: [PATCH 27/37] Add 3.9 to docs and README.md --- .coveragerc | 2 +- .readthedocs.yml | 2 +- MANIFEST.in | 2 +- README.md | 1 + create_wheels_source.txt | 4 ++-- docs/about/about_bayeso.rst | 1 + requirements-dev.txt | 2 +- requirements-examples.txt | 2 +- 8 files changed, 9 insertions(+), 7 deletions(-) diff --git a/.coveragerc b/.coveragerc index 81e3521..72d9f20 100644 --- a/.coveragerc +++ b/.coveragerc @@ -6,5 +6,5 @@ source = [report] exclude_lines = pragma: no cover - raise NotImplementedError print + raise NotImplementedError diff --git a/.readthedocs.yml b/.readthedocs.yml index 2eb2b9c..1fe043b 100644 --- a/.readthedocs.yml +++ b/.readthedocs.yml @@ -5,8 +5,8 @@ sphinx: configuration: docs/conf.py formats: - - pdf - htmlzip + - pdf python: version: 3.7 diff --git a/MANIFEST.in b/MANIFEST.in index 4c0e142..8d6df54 100644 --- a/MANIFEST.in +++ b/MANIFEST.in @@ -1,4 +1,4 @@ -include LICENSE include CODE_OF_CONDUCT.md +include LICENSE include requirements.txt include requirements-optional.txt diff --git a/README.md b/README.md index 26d22cc..e7dc281 100644 --- a/README.md +++ b/README.md @@ -67,6 +67,7 @@ We test our package in the following versions. * Python 3.6 * Python 3.7 * Python 3.8 +* Python 3.9 ## Contributor * [Jungtaek Kim](http://jungtaek.github.io) (POSTECH) diff --git a/create_wheels_source.txt b/create_wheels_source.txt index b799ee8..8545902 100644 --- a/create_wheels_source.txt +++ b/create_wheels_source.txt @@ -1,11 +1,11 @@ -# Install setuptools wheel first. +# Install setuptools wheel first python2 setup.py sdist bdist_wheel python3 setup.py sdist bdist_wheel # Install twine twine upload dist/* -## (Optional) Upload to Anaconda repository. +## (Optional) Upload to Anaconda repository ~/anaconda3/bin/anaconda upload dist/*.tar.gz # or, just anaconda upload dist/*.tar.gz diff --git a/docs/about/about_bayeso.rst b/docs/about/about_bayeso.rst index bea395c..b107925 100644 --- a/docs/about/about_bayeso.rst +++ b/docs/about/about_bayeso.rst @@ -14,6 +14,7 @@ We test our package in the following versions. 
- Python 3.6 - Python 3.7 - Python 3.8 +- Python 3.9 Related Package for Benchmark Functions ======================================= diff --git a/requirements-dev.txt b/requirements-dev.txt index 1692000..721648a 100644 --- a/requirements-dev.txt +++ b/requirements-dev.txt @@ -1,6 +1,6 @@ pytest -pytest-timeout pytest-benchmark +pytest-timeout coveralls sphinx sphinx_rtd_theme diff --git a/requirements-examples.txt b/requirements-examples.txt index d65559c..c67f162 100644 --- a/requirements-examples.txt +++ b/requirements-examples.txt @@ -1,2 +1,2 @@ -scikit-learn xgboost +scikit-learn From 4dc3b13e3898c0a00b2543a93b46be561c53e7de Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Wed, 31 Mar 2021 17:48:34 +0900 Subject: [PATCH 28/37] Update docstring and typing, add references --- bayeso/bo.py | 6 ++-- bayeso/constants.py | 39 +++++++++++--------- bayeso/covariance.py | 14 ++++---- bayeso/gp/__init__.py | 5 ++- bayeso/gp/gp.py | 6 ++-- bayeso/gp/gp_kernel.py | 14 +++++--- bayeso/tp/__init__.py | 6 +++- bayeso/tp/tp.py | 6 ++-- bayeso/tp/tp_kernel.py | 14 +++++--- bayeso/utils/utils_bo.py | 7 ++-- bayeso/utils/utils_common.py | 2 +- bayeso/utils/utils_covariance.py | 48 ++++++++++++++++--------- bayeso/utils/utils_plotting.py | 52 +++++++++++++-------------- bayeso/wrappers/wrappers_bo.py | 9 +++-- tests/common/test_covariance.py | 4 +-- tests/common/test_gp.py | 6 ++-- tests/common/test_gp_kernel.py | 2 +- tests/common/test_tp.py | 6 ++-- tests/common/test_tp_kernel.py | 2 +- tests/common/test_utils_bo.py | 7 ++-- tests/common/test_utils_covariance.py | 10 ++++-- tests/common/test_utils_gp.py | 2 +- tests/common/test_utils_plotting.py | 16 ++++----- tests/common/test_wrappers_bo.py | 6 ++-- 24 files changed, 167 insertions(+), 122 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 2f966df..1b1b77d 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -61,10 +61,10 @@ class BO: """ def __init__(self, range_X: np.ndarray, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, str_acq: str=constants.STR_BO_ACQ, normalize_Y: bool=constants.NORMALIZE_RESPONSE, - use_ard: bool=True, + use_ard: bool=constants.USE_ARD, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, str_surrogate: str=constants.STR_SURROGATE, str_optimizer_method_gp: str=constants.STR_OPTIMIZER_METHOD_GP, @@ -91,7 +91,7 @@ def __init__(self, range_X: np.ndarray, assert len(range_X.shape) == 2 assert range_X.shape[1] == 2 assert (range_X[:, 0] <= range_X[:, 1]).all() - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV assert str_acq in constants.ALLOWED_BO_ACQ assert str_optimizer_method_gp in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_optimizer_method_bo in constants.ALLOWED_OPTIMIZER_METHOD_BO diff --git a/bayeso/constants.py b/bayeso/constants.py index 7699e83..07cf30a 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -17,7 +17,7 @@ STR_SURROGATE = 'gp' STR_OPTIMIZER_METHOD_GP = 'BFGS' STR_OPTIMIZER_METHOD_TP = 'SLSQP' -STR_GP_COV = 'matern52' +STR_COV = 'matern52' STR_BO_ACQ = 'ei' STR_INITIALIZING_METHOD_BO = 'sobol' STR_OPTIMIZER_METHOD_AO = 'L-BFGS-B' @@ -33,6 +33,7 @@ NORMALIZE_RESPONSE = True +USE_ARD = True GP_NOISE = 1e-2 FIX_GP_NOISE = True BOUND_UPPER_GP_NOISE = np.inf @@ -55,9 +56,9 @@ ALLOWED_OPTIMIZER_METHOD_TP = ['L-BFGS-B', 'SLSQP'] ALLOWED_OPTIMIZER_METHOD_BO = ['L-BFGS-B', 'DIRECT', 'CMA-ES'] # INFO: Do not use _ (underscore) in base str_cov. 
-ALLOWED_GP_COV_BASE = ['eq', 'se', 'matern32', 'matern52'] -ALLOWED_GP_COV_SET = ['set_' + str_cov for str_cov in ALLOWED_GP_COV_BASE] -ALLOWED_GP_COV = ALLOWED_GP_COV_BASE + ALLOWED_GP_COV_SET +ALLOWED_COV_BASE = ['eq', 'se', 'matern32', 'matern52'] +ALLOWED_COV_SET = ['set_' + str_cov for str_cov in ALLOWED_COV_BASE] +ALLOWED_COV = ALLOWED_COV_BASE + ALLOWED_COV_SET ALLOWED_BO_ACQ = ['pi', 'ei', 'ucb', 'aei', 'pure_exploit', 'pure_explore'] ALLOWED_INITIALIZING_METHOD_BO = ['uniform', 'gaussian', 'sobol', 'halton'] ALLOWED_SAMPLING_METHOD = ALLOWED_INITIALIZING_METHOD_BO + ['grid'] @@ -104,23 +105,27 @@ ]) TYPE_NONE = type(None) +TYPE_ARR = np.ndarray + +TYPING_CALLABLE = typing.Callable +TYPING_LIST = typing.List TYPING_TUPLE_DICT_BOOL = typing.Tuple[dict, bool] -TYPING_TUPLE_ARRAY_BOOL = typing.Tuple[np.ndarray, bool] -TYPING_TUPLE_ARRAY_DICT = typing.Tuple[np.ndarray, dict] -TYPING_TUPLE_ARRAY_FLOAT = typing.Tuple[np.ndarray, float] -TYPING_TUPLE_TWO_ARRAYS = typing.Tuple[np.ndarray, np.ndarray] -TYPING_TUPLE_TWO_ARRAYS_DICT = typing.Tuple[np.ndarray, np.ndarray, dict] -TYPING_TUPLE_THREE_ARRAYS = typing.Tuple[np.ndarray, np.ndarray, np.ndarray] -TYPING_TUPLE_FIVE_ARRAYS = typing.Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, np.ndarray] -TYPING_TUPLE_FLOAT_THREE_ARRAYS = typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] -TYPING_TUPLE_FLOAT_ARRAY = typing.Tuple[float, np.ndarray] +TYPING_TUPLE_ARRAY_BOOL = typing.Tuple[TYPE_ARR, bool] +TYPING_TUPLE_ARRAY_DICT = typing.Tuple[TYPE_ARR, dict] +TYPING_TUPLE_ARRAY_FLOAT = typing.Tuple[TYPE_ARR, float] +TYPING_TUPLE_TWO_ARRAYS = typing.Tuple[TYPE_ARR, TYPE_ARR] +TYPING_TUPLE_TWO_ARRAYS_DICT = typing.Tuple[TYPE_ARR, TYPE_ARR, dict] +TYPING_TUPLE_THREE_ARRAYS = typing.Tuple[TYPE_ARR, TYPE_ARR, TYPE_ARR] +TYPING_TUPLE_FIVE_ARRAYS = typing.Tuple[TYPE_ARR, TYPE_ARR, TYPE_ARR, TYPE_ARR, TYPE_ARR] +TYPING_TUPLE_FLOAT_THREE_ARRAYS = typing.Tuple[float, TYPE_ARR, TYPE_ARR, TYPE_ARR] +TYPING_TUPLE_FLOAT_ARRAY = typing.Tuple[float, TYPE_ARR] TYPING_UNION_INT_NONE = typing.Union[int, TYPE_NONE] TYPING_UNION_INT_FLOAT = typing.Union[int, float] TYPING_UNION_FLOAT_NONE = typing.Union[float, TYPE_NONE] -TYPING_UNION_FLOAT_TWO_FLOATS = typing.Union[float, typing.Tuple[float, float]] +TYPING_UNION_FLOAT_TWO_FLOATS = typing.Union[float, TYPING_TUPLE_TWO_ARRAYS] TYPING_UNION_FLOAT_FA = typing.Union[float, TYPING_TUPLE_FLOAT_ARRAY] -TYPING_UNION_ARRAY_NONE = typing.Union[np.ndarray, TYPE_NONE] -TYPING_UNION_ARRAY_FLOAT = typing.Union[np.ndarray, float] -TYPING_UNION_CALLABLE_NONE = typing.Union[callable, TYPE_NONE] +TYPING_UNION_ARRAY_NONE = typing.Union[TYPE_ARR, TYPE_NONE] +TYPING_UNION_ARRAY_FLOAT = typing.Union[TYPE_ARR, float] +TYPING_UNION_CALLABLE_NONE = typing.Union[TYPING_CALLABLE, TYPE_NONE] TYPING_UNION_STR_NONE = typing.Union[str, TYPE_NONE] diff --git a/bayeso/covariance.py b/bayeso/covariance.py index 78d10cf..d49b5aa 100644 --- a/bayeso/covariance.py +++ b/bayeso/covariance.py @@ -14,7 +14,7 @@ @utils_common.validate_types -def choose_fun_cov(str_cov: str) -> callable: +def choose_fun_cov(str_cov: str) -> constants.TYPING_CALLABLE: """ It chooses a covariance function. @@ -42,7 +42,7 @@ def choose_fun_cov(str_cov: str) -> callable: return fun_cov @utils_common.validate_types -def choose_fun_grad_cov(str_cov: str) -> callable: +def choose_fun_grad_cov(str_cov: str) -> constants.TYPING_CALLABLE: """ It chooses a function for computing gradients of covariance function. 
@@ -506,7 +506,7 @@ def cov_set(str_cov: str, X: np.ndarray, Xp: np.ndarray, assert X.shape[1] == Xp.shape[1] == lengthscales.shape[0] else: assert X.shape[1] == Xp.shape[1] - assert str_cov in constants.ALLOWED_GP_COV_BASE + assert str_cov in constants.ALLOWED_COV_BASE num_X = X.shape[0] num_Xp = Xp.shape[0] @@ -551,7 +551,7 @@ def cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, same_X_Xp: assert isinstance(hyps, dict) assert isinstance(same_X_Xp, bool) assert isinstance(jitter, float) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV num_X = X.shape[0] num_Xp = Xp.shape[0] @@ -561,7 +561,7 @@ def cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, same_X_Xp: assert num_X == num_Xp cov_X_Xp += np.eye(num_X) * jitter - if str_cov in ('eq', 'se', 'matern32', 'matern52'): + if str_cov in constants.ALLOWED_COV_BASE: assert len(X.shape) == 2 assert len(Xp.shape) == 2 dim_X = X.shape[1] @@ -577,7 +577,7 @@ def cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, same_X_Xp: cov_X_Xp += fun_cov(X, Xp, hyps['lengthscales'], hyps['signal']) assert cov_X_Xp.shape == (num_X, num_Xp) - elif str_cov in constants.ALLOWED_GP_COV_SET: + elif str_cov in constants.ALLOWED_COV_SET: list_str_cov = str_cov.split('_') str_cov = list_str_cov[1] @@ -649,7 +649,7 @@ def grad_cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, fix_n assert isinstance(fix_noise, bool) assert isinstance(same_X_Xp, bool) assert isinstance(jitter, float) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV # TODO: X and Xp should be same? assert same_X_Xp diff --git a/bayeso/gp/__init__.py b/bayeso/gp/__init__.py index cb304b0..b5d7615 100644 --- a/bayeso/gp/__init__.py +++ b/bayeso/gp/__init__.py @@ -2,4 +2,7 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: March 22, 2021 # -"""These files are for implementing Gaussian process regression.""" +"""These files are for implementing Gaussian process regression. +It is implemented, based on the following article: +(i) Rasmussen, C. E., & Williams, C. K. (2006). Gaussian Process +Regression for Machine Learning. 
MIT Press.""" diff --git a/bayeso/gp/gp.py b/bayeso/gp/gp.py index b64d5cd..f2a2978 100644 --- a/bayeso/gp/gp.py +++ b/bayeso/gp/gp.py @@ -54,7 +54,7 @@ def sample_functions(mu: np.ndarray, Sigma: np.ndarray, @utils_common.validate_types def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, debug: bool=False ) -> constants.TYPING_TUPLE_THREE_ARRAYS: @@ -121,7 +121,7 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra @utils_common.validate_types def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, hyps: dict, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, debug: bool=False ) -> constants.TYPING_TUPLE_THREE_ARRAYS: @@ -177,7 +177,7 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr @utils_common.validate_types def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, fix_noise: float=constants.FIX_GP_NOISE, diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index dfda27a..25a417d 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -25,6 +25,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_GP, str_modelselection_method: str=constants.STR_MODELSELECTION_METHOD, + use_ard: bool=constants.USE_ARD, fix_noise: bool=constants.FIX_GP_NOISE, debug: bool=False ) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: @@ -44,6 +45,8 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, :type str_optimizer_method: str., optional :param str_modelselection_method: the name of model selection method. :type str_modelselection_method: str., optional + :param use_ard: flag for using automatic relevance determination. + :type use_ard: bool., optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param debug: flag for printing log messages. 
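# --- Illustrative sketch, not part of the patch above ---
# The new use_ard flag controls whether the kernel keeps one lengthscale per
# input dimension (automatic relevance determination) or a single shared
# lengthscale. The toy helper below only mimics that behavior for the sake of
# explanation; it is an assumption about the shape of the hyperparameter
# dictionary, not BayesO code.
import numpy as np

def sketch_get_hyps(dim: int, use_ard: bool = True) -> dict:
    """Toy counterpart of utils_covariance.get_hyps for a Matern-type kernel."""
    hyps = {'noise': 1e-2, 'signal': 1.0}
    if use_ard:
        # one lengthscale per dimension
        hyps['lengthscales'] = np.ones(dim)
    else:
        # a single lengthscale shared across all dimensions
        hyps['lengthscales'] = 1.0
    return hyps

print(sketch_get_hyps(3, use_ard=True))
print(sketch_get_hyps(3, use_ard=False))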
@@ -64,6 +67,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(str_cov, str) assert isinstance(str_optimizer_method, str) assert isinstance(str_modelselection_method, str) + assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) assert isinstance(debug, bool) assert len(Y_train.shape) == 2 @@ -81,9 +85,9 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, logger.debug('str_modelselection_method: %s', str_modelselection_method) prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) - if str_cov in constants.ALLOWED_GP_COV_BASE: + if str_cov in constants.ALLOWED_COV_BASE: num_dim = X_train.shape[1] - elif str_cov in constants.ALLOWED_GP_COV_SET: + elif str_cov in constants.ALLOWED_COV_SET: num_dim = X_train.shape[2] use_gradient = False @@ -100,7 +104,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, hyps_converted = utils_covariance.convert_hyps( str_cov, - utils_covariance.get_hyps(str_cov, num_dim), + utils_covariance.get_hyps(str_cov, num_dim, use_ard=use_ard), fix_noise=fix_noise, ) @@ -116,7 +120,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, if str_optimizer_method == 'SLSQP-Bounded': str_optimizer_method = 'SLSQP' - bounds = utils_covariance.get_range_hyps(str_cov, num_dim, fix_noise=fix_noise) + bounds = utils_covariance.get_range_hyps(str_cov, num_dim, use_ard=use_ard, fix_noise=fix_noise) result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, bounds=bounds, jac=use_gradient, options={'disp': False}) @@ -140,7 +144,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, else: # pragma: no cover raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') - hyps = utils_covariance.restore_hyps(str_cov, result_optimized, fix_noise=fix_noise) + hyps = utils_covariance.restore_hyps(str_cov, result_optimized, use_ard=use_ard, fix_noise=fix_noise) hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim) cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, diff --git a/bayeso/tp/__init__.py b/bayeso/tp/__init__.py index ff0b794..5bc4630 100644 --- a/bayeso/tp/__init__.py +++ b/bayeso/tp/__init__.py @@ -2,4 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 29, 2020 # -"""These files are for implementing Student-:math:`t` process regression.""" +"""These files are for implementing Student-:math:`t` process regression. +It is implemented, based on the following article: +(i) Rasmussen, C. E., & Williams, C. K. (2006). Gaussian Process +Regression for Machine Learning. MIT Press. +(ii) Shah, A., Wilson, A. G., & Ghahramani, Z. (2014). Student-t Processes as Alternatives to Gaussian Processes. In Proceedings of the 17th International Conference on Artificial Intelligence and Statistics (pp. 
877-885).""" diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index 6a5b582..11548b3 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -65,7 +65,7 @@ def sample_functions(nu: float, mu: np.ndarray, Sigma: np.ndarray, @utils_common.validate_types def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, debug: bool=False ) -> constants.TYPING_TUPLE_FLOAT_THREE_ARRAYS: @@ -146,7 +146,7 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra @utils_common.validate_types def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, hyps: dict, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, debug: bool=False ) -> constants.TYPING_TUPLE_FLOAT_THREE_ARRAYS: @@ -206,7 +206,7 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr @utils_common.validate_types def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarray, - str_cov: str=constants.STR_GP_COV, + str_cov: str=constants.STR_COV, str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_TP, prior_mu: constants.TYPING_UNION_CALLABLE_NONE=None, fix_noise: float=constants.FIX_GP_NOISE, diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py index e0da6f3..162266b 100644 --- a/bayeso/tp/tp_kernel.py +++ b/bayeso/tp/tp_kernel.py @@ -24,6 +24,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, prior_mu: constants.TYPING_UNION_CALLABLE_NONE, str_cov: str, str_optimizer_method: str=constants.STR_OPTIMIZER_METHOD_TP, + use_ard: bool=constants.USE_ARD, fix_noise: bool=constants.FIX_GP_NOISE, debug: bool=False ) -> constants.TYPING_TUPLE_TWO_ARRAYS_DICT: @@ -41,6 +42,8 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, :type str_cov: str. :param str_optimizer_method: the name of optimization method. :type str_optimizer_method: str., optional + :param use_ard: flag for using automatic relevance determination. + :type use_ard: bool., optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param debug: flag for printing log messages. 
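# --- Illustrative sketch, not part of the patch above ---
# get_optimized_kernel flattens the hyperparameter dictionary with
# convert_hyps, hands the vector to scipy.optimize.minimize, and rebuilds the
# dictionary with restore_hyps, so use_ard has to be passed consistently in
# both directions. The layout below (noise, signal, lengthscales) is a
# simplified assumption for the Gaussian process case with fix_noise=False;
# the Student-t process case additionally carries a degrees-of-freedom entry.
import numpy as np

def sketch_convert_hyps(hyps: dict) -> np.ndarray:
    # flatten: noise, signal, then the lengthscale(s)
    return np.concatenate([[hyps['noise'], hyps['signal']],
                           np.atleast_1d(hyps['lengthscales'])])

def sketch_restore_hyps(flat: np.ndarray, use_ard: bool) -> dict:
    hyps = {'noise': flat[0], 'signal': flat[1]}
    # with ARD every remaining entry is a lengthscale, otherwise one scalar
    hyps['lengthscales'] = flat[2:] if use_ard else float(flat[2])
    return hyps

vec = sketch_convert_hyps({'noise': 1e-2, 'signal': 1.0,
                           'lengthscales': np.ones(2)})
print(sketch_restore_hyps(vec, use_ard=True))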
@@ -59,6 +62,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert callable(prior_mu) or prior_mu is None assert isinstance(str_cov, str) assert isinstance(str_optimizer_method, str) + assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) assert isinstance(debug, bool) assert len(Y_train.shape) == 2 @@ -75,9 +79,9 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, logger.debug('str_optimizer_method: %s', str_optimizer_method) prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) - if str_cov in constants.ALLOWED_GP_COV_BASE: + if str_cov in constants.ALLOWED_COV_BASE: num_dim = X_train.shape[1] - elif str_cov in constants.ALLOWED_GP_COV_SET: + elif str_cov in constants.ALLOWED_COV_SET: num_dim = X_train.shape[2] use_gradient = False @@ -87,14 +91,14 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, hyps_converted = utils_covariance.convert_hyps( str_cov, - utils_covariance.get_hyps(str_cov, num_dim, use_gp=False), + utils_covariance.get_hyps(str_cov, num_dim, use_gp=False, use_ard=use_ard), fix_noise=fix_noise, use_gp=False ) if str_optimizer_method in ['L-BFGS-B', 'SLSQP']: bounds = utils_covariance.get_range_hyps(str_cov, num_dim, - fix_noise=fix_noise, use_gp=False) + use_ard=use_ard, fix_noise=fix_noise, use_gp=False) result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, bounds=bounds, jac=use_gradient, options={'disp': False}) @@ -107,7 +111,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') hyps = utils_covariance.restore_hyps(str_cov, result_optimized, - fix_noise=fix_noise, use_gp=False) + use_ard=use_ard, fix_noise=fix_noise, use_gp=False) hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, diff --git a/bayeso/utils/utils_bo.py b/bayeso/utils/utils_bo.py index 6bb5e7a..6e29889 100644 --- a/bayeso/utils/utils_bo.py +++ b/bayeso/utils/utils_bo.py @@ -24,7 +24,7 @@ @utils_common.validate_types -def get_best_acquisition_by_evaluation(initials: np.ndarray, fun_objective: callable) -> np.ndarray: +def get_best_acquisition_by_evaluation(initials: np.ndarray, fun_objective: constants.TYPING_CALLABLE) -> np.ndarray: """ It returns the best acquisition with respect to values of `fun_objective`. Here, the best acquisition is a minimizer of `fun_objective`. @@ -173,7 +173,7 @@ def check_optimizer_method_bo(str_optimizer_method_bo: str, dim: int, debug: boo return str_optimizer_method_bo @utils_common.validate_types -def choose_fun_acquisition(str_acq: str, hyps: dict) -> callable: +def choose_fun_acquisition(str_acq: str, hyps: dict) -> constants.TYPING_CALLABLE: """ It chooses and returns an acquisition function. 
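# --- Illustrative sketch, not part of the patch above ---
# The annotations move from the built-in callable to typing.Callable because
# the built-in is a function rather than a typing construct; the explicit
# alias keeps __annotations__-based checks such as validate_types and the
# updated tests (annos['return'] == typing.Callable) consistent. The
# dispatcher below is a toy stand-in, not the bayeso implementation, and its
# acquisition rules are placeholders.
import typing

TYPING_CALLABLE = typing.Callable  # mirrors the new alias in bayeso.constants

def sketch_choose_fun_acquisition(str_acq: str) -> TYPING_CALLABLE:
    table = {
        'pure_exploit': lambda mean, std: -mean,  # favor low predictive mean
        'pure_explore': lambda mean, std: std,    # favor high predictive std
    }
    if str_acq not in table:
        raise NotImplementedError('sketch_choose_fun_acquisition: not implemented.')
    return table[str_acq]

assert sketch_choose_fun_acquisition.__annotations__['return'] == typing.Callable
fun_acq = sketch_choose_fun_acquisition('pure_explore')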
@@ -213,7 +213,8 @@ def choose_fun_acquisition(str_acq: str, hyps: dict) -> callable: return fun_acquisition @utils_common.validate_types -def check_hyps_convergence(list_hyps: list, hyps: dict, str_cov: str, fix_noise: bool, +def check_hyps_convergence(list_hyps: constants.TYPING_LIST[dict], hyps: dict, + str_cov: str, fix_noise: bool, ratio_threshold: float=0.05 ) -> bool: """ diff --git a/bayeso/utils/utils_common.py b/bayeso/utils/utils_common.py index 45bced9..9d94542 100644 --- a/bayeso/utils/utils_common.py +++ b/bayeso/utils/utils_common.py @@ -10,7 +10,7 @@ from bayeso import constants -def validate_types(func: callable) -> callable: +def validate_types(func: constants.TYPING_CALLABLE) -> constants.TYPING_CALLABLE: """ It is a decorator for validating the number of types, which are declared for typing. diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index b1f00bc..32f1018 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -11,7 +11,7 @@ @utils_common.validate_types -def _get_list_first() -> list: +def _get_list_first() -> constants.TYPING_LIST[str]: """ It provides list of strings. The strings in that list require two hyperparameters, `signal` and `lengthscales`. @@ -56,7 +56,7 @@ def get_hyps(str_cov: str, dim: int, assert isinstance(dim, int) assert isinstance(use_gp, bool) assert isinstance(use_ard, bool) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV hyps = dict() hyps['noise'] = constants.GP_NOISE @@ -82,7 +82,7 @@ def get_range_hyps(str_cov: str, dim: int, use_gp: bool=True, use_ard: bool=True, fix_noise: bool=False -) -> list: +) -> constants.TYPING_LIST[list]: """ It returns default optimization ranges of hyperparameters for Gaussian process regression. @@ -109,7 +109,7 @@ def get_range_hyps(str_cov: str, dim: int, assert isinstance(use_gp, bool) assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV range_hyps = [] @@ -161,7 +161,7 @@ def convert_hyps(str_cov: str, hyps: dict, assert isinstance(hyps, dict) assert isinstance(use_gp, bool) assert isinstance(fix_noise, bool) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV list_hyps = [] if not fix_noise: @@ -174,8 +174,13 @@ def convert_hyps(str_cov: str, hyps: dict, if str_cov in list_first: list_hyps.append(hyps['signal']) - for elem_lengthscale in hyps['lengthscales']: - list_hyps.append(elem_lengthscale) + if isinstance(hyps['lengthscales'], np.ndarray): + for elem_lengthscale in hyps['lengthscales']: + list_hyps.append(elem_lengthscale) + elif isinstance(hyps['lengthscales'], float): + list_hyps.append(hyps['lengthscales']) + else: + raise ValueError('covert_hyps: not allowed type for lengthscales.') else: raise NotImplementedError('convert_hyps: allowed str_cov, but it is not implemented.') return np.array(list_hyps) @@ -183,6 +188,7 @@ def convert_hyps(str_cov: str, hyps: dict, @utils_common.validate_types def restore_hyps(str_cov: str, hyps: np.ndarray, use_gp: bool=True, + use_ard: bool=True, fix_noise: bool=False, noise: float=constants.GP_NOISE ) -> dict: @@ -195,6 +201,8 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, :type hyps: numpy.ndarray :param use_gp: flag for Gaussian process or Student-$t$ process. :type use_gp: bool., optional + :param use_ard: flag for using automatic relevance determination. 
+ :type use_ard: bool., optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param noise: fixed noise value. @@ -210,10 +218,11 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, assert isinstance(str_cov, str) assert isinstance(hyps, np.ndarray) assert isinstance(use_gp, bool) + assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) assert isinstance(noise, float) assert len(hyps.shape) == 1 - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV dict_hyps = dict() if not fix_noise: @@ -231,10 +240,15 @@ def restore_hyps(str_cov: str, hyps: np.ndarray, if str_cov in list_first: dict_hyps['signal'] = hyps[ind_start] - list_lengthscales = [] - for ind_elem in range(ind_start + 1, len(hyps)): - list_lengthscales.append(hyps[ind_elem]) - dict_hyps['lengthscales'] = np.array(list_lengthscales) + + if use_ard: + list_lengthscales = [] + for ind_elem in range(ind_start + 1, len(hyps)): + list_lengthscales.append(hyps[ind_elem]) + dict_hyps['lengthscales'] = np.array(list_lengthscales) + else: + assert hyps.shape[0] == ind_start + 2 + dict_hyps['lengthscales'] = hyps[ind_start + 1] else: raise NotImplementedError('restore_hyps: allowed str_cov, but it is not implemented.') return dict_hyps @@ -266,7 +280,7 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int, assert isinstance(str_cov, str) assert isinstance(dim, int) assert isinstance(use_gp, bool) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV is_valid = True @@ -335,7 +349,7 @@ def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int, assert isinstance(str_cov, str) assert isinstance(dim, int) assert isinstance(use_gp, bool) - assert str_cov in constants.ALLOWED_GP_COV + assert str_cov in constants.ALLOWED_COV # is_valid = True @@ -369,15 +383,15 @@ def check_str_cov(str_fun: str, str_cov: str, shape_X1: tuple, assert isinstance(shape_X1, tuple) assert shape_X2 is None or isinstance(shape_X2, tuple) - if str_cov in constants.ALLOWED_GP_COV_BASE: + if str_cov in constants.ALLOWED_COV_BASE: assert len(shape_X1) == 2 if shape_X2 is not None: assert len(shape_X2) == 2 - elif str_cov in constants.ALLOWED_GP_COV_SET: + elif str_cov in constants.ALLOWED_COV_SET: assert len(shape_X1) == 3 if shape_X2 is not None: assert len(shape_X2) == 3 - elif str_cov in constants.ALLOWED_GP_COV: # pragma: no cover + elif str_cov in constants.ALLOWED_COV: # pragma: no cover raise ValueError('{}: missing conditions for str_cov.'.format(str_fun)) else: raise ValueError('{}: invalid str_cov.'.format(str_fun)) diff --git a/bayeso/utils/utils_plotting.py b/bayeso/utils/utils_plotting.py index 07d81ca..b3e71b1 100644 --- a/bayeso/utils/utils_plotting.py +++ b/bayeso/utils/utils_plotting.py @@ -154,7 +154,7 @@ def plot_gp_via_sample(X: np.ndarray, Ys: np.ndarray, draw_zero_axis: bool=False, pause_figure: bool=True, time_pause: constants.TYPING_UNION_INT_FLOAT=constants.TIME_PAUSE, - colors: list=constants.COLORS, + colors: np.ndarray=constants.COLORS, ) -> constants.TYPE_NONE: # pragma: no cover """ It is for plotting sampled functions from multivariate distributions. @@ -179,8 +179,8 @@ def plot_gp_via_sample(X: np.ndarray, Ys: np.ndarray, :type pause_figure: bool., optional :param time_pause: pausing time. :type time_pause: int. or float, optional - :param colors: list of colors. - :type colors: list, optional + :param colors: array of colors. + :type colors: np.ndarray, optional :returns: None. 
:rtype: NoneType @@ -199,7 +199,7 @@ def plot_gp_via_sample(X: np.ndarray, Ys: np.ndarray, assert isinstance(draw_zero_axis, bool) assert isinstance(pause_figure, bool) assert isinstance(time_pause, (int, float)) - assert isinstance(colors, list) + assert isinstance(colors, np.ndarray) assert len(X.shape) == 2 assert len(Ys.shape) == 2 assert X.shape[1] == 1 @@ -240,7 +240,7 @@ def plot_gp_via_distribution(X_train: np.ndarray, Y_train: np.ndarray, pause_figure: bool=True, time_pause: constants.TYPING_UNION_INT_FLOAT=constants.TIME_PAUSE, range_shade: float=constants.RANGE_SHADE, - colors: list=constants.COLORS, + colors: np.ndarray=constants.COLORS, ) -> constants.TYPE_NONE: # pragma: no cover """ It is for plotting Gaussian process regression. @@ -277,8 +277,8 @@ def plot_gp_via_distribution(X_train: np.ndarray, Y_train: np.ndarray, :type time_pause: int. or float, optional :param range_shade: shade range for standard deviation. :type range_shade: float, optional - :param colors: list of colors. - :type colors: list, optional + :param colors: array of colors. + :type colors: np.ndarray, optional :returns: None. :rtype: NoneType @@ -302,7 +302,7 @@ def plot_gp_via_distribution(X_train: np.ndarray, Y_train: np.ndarray, assert isinstance(pause_figure, bool) assert isinstance(time_pause, (int, float)) assert isinstance(range_shade, float) - assert isinstance(colors, list) + assert isinstance(colors, np.ndarray) assert len(X_train.shape) == 2 assert len(X_test.shape) == 2 assert len(Y_train.shape) == 2 @@ -355,7 +355,7 @@ def plot_gp_via_distribution(X_train: np.ndarray, Y_train: np.ndarray, _show_figure(pause_figure, time_pause) @utils_common.validate_types -def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: list, num_init: int, draw_std: bool, +def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: constants.TYPING_LIST[str], num_init: int, draw_std: bool, include_marker: bool=True, include_legend: bool=False, use_tex: bool=False, @@ -366,8 +366,8 @@ def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: list, num_init: int pause_figure: bool=True, time_pause: constants.TYPING_UNION_INT_FLOAT=constants.TIME_PAUSE, range_shade: float=constants.RANGE_SHADE, - markers: list=constants.MARKERS, - colors: list=constants.COLORS, + markers: np.ndarray=constants.MARKERS, + colors: np.ndarray=constants.COLORS, ) -> constants.TYPE_NONE: # pragma: no cover """ It is for plotting optimization results of Bayesian optimization, in @@ -403,10 +403,10 @@ def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: list, num_init: int :type time_pause: int. or float, optional :param range_shade: shade range for standard deviation. :type range_shade: float, optional - :param markers: list of markers. - :type markers: list, optional - :param colors: list of colors. - :type colors: list, optional + :param markers: array of markers. + :type markers: np.ndarray, optional + :param colors: array of colors. + :type colors: np.ndarray, optional :returns: None. 
:rtype: NoneType @@ -429,8 +429,8 @@ def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: list, num_init: int assert isinstance(pause_figure, bool) assert isinstance(time_pause, (int, float)) assert isinstance(range_shade, float) - assert isinstance(markers, list) - assert isinstance(colors, list) + assert isinstance(markers, np.ndarray) + assert isinstance(colors, np.ndarray) assert len(minima.shape) == 3 assert minima.shape[0] == len(list_str_label) assert minima.shape[2] >= num_init @@ -499,7 +499,7 @@ def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: list, num_init: int @utils_common.validate_types def plot_minimum_vs_time(times: np.ndarray, minima: np.ndarray, - list_str_label: list, num_init: int, draw_std: bool, + list_str_label: constants.TYPING_LIST[str], num_init: int, draw_std: bool, include_marker: bool=True, include_legend: bool=False, use_tex: bool=False, @@ -510,8 +510,8 @@ def plot_minimum_vs_time(times: np.ndarray, minima: np.ndarray, pause_figure: bool=True, time_pause: constants.TYPING_UNION_INT_FLOAT=constants.TIME_PAUSE, range_shade: float=constants.RANGE_SHADE, - markers: list=constants.MARKERS, - colors: list=constants.COLORS, + markers: np.ndarray=constants.MARKERS, + colors: np.ndarray=constants.COLORS, ) -> constants.TYPE_NONE: # pragma: no cover """ It is for plotting optimization results of Bayesian optimization, in terms of execution time. @@ -550,10 +550,10 @@ def plot_minimum_vs_time(times: np.ndarray, minima: np.ndarray, :type time_pause: int. or float, optional :param range_shade: shade range for standard deviation. :type range_shade: float, optional - :param markers: list of markers. - :type markers: list, optional - :param colors: list of colors. - :type colors: list, optional + :param markers: array of markers. + :type markers: np.ndarray, optional + :param colors: array of colors. + :type colors: np.ndarray, optional :returns: None. 
:rtype: NoneType @@ -577,8 +577,8 @@ def plot_minimum_vs_time(times: np.ndarray, minima: np.ndarray, assert isinstance(pause_figure, bool) assert isinstance(time_pause, (int, float)) assert isinstance(range_shade, float) - assert isinstance(markers, list) - assert isinstance(colors, list) + assert isinstance(markers, np.ndarray) + assert isinstance(colors, np.ndarray) assert len(times.shape) == 3 assert len(minima.shape) == 3 assert times.shape[0] == minima.shape[0] == len(list_str_label) diff --git a/bayeso/wrappers/wrappers_bo.py b/bayeso/wrappers/wrappers_bo.py index 74987d8..8f62b40 100644 --- a/bayeso/wrappers/wrappers_bo.py +++ b/bayeso/wrappers/wrappers_bo.py @@ -18,7 +18,8 @@ @utils_common.validate_types def run_single_round_with_all_initial_information(model_bo: bo.BO, - fun_target: callable, X_train: np.ndarray, Y_train: np.ndarray, + fun_target: constants.TYPING_CALLABLE, + X_train: np.ndarray, Y_train: np.ndarray, num_iter: int, str_sampling_method_ao: str=constants.STR_SAMPLING_METHOD_AO, num_samples_ao: int=constants.NUM_SAMPLES_AO, @@ -131,7 +132,8 @@ def run_single_round_with_all_initial_information(model_bo: bo.BO, @utils_common.validate_types def run_single_round_with_initial_inputs(model_bo: bo.BO, - fun_target: callable, X_train: np.ndarray, num_iter: int, + fun_target: constants.TYPING_CALLABLE, + X_train: np.ndarray, num_iter: int, str_sampling_method_ao: str=constants.STR_SAMPLING_METHOD_AO, num_samples_ao: int=constants.NUM_SAMPLES_AO, str_mlm_method: str=constants.STR_MLM_METHOD, @@ -209,7 +211,8 @@ def run_single_round_with_initial_inputs(model_bo: bo.BO, time_gp_final, time_acq_final @utils_common.validate_types -def run_single_round(model_bo: bo.BO, fun_target: callable, num_init: int, num_iter: int, +def run_single_round(model_bo: bo.BO, fun_target: constants.TYPING_CALLABLE, + num_init: int, num_iter: int, str_initial_method_bo: str=constants.STR_INITIALIZING_METHOD_BO, str_sampling_method_ao: str=constants.STR_SAMPLING_METHOD_AO, num_samples_ao: int=constants.NUM_SAMPLES_AO, diff --git a/tests/common/test_covariance.py b/tests/common/test_covariance.py index 145946d..7666393 100644 --- a/tests/common/test_covariance.py +++ b/tests/common/test_covariance.py @@ -18,7 +18,7 @@ def test_choose_fun_cov_typing(): annos = package_target.choose_fun_cov.__annotations__ assert annos['str_cov'] == str - assert annos['return'] == callable + assert annos['return'] == typing.Callable def test_choose_fun_cov(): with pytest.raises(AssertionError) as error: @@ -34,7 +34,7 @@ def test_choose_fun_grad_cov_typing(): annos = package_target.choose_fun_grad_cov.__annotations__ assert annos['str_cov'] == str - assert annos['return'] == callable + assert annos['return'] == typing.Callable def test_choose_fun_grad_cov(): with pytest.raises(AssertionError) as error: diff --git a/tests/common/test_gp.py b/tests/common/test_gp.py index 5a15dd8..596ffe6 100644 --- a/tests/common/test_gp.py +++ b/tests/common/test_gp.py @@ -62,7 +62,7 @@ def test_predict_with_cov_typing(): assert annos['inv_cov_X_X'] == np.ndarray assert annos['hyps'] == dict assert annos['str_cov'] == str - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['debug'] == bool assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] @@ -118,7 +118,7 @@ def test_predict_with_hyps_typing(): assert annos['X_test'] == np.ndarray assert annos['hyps'] == dict assert annos['str_cov'] == str - assert annos['prior_mu'] 
== typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['debug'] == bool assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] @@ -165,7 +165,7 @@ def test_predict_with_optimized_hyps_typing(): assert annos['X_test'] == np.ndarray assert annos['str_cov'] == str assert annos['str_optimizer_method'] == str - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['fix_noise'] == float assert annos['debug'] == bool assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, np.ndarray] diff --git a/tests/common/test_gp_kernel.py b/tests/common/test_gp_kernel.py index 3f6622e..e3b872c 100644 --- a/tests/common/test_gp_kernel.py +++ b/tests/common/test_gp_kernel.py @@ -20,7 +20,7 @@ def test_get_optimized_kernel_typing(): assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['str_cov'] == str assert annos['str_optimizer_method'] == str assert annos['str_modelselection_method'] == str diff --git a/tests/common/test_tp.py b/tests/common/test_tp.py index ca19b7b..ea9f27b 100644 --- a/tests/common/test_tp.py +++ b/tests/common/test_tp.py @@ -72,7 +72,7 @@ def test_predict_with_cov_typing(): assert annos['inv_cov_X_X'] == np.ndarray assert annos['hyps'] == dict assert annos['str_cov'] == str - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['debug'] == bool assert annos['return'] == typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] @@ -134,7 +134,7 @@ def test_predict_with_hyps_typing(): assert annos['X_test'] == np.ndarray assert annos['hyps'] == dict assert annos['str_cov'] == str - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['debug'] == bool assert annos['return'] == typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] @@ -182,7 +182,7 @@ def test_predict_with_optimized_hyps_typing(): assert annos['X_test'] == np.ndarray assert annos['str_cov'] == str assert annos['str_optimizer_method'] == str - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['fix_noise'] == float assert annos['debug'] == bool assert annos['return'] == typing.Tuple[float, np.ndarray, np.ndarray, np.ndarray] diff --git a/tests/common/test_tp_kernel.py b/tests/common/test_tp_kernel.py index 6c86035..6f736b1 100644 --- a/tests/common/test_tp_kernel.py +++ b/tests/common/test_tp_kernel.py @@ -20,7 +20,7 @@ def test_get_optimized_kernel_typing(): assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['str_cov'] == str assert annos['str_optimizer_method'] == str assert annos['fix_noise'] == bool diff --git a/tests/common/test_utils_bo.py b/tests/common/test_utils_bo.py index f4497f9..096eb9f 100644 --- a/tests/common/test_utils_bo.py +++ b/tests/common/test_utils_bo.py @@ -4,6 +4,7 @@ # """test_utils_bo""" +import typing import pytest import numpy as np @@ -15,7 +16,7 @@ def 
test_get_best_acquisition_by_evaluation_typing(): annos = package_target.get_best_acquisition_by_evaluation.__annotations__ assert annos['initials'] == np.ndarray - assert annos['fun_objective'] == callable + assert annos['fun_objective'] == typing.Callable assert annos['return'] == np.ndarray def test_get_best_acquisition_by_evaluation(): @@ -81,7 +82,7 @@ def test_choose_fun_acquisition_typing(): assert annos['str_acq'] == str assert annos['hyps'] == dict - assert annos['return'] == callable + assert annos['return'] == typing.Callable def test_choose_fun_acquisition(): dict_hyps = {'lengthscales': np.array([1.0, 1.0]), 'signal': 1.0, 'noise': 0.01} @@ -95,7 +96,7 @@ def test_choose_fun_acquisition(): def test_check_hyps_convergence_typing(): annos = package_target.check_hyps_convergence.__annotations__ - assert annos['list_hyps'] == list + assert annos['list_hyps'] == typing.List[dict] assert annos['hyps'] == dict assert annos['str_cov'] == str assert annos['fix_noise'] == bool diff --git a/tests/common/test_utils_covariance.py b/tests/common/test_utils_covariance.py index 1c78083..40aadbe 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -15,7 +15,7 @@ def test_get_list_first_typing(): annos = package_target._get_list_first.__annotations__ - assert annos['return'] == list + assert annos['return'] == typing.List[str] def test_get_hyps_typing(): annos = package_target.get_hyps.__annotations__ @@ -81,7 +81,7 @@ def test_get_range_hyps_typing(): assert annos['use_ard'] == bool assert annos['use_gp'] == bool assert annos['fix_noise'] == bool - assert annos['return'] == list + assert annos['return'] == typing.List[list] def test_get_range_hyps(): with pytest.raises(AssertionError) as error: @@ -195,6 +195,10 @@ def test_restore_hyps(): package_target.restore_hyps('abc', 2.1) with pytest.raises(AssertionError) as error: package_target.restore_hyps('se', np.array([[1.0, 1.0], [1.0, 1.0]])) + with pytest.raises(AssertionError) as error: + package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), use_ard=1) + with pytest.raises(AssertionError) as error: + package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), use_ard='abc') with pytest.raises(AssertionError) as error: package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), fix_noise=1) with pytest.raises(AssertionError) as error: @@ -203,6 +207,8 @@ def test_restore_hyps(): package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), use_gp=1) with pytest.raises(AssertionError) as error: package_target.restore_hyps('se', np.array([1.0, 1.0, 1.0]), use_gp='abc') + with pytest.raises(AssertionError) as error: + package_target.restore_hyps('se', np.array([0.1, 1.0, 1.0, 1.0]), use_ard=False) cur_hyps = np.array([0.1, 1.0, 1.0, 1.0, 1.0]) restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=False) diff --git a/tests/common/test_utils_gp.py b/tests/common/test_utils_gp.py index ffb3c9e..e6e3b25 100644 --- a/tests/common/test_utils_gp.py +++ b/tests/common/test_utils_gp.py @@ -16,7 +16,7 @@ def test_get_prior_mu_typing(): annos = package_target.get_prior_mu.__annotations__ - assert annos['prior_mu'] == typing.Union[callable, type(None)] + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['X'] == np.ndarray assert annos['return'] == np.ndarray diff --git a/tests/common/test_utils_plotting.py b/tests/common/test_utils_plotting.py index 21b1b4e..8c6d87c 100644 --- a/tests/common/test_utils_plotting.py +++ b/tests/common/test_utils_plotting.py @@ 
-26,7 +26,7 @@ def test_plot_gp_via_sample_typing(): assert annos['draw_zero_axis'] == bool assert annos['pause_figure'] == bool assert annos['time_pause'] == typing.Union[int, float] - assert annos['colors'] == list + assert annos['colors'] == np.ndarray assert annos['return'] == type(None) def test_plot_gp_via_sample(): @@ -86,7 +86,7 @@ def test_plot_gp_via_distribution_typing(): assert annos['pause_figure'] == bool assert annos['time_pause'] == typing.Union[int, float] assert annos['range_shade'] == float - assert annos['colors'] == list + assert annos['colors'] == np.ndarray assert annos['return'] == type(None) def test_plot_gp_via_distribution(): @@ -175,7 +175,7 @@ def test_plot_minimum_vs_iter_typing(): annos = package_target.plot_minimum_vs_iter.__annotations__ assert annos['minima'] == np.ndarray - assert annos['list_str_label'] == list + assert annos['list_str_label'] == typing.List[str] assert annos['num_init'] == int assert annos['draw_std'] == bool assert annos['include_marker'] == bool @@ -188,8 +188,8 @@ def test_plot_minimum_vs_iter_typing(): assert annos['pause_figure'] == bool assert annos['time_pause'] == typing.Union[int, float] assert annos['range_shade'] == float - assert annos['markers'] == list - assert annos['colors'] == list + assert annos['markers'] == np.ndarray + assert annos['colors'] == np.ndarray assert annos['return'] == type(None) def test_plot_minimum_vs_iter(): @@ -250,7 +250,7 @@ def test_plot_minimum_vs_time_typing(): assert annos['times'] == np.ndarray assert annos['minima'] == np.ndarray - assert annos['list_str_label'] == list + assert annos['list_str_label'] == typing.List[str] assert annos['num_init'] == int assert annos['draw_std'] == bool assert annos['include_marker'] == bool @@ -263,8 +263,8 @@ def test_plot_minimum_vs_time_typing(): assert annos['pause_figure'] == bool assert annos['time_pause'] == typing.Union[int, float] assert annos['range_shade'] == float - assert annos['markers'] == list - assert annos['colors'] == list + assert annos['markers'] == np.ndarray + assert annos['colors'] == np.ndarray assert annos['return'] == type(None) def test_plot_minimum_vs_time(): diff --git a/tests/common/test_wrappers_bo.py b/tests/common/test_wrappers_bo.py index abe7dba..cd9b825 100644 --- a/tests/common/test_wrappers_bo.py +++ b/tests/common/test_wrappers_bo.py @@ -16,7 +16,7 @@ def test_run_single_round_with_all_initial_information_typing(): annos = package_target.run_single_round_with_all_initial_information.__annotations__ assert annos['model_bo'] == bo.BO - assert annos['fun_target'] == callable + assert annos['fun_target'] == typing.Callable assert annos['X_train'] == np.ndarray assert annos['Y_train'] == np.ndarray assert annos['num_iter'] == int @@ -79,7 +79,7 @@ def test_run_single_round_with_initial_inputs_typing(): annos = package_target.run_single_round_with_initial_inputs.__annotations__ assert annos['model_bo'] == bo.BO - assert annos['fun_target'] == callable + assert annos['fun_target'] == typing.Callable assert annos['X_train'] == np.ndarray assert annos['num_iter'] == int assert annos['str_sampling_method_ao'] == str @@ -131,7 +131,7 @@ def test_run_single_round_typing(): annos = package_target.run_single_round.__annotations__ assert annos['model_bo'] == bo.BO - assert annos['fun_target'] == callable + assert annos['fun_target'] == typing.Callable assert annos['num_init'] == int assert annos['num_iter'] == int assert annos['str_sampling_method_ao'] == str From d0de2cb0efb35cd63d8e65fc6ce4ee2295326784 Mon Sep 17 00:00:00 2001 
From: Jungtaek Kim Date: Mon, 5 Apr 2021 17:27:20 +0900 Subject: [PATCH 29/37] Update to use use_ard --- bayeso/covariance.py | 10 +-- bayeso/gp/gp.py | 30 +-------- bayeso/gp/gp_kernel.py | 31 +++++----- bayeso/gp/gp_likelihood.py | 20 +++--- bayeso/tp/__init__.py | 5 +- bayeso/tp/tp.py | 32 ++-------- bayeso/tp/tp_kernel.py | 18 ++---- bayeso/tp/tp_likelihood.py | 12 ++-- bayeso/utils/utils_bo.py | 4 +- bayeso/utils/utils_covariance.py | 69 ++++++++++----------- bayeso/utils/utils_gp.py | 44 ++++++++++++- bayeso/utils/utils_plotting.py | 3 +- tests/common/test_gp_kernel.py | 4 ++ tests/common/test_tp_kernel.py | 4 ++ tests/common/test_utils_covariance.py | 89 +++++++++++++++------------ tests/common/test_utils_gp.py | 38 ++++++++++++ 16 files changed, 226 insertions(+), 187 deletions(-) diff --git a/bayeso/covariance.py b/bayeso/covariance.py index d49b5aa..2cda950 100644 --- a/bayeso/covariance.py +++ b/bayeso/covariance.py @@ -568,10 +568,7 @@ def cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, same_X_Xp: dim_Xp = Xp.shape[1] assert dim_X == dim_Xp - hyps, is_valid = utils_covariance.validate_hyps_dict(hyps, str_cov, dim_X) - # TODO: ValueError is appropriate? We can just raise AssertionError in validate_hyps_dict. - if not is_valid: - raise ValueError('cov_main: invalid hyperparameters.') + hyps = utils_covariance.validate_hyps_dict(hyps, str_cov, dim_X) fun_cov = choose_fun_cov(str_cov) cov_X_Xp += fun_cov(X, Xp, hyps['lengthscales'], hyps['signal']) @@ -589,10 +586,7 @@ def cov_main(str_cov: str, X: np.ndarray, Xp: np.ndarray, hyps: dict, same_X_Xp: assert dim_X == dim_Xp - hyps, is_valid = utils_covariance.validate_hyps_dict(hyps, str_cov, dim_X) - # TODO: ValueError is appropriate? We can just raise AssertionError in validate_hyps_dict. 
- if not is_valid: - raise ValueError('cov_main: invalid hyperparameters.') + hyps = utils_covariance.validate_hyps_dict(hyps, str_cov, dim_X) if not same_X_Xp: for ind_X in range(0, num_X): diff --git a/bayeso/gp/gp.py b/bayeso/gp/gp.py index f2a2978..4d51cf1 100644 --- a/bayeso/gp/gp.py +++ b/bayeso/gp/gp.py @@ -91,23 +91,15 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test) assert isinstance(cov_X_X, np.ndarray) assert isinstance(inv_cov_X_X, np.ndarray) assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 assert len(cov_X_X.shape) == 2 assert len(inv_cov_X_X.shape) == 2 assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all() utils_covariance.check_str_cov('predict_with_cov', str_cov, X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test) @@ -154,18 +146,10 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test) assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 utils_covariance.check_str_cov('predict_with_hyps', str_cov, X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, debug=debug) @@ -214,19 +198,11 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) - assert isinstance(str_cov, str) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test) assert isinstance(str_optimizer_method, str) assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 utils_covariance.check_str_cov('predict_with_optimized_kernel', str_cov, X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP time_start = time.time() diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index 25a417d..3a898ed 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -61,17 +61,11 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, """ # TODO: check to input same fix_noise to convert_hyps and restore_hyps - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug) assert isinstance(str_optimizer_method, str) assert 
isinstance(str_modelselection_method, str) assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD @@ -92,10 +86,12 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, use_gradient = False if str_modelselection_method == 'ml': - neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_ml(X_train, Y_train, hyps, str_cov, - prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, - debug=debug) + neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_ml(X_train, Y_train, + hyps, str_cov, prior_mu_train, + use_ard=use_ard, fix_noise=fix_noise, + use_gradient=use_gradient, debug=debug) elif str_modelselection_method == 'loocv': + # TODO: add use_ard. neg_log_ml_ = lambda hyps: gp_likelihood.neg_log_pseudo_l_loocv(X_train, Y_train, hyps, str_cov, prior_mu_train, fix_noise=fix_noise, debug=debug) use_gradient = False @@ -105,7 +101,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, hyps_converted = utils_covariance.convert_hyps( str_cov, utils_covariance.get_hyps(str_cov, num_dim, use_ard=use_ard), - fix_noise=fix_noise, + fix_noise=fix_noise ) if str_optimizer_method in ['BFGS', 'SLSQP']: @@ -120,14 +116,15 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, if str_optimizer_method == 'SLSQP-Bounded': str_optimizer_method = 'SLSQP' - bounds = utils_covariance.get_range_hyps(str_cov, num_dim, use_ard=use_ard, fix_noise=fix_noise) + bounds = utils_covariance.get_range_hyps(str_cov, num_dim, + use_ard=use_ard, + fix_noise=fix_noise) result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, method=str_optimizer_method, bounds=bounds, jac=use_gradient, options={'disp': False}) if debug: logger.debug('scipy message: %s', result_optimized.message) - result_optimized = result_optimized.x elif str_optimizer_method in ['Nelder-Mead']: result_optimized = scipy.optimize.minimize(neg_log_ml_, hyps_converted, @@ -135,7 +132,6 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, if debug: logger.debug('scipy message: %s', result_optimized.message) - result_optimized = result_optimized.x # TODO: Fill this conditions elif str_optimizer_method == 'DIRECT': # pragma: no cover @@ -144,12 +140,13 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, else: # pragma: no cover raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') - hyps = utils_covariance.restore_hyps(str_cov, result_optimized, use_ard=use_ard, fix_noise=fix_noise) + hyps = utils_covariance.restore_hyps(str_cov, result_optimized, + use_ard=use_ard, + fix_noise=fix_noise) - hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim) + hyps = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim) cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug) - time_end = time.time() if debug: diff --git a/bayeso/gp/gp_likelihood.py b/bayeso/gp/gp_likelihood.py index 078dda1..6f2ecc8 100644 --- a/bayeso/gp/gp_likelihood.py +++ b/bayeso/gp/gp_likelihood.py @@ -10,6 +10,7 @@ from bayeso import covariance from bayeso import constants +from bayeso.utils import utils_gp from bayeso.utils import utils_covariance from 
bayeso.utils import utils_common from bayeso.utils import utils_logger @@ -20,6 +21,7 @@ @utils_common.validate_types def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, str_cov: str, prior_mu_train: np.ndarray, + use_ard: bool=constants.USE_ARD, fix_noise: bool=constants.FIX_GP_NOISE, use_cholesky: bool=True, use_gradient: bool=True, @@ -38,6 +40,8 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, :type str_cov: str. :param prior_mu_train: the prior values computed by get_prior_mu(). Shape: (n, 1). :type prior_mu_train: numpy.ndarray + :param use_ard: flag for automatic relevance determination. + :type use_ard: bool., optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param use_cholesky: flag for using a cholesky decomposition. @@ -56,21 +60,19 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) + # TODO: add use_ard. + utils_gp.validate_common_args(X_train, Y_train, str_cov, None, debug) assert isinstance(hyps, np.ndarray) - assert isinstance(str_cov, str) assert isinstance(prior_mu_train, np.ndarray) + assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) assert isinstance(use_cholesky, bool) assert isinstance(use_gradient, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 assert len(prior_mu_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] utils_covariance.check_str_cov('neg_log_ml', str_cov, X_train.shape) - hyps = utils_covariance.restore_hyps(str_cov, hyps, fix_noise=fix_noise) + hyps = utils_covariance.restore_hyps(str_cov, hyps, use_ard=use_ard, fix_noise=fix_noise) new_Y_train = Y_train - prior_mu_train if use_cholesky: cov_X_X, lower, grad_cov_X_X = covariance.get_kernel_cholesky(X_train, @@ -144,14 +146,10 @@ def neg_log_pseudo_l_loocv(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.nd """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) + utils_gp.validate_common_args(X_train, Y_train, str_cov, None, debug) assert isinstance(hyps, np.ndarray) - assert isinstance(str_cov, str) assert isinstance(prior_mu_train, np.ndarray) assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 assert len(prior_mu_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] utils_covariance.check_str_cov('neg_log_pseudo_l_loocv', str_cov, X_train.shape) diff --git a/bayeso/tp/__init__.py b/bayeso/tp/__init__.py index 5bc4630..d2319ae 100644 --- a/bayeso/tp/__init__.py +++ b/bayeso/tp/__init__.py @@ -6,4 +6,7 @@ It is implemented, based on the following article: (i) Rasmussen, C. E., & Williams, C. K. (2006). Gaussian Process Regression for Machine Learning. MIT Press. -(ii) Shah, A., Wilson, A. G., & Ghahramani, Z. (2014). Student-t Processes as Alternatives to Gaussian Processes. In Proceedings of the 17th International Conference on Artificial Intelligence and Statistics (pp. 877-885).""" +(ii) Shah, A., Wilson, A. G., & Ghahramani, Z. (2014). Student-t Processes +as Alternatives to Gaussian Processes. In Proceedings of the 17th +International Conference on Artificial Intelligence and Statistics +(pp. 
877-885).""" diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index 11548b3..f36295a 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -106,23 +106,15 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test) assert isinstance(cov_X_X, np.ndarray) assert isinstance(inv_cov_X_X, np.ndarray) assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 assert len(cov_X_X.shape) == 2 assert len(inv_cov_X_X.shape) == 2 assert (np.array(cov_X_X.shape) == np.array(inv_cov_X_X.shape)).all() utils_covariance.check_str_cov('predict_with_cov', str_cov, X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] prior_mu_train = utils_gp.get_prior_mu(prior_mu, X_train) prior_mu_test = utils_gp.get_prior_mu(prior_mu, X_test) @@ -183,18 +175,10 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test) assert isinstance(hyps, dict) - assert isinstance(str_cov, str) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 utils_covariance.check_str_cov('predict_with_hyps', str_cov, X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, debug=debug) @@ -247,19 +231,11 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert isinstance(X_test, np.ndarray) - assert isinstance(str_cov, str) + utils_gp.validate_common_args(X_train, Y_train, str_cov, prior_mu, debug, X_test) assert isinstance(str_optimizer_method, str) assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert callable(prior_mu) or prior_mu is None - assert len(Y_train.shape) == 2 utils_covariance.check_str_cov('predict_with_optimized_kernel', str_cov, X_train.shape, shape_X2=X_test.shape) - assert X_train.shape[0] == Y_train.shape[0] - assert X_train.shape[1] == X_test.shape[1] assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP time_start = time.time() @@ -273,5 +249,5 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test time_end = time.time() if debug: - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + logger.debug('time consumed to construct tpr: %.4f sec.', time_end - time_start) return nu_Xs, mu_Xs, sigma_Xs, Sigma_Xs diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py index 162266b..5b12f37 100644 --- a/bayeso/tp/tp_kernel.py +++ b/bayeso/tp/tp_kernel.py @@ -57,16 +57,10 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) - assert callable(prior_mu) or prior_mu is None - assert isinstance(str_cov, str) + utils_gp.validate_common_args(X_train, Y_train, 
str_cov, prior_mu, debug) assert isinstance(str_optimizer_method, str) assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 - assert X_train.shape[0] == Y_train.shape[0] utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_TP @@ -86,7 +80,9 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, use_gradient = False neg_log_ml_ = lambda hyps: tp_likelihood.neg_log_ml(X_train, Y_train, hyps, str_cov, - prior_mu_train, fix_noise=fix_noise, use_gradient=use_gradient, + prior_mu_train, fix_noise=fix_noise, + use_gradient=use_gradient, + use_ard=use_ard, debug=debug) hyps_converted = utils_covariance.convert_hyps( @@ -105,7 +101,6 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, if debug: logger.debug('scipy message: %s', result_optimized.message) - result_optimized = result_optimized.x else: # pragma: no cover raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') @@ -113,13 +108,12 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, hyps = utils_covariance.restore_hyps(str_cov, result_optimized, use_ard=use_ard, fix_noise=fix_noise, use_gp=False) - hyps, _ = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) + hyps = utils_covariance.validate_hyps_dict(hyps, str_cov, num_dim, use_gp=False) cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X_train, hyps, str_cov, fix_noise=fix_noise, debug=debug) - time_end = time.time() if debug: logger.debug('hyps optimized: %s', utils_logger.get_str_hyps(hyps)) - logger.debug('time consumed to construct gpr: %.4f sec.', time_end - time_start) + logger.debug('time consumed to construct tpr: %.4f sec.', time_end - time_start) return cov_X_X, inv_cov_X_X, hyps diff --git a/bayeso/tp/tp_likelihood.py b/bayeso/tp/tp_likelihood.py index d6bc993..d2c01b7 100644 --- a/bayeso/tp/tp_likelihood.py +++ b/bayeso/tp/tp_likelihood.py @@ -10,6 +10,7 @@ from bayeso import covariance from bayeso import constants +from bayeso.utils import utils_gp from bayeso.utils import utils_covariance from bayeso.utils import utils_common from bayeso.utils import utils_logger @@ -20,6 +21,7 @@ @utils_common.validate_types def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, str_cov: str, prior_mu_train: np.ndarray, + use_ard: bool=constants.USE_ARD, fix_noise: bool=constants.FIX_GP_NOISE, use_gradient: bool=True, debug: bool=False @@ -37,6 +39,8 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, :type str_cov: str. :param prior_mu_train: the prior values computed by get_prior_mu(). Shape: (n, 1). :type prior_mu_train: numpy.ndarray + :param use_ard: flag for automatic relevance determination. + :type use_ard: bool., optional :param fix_noise: flag for fixing a noise. 
:type fix_noise: bool., optional :param use_gradient: flag for computing and returning gradients of @@ -53,21 +57,19 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, """ - assert isinstance(X_train, np.ndarray) - assert isinstance(Y_train, np.ndarray) + utils_gp.validate_common_args(X_train, Y_train, str_cov, None, debug) assert isinstance(hyps, np.ndarray) - assert isinstance(str_cov, str) assert isinstance(prior_mu_train, np.ndarray) + assert isinstance(use_ard, bool) assert isinstance(fix_noise, bool) assert isinstance(use_gradient, bool) - assert isinstance(debug, bool) - assert len(Y_train.shape) == 2 assert len(prior_mu_train.shape) == 2 assert X_train.shape[0] == Y_train.shape[0] == prior_mu_train.shape[0] utils_covariance.check_str_cov('neg_log_ml', str_cov, X_train.shape) num_X = float(X_train.shape[0]) hyps = utils_covariance.restore_hyps(str_cov, hyps, + use_ard=use_ard, fix_noise=fix_noise, use_gp=False) new_Y_train = Y_train - prior_mu_train nu = hyps['dof'] diff --git a/bayeso/utils/utils_bo.py b/bayeso/utils/utils_bo.py index 6e29889..1227591 100644 --- a/bayeso/utils/utils_bo.py +++ b/bayeso/utils/utils_bo.py @@ -24,7 +24,9 @@ @utils_common.validate_types -def get_best_acquisition_by_evaluation(initials: np.ndarray, fun_objective: constants.TYPING_CALLABLE) -> np.ndarray: +def get_best_acquisition_by_evaluation(initials: np.ndarray, + fun_objective: constants.TYPING_CALLABLE +) -> np.ndarray: """ It returns the best acquisition with respect to values of `fun_objective`. Here, the best acquisition is a minimizer of `fun_objective`. diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index 32f1018..c787379 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -127,6 +127,7 @@ def get_range_hyps(str_cov: str, dim: int, for _ in range(0, dim): range_hyps += constants.RANGE_LENGTHSCALES else: + # INFO: dim is ignored. 
range_hyps += constants.RANGE_LENGTHSCALES else: raise NotImplementedError('get_hyps: allowed str_cov, but it is not implemented.') @@ -179,10 +180,11 @@ def convert_hyps(str_cov: str, hyps: dict, list_hyps.append(elem_lengthscale) elif isinstance(hyps['lengthscales'], float): list_hyps.append(hyps['lengthscales']) - else: + else: # pragma: no cover raise ValueError('covert_hyps: not allowed type for lengthscales.') else: raise NotImplementedError('convert_hyps: allowed str_cov, but it is not implemented.') + return np.array(list_hyps) @utils_common.validate_types @@ -282,45 +284,42 @@ def validate_hyps_dict(hyps: dict, str_cov: str, dim: int, assert isinstance(use_gp, bool) assert str_cov in constants.ALLOWED_COV - is_valid = True - if 'noise' not in hyps: - is_valid = False - else: - if not isinstance(hyps['noise'], float): - is_valid = False - else: - if np.abs(hyps['noise']) >= constants.BOUND_UPPER_GP_NOISE: - hyps['noise'] = constants.BOUND_UPPER_GP_NOISE + raise ValueError('validate_hyps_dict: invalid noise.') + + if not isinstance(hyps['noise'], float): + raise ValueError('validate_hyps_dict: invalid noise.') + + if np.abs(hyps['noise']) >= constants.BOUND_UPPER_GP_NOISE: + hyps['noise'] = constants.BOUND_UPPER_GP_NOISE if not use_gp: if 'dof' not in hyps: - is_valid = False - else: - if not isinstance(hyps['dof'], float): - is_valid = False - if isinstance(hyps['dof'], float) and hyps['dof'] <= 2.0: - hyps['dof'] = 2.00001 - - if str_cov in ('eq', 'se', 'matern32', 'matern52'): - if 'lengthscales' not in hyps: - is_valid = False - else: - if isinstance(hyps['lengthscales'], np.ndarray) \ - and hyps['lengthscales'].shape[0] != dim: - is_valid = False - if not isinstance(hyps['lengthscales'], np.ndarray) \ - and not isinstance(hyps['lengthscales'], float): - is_valid = False - if 'signal' not in hyps: - is_valid = False - else: - if not isinstance(hyps['signal'], float): - is_valid = False - else: - is_valid = False + raise ValueError('validate_hyps_dict: invalid dof.') + + if not isinstance(hyps['dof'], float): + raise ValueError('validate_hyps_dict: invalid dof.') + + if isinstance(hyps['dof'], float) and hyps['dof'] <= 2.0: + hyps['dof'] = 2.00001 + + if 'lengthscales' not in hyps: + raise ValueError('validate_hyps_dict: invalid lengthscales.') - return hyps, is_valid + if isinstance(hyps['lengthscales'], np.ndarray) \ + and hyps['lengthscales'].shape[0] != dim: + raise ValueError('validate_hyps_dict: invalid lengthscales.') + if not isinstance(hyps['lengthscales'], np.ndarray) \ + and not isinstance(hyps['lengthscales'], float): + raise ValueError('validate_hyps_dict: invalid lengthscales.') + + if 'signal' not in hyps: + raise ValueError('validate_hyps_dict: invalid signal.') + + if not isinstance(hyps['signal'], float): + raise ValueError('validate_hyps_dict: invalid signal.') + + return hyps @utils_common.validate_types def validate_hyps_arr(hyps: np.ndarray, str_cov: str, dim: int, diff --git a/bayeso/utils/utils_gp.py b/bayeso/utils/utils_gp.py index cf37f50..c7f52b7 100644 --- a/bayeso/utils/utils_gp.py +++ b/bayeso/utils/utils_gp.py @@ -2,7 +2,8 @@ # author: Jungtaek Kim (jtkim@postech.ac.kr) # last updated: December 29, 2020 # -"""It is utilities for Gaussian process regression.""" +"""It is utilities for Gaussian process regression and +Student-:math:`t` process regression.""" import numpy as np @@ -38,3 +39,44 @@ def get_prior_mu(prior_mu: constants.TYPING_UNION_CALLABLE_NONE, X: np.ndarray) assert len(prior_mu_X.shape) == 2 assert X.shape[0] == prior_mu_X.shape[0] 
return prior_mu_X + +@utils_common.validate_types +def validate_common_args(X_train: np.ndarray, Y_train: np.ndarray, + str_cov: str, prior_mu: constants.TYPING_UNION_CALLABLE_NONE, + debug: bool, + X_test: constants.TYPING_UNION_ARRAY_NONE=None, +) -> constants.TYPE_NONE: + """ + It validates the common arguments for various functions. + + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param str_cov: the name of covariance function. + :type str_cov: str. + :param prior_mu: None, or prior mean function. + :type prior_mu: NoneType, or function + :param debug: flag for printing log messages. + :type debug: bool. + :param X_test: inputs or None. Shape: (l, d) or (l, m, d). + :type X_test: numpy.ndarray, or NoneType, optional + + :returns: None. + :rtype: NoneType + + :raises: AssertionError + + """ + + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert isinstance(str_cov, str) + assert callable(prior_mu) or prior_mu is None + assert isinstance(debug, bool) + assert len(Y_train.shape) == 2 + assert X_train.shape[0] == Y_train.shape[0] + assert isinstance(X_test, (np.ndarray, type(None))) + + if X_test is not None: + assert X_train.shape[1] == X_test.shape[1] diff --git a/bayeso/utils/utils_plotting.py b/bayeso/utils/utils_plotting.py index b3e71b1..e8a3666 100644 --- a/bayeso/utils/utils_plotting.py +++ b/bayeso/utils/utils_plotting.py @@ -355,7 +355,8 @@ def plot_gp_via_distribution(X_train: np.ndarray, Y_train: np.ndarray, _show_figure(pause_figure, time_pause) @utils_common.validate_types -def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: constants.TYPING_LIST[str], num_init: int, draw_std: bool, +def plot_minimum_vs_iter(minima: np.ndarray, list_str_label: constants.TYPING_LIST[str], + num_init: int, draw_std: bool, include_marker: bool=True, include_legend: bool=False, use_tex: bool=False, diff --git a/tests/common/test_gp_kernel.py b/tests/common/test_gp_kernel.py index e3b872c..3350f55 100644 --- a/tests/common/test_gp_kernel.py +++ b/tests/common/test_gp_kernel.py @@ -24,6 +24,7 @@ def test_get_optimized_kernel_typing(): assert annos['str_cov'] == str assert annos['str_optimizer_method'] == str assert annos['str_modelselection_method'] == str + assert annos['use_ard'] == bool assert annos['fix_noise'] == bool assert annos['debug'] == bool assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] @@ -106,3 +107,6 @@ def test_get_optimized_kernel(): print(hyps) cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X_set, Y, prior_mu, 'set_se', str_modelselection_method='loocv') print(hyps) + + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', use_ard=False) + print(hyps) diff --git a/tests/common/test_tp_kernel.py b/tests/common/test_tp_kernel.py index 6f736b1..c0497f3 100644 --- a/tests/common/test_tp_kernel.py +++ b/tests/common/test_tp_kernel.py @@ -23,6 +23,7 @@ def test_get_optimized_kernel_typing(): assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] assert annos['str_cov'] == str assert annos['str_optimizer_method'] == str + assert annos['use_ard'] == bool assert annos['fix_noise'] == bool assert annos['debug'] == bool assert annos['return'] == typing.Tuple[np.ndarray, np.ndarray, dict] @@ -82,6 +83,9 @@ def test_get_optimized_kernel(): cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', 
str_optimizer_method='L-BFGS-B') print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='L-BFGS-B', use_ard=False) + print(hyps) + cov_X_X, inv_cov_X_X, hyps = package_target.get_optimized_kernel(X, Y, prior_mu, 'se', str_optimizer_method='SLSQP') print(hyps) diff --git a/tests/common/test_utils_covariance.py b/tests/common/test_utils_covariance.py index 40aadbe..b00de49 100644 --- a/tests/common/test_utils_covariance.py +++ b/tests/common/test_utils_covariance.py @@ -115,6 +115,18 @@ def test_get_range_hyps(): assert isinstance(cur_range, list) assert cur_range == [[0.001, 10.0], [2.00001, 200.0], [0.01, 1000.0], [0.01, 1000.0]] + cur_range = package_target.get_range_hyps('se', 2, use_ard=True, fix_noise=False, use_gp=True) + print(type(cur_range)) + print(cur_range) + assert isinstance(cur_range, list) + assert cur_range == [[0.001, 10.0], [0.01, 1000.0], [0.01, 1000.0], [0.01, 1000.0]] + + cur_range = package_target.get_range_hyps('se', 2, use_ard=False, fix_noise=True, use_gp=True) + print(type(cur_range)) + print(cur_range) + assert isinstance(cur_range, list) + assert cur_range == [[0.01, 1000.0], [0.01, 1000.0]] + def test_convert_hyps_typing(): annos = package_target.convert_hyps.__annotations__ @@ -180,6 +192,7 @@ def test_restore_hyps_typing(): assert annos['str_cov'] == str assert annos['hyps'] == np.ndarray assert annos['use_gp'] == bool + assert annos['use_ard'] == bool assert annos['fix_noise'] == bool assert annos['noise'] == float assert annos['return'] == dict @@ -235,6 +248,12 @@ def test_restore_hyps(): assert restored_hyps['signal'] == cur_hyps[1] assert (restored_hyps['lengthscales'] == cur_hyps[2:]).all() + cur_hyps = np.array([0.1, 1.0, 4.0]) + restored_hyps = package_target.restore_hyps('se', cur_hyps, fix_noise=False) + assert restored_hyps['noise'] == cur_hyps[0] + assert restored_hyps['signal'] == cur_hyps[1] + assert restored_hyps['lengthscales'] == cur_hyps[2] + def test_validate_hyps_dict_typing(): annos = package_target.validate_hyps_dict.__annotations__ @@ -250,94 +269,84 @@ def test_validate_hyps_dict(): cur_hyps = package_target.get_hyps(str_cov, num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(123, str_cov, num_dim) + _ = package_target.validate_hyps_dict(123, str_cov, num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, 'abc', num_dim) + _ = package_target.validate_hyps_dict(cur_hyps, 'abc', num_dim) with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, 'abc') + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, 'abc') with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=1) + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=1) with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp='abc') + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp='abc') cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('noise') - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = 
package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('lengthscales') - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps.pop('signal') - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['noise'] = 'abc' - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['noise'] = np.inf - cur_hyps, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) + cur_hyps = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) assert cur_hyps['noise'] == constants.BOUND_UPPER_GP_NOISE cur_hyps = package_target.get_hyps(str_cov, num_dim) - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, 123) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, 123) cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['lengthscales'] = 'abc' - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = package_target.get_hyps(str_cov, num_dim) cur_hyps['signal'] = 'abc' - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) cur_hyps['signal'] = 'abc' - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim) cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) cur_hyps['dof'] = 'abc' - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) cur_hyps.pop('dof') - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) cur_hyps = package_target.get_hyps(str_cov, num_dim, use_gp=False) cur_hyps['dof'] = 1.5 with pytest.raises(AssertionError) as 
error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) if cur_hyps['dof'] == 2.00001: assert False with pytest.raises(AssertionError) as error: assert cur_hyps['dof'] == 1.5 cur_hyps = package_target.get_hyps(str_cov, num_dim, use_ard=False, use_gp=False) + print(cur_hyps) cur_hyps['lengthscales'] = 'abc' - with pytest.raises(AssertionError) as error: - _, is_valid = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) - assert is_valid == True + with pytest.raises(ValueError) as error: + _ = package_target.validate_hyps_dict(cur_hyps, str_cov, num_dim, use_gp=False) def test_validate_hyps_arr_typing(): annos = package_target.validate_hyps_arr.__annotations__ diff --git a/tests/common/test_utils_gp.py b/tests/common/test_utils_gp.py index e6e3b25..9f4ebae 100644 --- a/tests/common/test_utils_gp.py +++ b/tests/common/test_utils_gp.py @@ -38,3 +38,41 @@ def test_get_prior_mu(): assert (package_target.get_prior_mu(None, X) == np.zeros((X.shape[0], 1))).all() assert (package_target.get_prior_mu(fun_prior, X) == fun_prior(X)).all() + +def test_validate_common_args_typing(): + annos = package_target.validate_common_args.__annotations__ + + assert annos['X_train'] == np.ndarray + assert annos['Y_train'] == np.ndarray + assert annos['str_cov'] == str + assert annos['prior_mu'] == typing.Union[typing.Callable, type(None)] + assert annos['debug'] == bool + assert annos['X_test'] == typing.Union[np.ndarray, type(None)] + assert annos['return'] == type(None) + +def test_validate_common_args(): + X_train = np.ones((10, 4)) + Y_train = np.zeros((10, 1)) + X_test = np.ones((5, 4)) + str_cov = 'matern32' + prior_mu = lambda x: x + 1 + debug = True + + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(1, Y_train, X_test, str_cov, prior_mu, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, 1, X_test, str_cov, prior_mu, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, Y_train, 1, str_cov, prior_mu, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, Y_train, X_test, 1, prior_mu, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, Y_train, X_test, str_cov, 1, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, Y_train, X_test, str_cov, prior_mu, 1) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, np.zeros(10), X_test, str_cov, prior_mu, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, np.zeros((3, 1)), X_test, str_cov, prior_mu, debug) + with pytest.raises(AssertionError) as error: + package_target.validate_common_args(X_train, Y_train, np.zeros((3, 2)), str_cov, prior_mu, debug) From af7f567d71dd0a7b5670b8ac0da218af398ed42d Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Sun, 11 Apr 2021 16:43:21 +0900 Subject: [PATCH 30/37] Update docstring --- bayeso/gp/__init__.py | 1 + bayeso/tp/__init__.py | 2 ++ pytest.ini | 2 +- 3 files changed, 4 insertions(+), 1 deletion(-) diff --git a/bayeso/gp/__init__.py b/bayeso/gp/__init__.py index b5d7615..27cdc03 100644 --- a/bayeso/gp/__init__.py +++ b/bayeso/gp/__init__.py @@ -4,5 +4,6 @@ # """These files are for implementing Gaussian process 
regression. It is implemented, based on the following article: + (i) Rasmussen, C. E., & Williams, C. K. (2006). Gaussian Process Regression for Machine Learning. MIT Press.""" diff --git a/bayeso/tp/__init__.py b/bayeso/tp/__init__.py index d2319ae..4341628 100644 --- a/bayeso/tp/__init__.py +++ b/bayeso/tp/__init__.py @@ -4,8 +4,10 @@ # """These files are for implementing Student-:math:`t` process regression. It is implemented, based on the following article: + (i) Rasmussen, C. E., & Williams, C. K. (2006). Gaussian Process Regression for Machine Learning. MIT Press. + (ii) Shah, A., Wilson, A. G., & Ghahramani, Z. (2014). Student-t Processes as Alternatives to Gaussian Processes. In Proceedings of the 17th International Conference on Artificial Intelligence and Statistics diff --git a/pytest.ini b/pytest.ini index 0310972..d6544ea 100644 --- a/pytest.ini +++ b/pytest.ini @@ -1,2 +1,2 @@ [pytest] -timeout = 25 +timeout = 24 From bf883d231f255c2e96ac8ba191a2e83dfe42a063 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Sun, 11 Apr 2021 17:22:44 +0900 Subject: [PATCH 31/37] Update docstring --- bayeso/bo.py | 10 +++++----- bayeso/covariance.py | 4 ++-- bayeso/gp/gp.py | 6 +++--- bayeso/gp/gp_kernel.py | 2 +- bayeso/tp/tp.py | 6 +++--- bayeso/tp/tp_kernel.py | 2 +- bayeso/utils/utils_bo.py | 4 ++-- bayeso/wrappers/wrappers_bo.py | 6 +++--- 8 files changed, 20 insertions(+), 20 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 1b1b77d..a787cce 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -43,7 +43,7 @@ class BO: :param use_ard: flag for automatic relevance determination. :type use_ard: bool., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param str_surrogate: the name of surrogate model. :type str_surrogate: str., optional :param str_optimizer_method_gp: the name of optimization method for @@ -278,7 +278,7 @@ def get_samples(self, str_sampling_method: str, :param str_sampling_method: the name of sampling method. :type str_sampling_method: str. :param fun_objective: None, or objective function. - :type fun_objective: NoneType or function, optional + :type fun_objective: NoneType or callable, optional :param num_samples: the number of samples. :type num_samples: int., optional :param seed: None, or random seed. @@ -355,7 +355,7 @@ def _optimize_objective(self, fun_acquisition: callable, X_train: np.ndarray, It returns acquisition function values over `X_test`. :param fun_acquisition: acquisition function. - :type fun_acquisition: function + :type fun_acquisition: callable :param X_train: inputs. Shape: (n, d) or (n, m, d). :type X_train: numpy.ndarray :param Y_train: outputs. Shape: (n, 1). @@ -406,7 +406,7 @@ def _optimize(self, fun_negative_acquisition: callable, str_sampling_method: str start acquisition function optimization. :param fun_objective: negative acquisition function. - :type fun_objective: function + :type fun_objective: callable :param str_sampling_method: the name of sampling method. :type str_sampling_method: str. :param num_samples: the number of samples. @@ -489,7 +489,7 @@ def optimize(self, X_train: np.ndarray, Y_train: np.ndarray, :type X_train: numpy.ndarray :param Y_train: outputs. Shape: (n, 1). :type Y_train: numpy.ndarray - :param str_sampling_method_method: the name of sampling method for + :param str_sampling_method: the name of sampling method for acquisition function optimization. 
:type str_sampling_method: str., optional :param num_samples: the number of samples. diff --git a/bayeso/covariance.py b/bayeso/covariance.py index 2cda950..6735975 100644 --- a/bayeso/covariance.py +++ b/bayeso/covariance.py @@ -22,7 +22,7 @@ def choose_fun_cov(str_cov: str) -> constants.TYPING_CALLABLE: :type str_cov: str. :returns: covariance function. - :rtype: function + :rtype: callable :raises: AssertionError @@ -50,7 +50,7 @@ def choose_fun_grad_cov(str_cov: str) -> constants.TYPING_CALLABLE: :type str_cov: str. :returns: function for computing gradients of covariance function. - :rtype: function + :rtype: callable :raises: AssertionError diff --git a/bayeso/gp/gp.py b/bayeso/gp/gp.py index 4d51cf1..b6db27d 100644 --- a/bayeso/gp/gp.py +++ b/bayeso/gp/gp.py @@ -78,7 +78,7 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra :param str_cov: the name of covariance function. :type str_cov: str., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param debug: flag for printing log messages. :type debug: bool., optional @@ -133,7 +133,7 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr :param str_cov: the name of covariance function. :type str_cov: str., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param debug: flag for printing log messages. :type debug: bool., optional @@ -183,7 +183,7 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test :param str_optimizer_method: the name of optimization method. :type str_optimizer_method: str., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param debug: flag for printing log messages. diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index 3a898ed..ac8e671 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -38,7 +38,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, :param Y_train: outputs. Shape: (n, 1). :type Y_train: numpy.ndarray :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType + :type prior_mu: callable or NoneType :param str_cov: the name of covariance function. :type str_cov: str. :param str_optimizer_method: the name of optimization method. diff --git a/bayeso/tp/tp.py b/bayeso/tp/tp.py index f36295a..d31c37b 100644 --- a/bayeso/tp/tp.py +++ b/bayeso/tp/tp.py @@ -91,7 +91,7 @@ def predict_with_cov(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarra :param str_cov: the name of covariance function. :type str_cov: str., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param debug: flag for printing log messages. :type debug: bool., optional @@ -160,7 +160,7 @@ def predict_with_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test: np.ndarr :param str_cov: the name of covariance function. :type str_cov: str., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param debug: flag for printing log messages. 
:type debug: bool., optional @@ -214,7 +214,7 @@ def predict_with_optimized_hyps(X_train: np.ndarray, Y_train: np.ndarray, X_test :param str_optimizer_method: the name of optimization method. :type str_optimizer_method: str., optional :param prior_mu: None, or prior mean function. - :type prior_mu: NoneType, or function, optional + :type prior_mu: NoneType, or callable, optional :param fix_noise: flag for fixing a noise. :type fix_noise: bool., optional :param debug: flag for printing log messages. diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py index 5b12f37..5cc8f2c 100644 --- a/bayeso/tp/tp_kernel.py +++ b/bayeso/tp/tp_kernel.py @@ -37,7 +37,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, :param Y_train: outputs. Shape: (n, 1). :type Y_train: numpy.ndarray :param prior_mu: prior mean function or None. - :type prior_mu: function or NoneType + :type prior_mu: callable or NoneType :param str_cov: the name of covariance function. :type str_cov: str. :param str_optimizer_method: the name of optimization method. diff --git a/bayeso/utils/utils_bo.py b/bayeso/utils/utils_bo.py index 1227591..781d2dc 100644 --- a/bayeso/utils/utils_bo.py +++ b/bayeso/utils/utils_bo.py @@ -34,7 +34,7 @@ def get_best_acquisition_by_evaluation(initials: np.ndarray, :param initials: inputs. Shape: (n, d). :type initials: numpy.ndarray :param fun_objective: an objective function. - :type fun_objective: function + :type fun_objective: callable :returns: the best example of `initials`. Shape: (1, d). :rtype: numpy.ndarray @@ -185,7 +185,7 @@ def choose_fun_acquisition(str_acq: str, hyps: dict) -> constants.TYPING_CALLABL :type hyps: dict. :returns: acquisition function. - :rtype: function + :rtype: callable :raises: AssertionError diff --git a/bayeso/wrappers/wrappers_bo.py b/bayeso/wrappers/wrappers_bo.py index 8f62b40..44e1862 100644 --- a/bayeso/wrappers/wrappers_bo.py +++ b/bayeso/wrappers/wrappers_bo.py @@ -32,7 +32,7 @@ def run_single_round_with_all_initial_information(model_bo: bo.BO, :param model_bo: Bayesian optimization model. :type model_bo: bayeso.bo.BO :param fun_target: a target function. - :type fun_target: function + :type fun_target: callable :param X_train: initial inputs. Shape: (n, d) or (n, m, d). :type X_train: numpy.ndarray :param Y_train: initial outputs. Shape: (n, 1). @@ -146,7 +146,7 @@ def run_single_round_with_initial_inputs(model_bo: bo.BO, :param model_bo: Bayesian optimization model. :type model_bo: bayeso.bo.BO :param fun_target: a target function. - :type fun_target: function + :type fun_target: callable :param X_train: initial inputs. Shape: (n, d) or (n, m, d). :type X_train: numpy.ndarray :param num_iter: the number of iterations for Bayesian optimization. @@ -228,7 +228,7 @@ def run_single_round(model_bo: bo.BO, fun_target: constants.TYPING_CALLABLE, :param model_bo: Bayesian optimization model. :type model_bo: bayeso.bo.BO :param fun_target: a target function. - :type fun_target: function + :type fun_target: callable :param num_init: the number of initial examples for Bayesian optimization. :type num_init: int. :param num_iter: the number of iterations for Bayesian optimization. 
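A minimal usage sketch (not part of the patches in this series) of the validate_hyps_dict contract changed earlier in the series: the function now returns only the validated, possibly clipped, hyperparameter dictionary and signals malformed input by raising ValueError instead of returning an is_valid flag. The covariance name, dimension, and use_ard value below are illustrative.

from bayeso.utils import utils_covariance

# Default hyperparameters for a squared-exponential kernel over 2 dimensions.
hyps = utils_covariance.get_hyps('se', 2, use_ard=True)

# validate_hyps_dict now raises ValueError for missing or ill-typed entries
# (noise, lengthscales, signal, dof) and clips an excessive noise value.
try:
    hyps = utils_covariance.validate_hyps_dict(hyps, 'se', 2)
except ValueError as err:
    print('invalid hyperparameters:', err)
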
From 7bb1a73e301c2802dacb0172e9ff59d60d032da3 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Sun, 11 Apr 2021 18:18:17 +0900 Subject: [PATCH 32/37] Enable to use use_ard for BO --- bayeso/bo.py | 20 +++++--- bayeso/constants.py | 3 +- bayeso/gp/gp_kernel.py | 5 -- bayeso/gp/gp_likelihood.py | 5 +- bayeso/tp/tp_likelihood.py | 4 +- bayeso/utils/utils_covariance.py | 1 - bayeso/wrappers/wrappers_bo.py | 6 +-- tests/common/test_bo.py | 83 +++++++++++++++++++++++++++++++- 8 files changed, 105 insertions(+), 22 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index a787cce..269b4dc 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -77,7 +77,6 @@ def __init__(self, range_X: np.ndarray, """ - # TODO: use use_ard. assert isinstance(range_X, np.ndarray) assert isinstance(str_cov, str) assert isinstance(str_acq, str) @@ -266,7 +265,6 @@ def _get_samples_halton(self, num_samples: int, + self.range_X[:, 0].flatten() return samples - # TODO: num_grids should be able to be input. def get_samples(self, str_sampling_method: str, fun_objective: constants.TYPING_UNION_CALLABLE_NONE=None, num_samples: int=constants.NUM_SAMPLES_AO, @@ -300,8 +298,8 @@ def get_samples(self, str_sampling_method: str, if str_sampling_method == 'grid': assert fun_objective is not None if self.debug: - logger.debug('num_samples is ignored, because grid is chosen.') - samples = self._get_samples_grid() + logger.debug('For this option, num_samples is used as num_grids.') + samples = self._get_samples_grid(num_grids=num_samples) samples = utils_bo.get_best_acquisition_by_evaluation(samples, fun_objective) elif str_sampling_method == 'uniform': samples = self._get_samples_uniform(num_samples, seed=seed) @@ -527,20 +525,26 @@ def optimize(self, X_train: np.ndarray, Y_train: np.ndarray, time_start_gp = time.time() if str_mlm_method == 'regular': - cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X_train, Y_train, + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel( + X_train, Y_train, self.prior_mu, self.str_cov, str_optimizer_method=self.str_optimizer_method_gp, str_modelselection_method=self.str_modelselection_method, - debug=self.debug) + use_ard=self.use_ard, + debug=self.debug + ) elif str_mlm_method == 'converged': fix_noise = constants.FIX_GP_NOISE if self.is_optimize_hyps: - cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel(X_train, Y_train, + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel( + X_train, Y_train, self.prior_mu, self.str_cov, str_optimizer_method=self.str_optimizer_method_gp, str_modelselection_method=self.str_modelselection_method, - debug=self.debug) + use_ard=self.use_ard, + debug=self.debug + ) self.is_optimize_hyps = not utils_bo.check_hyps_convergence(self.historical_hyps, hyps, self.str_cov, fix_noise) diff --git a/bayeso/constants.py b/bayeso/constants.py index 07cf30a..539537c 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -14,6 +14,8 @@ JITTER_COV = 1e-5 JITTER_LOG = 1e-7 +TOLERANCE_DUPLICATED_ACQ = 1e-4 + STR_SURROGATE = 'gp' STR_OPTIMIZER_METHOD_GP = 'BFGS' STR_OPTIMIZER_METHOD_TP = 'SLSQP' @@ -49,7 +51,6 @@ 'BFGS', 'L-BFGS-B', 'Nelder-Mead', - 'DIRECT', 'SLSQP', 'SLSQP-Bounded', ] diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index ac8e671..efc2bf0 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -69,7 +69,6 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, utils_covariance.check_str_cov('get_optimized_kernel', str_cov, X_train.shape) assert str_optimizer_method in 
constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD - # TODO: fix this. use_gradient = bool(str_optimizer_method != 'Nelder-Mead') time_start = time.time() @@ -133,10 +132,6 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, if debug: logger.debug('scipy message: %s', result_optimized.message) result_optimized = result_optimized.x - # TODO: Fill this conditions - elif str_optimizer_method == 'DIRECT': # pragma: no cover - raise NotImplementedError('get_optimized_kernel: allowed str_optimizer_method,\ - but it is not implemented.') else: # pragma: no cover raise ValueError('get_optimized_kernel: missing conditions for str_optimizer_method') diff --git a/bayeso/gp/gp_likelihood.py b/bayeso/gp/gp_likelihood.py index 6f2ecc8..a6a7c97 100644 --- a/bayeso/gp/gp_likelihood.py +++ b/bayeso/gp/gp_likelihood.py @@ -101,9 +101,11 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, first_term = -0.5 * np.dot(np.dot(new_Y_train.T, inv_cov_X_X), new_Y_train) sign_second_term, second_term = np.linalg.slogdet(cov_X_X) - # TODO: let me think. + + # TODO: It should be checked. if sign_second_term <= 0: # pragma: no cover second_term = 0.0 + second_term = -0.5 * second_term third_term = -float(X_train.shape[0]) / 2.0 * np.log(2.0 * np.pi) @@ -146,6 +148,7 @@ def neg_log_pseudo_l_loocv(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.nd """ + # TODO: add use_ard. utils_gp.validate_common_args(X_train, Y_train, str_cov, None, debug) assert isinstance(hyps, np.ndarray) assert isinstance(prior_mu_train, np.ndarray) diff --git a/bayeso/tp/tp_likelihood.py b/bayeso/tp/tp_likelihood.py index d2c01b7..c328f55 100644 --- a/bayeso/tp/tp_likelihood.py +++ b/bayeso/tp/tp_likelihood.py @@ -83,9 +83,11 @@ def neg_log_ml(X_train: np.ndarray, Y_train: np.ndarray, hyps: np.ndarray, first_term = -0.5 * num_X * np.log((nu - 2.0) * np.pi) sign_second_term, second_term = np.linalg.slogdet(cov_X_X) - # TODO: let me think. + + # TODO: it should be checked. if sign_second_term <= 0: # pragma: no cover second_term = 0.0 + second_term = -0.5 * second_term third_term = np.log(scipy.special.gamma((nu + num_X) / 2.0) / scipy.special.gamma(nu / 2.0)) diff --git a/bayeso/utils/utils_covariance.py b/bayeso/utils/utils_covariance.py index c787379..c9b51ac 100644 --- a/bayeso/utils/utils_covariance.py +++ b/bayeso/utils/utils_covariance.py @@ -71,7 +71,6 @@ def get_hyps(str_cov: str, dim: int, if use_ard: hyps['lengthscales'] = np.ones(dim) else: - # TODO: It makes bunch of errors. I should fix it. hyps['lengthscales'] = 1.0 else: raise NotImplementedError('get_hyps: allowed str_cov, but it is not implemented.') diff --git a/bayeso/wrappers/wrappers_bo.py b/bayeso/wrappers/wrappers_bo.py index 44e1862..3f7ca35 100644 --- a/bayeso/wrappers/wrappers_bo.py +++ b/bayeso/wrappers/wrappers_bo.py @@ -98,10 +98,8 @@ def run_single_round_with_all_initial_information(model_bo: bo.BO, if model_bo.debug: logger.debug('next_point: %s', utils_logger.get_str_array(next_point)) - # TODO: check this code, which uses norm. 
-# if np.where(np.sum(next_point == X_final, axis=1) == X_final.shape[1])[0].shape[0] > 0: - if np.where(np.linalg.norm(next_point - X_final, axis=1) < 1e-3)[0]\ - .shape[0] > 0: # pragma: no cover + if np.where(np.linalg.norm(next_point - X_final, axis=1)\ + < constants.TOLERANCE_DUPLICATED_ACQ)[0].shape[0] > 0: # pragma: no cover next_point = utils_bo.get_next_best_acquisition(next_points, acquisitions, X_final) if model_bo.debug: logger.debug('next_point is repeated, so next best is selected.\ diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py index d4b205d..cb99a7d 100644 --- a/tests/common/test_bo.py +++ b/tests/common/test_bo.py @@ -113,7 +113,7 @@ def test_get_samples(): with pytest.raises(AssertionError) as error: model_bo.get_samples('abc') - arr_initials = model_bo.get_samples('grid', fun_objective=fun_objective) + arr_initials = model_bo.get_samples('grid', num_samples=50, fun_objective=fun_objective) truth_arr_initials = np.array([ [0.0, -2.0, -5.0], ]) @@ -631,3 +631,84 @@ def test_optimize_normalize_Y(): assert next_point.shape[0] == dim_X assert next_points.shape[1] == dim_X assert next_points.shape[0] == acquisitions.shape[0] + +def test_optimize_use_ard(): + np.random.seed(42) + arr_range = np.array([ + [0.0, 10.0], + [-2.0, 2.0], + [-5.0, 5.0], + ]) + dim_X = arr_range.shape[0] + num_X = 5 + X = np.random.randn(num_X, dim_X) + Y = np.random.randn(num_X, 1) + + model_bo = package_target.BO(arr_range, use_ard=False) + next_point, dict_info = model_bo.optimize(X, Y) + next_points = dict_info['next_points'] + acquisitions = dict_info['acquisitions'] + cov_X_X = dict_info['cov_X_X'] + inv_cov_X_X = dict_info['inv_cov_X_X'] + hyps = dict_info['hyps'] + time_overall = dict_info['time_overall'] + time_gp = dict_info['time_gp'] + time_acq = dict_info['time_acq'] + + assert isinstance(next_point, np.ndarray) + assert isinstance(next_points, np.ndarray) + assert isinstance(acquisitions, np.ndarray) + assert isinstance(cov_X_X, np.ndarray) + assert isinstance(inv_cov_X_X, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(time_overall, float) + assert isinstance(time_gp, float) + assert isinstance(time_acq, float) + assert len(next_point.shape) == 1 + assert len(next_points.shape) == 2 + assert len(acquisitions.shape) == 1 + assert next_point.shape[0] == dim_X + assert next_points.shape[1] == dim_X + assert next_points.shape[0] == acquisitions.shape[0] + assert isinstance(hyps['lengthscales'], float) + + X = np.array([ + [3.0, 0.0, 1.0], + [2.0, -1.0, 4.0], + [9.0, 1.5, 3.0], + ]) + Y = np.array([ + [100.0], + [100.0], + [100.0], + ]) + + model_bo = package_target.BO(arr_range, use_ard=True) + next_point, dict_info = model_bo.optimize(X, Y) + next_points = dict_info['next_points'] + acquisitions = dict_info['acquisitions'] + cov_X_X = dict_info['cov_X_X'] + inv_cov_X_X = dict_info['inv_cov_X_X'] + hyps = dict_info['hyps'] + time_overall = dict_info['time_overall'] + time_gp = dict_info['time_gp'] + time_acq = dict_info['time_acq'] + + assert isinstance(next_point, np.ndarray) + assert isinstance(next_points, np.ndarray) + assert isinstance(acquisitions, np.ndarray) + assert isinstance(cov_X_X, np.ndarray) + assert isinstance(inv_cov_X_X, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(time_overall, float) + assert isinstance(time_gp, float) + assert isinstance(time_acq, float) + assert len(next_point.shape) == 1 + assert len(next_points.shape) == 2 + assert len(acquisitions.shape) == 1 + assert next_point.shape[0] == dim_X + assert 
next_points.shape[1] == dim_X + assert next_points.shape[0] == acquisitions.shape[0] + assert isinstance(hyps['lengthscales'], np.ndarray) + assert len(hyps['lengthscales'].shape) == 1 + assert hyps['lengthscales'].shape[0] == 3 From 88531f8a6284140d51019ee915ca1ae36695e022 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Sun, 11 Apr 2021 19:04:20 +0900 Subject: [PATCH 33/37] Update examples --- examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py | 2 +- examples/04_benchmarks/example_benchmarks_branin_bo_ei.py | 2 +- examples/04_benchmarks/example_benchmarks_branin_gp.py | 2 +- examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py | 2 +- .../04_benchmarks/example_benchmarks_hartmann6d_bo_ei.py | 2 +- examples/99_notebooks/example_bo_branin.ipynb | 2 +- examples/99_notebooks/example_hpo_xgboost.ipynb | 5 +++-- 7 files changed, 9 insertions(+), 8 deletions(-) diff --git a/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py b/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py index c7c6bfd..2a059e7 100644 --- a/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_ackley_bo_ei.py @@ -6,7 +6,7 @@ import os from bayeso import bo -from benchmarks.inf_dim_ackley import Ackley +from bayeso_benchmarks.inf_dim_ackley import Ackley from bayeso.wrappers import wrappers_bo from bayeso.utils import utils_bo from bayeso.utils import utils_plotting diff --git a/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py b/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py index 9311eae..9c72de8 100644 --- a/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_branin_bo_ei.py @@ -6,7 +6,7 @@ import os from bayeso import bo -from benchmarks.two_dim_branin import Branin +from bayeso_benchmarks.two_dim_branin import Branin from bayeso.wrappers import wrappers_bo from bayeso.utils import utils_bo from bayeso.utils import utils_plotting diff --git a/examples/04_benchmarks/example_benchmarks_branin_gp.py b/examples/04_benchmarks/example_benchmarks_branin_gp.py index 75a395b..75e537a 100644 --- a/examples/04_benchmarks/example_benchmarks_branin_gp.py +++ b/examples/04_benchmarks/example_benchmarks_branin_gp.py @@ -9,7 +9,7 @@ from bayeso.gp import gp from bayeso.utils import utils_common from bayeso.utils import utils_plotting -from benchmarks.two_dim_branin import Branin +from bayeso_benchmarks.two_dim_branin import Branin STR_FUN_TARGET = 'branin' diff --git a/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py b/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py index 788d805..406806b 100644 --- a/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_eggholder_bo_ei.py @@ -6,7 +6,7 @@ import os from bayeso import bo -from benchmarks.two_dim_eggholder import Eggholder +from bayeso_benchmarks.two_dim_eggholder import Eggholder from bayeso.wrappers import wrappers_bo from bayeso.utils import utils_bo from bayeso.utils import utils_plotting diff --git a/examples/04_benchmarks/example_benchmarks_hartmann6d_bo_ei.py b/examples/04_benchmarks/example_benchmarks_hartmann6d_bo_ei.py index f832f05..fec3d76 100644 --- a/examples/04_benchmarks/example_benchmarks_hartmann6d_bo_ei.py +++ b/examples/04_benchmarks/example_benchmarks_hartmann6d_bo_ei.py @@ -6,7 +6,7 @@ import os from bayeso import bo -from benchmarks.six_dim_hartmann6d import Hartmann6D +from bayeso_benchmarks.six_dim_hartmann6d import Hartmann6D from 
bayeso.wrappers import wrappers_bo from bayeso.utils import utils_bo from bayeso.utils import utils_plotting diff --git a/examples/99_notebooks/example_bo_branin.ipynb b/examples/99_notebooks/example_bo_branin.ipynb index 929da74..03485c4 100644 --- a/examples/99_notebooks/example_bo_branin.ipynb +++ b/examples/99_notebooks/example_bo_branin.ipynb @@ -11,7 +11,7 @@ "import numpy as np\n", "\n", "from bayeso import bo\n", - "from benchmarks.two_dim_branin import Branin\n", + "from bayeso_benchmarks.two_dim_branin import Branin\n", "from bayeso.wrappers import wrappers_bo\n", "from bayeso.utils import utils_plotting" ] diff --git a/examples/99_notebooks/example_hpo_xgboost.ipynb b/examples/99_notebooks/example_hpo_xgboost.ipynb index 4ca18a8..c3aaa81 100644 --- a/examples/99_notebooks/example_hpo_xgboost.ipynb +++ b/examples/99_notebooks/example_hpo_xgboost.ipynb @@ -44,9 +44,10 @@ "def fun_target(bx):\n", " model_xgb = xgb.XGBClassifier(\n", " max_depth=int(bx[0]),\n", - " n_estimators=int(bx[1])\n", + " n_estimators=int(bx[1]),\n", + " use_label_encoder=False\n", " )\n", - " model_xgb.fit(data_train, labels_train)\n", + " model_xgb.fit(data_train, labels_train, eval_metric='mlogloss')\n", " preds_test = model_xgb.predict(data_test)\n", " return 1.0 - sklearn.metrics.accuracy_score(labels_test, preds_test)" ] From f226f5c8c6e229b28a650b03bdb1b7c49a5d197b Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 13 Apr 2021 13:39:58 +0900 Subject: [PATCH 34/37] Add combined option for mlm and disable use_gradient --- bayeso/bo.py | 37 +++++++++++++++++++++++++++++++++++++ bayeso/constants.py | 3 ++- bayeso/gp/gp_kernel.py | 5 +++++ bayeso/tp/tp_kernel.py | 1 + docs/example/branin.rst | 4 ++-- tests/common/test_bo.py | 27 +++++++++++++++++++++++++++ 6 files changed, 74 insertions(+), 3 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 269b4dc..849d02c 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -533,6 +533,43 @@ def optimize(self, X_train: np.ndarray, Y_train: np.ndarray, use_ard=self.use_ard, debug=self.debug ) + elif str_mlm_method == 'combined': + from bayeso.gp import gp_likelihood + from bayeso.utils import utils_gp + from bayeso.utils import utils_covariance + + prior_mu_train = utils_gp.get_prior_mu(self.prior_mu, X_train) + + neg_log_ml_best = np.inf + cov_X_X_best = None + inv_cov_X_X_best = None + hyps_best = None + + for cur_str_optimizer_method in ['BFGS', 'Nelder-Mead']: + cov_X_X, inv_cov_X_X, hyps = gp_kernel.get_optimized_kernel( + X_train, Y_train, + self.prior_mu, self.str_cov, + str_optimizer_method=cur_str_optimizer_method, + str_modelselection_method=self.str_modelselection_method, + use_ard=self.use_ard, + debug=self.debug + ) + cur_neg_log_ml_ = gp_likelihood.neg_log_ml(X_train, Y_train, + utils_covariance.convert_hyps(self.str_cov, hyps, + fix_noise=constants.FIX_GP_NOISE), + self.str_cov, prior_mu_train, + use_ard=self.use_ard, fix_noise=constants.FIX_GP_NOISE, + use_gradient=False, debug=self.debug) + + if cur_neg_log_ml_ < neg_log_ml_best: + neg_log_ml_best = cur_neg_log_ml_ + cov_X_X_best = cov_X_X + inv_cov_X_X_best = inv_cov_X_X + hyps_best = hyps + + cov_X_X = cov_X_X_best + inv_cov_X_X = inv_cov_X_X_best + hyps = hyps_best elif str_mlm_method == 'converged': fix_noise = constants.FIX_GP_NOISE diff --git a/bayeso/constants.py b/bayeso/constants.py index 539537c..cb69a75 100644 --- a/bayeso/constants.py +++ b/bayeso/constants.py @@ -56,6 +56,7 @@ ] ALLOWED_OPTIMIZER_METHOD_TP = ['L-BFGS-B', 'SLSQP'] ALLOWED_OPTIMIZER_METHOD_BO = ['L-BFGS-B', 'DIRECT', 
'CMA-ES'] + # INFO: Do not use _ (underscore) in base str_cov. ALLOWED_COV_BASE = ['eq', 'se', 'matern32', 'matern52'] ALLOWED_COV_SET = ['set_' + str_cov for str_cov in ALLOWED_COV_BASE] @@ -63,7 +64,7 @@ ALLOWED_BO_ACQ = ['pi', 'ei', 'ucb', 'aei', 'pure_exploit', 'pure_explore'] ALLOWED_INITIALIZING_METHOD_BO = ['uniform', 'gaussian', 'sobol', 'halton'] ALLOWED_SAMPLING_METHOD = ALLOWED_INITIALIZING_METHOD_BO + ['grid'] -ALLOWED_MLM_METHOD = ['regular', 'converged'] +ALLOWED_MLM_METHOD = ['regular', 'combined', 'converged'] ALLOWED_MODELSELECTION_METHOD = ['ml', 'loocv'] ALLOWED_SURROGATE = ['gp', 'tp'] diff --git a/bayeso/gp/gp_kernel.py b/bayeso/gp/gp_kernel.py index efc2bf0..09d020b 100644 --- a/bayeso/gp/gp_kernel.py +++ b/bayeso/gp/gp_kernel.py @@ -70,6 +70,8 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, assert str_optimizer_method in constants.ALLOWED_OPTIMIZER_METHOD_GP assert str_modelselection_method in constants.ALLOWED_MODELSELECTION_METHOD use_gradient = bool(str_optimizer_method != 'Nelder-Mead') + # TODO: Now, use_gradient is fixed as False. + use_gradient = False time_start = time.time() @@ -108,6 +110,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, method=str_optimizer_method, jac=use_gradient, options={'disp': False}) if debug: + logger.debug('negative log marginal likelihood: %.6f', result_optimized.fun) logger.debug('scipy message: %s', result_optimized.message) result_optimized = result_optimized.x @@ -123,6 +126,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, options={'disp': False}) if debug: + logger.debug('negative log marginal likelihood: %.6f', result_optimized.fun) logger.debug('scipy message: %s', result_optimized.message) result_optimized = result_optimized.x elif str_optimizer_method in ['Nelder-Mead']: @@ -130,6 +134,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, method=str_optimizer_method, options={'disp': False}) if debug: + logger.debug('negative log marginal likelihood: %.6f', result_optimized.fun) logger.debug('scipy message: %s', result_optimized.message) result_optimized = result_optimized.x else: # pragma: no cover diff --git a/bayeso/tp/tp_kernel.py b/bayeso/tp/tp_kernel.py index 5cc8f2c..1d3b7e9 100644 --- a/bayeso/tp/tp_kernel.py +++ b/bayeso/tp/tp_kernel.py @@ -66,6 +66,7 @@ def get_optimized_kernel(X_train: np.ndarray, Y_train: np.ndarray, # TODO: Fix it later. use_gradient = True + use_gradient = False time_start = time.time() diff --git a/docs/example/branin.rst b/docs/example/branin.rst index 2c7b21d..fcc6fb1 100644 --- a/docs/example/branin.rst +++ b/docs/example/branin.rst @@ -10,7 +10,7 @@ First, import some packages we need. 
import numpy as np from bayeso import bo - from benchmarks.two_dim_branin import Branin + from bayeso_benchmarks.two_dim_branin import Branin from bayeso.wrappers import wrappers_bo from bayeso.utils import utils_plotting @@ -88,7 +88,7 @@ Full code: import numpy as np from bayeso import bo - from benchmarks.two_dim_branin import Branin + from bayeso_benchmarks.two_dim_branin import Branin from bayeso.wrappers import wrappers_bo from bayeso.utils import utils_plotting diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py index cb99a7d..84e49c5 100644 --- a/tests/common/test_bo.py +++ b/tests/common/test_bo.py @@ -516,6 +516,33 @@ def test_optimize_str_mlm_method(): assert next_points.shape[1] == dim_X assert next_points.shape[0] == acquisitions.shape[0] + model_bo = package_target.BO(arr_range_1) + next_point, dict_info = model_bo.optimize(X, Y, str_mlm_method='combined') + next_points = dict_info['next_points'] + acquisitions = dict_info['acquisitions'] + cov_X_X = dict_info['cov_X_X'] + inv_cov_X_X = dict_info['inv_cov_X_X'] + hyps = dict_info['hyps'] + time_overall = dict_info['time_overall'] + time_gp = dict_info['time_gp'] + time_acq = dict_info['time_acq'] + + assert isinstance(next_point, np.ndarray) + assert isinstance(next_points, np.ndarray) + assert isinstance(acquisitions, np.ndarray) + assert isinstance(cov_X_X, np.ndarray) + assert isinstance(inv_cov_X_X, np.ndarray) + assert isinstance(hyps, dict) + assert isinstance(time_overall, float) + assert isinstance(time_gp, float) + assert isinstance(time_acq, float) + assert len(next_point.shape) == 1 + assert len(next_points.shape) == 2 + assert len(acquisitions.shape) == 1 + assert next_point.shape[0] == dim_X + assert next_points.shape[1] == dim_X + assert next_points.shape[0] == acquisitions.shape[0] + def test_optimize_str_modelselection_method(): np.random.seed(42) arr_range_1 = np.array([ From f89c41f292431463d30c7def521dc1c4aa6ddfaf Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Tue, 13 Apr 2021 13:48:42 +0900 Subject: [PATCH 35/37] Improve coverage --- tests/common/test_tp_likelihood.py | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/tests/common/test_tp_likelihood.py b/tests/common/test_tp_likelihood.py index 924f14d..5a1af78 100644 --- a/tests/common/test_tp_likelihood.py +++ b/tests/common/test_tp_likelihood.py @@ -83,3 +83,10 @@ def test_neg_log_ml(): ]) assert np.abs(neg_log_ml_ - truth_log_ml_) < TEST_EPSILON assert np.all(np.abs(neg_grad_log_ml_ - truth_grad_log_ml_) < TEST_EPSILON) + + dict_hyps = utils_covariance.get_hyps(str_cov, dim_X, use_gp=use_gp) + arr_hyps = utils_covariance.convert_hyps(str_cov, dict_hyps, fix_noise=True, use_gp=use_gp) + + neg_log_ml_, neg_grad_log_ml_ = package_target.neg_log_ml(X, Y, arr_hyps, str_cov, prior_mu_X, fix_noise=True, use_gradient=True) + print(neg_log_ml_) + print(neg_grad_log_ml_) From e24b8ff0b78d07d2c324e03229b2d861a3e97094 Mon Sep 17 00:00:00 2001 From: Jungtaek Kim Date: Wed, 14 Apr 2021 13:55:28 +0900 Subject: [PATCH 36/37] Add compute_acquisitions --- bayeso/bo.py | 62 ++++++++++++++++++++++++++++++++++++++--- tests/common/test_bo.py | 61 ++++++++++++++++++++++++++++++++++++++++ 2 files changed, 119 insertions(+), 4 deletions(-) diff --git a/bayeso/bo.py b/bayeso/bo.py index 849d02c..bbc7406 100644 --- a/bayeso/bo.py +++ b/bayeso/bo.py @@ -345,8 +345,61 @@ def get_initials(self, str_initial_method: str, num_initials: int, return self.get_samples(str_initial_method, num_samples=num_initials, seed=seed) - def _optimize_objective(self, 
fun_acquisition: callable, X_train: np.ndarray, - Y_train: np.ndarray, X_test: np.ndarray, cov_X_X: np.ndarray, + def compute_acquisitions(self, X: np.ndarray, + X_train: np.ndarray, Y_train: np.ndarray, + cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict + ) -> np.ndarray: + """ + It computes acquisition function values over 'X', + where `X_train`, `Y_train`, `cov_X_X`, `inv_cov_X_X`, and `hyps` + are given. + + :param X: inputs. Shape: (l, d) or (l, m, d). + :type X: numpy.ndarray + :param X_train: inputs. Shape: (n, d) or (n, m, d). + :type X_train: numpy.ndarray + :param Y_train: outputs. Shape: (n, 1). + :type Y_train: numpy.ndarray + :param cov_X_X: kernel matrix over `X_train`. Shape: (n, n). + :type cov_X_X: numpy.ndarray + :param inv_cov_X_X: kernel matrix inverse over `X_train`. Shape: (n, n). + :type inv_cov_X_X: numpy.ndarray + :param hyps: dictionary of hyperparameters. + :type hyps: dict. + + :returns: acquisition function values over `X`. Shape: (l, ). + :rtype: numpy.ndarray + + """ + + assert isinstance(X, np.ndarray) + assert isinstance(X_train, np.ndarray) + assert isinstance(Y_train, np.ndarray) + assert len(X.shape) == 2 or len(X.shape) == 3 + assert len(X_train.shape) == 2 or len(X_train.shape) == 3 + assert len(Y_train.shape) == 2 + assert Y_train.shape[1] == 1 + assert X_train.shape[0] == Y_train.shape[0] + if len(X_train.shape) == 2: + assert X.shape[1] == X_train.shape[1] == self.num_dim + else: + assert X.shape[2] == X_train.shape[2] == self.num_dim + + fun_acquisition = utils_bo.choose_fun_acquisition(self.str_acq, hyps) + + acquisitions = constants.MULTIPLIER_ACQ * self._optimize_objective( + fun_acquisition, X_train, Y_train, + X, cov_X_X, inv_cov_X_X, hyps + ) + + assert isinstance(acquisitions, np.ndarray) + assert len(acquisitions.shape) == 1 + assert X.shape[0] == acquisitions.shape[0] + return acquisitions + + def _optimize_objective(self, fun_acquisition: constants.TYPING_CALLABLE, + X_train: np.ndarray, Y_train: np.ndarray, + X_test: np.ndarray, cov_X_X: np.ndarray, inv_cov_X_X: np.ndarray, hyps: dict ) -> np.ndarray: """ @@ -381,7 +434,7 @@ def _optimize_objective(self, fun_acquisition: callable, X_train: np.ndarray, pred_std=np.ravel(pred_std), Y_train=Y_train) return acquisitions - def _get_bounds(self) -> list: + def _get_bounds(self) -> constants.TYPING_LIST: """ It returns list of range tuples, obtained from `self.range_X`. 
@@ -395,7 +448,8 @@ def _get_bounds(self) -> list:
         list_bounds.append(tuple(elem))
         return list_bounds
 
-    def _optimize(self, fun_negative_acquisition: callable, str_sampling_method: str,
+    def _optimize(self, fun_negative_acquisition: constants.TYPING_CALLABLE,
+            str_sampling_method: str,
             num_samples: int
     ) -> constants.TYPING_TUPLE_TWO_ARRAYS:
         """
diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py
index 84e49c5..1f18522 100644
--- a/tests/common/test_bo.py
+++ b/tests/common/test_bo.py
@@ -8,6 +8,8 @@
 import numpy as np
 
 from bayeso import bo as package_target
+from bayeso import covariance
+from bayeso.utils import utils_covariance
 
 TEST_EPSILON = 1e-5
@@ -739,3 +741,62 @@ def test_optimize_use_ard():
     assert isinstance(hyps['lengthscales'], np.ndarray)
     assert len(hyps['lengthscales'].shape) == 1
     assert hyps['lengthscales'].shape[0] == 3
+
+def test_compute_acquisitions():
+    np.random.seed(42)
+    arr_range_1 = np.array([
+        [0.0, 10.0],
+        [-2.0, 2.0],
+        [-5.0, 5.0],
+    ])
+    dim_X = arr_range_1.shape[0]
+    num_X = 5
+    X = np.random.randn(num_X, dim_X)
+    Y = np.random.randn(num_X, 1)
+
+    model_bo = package_target.BO(arr_range_1, str_acq='pi')
+    hyps = utils_covariance.get_hyps(model_bo.str_cov, dim=dim_X, use_ard=model_bo.use_ard)
+
+    cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X, hyps, model_bo.str_cov)
+
+    X_test = model_bo.get_samples('sobol', num_samples=10, seed=111)
+    truth_X_test = np.array([
+        [1.5372224315069616, 0.04384007956832647, -2.2484372765757143],
+        [7.530325392726809, -1.5013302871957421, 3.6598239350132644],
+        [5.009974606800824, 1.4473280012607574, -2.9132778802886605],
+        [4.057266886811703, -0.9762288630008698, 1.8228407809510827],
+        [2.9087040200829506, 1.576258834451437, 3.9799577672965825],
+        [6.793604656122625, -0.0973438061773777, -0.07283419137820601],
+        [9.274512701667845, 0.914928319863975, 0.8972382079809904],
+        [0.4274896439164877, -1.380226788111031, -4.483412243425846],
+        [1.1341158207505941, 1.000061221420765, 0.24217900820076466],
+        [9.78826540056616, -0.5445895120501518, -3.8301817141473293],
+    ])
+    print(X_test)
+
+    assert np.all(np.abs(X_test - truth_X_test) < TEST_EPSILON)
+
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(1, X, Y, cov_X_X, inv_cov_X_X, hyps)
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, 1, Y, cov_X_X, inv_cov_X_X, hyps)
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, X, 1, cov_X_X, inv_cov_X_X, hyps)
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, X, Y, 1, inv_cov_X_X, hyps)
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, X, Y, cov_X_X, 1, hyps)
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, X, Y, cov_X_X, inv_cov_X_X, 1)
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, X, Y, cov_X_X, inv_cov_X_X, 'abc')
+
+    acqs = model_bo.compute_acquisitions(X_test, X, Y, cov_X_X, inv_cov_X_X, hyps)
+    print(acqs)
+
+    truth_acqs = np.array([0.9602042008680384, 0.7893457649458702, 0.7874870212950252, 0.8113392160307042, 0.7900800170056282, 0.789580990650518, 0.7893341902282358, 0.8667465389980766, 0.23928549511387842, 0.7893341160443801])
+
+    assert isinstance(acqs, np.ndarray)
+    assert len(acqs.shape) == 1
+    assert X_test.shape[0] == acqs.shape[0]
+    assert np.all(np.abs(acqs - truth_acqs) < TEST_EPSILON)

From 39168c9110ea8776a27979b64327269753c60f85 Mon Sep 17 00:00:00 2001
From: Jungtaek Kim
Date: Wed, 14 Apr 2021 14:07:42 +0900
Subject: [PATCH 37/37] Improve coverage

---
 tests/common/test_bo.py | 34 ++++++++++++++++++++++++++++++++++
 1 file changed, 34 insertions(+)

diff --git a/tests/common/test_bo.py b/tests/common/test_bo.py
index 1f18522..4fc48fc 100644
--- a/tests/common/test_bo.py
+++ b/tests/common/test_bo.py
@@ -800,3 +800,37 @@ def test_compute_acquisitions():
     assert len(acqs.shape) == 1
     assert X_test.shape[0] == acqs.shape[0]
     assert np.all(np.abs(acqs - truth_acqs) < TEST_EPSILON)
+
+def test_compute_acquisitions_set():
+    np.random.seed(42)
+    arr_range_1 = np.array([
+        [0.0, 10.0],
+        [-2.0, 2.0],
+        [-5.0, 5.0],
+    ])
+    dim_X = arr_range_1.shape[0]
+    num_X = 5
+    num_instances = 4
+    X = np.random.randn(num_X, num_instances, dim_X)
+    Y = np.random.randn(num_X, 1)
+
+    model_bo = package_target.BO(arr_range_1, str_acq='pi', str_cov='set_se')
+    hyps = utils_covariance.get_hyps(model_bo.str_cov, dim=dim_X, use_ard=model_bo.use_ard)
+
+    cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X, hyps, model_bo.str_cov)
+
+    X_test = np.array([
+        [
+            [1.0, 0.0, 0.0, 1.0],
+            [2.0, -1.0, 2.0, 1.0],
+            [3.0, -2.0, 4.0, 1.0],
+        ],
+        [
+            [4.0, 2.0, -3.0, 1.0],
+            [5.0, 0.0, -2.0, 1.0],
+            [6.0, -2.0, -1.0, 1.0],
+        ],
+    ])
+
+    with pytest.raises(AssertionError) as error:
+        model_bo.compute_acquisitions(X_test, X, Y, cov_X_X, inv_cov_X_X, hyps)
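
Editor's note: the following is a minimal usage sketch of the new BO.compute_acquisitions entry point introduced in PATCH 36; it is not part of the patch series itself. It mirrors the calls exercised in test_compute_acquisitions above. The search ranges, the randomly drawn observations X and Y, and the use of default hyperparameters from utils_covariance.get_hyps are illustrative assumptions, not a recommended end-to-end workflow.

    import numpy as np

    from bayeso import bo
    from bayeso import covariance
    from bayeso.utils import utils_covariance

    # Illustrative three-dimensional search space and observations.
    ranges = np.array([
        [0.0, 10.0],
        [-2.0, 2.0],
        [-5.0, 5.0],
    ])
    X = np.random.randn(5, ranges.shape[0])
    Y = np.random.randn(5, 1)

    model_bo = bo.BO(ranges, str_acq='pi')

    # Hyperparameters and kernel matrices are computed once and reused,
    # exactly as in the test above.
    hyps = utils_covariance.get_hyps(model_bo.str_cov, dim=ranges.shape[0], use_ard=model_bo.use_ard)
    cov_X_X, inv_cov_X_X, _ = covariance.get_kernel_inverse(X, hyps, model_bo.str_cov)

    # Acquisition function values over a Sobol' grid of candidate inputs.
    X_test = model_bo.get_samples('sobol', num_samples=10, seed=111)
    acqs = model_bo.compute_acquisitions(X_test, X, Y, cov_X_X, inv_cov_X_X, hyps)
    print(acqs.shape)  # (10, ), one value per candidate in X_test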