Commit 4eb061b1 authored by Malthe Kjær Bisbo's avatar Malthe Kjær Bisbo
Browse files

kernels working

parent 4903be8a
import numpy as np
import numpy as np
import unittest
from gpr import gpr
class test_kernel(unittest.TestCase):
    """Validate gpr kernel derivatives against numerical references."""

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')

    def setUp(self):
        print('setUp')
        #self.kernel = gauss_kernel()
        # BUG FIX: every test below reads self.kernel, but the object was
        # stored as self.gpr, so all tests died with AttributeError.  The
        # other copies of this test class store the object as self.kernel.
        self.kernel = gpr()

    def tearDown(self):
        print('tearDown\n')

    def test_jac(self):
        """Analytic jacobian must match the central-difference jacobian."""
        x1 = np.array([1., 2., 3.])
        x2 = np.array([1.5, 2., 1.])
        x3 = np.array([3., 1., 1.])
        y = np.c_[x1, x2, x3].T
        np.testing.assert_almost_equal(self.kernel.kernel_jacobian(x1, x2),
                                       self.kernel.numerical_jacobian(x1, x2))
        np.testing.assert_almost_equal(self.kernel.kernel_jacobian(x1, y),
                                       self.kernel.numerical_jacobian(x1, y))
        np.testing.assert_almost_equal(self.kernel.kernel_jacobian(x2, y),
                                       self.kernel.numerical_jacobian(x2, y))

    def test_hyper_jac(self):
        """Analytic hyperparameter gradient must match finite differences."""
        x1 = np.array([1., 2., 3.])
        x2 = np.array([1.5, 2., 1.])
        x3 = np.array([3., 1., 1.])
        X = np.c_[x1, x2, x3].T
        np.testing.assert_almost_equal(
            self.kernel.kernel_hyperparameter_gradient(X),
            self.kernel.numerical_hyperparameter_gradient(X))
# Script entry point: run this module's unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
import numpy as np
import unittest
from kernel import gauss_kernel, double_gauss_kernel
class test_kernel(unittest.TestCase):
    """Check analytic kernel derivatives against finite-difference ones."""

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')

    def setUp(self):
        print('setUp')
        #self.kernel = gauss_kernel()
        self.kernel = double_gauss_kernel()

    def tearDown(self):
        print('tearDown\n')

    def test_jac(self):
        """Analytic jacobian must agree with the numerical jacobian."""
        a = np.array([1., 2., 3.])
        b = np.array([1.5, 2., 1.])
        c = np.array([3., 1., 1.])
        stacked = np.array([a, b, c])
        for args in ((a, b), (a, stacked), (b, stacked)):
            np.testing.assert_almost_equal(
                self.kernel.kernel_jacobian(*args),
                self.kernel.numerical_jacobian(*args))

    def test_hyper_jac(self):
        """Analytic hyperparameter gradient must agree with finite differences."""
        a = np.array([1., 2., 3.])
        b = np.array([1.5, 2., 1.])
        c = np.array([3., 1., 1.])
        X = np.array([a, b, c])
        np.testing.assert_almost_equal(
            self.kernel.kernel_hyperparameter_gradient(X),
            self.kernel.numerical_hyperparameter_gradient(X))
# Script entry point: run this module's unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
mkb@s81n11.grendel.cscaa.dk.9244:1576483191
\ No newline at end of file
mkb@s81n11.grendel.cscaa.dk.9244:1576483191
\ No newline at end of file
import numpy as np
from kernel import gauss_kernel, double_gauss_kernel
class gpr():
    """Gaussian Process Regression.

    Parameters:

    descriptor:
        object that maps a structure to a feature vector; only its
        get_feature() method is used here.
    kernel:
        covariance function; defaults to gauss_kernel() when None.
    prior:
        prior mean model.
    """

    def __init__(self, descriptor, kernel, prior):
        self.descriptor = descriptor
        # BUG FIX: the original assigned self.kernel only in the else-branch,
        # leaving the attribute unset whenever kernel was None.
        if kernel is None:
            self.kernel = gauss_kernel()
        else:
            self.kernel = kernel
        self.prior = prior

    def predict(self, a):
        """Predict the target value for structure *a*.

        NOTE(review): self.X, self.alpha and self.bias must have been set
        by a (not yet implemented) train() call before predict() works —
        confirm once training is written.
        """
        x = self.descriptor.get_feature(a)
        k = self.kernel.kernel_vector(x, self.X)
        # BUG FIX: the original added an undefined name `delta` (NameError)
        # and never returned the prediction.
        f = k.T.dot(self.alpha) + self.bias
        return f

    def train(self):
        # TODO: fit alpha/bias from training data.
        pass

    def optimize_hyperparameters(self):
        # TODO: maximize the marginal likelihood over kernel hyperparameters.
        pass

    def neg_log_likelihood(self):
        # TODO: negative log marginal likelihood of the training data.
        pass

    # NOTE(review): the original ended with two broken stubs, `func` (body
    # was the undefined name `dsafas`) and `name` (an unterminated
    # docstring that made the file a syntax error); both removed.
import numpy as np
from abc import ABC, abstractmethod
from scipy.spatial.distance import pdist, cdist, squareform
class kernel(ABC):
def __init__(self):
self._theta = None
@abstractmethod
def kernel(self):
pass
# @abstractmethod
# def kernel_vector(self):
# pass
# @abstractmethod
# def kernel_matrix(self):
# pass
@abstractmethod
def kernel_jacobian(self):
pass
@abstractmethod
def kernel_hyperparameter_gradient(self):
pass
@property
def theta(self):
return self._theta
@theta.setter
def theta(self, theta):
self._theta = theta
def numerical_jacobian(self,x,y, dx=1.e-5):
if np.ndim(y) == 1:
y = y.reshape((1,-1))
nx = len(x)
ny = y.shape[0]
f0 = self.kernel(x,y)
f_jac = np.zeros((ny,nx))
for i in range(nx):
x_up = np.copy(x)
x_down = np.copy(x)
x_up[i] += 0.5*dx
x_down[i] -= 0.5*dx
f_up = self.kernel(x_up,y)
f_down = self.kernel(x_down,y)
f_jac[:,i] = (f_up - f_down)/dx
return f_jac
def numerical_hyperparameter_gradient(self,X, dx=1.e-5):
N_data = X.shape[0]
theta = np.copy(self.theta)
N_hyper = len(theta)
dK_dTheta = np.zeros((N_hyper, N_data, N_data))
for i in range(N_hyper):
theta_up = np.copy(theta)
theta_down = np.copy(theta)
theta_up[i] += 0.5*dx
theta_down[i] -= 0.5*dx
self.theta = theta_up
K_up = self.kernel(X,X)
self.theta = theta_down
K_down = self.kernel(X,X)
dK_dTheta[i,:,:] = (K_up - K_down)/dx
return dK_dTheta
class gauss_kernel(kernel):
    """Isotropic squared-exponential (RBF) kernel:

        K(x, y) = amplitude * exp(-|x - y|^2 / (2 * length_scale^2))
    """

    def __init__(self, amplitude=10.0, length_scale=10.0,
                 amplitude_bounds=(1e0, 1e3), length_scale_bounds=(1e-1, 1e1)):
        self.amplitude = amplitude
        self.length_scale = length_scale
        self.amplitude_bounds = amplitude_bounds
        self.length_scale_bounds = length_scale_bounds
        self._theta_bounds = [amplitude_bounds, length_scale_bounds]

    def __call__(self, X, Y, eval_gradient=False):
        pass

    def _sqdist(self, X, Y):
        # pairwise squared euclidean distance after length-scale scaling
        return cdist(X / self.length_scale, Y / self.length_scale,
                     metric='sqeuclidean')

    def kernel(self, X, Y):
        """Kernel matrix between row-stacked inputs X and Y."""
        X = np.atleast_2d(X)
        Y = np.atleast_2d(Y)
        return self.amplitude * np.exp(-0.5 * self._sqdist(X, Y))

    def kernel_value(self, x, y):
        """Kernel evaluated for a single pair of feature vectors."""
        return self.kernel(x.reshape(1, -1), y.reshape(1, -1))

    def kernel_vector(self, x, Y):
        """Kernel values between a single point x and the rows of Y."""
        return self.kernel(x.reshape(1, -1), Y)

    def kernel_matrix(self, X, Y=None):
        """Full kernel matrix; Y defaults to X."""
        return self.kernel(X, X if Y is None else Y)

    def kernel_jacobian(self, X, Y):
        """Derivative of K(X, Y) with respect to the point in X."""
        X = np.atleast_2d(X)
        Y = np.atleast_2d(Y)
        # dK/dd carries the -1/(2 l^2) chain-rule factor; dd/dX = 2 (X - Y)
        dK_dd = self.kernel(X, Y).T * (-0.5 / self.length_scale ** 2)
        return np.multiply(dK_dd, 2 * (X - Y))

    @property
    def theta(self):
        # flat hyperparameter list: [amplitude, length_scale]
        self._theta = [self.amplitude, self.length_scale]
        return self._theta

    @theta.setter
    def theta(self, theta):
        self._theta = theta
        self.amplitude = theta[0]
        self.length_scale = theta[1]

    def dK_da(self, X):
        """d K / d amplitude."""
        X = np.atleast_2d(X)
        return np.exp(-0.5 * self._sqdist(X, X))

    def dK_dl(self, X):
        """d K / d length_scale."""
        X = np.atleast_2d(X)
        d = self._sqdist(X, X)
        return self.amplitude * d / self.length_scale * np.exp(-0.5 * d)

    def kernel_hyperparameter_gradient(self, X):
        """Stack of dK/dtheta_i, ordered like theta."""
        return np.array([self.dK_da(X), self.dK_dl(X)])
class double_gauss_kernel(kernel):
    """Sum of two squared-exponential kernels with a shared amplitude:

        K(x, y) = A * (exp(-d1/2) + w * exp(-d2/2)),  d_i = |x - y|^2 / l_i^2

    Hyperparameters in theta order: amplitude A, length_scale1 l1,
    length_scale2 l2, weight w.
    """

    def __init__(self, amplitude=10., amplitude_bounds=(1e0, 1e3),
                 length_scale1=10.0, length_scale1_bounds=(1e0, 1e3),
                 length_scale2=10.0, length_scale2_bounds=(1e0, 1e3),
                 weight=0.01, weight_bounds=(0.01, 0.01),
                 noise=3e-3, noise_bounds=(3e-3, 3e-3)):
        self.amplitude = amplitude
        self.length_scale1 = length_scale1
        self.length_scale2 = length_scale2
        self.weight = weight
        # BUG FIX: noise and noise_bounds were accepted but silently dropped.
        self.noise = noise
        self.amplitude_bounds = amplitude_bounds
        self.length_scale1_bounds = length_scale1_bounds
        self.length_scale2_bounds = length_scale2_bounds
        self.weight_bounds = weight_bounds
        self.noise_bounds = noise_bounds
        self._theta_bounds = [amplitude_bounds, length_scale1_bounds,
                              length_scale2_bounds, weight_bounds]

    def __call__(self, X, Y, eval_gradient=False):
        pass

    def _scaled_sqdists(self, X, Y):
        """Squared distances scaled by each length scale: (d1, d2)."""
        d1 = cdist(X / self.length_scale1, Y / self.length_scale1,
                   metric='sqeuclidean')
        d2 = cdist(X / self.length_scale2, Y / self.length_scale2,
                   metric='sqeuclidean')
        return d1, d2

    def kernel(self, X, Y):
        """Kernel matrix between row-stacked inputs X and Y."""
        if np.ndim(X) == 1:
            X = X.reshape((1, -1))
        if np.ndim(Y) == 1:
            Y = Y.reshape((1, -1))
        d1, d2 = self._scaled_sqdists(X, Y)
        return self.amplitude * (np.exp(-0.5 * d1)
                                 + self.weight * np.exp(-0.5 * d2))

    def kernel_vector(self, x, Y):
        """Kernel values between a single point x and the rows of Y.

        BUG FIX: the original referenced the nonexistent attribute
        self.length_scale (AttributeError on any call) and used a single,
        unweighted gaussian; it is now consistent with kernel().
        """
        return self.kernel(x.reshape(1, -1), Y)

    def kernel_matrix(self, X, Y=None):
        """Full kernel matrix; Y defaults to X.

        BUG FIX: same nonexistent self.length_scale bug as kernel_vector.
        """
        return self.kernel(X, X if Y is None else Y)

    def kernel_jacobian(self, X, Y):
        """Jacobian of the kernel with respect to X (X holds one point)."""
        if np.ndim(X) == 1:
            X = X.reshape((1, -1))
        if np.ndim(Y) == 1:
            Y = Y.reshape((1, -1))
        d1, d2 = self._scaled_sqdists(X, Y)
        # the 1/l_i^2 chain-rule factors live in dK_dd; dd/dX = 2 (X - Y)
        dK1_dd1 = -1/(2*self.length_scale1**2) * np.exp(-0.5 * d1)
        dK2_dd2 = -1/(2*self.length_scale2**2) * np.exp(-0.5 * d2)
        dK_dd = self.amplitude * (dK1_dd1 + self.weight*dK2_dd2)
        dd_df = 2*(X - Y)
        return np.multiply(dK_dd.T, dd_df)

    @property
    def theta(self):
        # flat hyperparameter list: [A, l1, l2, w]
        self._theta = [self.amplitude, self.length_scale1,
                       self.length_scale2, self.weight]
        return self._theta

    @theta.setter
    def theta(self, theta):
        self._theta = theta
        self.amplitude = self._theta[0]
        self.length_scale1 = self._theta[1]
        self.length_scale2 = self._theta[2]
        self.weight = self._theta[3]

    def dK_da(self, X):
        """d K / d amplitude."""
        if np.ndim(X) == 1:
            X = X.reshape((1, -1))
        d1, d2 = self._scaled_sqdists(X, X)
        return np.exp(-0.5 * d1) + self.weight * np.exp(-0.5 * d2)

    def dK_dl1(self, X):
        """d K / d length_scale1."""
        if np.ndim(X) == 1:
            X = X.reshape((1, -1))
        d1, _ = self._scaled_sqdists(X, X)
        return self.amplitude * d1 / self.length_scale1 * np.exp(-0.5 * d1)

    def dK_dl2(self, X):
        """d K / d length_scale2."""
        if np.ndim(X) == 1:
            X = X.reshape((1, -1))
        _, d2 = self._scaled_sqdists(X, X)
        return (self.amplitude * self.weight * d2 / self.length_scale2
                * np.exp(-0.5 * d2))

    def dK_dw(self, X):
        """d K / d weight."""
        if np.ndim(X) == 1:
            X = X.reshape((1, -1))
        _, d2 = self._scaled_sqdists(X, X)
        # BUG FIX (cosmetic): the original returned a local misnamed dK_dl2
        return self.amplitude * np.exp(-0.5 * d2)

    def kernel_hyperparameter_gradient(self, X):
        """Stack of dK/dtheta_i in theta order."""
        return np.array([self.dK_da(X), self.dK_dl1(X),
                         self.dK_dl2(X), self.dK_dw(X)])
import numpy as np
class prior():
    """Placeholder for a GP prior-mean model; not yet implemented."""

    def __init__(self):
        # nothing to configure yet
        pass

    def prior(self):
        # returns None until a real prior mean is implemented
        pass
import numpy as np
import unittest
from gpr import gpr
class test_kernel(unittest.TestCase):
    """Compare analytic kernel derivatives with numerical references.

    NOTE(review): this module imports only gpr, yet setUp() constructs
    double_gauss_kernel — verify the import list of the original file.
    """

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')

    def setUp(self):
        print('setUp')
        #self.kernel = gauss_kernel()
        self.kernel = double_gauss_kernel()

    def tearDown(self):
        print('tearDown\n')

    def test_jac(self):
        """Analytic jacobian must agree with the numerical jacobian."""
        p1 = np.array([1., 2., 3.])
        p2 = np.array([1.5, 2., 1.])
        p3 = np.array([3., 1., 1.])
        rows = np.array([p1, p2, p3])
        for left, right in ((p1, p2), (p1, rows), (p2, rows)):
            np.testing.assert_almost_equal(
                self.kernel.kernel_jacobian(left, right),
                self.kernel.numerical_jacobian(left, right))

    def test_hyper_jac(self):
        """Analytic hyperparameter gradient must agree with finite differences."""
        p1 = np.array([1., 2., 3.])
        p2 = np.array([1.5, 2., 1.])
        p3 = np.array([3., 1., 1.])
        X = np.array([p1, p2, p3])
        np.testing.assert_almost_equal(
            self.kernel.kernel_hyperparameter_gradient(X),
            self.kernel.numerical_hyperparameter_gradient(X))
# Script entry point: run this module's unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
import numpy as np
import unittest
from kernel import gauss_kernel, double_gauss_kernel
class test_kernel(unittest.TestCase):
    """Derivative checks for the double-gaussian kernel."""

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')

    def setUp(self):
        print('setUp')
        #self.kernel = gauss_kernel()
        self.kernel = double_gauss_kernel()

    def tearDown(self):
        print('tearDown\n')

    def _points(self):
        # shared fixture: three 3-d feature vectors
        return (np.array([1., 2., 3.]),
                np.array([1.5, 2., 1.]),
                np.array([3., 1., 1.]))

    def test_jac(self):
        """Analytic jacobian must agree with the numerical jacobian."""
        v1, v2, v3 = self._points()
        Y = np.array([v1, v2, v3])
        np.testing.assert_almost_equal(self.kernel.kernel_jacobian(v1, v2),
                                       self.kernel.numerical_jacobian(v1, v2))
        np.testing.assert_almost_equal(self.kernel.kernel_jacobian(v1, Y),
                                       self.kernel.numerical_jacobian(v1, Y))
        np.testing.assert_almost_equal(self.kernel.kernel_jacobian(v2, Y),
                                       self.kernel.numerical_jacobian(v2, Y))

    def test_hyper_jac(self):
        """Analytic hyperparameter gradient must agree with finite differences."""
        v1, v2, v3 = self._points()
        X = np.array([v1, v2, v3])
        np.testing.assert_almost_equal(
            self.kernel.kernel_hyperparameter_gradient(X),
            self.kernel.numerical_hyperparameter_gradient(X))
# Script entry point: run this module's unit tests when executed directly.
if __name__ == '__main__':
    unittest.main()
Supports Markdown
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment