Malthe Kjær Bisbo / GOFEE · Commits

Commit ba29303e
Authored Dec 23, 2019 by Malthe Kjær Bisbo

    gpr done

Parent: cb9ee645
Changes: 9
surrogate/__pycache__/custom_calculators.cpython-36.pyc  0 → 100644
File added

surrogate/__pycache__/gpr.cpython-36.pyc
No preview for this file type

surrogate/__pycache__/kernel.cpython-36.pyc
No preview for this file type
surrogate/custom_calculators.py  0 → 100644

import numpy as np
from scipy.spatial.distance import euclidean
from ase.calculators.calculator import Calculator
from time import time
from ase.calculators.singlepoint import SinglePointCalculator


class krr_calculator(Calculator):

    implemented_properties = ['energy', 'forces']
    default_parameters = {}

    def __init__(self, MLmodel, label='MLmodel', kappa=None, **kwargs):
        self.MLmodel = MLmodel
        self.kappa = kappa
        Calculator.__init__(self, **kwargs)

    def calculate(self, atoms=None, properties=['energy', 'forces'], system_changes=['positions']):
        Calculator.calculate(self, atoms, properties, system_changes)

        if 'energy' in properties:
            if self.kappa is None:
                E = self.MLmodel.predict_energy(atoms, return_error=False)
            else:
                energy, error, _ = self.MLmodel.predict_energy(atoms, return_error=True)
                E = energy - self.kappa * error
            self.results['energy'] = E

        if 'forces' in properties:
            if self.kappa is None:
                F = self.MLmodel.predict_force(atoms).reshape((-1, 3))
            else:
                F, F_error = self.MLmodel.predict_force(atoms, return_error=True)
                F = (F + self.kappa * F_error).reshape((-1, 3))
            self.results['forces'] = F
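With kappa set, krr_calculator turns the surrogate into a lower-confidence-bound acquisition function: the reported energy is energy - kappa*error, so an optimizer running on this calculator is drawn toward structures that are low in predicted energy and/or high in model uncertainty. A minimal usage sketch, assuming only the interface the class itself calls; the DummyModel stand-in is hypothetical, not part of the repository:

    import numpy as np
    from ase.build import molecule

    class DummyModel:
        # Hypothetical model exposing the interface krr_calculator expects.
        def predict_energy(self, atoms, return_error=False):
            E = -1.0
            return E if not return_error else (E, 0.1, None)

        def predict_force(self, atoms, return_error=False):
            F = np.zeros(3*len(atoms))
            return F if not return_error else (F, np.zeros(3*len(atoms)))

    atoms = molecule('H2O')
    atoms.set_calculator(krr_calculator(DummyModel(), kappa=2.0))
    print(atoms.get_potential_energy())   # -1.0 - 2.0*0.1 = -1.2
    print(atoms.get_forces().shape)       # (3, 3): (F + kappa*F_error) reshaped per atom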
class doubleLJ_calculator(Calculator):

    implemented_properties = ['energy', 'forces']
    default_parameters = {}

    def __init__(self, eps=1.8, r0=1.1, sigma=np.sqrt(0.02), label='doubleLJ', noZ=False, **kwargs):
        self.eps = eps
        self.r0 = r0
        self.sigma = sigma
        self.noZ = noZ
        Calculator.__init__(self, **kwargs)

    def calculate(self, atoms=None, properties=['energy', 'forces'], system_changes=['positions']):
        Calculator.calculate(self, atoms, properties, system_changes)

        if 'energy' in properties:
            self.results['energy'] = self.energy(atoms)

        if 'forces' in properties:
            F = self.forces(atoms)
            if self.noZ:
                F[:, -1] = 0
            self.results['forces'] = F

    def energy(self, a):
        x = a.get_positions()
        E = 0
        for i, xi in enumerate(x):
            for j, xj in enumerate(x):
                if j > i:
                    r = euclidean(xi, xj)
                    E1 = 1/r**12 - 2/r**6
                    E2 = -self.eps * np.exp(-(r - self.r0)**2 / (2*self.sigma**2))
                    E += E1 + E2
        return E

    def forces(self, a):
        x = a.get_positions()
        Natoms, dim = x.shape
        dE = np.zeros((Natoms, dim))
        for i, xi in enumerate(x):
            for j, xj in enumerate(x):
                r = euclidean(xi, xj)
                if j != i:
                    rijVec = xi - xj
                    dE1 = 12*rijVec * (-1/r**14 + 1/r**8)
                    dE2 = self.eps * (r - self.r0) * rijVec / (r * self.sigma**2) * np.exp(-(r - self.r0)**2 / (2*self.sigma**2))
                    dE[i] += dE1 + dE2
        return -dE
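The pair potential adds an attractive Gaussian of depth eps centered at r0 to the Lennard-Jones term E1 = 1/r**12 - 2/r**6, and forces() returns the negative gradient accumulated per atom (optionally zeroed along z via noZ). A quick consistency check of the analytic gradient against central differences; a sketch, with an arbitrary two-atom geometry:

    import numpy as np
    from ase import Atoms

    a = Atoms('H2', positions=[[0, 0, 0], [1.05, 0, 0]])
    calc = doubleLJ_calculator()

    F = calc.forces(a)                  # analytic -dE/dr, shape (Natoms, 3)
    F_num = np.zeros_like(F)
    dx = 1e-5
    for i in range(len(a)):
        for d in range(3):
            up, down = a.copy(), a.copy()
            up.positions[i, d] += 0.5*dx
            down.positions[i, d] -= 0.5*dx
            F_num[i, d] = -(calc.energy(up) - calc.energy(down))/dx

    print(np.abs(F - F_num).max())      # should vanish to O(dx**2)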
surrogate/gpr.py

...
@@ -166,10 +166,11 @@ class gpr():
             K0 = self.kernel.kernel_value(x, x)
             g = K0 - np.dot(k.T, vk)
             assert g >= 0
-            F_std = -1/np.sqrt(g) * np.dot(k_ddr.T, vk)
-            return F, F_std
+            F_std = 1/np.sqrt(g) * np.dot(k_ddr.T, vk)
+            #F_std = -1/np.sqrt(g) * np.dot(k_ddr.T, vk)
+            return F.reshape((-1, 3)), F_std.reshape(-1, 3)
         else:
-            return F
+            return F.reshape(-1, 3)

     def set_bias(self):
         self.bias = np.mean(self.memory.energies - self.memory.prior_values)
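A note on this hunk: the prefactor of F_std changes sign (the old expression is kept as a comment), presumably to make the uncertainty "force" consistent with krr_calculator in custom_calculators.py, which adds kappa*F_error to the predicted force when searching on the lower-confidence-bound energy E - kappa*sigma. Both branches now also return the (Natoms, 3) array layout that ASE calculators expect.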
...
@@ -232,7 +233,7 @@ class gpr():
         lml = -0.5 * np.dot(self.Y, alpha)
         lml -= np.sum(np.log(np.diag(L)))
         lml -= K.shape[0]/2 * np.log(2*np.pi)

         if eval_gradient:
             # Equation (5.9) in GPML
             K_inv = cho_solve((L, True), np.eye(K.shape[0]))
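The three lml lines are the Cholesky form of the log marginal likelihood, eq. (2.30) in GPML: log p(y|X) = -1/2 yᵀK⁻¹y - 1/2 log|K| - n/2 log(2π), using log|K| = 2·Σᵢ log Lᵢᵢ with K = LLᵀ. A standalone sketch of the same computation, checked against the dense formula (plain NumPy/SciPy, independent of the class):

    import numpy as np
    from scipy.linalg import cholesky, cho_solve

    def log_marginal_likelihood(K, y):
        L = cholesky(K, lower=True)              # K = L @ L.T
        alpha = cho_solve((L, True), y)          # alpha = K^-1 y
        lml = -0.5 * np.dot(y, alpha)
        lml -= np.sum(np.log(np.diag(L)))        # -0.5 * log|K|
        lml -= K.shape[0]/2 * np.log(2*np.pi)
        return lml

    K = np.array([[2.0, 0.5], [0.5, 1.0]])
    y = np.array([0.3, -0.1])
    dense = -0.5*y @ np.linalg.solve(K, y) - 0.5*np.log(np.linalg.det(K)) - np.log(2*np.pi)
    assert np.isclose(log_marginal_likelihood(K, y), dense)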
...
@@ -252,6 +253,7 @@ class gpr():
                           options={'gtol': 1e-2, 'ftol': 1e-4})
         if not result.success:
             warnings.warn(f"L_BFGS_B terminated with state: {result.message}")
+        print('lml:', result.fun)
         return result.x, result.fun

     def numerical_neg_lml(self, dx=1e-4):
...
@@ -265,13 +267,41 @@ class gpr():
             theta_up[i] += 0.5*dx
             theta_down[i] -= 0.5*dx
+            d_theta = np.exp(theta_up[i]) - np.exp(theta_down[i])
             lml_up = self.neg_log_marginal_likelihood(theta_up, eval_gradient=False)
             lml_down = self.neg_log_marginal_likelihood(theta_down, eval_gradient=False)
-            lml_ddTheta[i] = (lml_up - lml_down)/dx
+            #lml_ddTheta[i] = (lml_up - lml_down)/dx
+            lml_ddTheta[i] = (lml_up - lml_down)/d_theta
         return lml_ddTheta
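The correction is a chain rule: theta stores log-transformed hyperparameters (see the kernel.py changes below), so (lml_up - lml_down)/dx estimates d(lml)/d(log h), whereas dividing by d_theta = exp(theta + dx/2) - exp(theta - dx/2) ≈ h·dx converts the stencil to d(lml)/dh, the derivative with respect to the raw hyperparameter. A scalar illustration, with a toy function standing in for the likelihood:

    import numpy as np

    f = lambda h: np.log(h) + h**2     # toy function of a raw hyperparameter h
    h, dx = 1.7, 1e-4
    t = np.log(h)                      # log-transformed parameter

    up, down = f(np.exp(t + 0.5*dx)), f(np.exp(t - 0.5*dx))
    d_log = (up - down) / dx                                         # -> df/d(log h)
    d_raw = (up - down) / (np.exp(t + 0.5*dx) - np.exp(t - 0.5*dx))  # -> df/dh

    assert np.isclose(d_log, h*(1/h + 2*h))    # df/d(log h) = h * f'(h)
    assert np.isclose(d_raw, 1/h + 2*h)        # f'(h)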
+    def numerical_forces(self, a, dx=1e-4, eval_std=False):
+        Na, Nd = a.positions.shape
+        if not eval_std:
+            F = np.zeros((Na, Nd))
+            for ia in range(Na):
+                for idim in range(Nd):
+                    a_up = a.copy()
+                    a_down = a.copy()
+                    a_up.positions[ia, idim] += 0.5*dx
+                    a_down.positions[ia, idim] -= 0.5*dx
+                    E_up = self.predict_energy(a_up)
+                    E_down = self.predict_energy(a_down)
+                    F[ia, idim] = -(E_up - E_down)/dx
+            return F
+        else:
+            F = np.zeros((Na, Nd))
+            Fstd = np.zeros((Na, Nd))
+            for ia in range(Na):
+                for idim in range(Nd):
+                    a_up = a.copy()
+                    a_down = a.copy()
+                    a_up.positions[ia, idim] += 0.5*dx
+                    a_down.positions[ia, idim] -= 0.5*dx
+                    E_up, Estd_up = self.predict_energy(a_up, eval_std=True)
+                    E_down, Estd_down = self.predict_energy(a_down, eval_std=True)
+                    F[ia, idim] = -(E_up - E_down)/dx
+                    Fstd[ia, idim] = (Estd_up - Estd_down)/dx
+            return F, Fstd

 def get_calculator():
     pass
...
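Since the new numerical_forces relies only on predict_energy (with eval_std=True for the uncertainty channel), it provides a model-agnostic reference for the analytic predict_forces path, kernel gradients and descriptor Jacobian included; Fstd applies the same central-difference stencil to the predictive standard deviation.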
surrogate/kernel.py

...
@@ -54,6 +54,9 @@ class kernel(ABC):
         return f_jac

     def numerical_hyperparameter_gradient(self, X, dx=1.e-5):
+        """Calculates the numerical derivative of the kernel with respect to the
+        log transformed hyperparameters.
+        """
         N_data = X.shape[0]
         theta = np.copy(self.theta)
         N_hyper = len(theta)

...
@@ -63,6 +66,12 @@ class kernel(ABC):
             theta_down = np.copy(theta)
             theta_up[i] += 0.5*dx
             theta_down[i] -= 0.5*dx
+            #theta_up[i] = np.log(np.exp(theta_up[i]) + 0.5*dx)
+            #theta_down[i] = np.log(np.exp(theta_down[i]) - 0.5*dx)
+            #dTheta = theta_up[i] - theta_down[i]
+            #dTheta = np.log(np.exp(theta_up[i]) + 0.5*dx) - np.log(np.exp(theta_down[i]) - 0.5*dx)
+            #print('dTheta:', dTheta)
             self.theta = theta_up
             K_up = self(X, eval_gradient=False)
...
@@ -76,12 +85,24 @@ class gauss_kernel(kernel):
     def __init__(self, amplitude=10.0, length_scale=10.0, amplitude_bounds=(1e0, 1e3), length_scale_bounds=(1e-1, 1e1)):
         self.amplitude = amplitude
         self.length_scale = length_scale
         self.amplitude_bounds = amplitude_bounds
         self.length_scale_bounds = length_scale_bounds
-        self._theta_bounds = [amplitude_bounds, length_scale_bounds]
+        self.theta_bounds = np.array([amplitude_bounds, length_scale_bounds])

-    def __call__(self, X, Y, eval_gradient=False):
-        pass
+    def __call__(self, X, eval_gradient=False):
+        if np.ndim(X) == 1:
+            X = X.reshape((1, -1))
+        d = cdist(X/self.length_scale,
+                  X/self.length_scale, metric='sqeuclidean')
+        K = self.amplitude * np.exp(-0.5*d)
+        if eval_gradient:
+            K_gradient = self.kernel_hyperparameter_gradient(X)
+            return K, K_gradient
+        else:
+            return K

     def kernel(self, X, Y):
         if np.ndim(X) == 1:
...
@@ -92,17 +113,13 @@ class gauss_kernel(kernel):
                   Y/self.length_scale, metric='sqeuclidean')
         K = self.amplitude * np.exp(-0.5*d)
         return K

     def kernel_value(self, x, y):
-        d = cdist(x.reshape(1, -1)/self.length_scale,
-                  y.reshape(1, -1)/self.length_scale, metric='sqeuclidean')
-        K = self.amplitude * np.exp(-0.5*d)
-        return K
+        K = self.kernel(x, y)
+        return np.asscalar(K)

     def kernel_vector(self, x, Y):
-        d = cdist(x.reshape(1, -1)/self.length_scale,
-                  Y/self.length_scale, metric='sqeuclidean')
-        K = self.amplitude * np.exp(-0.5*d)
+        K = self.kernel(x, Y).reshape(-1)
         return K

     def kernel_matrix(self, X, Y=None):
...
@@ -127,21 +144,29 @@ class gauss_kernel(kernel):
     @property
     def theta(self):
-        self._theta = [self.amplitude, self.length_scale]
-        return self._theta
+        """Returns the log-transformed hyperparameters of the kernel.
+        """
+        self._theta = np.array([self.amplitude, self.length_scale])
+        return np.log(self._theta)
+        #return self._theta

     @theta.setter
     def theta(self, theta):
-        self._theta = theta
+        """Sets the hyperparameters of the kernel.
+        theta: log-transformed hyperparameters
+        """
+        self._theta = np.exp(theta)
+        #self._theta = theta
         self.amplitude = self._theta[0]
         self.length_scale = self._theta[1]

     def dK_da(self, X):
         if np.ndim(X) == 1:
             X = X.reshape((1, -1))
         d = cdist(X/self.length_scale,
                   X/self.length_scale, metric='sqeuclidean')
-        dK_da = np.exp(-0.5*d)
+        dK_da = self.amplitude * np.exp(-0.5*d)
         return dK_da

     def dK_dl(self, X):
...
@@ -149,10 +174,13 @@ class gauss_kernel(kernel):
             X = X.reshape((1, -1))
         d = cdist(X/self.length_scale,
                   X/self.length_scale, metric='sqeuclidean')
-        dK_dl = self.amplitude * d/self.length_scale * np.exp(-0.5*d)
+        dK_dl = self.amplitude * d * np.exp(-0.5*d)
         return dK_dl

     def kernel_hyperparameter_gradient(self, X):
+        """Calculates the derivative of the kernel with respect to the
+        log transformed hyperparameters.
+        """
         return np.array([self.dK_da(X), self.dK_dl(X)])
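Together with the new theta property, these edits put all gauss_kernel derivatives in log space via the chain rule dK/d(log h) = h·dK/dh: dK_da gains a factor of amplitude (so it is simply K again), and dK_dl loses its 1/length_scale because l·(d/l) = d. A self-contained check of both identities, with d the squared scaled distance exactly as in the code:

    import numpy as np

    a, l, r = 2.5, 0.8, 1.3                  # amplitude, length scale, pair distance
    K = lambda a, l: a * np.exp(-0.5 * r**2 / l**2)

    eps = 1e-6                               # multiplicative (log-space) perturbation
    dK_dloga = (K(a*np.exp(eps), l) - K(a*np.exp(-eps), l)) / (2*eps)
    dK_dlogl = (K(a, l*np.exp(eps)) - K(a, l*np.exp(-eps))) / (2*eps)

    d = r**2 / l**2                          # squared scaled distance
    assert np.isclose(dK_dloga, a * np.exp(-0.5*d))       # new dK_da
    assert np.isclose(dK_dlogl, a * d * np.exp(-0.5*d))   # new dK_dl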
...
@@ -265,30 +293,33 @@ class double_gauss_kernel(kernel):
                    X/self.length_scale1, metric='sqeuclidean')
         d2 = cdist(X/self.length_scale2,
                    X/self.length_scale2, metric='sqeuclidean')
-        dK_da = np.exp(-0.5*d1) + self.weight*np.exp(-0.5*d2) + self.noise*np.eye(X.shape[0])
+        dK_da = self.amplitude * (np.exp(-0.5*d1) + self.weight*np.exp(-0.5*d2) + self.noise*np.eye(X.shape[0]))
         return dK_da

     def dK_dl1(self, X):
         d1 = cdist(X/self.length_scale1,
                    X/self.length_scale1, metric='sqeuclidean')
-        dK_dl1 = self.amplitude * d1/self.length_scale1 * np.exp(-0.5*d1)
+        dK_dl1 = self.amplitude * d1 * np.exp(-0.5*d1)
         return dK_dl1

     def dK_dl2(self, X):
         d2 = cdist(X/self.length_scale2,
                    X/self.length_scale2, metric='sqeuclidean')
-        dK_dl2 = self.amplitude * self.weight * d2/self.length_scale2 * np.exp(-0.5*d2)
+        dK_dl2 = self.amplitude * self.weight * d2 * np.exp(-0.5*d2)
         return dK_dl2

     def dK_dw(self, X):
         d2 = cdist(X/self.length_scale2,
                    X/self.length_scale2, metric='sqeuclidean')
-        dK_dl2 = self.amplitude * np.exp(-0.5*d2)
+        dK_dl2 = self.amplitude * self.weight * np.exp(-0.5*d2)
         return dK_dl2

     def dK_dn(self, X):
-        dK_dn = self.amplitude * np.eye(X.shape[0])
+        dK_dn = self.amplitude * self.noise * np.eye(X.shape[0])
         return dK_dn

     def kernel_hyperparameter_gradient(self, X):
+        """Calculates the derivative of the kernel with respect to the
+        log transformed hyperparameters.
+        """
         return np.array([self.dK_da(X), self.dK_dl1(X), self.dK_dl2(X), self.dK_dw(X), self.dK_dn(X)])
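The double_gauss_kernel edits apply the same rule uniformly: each dK_* is multiplied by its own hyperparameter (amplitude, the length scales, weight, noise), which is what makes the analytic gradients comparable to numerical_hyperparameter_gradient, since that helper differences K directly in theta, i.e. log, space. A generic sketch of the convention, independent of the kernel classes:

    import numpy as np

    # If theta = log(h), central differences in theta approximate h * dK/dh.
    h = 0.37
    K = lambda h: np.sin(h)/h                # arbitrary smooth stand-in for a kernel entry
    dx = 1e-6
    t = np.log(h)

    num = (K(np.exp(t + 0.5*dx)) - K(np.exp(t - 0.5*dx))) / dx
    analytic = h * (np.cos(h)/h - np.sin(h)/h**2)          # h * dK/dh
    assert np.isclose(num, analytic)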
surrogate/test_descriptor.py  0 → 100644

import numpy as np
import matplotlib.pyplot as plt
import unittest

from gpr import gpr

from ase.io import read

# old gpr
from kernels import RBF, ConstantKernel as C, WhiteKernel
from descriptor.fingerprint import Fingerprint
from delta_functions_multi.delta import delta as deltaFunc
from GPR import GPR


def finite_diff(descriptor, a, dx=1e-5):
    Nf = descriptor.get_feature(a).shape[0]
    Natoms, dim = a.positions.shape
    f_ddr = np.zeros((Natoms, dim, Nf))
    for ia in range(Natoms):
        for idim in range(dim):
            a_up = a.copy()
            a_down = a.copy()
            a_up.positions[ia, idim] += dx/2
            a_down.positions[ia, idim] -= dx/2
            f_up = descriptor.get_feature(a_up)
            f_down = descriptor.get_feature(a_down)
            f_ddr[ia, idim, :] = (f_up - f_down)/dx
    return f_ddr.reshape((-1, Nf))


def get_E_with_std(traj, gpr):
    E = []
    F = []
    for a in traj:
        e = gpr.predict_energy(a, )


class test_gpr(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        print('setupClass')

    @classmethod
    def tearDownClass(cls):
        print('teardownClass')

    def setUp(self):
        print('setUp')

        ### Set up feature ###

        # Initialize feature
        self.descriptor = Fingerprint()

    def tearDown(self):
        print('tearDown\n')

    def test_forces(self):
        a = read('structures.traj', index='0')
        f_ddr = self.descriptor.get_featureGradient(a)
        f_ddr_num = finite_diff(self.descriptor, a)

        Na, Nf = f_ddr.shape
        x = np.arange(Nf)
        fig, ax = plt.subplots(1, 1)
        ax.plot(x, f_ddr.T)
        ax.plot(x, f_ddr_num.T, 'k:')
        plt.show()

        print(f_ddr - f_ddr_num)
        np.testing.assert_almost_equal(f_ddr, f_ddr_num)


if __name__ == '__main__':
    unittest.main()
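finite_diff assembles the (3·Natoms, Nf) descriptor Jacobian df/dr by central differences; this is the object get_featureGradient must reproduce, and in a descriptor-based GPR it is what maps the feature-space gradient dE/df onto atomic forces. (The get_E_with_std helper above is left unfinished in this commit: its accumulators are never filled or returned.) A small illustration of that chain-rule product, with random stand-in arrays in place of a real descriptor and model:

    import numpy as np

    rng = np.random.default_rng(0)
    df_dr = rng.normal(size=(9, 4))        # (3*Natoms, Nf) Jacobian, the layout finite_diff returns
    dE_df = rng.normal(size=4)             # surrogate energy gradient in feature space
    F = -(df_dr @ dE_df).reshape(-1, 3)    # per-atom forces via the chain rule
    print(F.shape)                         # (3, 3)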
surrogate/test_gpr.py

...
@@ -6,7 +6,7 @@ from ase.io import read
 # old gpr
 from kernels import RBF, ConstantKernel as C, WhiteKernel
-from featureCalculators_multi.angular_fingerprintFeature_cy import Angular_Fingerprint
+from descriptor.fingerprint import Fingerprint
 from delta_functions_multi.delta import delta as deltaFunc
 from GPR import GPR

...
@@ -29,9 +29,10 @@ def initialize_old_gpr(atoms):
     use_angular = True

     # Initialize feature
-    featureCalculator = Angular_Fingerprint(atoms, Rc1=Rc1, Rc2=Rc2, binwidth1=binwidth1, Nbins2=Nbins2, sigma1=sigma1, sigma2=sigma2, gamma=gamma, eta=eta, use_angular=use_angular)
+    featureCalculator = Fingerprint(Rc1=Rc1, Rc2=Rc2, binwidth1=binwidth1, Nbins2=Nbins2, sigma1=sigma1, sigma2=sigma2, gamma=gamma, eta=eta, use_angular=use_angular)

-    kernel = C(10, (1e1, 1e6)) * (C(1, (1, 1)) * RBF(10, (1, 1000)) + C(0.01, (0.01, 0.01)) * RBF(10, (1, 1000)) + WhiteKernel(1e-5, (1e-6, 1e-2)))
+    kernel = C(10, (1e1, 1e6)) * RBF(10, (1, 1000))
+    #kernel = C(10, (1e1, 1e6)) * (RBF(10, (1,1000)) + C(0.01, (0.01, 0.01)) * RBF(10, (1,1000)) + WhiteKernel(1e-5, (1e-6,1e-2)))

     delta = deltaFunc(atoms=atoms, rcut=6)
     gpr = GPR(kernel=kernel,
               featureCalculator=featureCalculator,
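The old composite kernel, a sum of two RBFs plus a white-noise term (the scikit-learn counterpart of this repository's double-Gaussian kernel), is replaced by a single C*RBF product, consistent with the new self.gpr = gpr(kernel='single') construction in setUp below; the composite expression is retained as a comment.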
...
@@ -63,7 +64,8 @@ class test_gpr(unittest.TestCase):
         #self.kernel = gauss_kernel()
         a = read('structures.traj', index='0')
         self.gpr_old = initialize_old_gpr(a)
-        self.gpr = gpr()
+        #self.gpr = gpr()
+        self.gpr = gpr(kernel='single')

     def tearDown(self):
         print('tearDown\n')
...
@@ -82,11 +84,11 @@ class test_gpr(unittest.TestCase):
         E = np.array([self.gpr.predict_energy(a, eval_std=True) for a in traj_predict])
         np.testing.assert_almost_equal(E, E_old)

-        F_old = np.array([self.gpr_old.predict_force(a) for a in traj_predict])
+        F_old = np.array([self.gpr_old.predict_force(a).reshape((-1, 3)) for a in traj_predict])
         F = np.array([self.gpr.predict_forces(a) for a in traj_predict])
         np.testing.assert_almost_equal(F, F_old)

-        Fstd_old = np.array([self.gpr_old.predict_force(a, return_error=True)[1] for a in traj_predict])
+        Fstd_old = np.array([self.gpr_old.predict_force(a, return_error=True)[1].reshape((-1, 3)) for a in traj_predict])
         Fstd = np.array([self.gpr.predict_forces(a, eval_std=True)[1] for a in traj_predict])
         np.testing.assert_almost_equal(Fstd, Fstd_old)
...
@@ -113,7 +115,7 @@ class test_gpr(unittest.TestCase):
         _, lml_ddTheta_old = self.gpr_old.log_marginal_likelihood(self.gpr_old.kernel.theta, eval_gradient=True)
         _, lml_ddTheta = self.gpr.neg_log_marginal_likelihood(eval_gradient=True)
-        np.testing.assert_almost_equal(lml_ddTheta, lml_ddTheta_old)
+        np.testing.assert_almost_equal(-lml_ddTheta, lml_ddTheta_old)

     def test_lml_gradient(self):
         traj = read('structures.traj', index=':50')
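The added minus sign tracks the API change: the new class exposes neg_log_marginal_likelihood, so its gradient is the negative of what the old log_marginal_likelihood returned and must be negated before the comparison.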
...
@@ -128,7 +130,157 @@ class test_gpr(unittest.TestCase):
         np.testing.assert_almost_equal(lml_ddTheta, lml_ddTheta_numeric)

     def test_forces(self):
-        pass
+        traj = read('structures.traj', index=':50')
+        traj_train = traj[:40]
+        traj_predict = traj[40:]
+        self.gpr_old.train(traj_train)
+        self.gpr.train(traj_train)
+
+        a = traj_predict[0]
+        F = self.gpr.predict_forces(a)